1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 /* See Fibre Channel protocol T11 FC-LS for details */ 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 30 #include <scsi/scsi.h> 31 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_host.h> 33 #include <scsi/scsi_transport_fc.h> 34 #include <uapi/scsi/fc/fc_fs.h> 35 #include <uapi/scsi/fc/fc_els.h> 36 37 #include "lpfc_hw4.h" 38 #include "lpfc_hw.h" 39 #include "lpfc_sli.h" 40 #include "lpfc_sli4.h" 41 #include "lpfc_nl.h" 42 #include "lpfc_disc.h" 43 #include "lpfc_scsi.h" 44 #include "lpfc.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_crtn.h" 47 #include "lpfc_vport.h" 48 #include "lpfc_debugfs.h" 49 50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 51 struct lpfc_iocbq *); 52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 53 struct lpfc_iocbq *); 54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 56 struct lpfc_nodelist *ndlp, uint8_t retry); 57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 58 struct lpfc_iocbq *iocb); 59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, 60 struct lpfc_iocbq *cmdiocb, 61 struct lpfc_iocbq *rspiocb); 62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, 63 struct lpfc_iocbq *); 64 65 static int lpfc_max_els_tries = 3; 66 67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); 68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); 69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); 70 71 /** 72 * lpfc_els_chk_latt - Check host link attention event for a vport 73 * @vport: pointer to a host virtual N_Port data structure. 74 * 75 * This routine checks whether there is an outstanding host link 76 * attention event during the discovery process with the @vport. It is done 77 * by reading the HBA's Host Attention (HA) register. 
If there is any host 78 * link attention events during this @vport's discovery process, the @vport 79 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall 80 * be issued if the link state is not already in host link cleared state, 81 * and a return code shall indicate whether the host link attention event 82 * had happened. 83 * 84 * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport 85 * state in LPFC_VPORT_READY, the request for checking host link attention 86 * event will be ignored and a return code shall indicate no host link 87 * attention event had happened. 88 * 89 * Return codes 90 * 0 - no host link attention event happened 91 * 1 - host link attention event happened 92 **/ 93 int 94 lpfc_els_chk_latt(struct lpfc_vport *vport) 95 { 96 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 97 struct lpfc_hba *phba = vport->phba; 98 uint32_t ha_copy; 99 100 if (vport->port_state >= LPFC_VPORT_READY || 101 phba->link_state == LPFC_LINK_DOWN || 102 phba->sli_rev > LPFC_SLI_REV3) 103 return 0; 104 105 /* Read the HBA Host Attention Register */ 106 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 107 return 1; 108 109 if (!(ha_copy & HA_LATT)) 110 return 0; 111 112 /* Pending Link Event during Discovery */ 113 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 114 "0237 Pending Link Event during " 115 "Discovery: State x%x\n", 116 phba->pport->port_state); 117 118 /* CLEAR_LA should re-enable link attention events and 119 * we should then immediately take a LATT event. The 120 * LATT processing should call lpfc_linkdown() which 121 * will cleanup any left over in-progress discovery 122 * events. 123 */ 124 spin_lock_irq(shost->host_lock); 125 vport->fc_flag |= FC_ABORT_DISCOVERY; 126 spin_unlock_irq(shost->host_lock); 127 128 if (phba->link_state != LPFC_CLEAR_LA) 129 lpfc_issue_clear_la(phba, vport); 130 131 return 1; 132 } 133 134 /** 135 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure 136 * @vport: pointer to a host virtual N_Port data structure. 137 * @expect_rsp: flag indicating whether response is expected. 138 * @cmd_size: size of the ELS command. 139 * @retry: number of retries to the command when it fails. 140 * @ndlp: pointer to a node-list data structure. 141 * @did: destination identifier. 142 * @elscmd: the ELS command code. 143 * 144 * This routine is used for allocating a lpfc-IOCB data structure from 145 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters 146 * passed into the routine for discovery state machine to issue an Extended 147 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation 148 * and preparation routine that is used by all the discovery state machine 149 * routines and the ELS command-specific fields will be later set up by 150 * the individual discovery machine routines after calling this routine 151 * allocating and preparing a generic IOCB data structure. It fills in the 152 * Buffer Descriptor Entries (BDEs), allocates buffers for both command 153 * payload and response payload (if expected). The reference count on the 154 * ndlp is incremented by 1 and the reference to the ndlp is put into 155 * ndlp of the IOCB data structure for this IOCB to hold the ndlp 156 * reference for the command's callback function to access later. 
157 * 158 * Return code 159 * Pointer to the newly allocated/prepared els iocb data structure 160 * NULL - when els iocb data structure allocation/preparation failed 161 **/ 162 struct lpfc_iocbq * 163 lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, 164 u16 cmd_size, u8 retry, 165 struct lpfc_nodelist *ndlp, u32 did, 166 u32 elscmd) 167 { 168 struct lpfc_hba *phba = vport->phba; 169 struct lpfc_iocbq *elsiocb; 170 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp; 171 struct ulp_bde64_le *bpl; 172 u32 timeout = 0; 173 174 if (!lpfc_is_link_up(phba)) 175 return NULL; 176 177 /* Allocate buffer for command iocb */ 178 elsiocb = lpfc_sli_get_iocbq(phba); 179 if (!elsiocb) 180 return NULL; 181 182 /* 183 * If this command is for fabric controller and HBA running 184 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 185 */ 186 if ((did == Fabric_DID) && 187 (phba->hba_flag & HBA_FIP_SUPPORT) && 188 ((elscmd == ELS_CMD_FLOGI) || 189 (elscmd == ELS_CMD_FDISC) || 190 (elscmd == ELS_CMD_LOGO))) 191 switch (elscmd) { 192 case ELS_CMD_FLOGI: 193 elsiocb->cmd_flag |= 194 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 195 & LPFC_FIP_ELS_ID_MASK); 196 break; 197 case ELS_CMD_FDISC: 198 elsiocb->cmd_flag |= 199 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 200 & LPFC_FIP_ELS_ID_MASK); 201 break; 202 case ELS_CMD_LOGO: 203 elsiocb->cmd_flag |= 204 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 205 & LPFC_FIP_ELS_ID_MASK); 206 break; 207 } 208 else 209 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; 210 211 /* fill in BDEs for command */ 212 /* Allocate buffer for command payload */ 213 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); 214 if (pcmd) 215 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 216 if (!pcmd || !pcmd->virt) 217 goto els_iocb_free_pcmb_exit; 218 219 INIT_LIST_HEAD(&pcmd->list); 220 221 /* Allocate buffer for response payload */ 222 if (expect_rsp) { 223 prsp = kmalloc(sizeof(*prsp), GFP_KERNEL); 224 if (prsp) 225 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 226 &prsp->phys); 227 if (!prsp || !prsp->virt) 228 goto els_iocb_free_prsp_exit; 229 INIT_LIST_HEAD(&prsp->list); 230 } else { 231 prsp = NULL; 232 } 233 234 /* Allocate buffer for Buffer ptr list */ 235 pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL); 236 if (pbuflist) 237 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 238 &pbuflist->phys); 239 if (!pbuflist || !pbuflist->virt) 240 goto els_iocb_free_pbuf_exit; 241 242 INIT_LIST_HEAD(&pbuflist->list); 243 244 if (expect_rsp) { 245 switch (elscmd) { 246 case ELS_CMD_FLOGI: 247 timeout = FF_DEF_RATOV * 2; 248 break; 249 case ELS_CMD_LOGO: 250 timeout = phba->fc_ratov; 251 break; 252 default: 253 timeout = phba->fc_ratov * 2; 254 } 255 256 /* Fill SGE for the num bde count */ 257 elsiocb->num_bdes = 2; 258 } 259 260 if (phba->sli_rev == LPFC_SLI_REV4) 261 bmp = pcmd; 262 else 263 bmp = pbuflist; 264 265 lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did, 266 elscmd, timeout, expect_rsp); 267 268 bpl = (struct ulp_bde64_le *)pbuflist->virt; 269 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys)); 270 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys)); 271 bpl->type_size = cpu_to_le32(cmd_size); 272 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 273 274 if (expect_rsp) { 275 bpl++; 276 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys)); 277 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys)); 278 bpl->type_size = cpu_to_le32(FCELSSIZE); 279 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 280 } 281 282 elsiocb->cmd_dmabuf = pcmd; 
283 elsiocb->bpl_dmabuf = pbuflist; 284 elsiocb->retry = retry; 285 elsiocb->vport = vport; 286 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 287 288 if (prsp) 289 list_add(&prsp->list, &pcmd->list); 290 if (expect_rsp) { 291 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 292 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 293 "0116 Xmit ELS command x%x to remote " 294 "NPORT x%x I/O tag: x%x, port state:x%x " 295 "rpi x%x fc_flag:x%x\n", 296 elscmd, did, elsiocb->iotag, 297 vport->port_state, ndlp->nlp_rpi, 298 vport->fc_flag); 299 } else { 300 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 302 "0117 Xmit ELS response x%x to remote " 303 "NPORT x%x I/O tag: x%x, size: x%x " 304 "port_state x%x rpi x%x fc_flag x%x\n", 305 elscmd, ndlp->nlp_DID, elsiocb->iotag, 306 cmd_size, vport->port_state, 307 ndlp->nlp_rpi, vport->fc_flag); 308 } 309 310 return elsiocb; 311 312 els_iocb_free_pbuf_exit: 313 if (expect_rsp) 314 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 315 kfree(pbuflist); 316 317 els_iocb_free_prsp_exit: 318 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 319 kfree(prsp); 320 321 els_iocb_free_pcmb_exit: 322 kfree(pcmd); 323 lpfc_sli_release_iocbq(phba, elsiocb); 324 return NULL; 325 } 326 327 /** 328 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport 329 * @vport: pointer to a host virtual N_Port data structure. 330 * 331 * This routine issues a fabric registration login for a @vport. An 332 * active ndlp node with Fabric_DID must already exist for this @vport. 333 * The routine invokes two mailbox commands to carry out fabric registration 334 * login through the HBA firmware: the first mailbox command requests the 335 * HBA to perform link configuration for the @vport; and the second mailbox 336 * command requests the HBA to perform the actual fabric registration login 337 * with the @vport. 338 * 339 * Return code 340 * 0 - successfully issued fabric registration login for @vport 341 * -ENXIO -- failed to issue fabric registration login for @vport 342 **/ 343 int 344 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 345 { 346 struct lpfc_hba *phba = vport->phba; 347 LPFC_MBOXQ_t *mbox; 348 struct lpfc_nodelist *ndlp; 349 struct serv_parm *sp; 350 int rc; 351 int err = 0; 352 353 sp = &phba->fc_fabparam; 354 ndlp = lpfc_findnode_did(vport, Fabric_DID); 355 if (!ndlp) { 356 err = 1; 357 goto fail; 358 } 359 360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 361 if (!mbox) { 362 err = 2; 363 goto fail; 364 } 365 366 vport->port_state = LPFC_FABRIC_CFG_LINK; 367 lpfc_config_link(phba, mbox); 368 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 369 mbox->vport = vport; 370 371 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 372 if (rc == MBX_NOT_FINISHED) { 373 err = 3; 374 goto fail_free_mbox; 375 } 376 377 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 378 if (!mbox) { 379 err = 4; 380 goto fail; 381 } 382 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 383 ndlp->nlp_rpi); 384 if (rc) { 385 err = 5; 386 goto fail_free_mbox; 387 } 388 389 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 390 mbox->vport = vport; 391 /* increment the reference count on ndlp to hold reference 392 * for the callback routine. 
393 */ 394 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 395 if (!mbox->ctx_ndlp) { 396 err = 6; 397 goto fail_free_mbox; 398 } 399 400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 401 if (rc == MBX_NOT_FINISHED) { 402 err = 7; 403 goto fail_issue_reg_login; 404 } 405 406 return 0; 407 408 fail_issue_reg_login: 409 /* decrement the reference count on ndlp just incremented 410 * for the failed mbox command. 411 */ 412 lpfc_nlp_put(ndlp); 413 fail_free_mbox: 414 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 415 fail: 416 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 417 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 418 "0249 Cannot issue Register Fabric login: Err %d\n", 419 err); 420 return -ENXIO; 421 } 422 423 /** 424 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login 425 * @vport: pointer to a host virtual N_Port data structure. 426 * 427 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 428 * the @vport. This mailbox command is necessary for SLI4 port only. 429 * 430 * Return code 431 * 0 - successfully issued REG_VFI for @vport 432 * A failure code otherwise. 433 **/ 434 int 435 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 436 { 437 struct lpfc_hba *phba = vport->phba; 438 LPFC_MBOXQ_t *mboxq = NULL; 439 struct lpfc_nodelist *ndlp; 440 struct lpfc_dmabuf *dmabuf = NULL; 441 int rc = 0; 442 443 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ 444 if ((phba->sli_rev == LPFC_SLI_REV4) && 445 !(phba->link_flag & LS_LOOPBACK_MODE) && 446 !(vport->fc_flag & FC_PT2PT)) { 447 ndlp = lpfc_findnode_did(vport, Fabric_DID); 448 if (!ndlp) { 449 rc = -ENODEV; 450 goto fail; 451 } 452 } 453 454 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 455 if (!mboxq) { 456 rc = -ENOMEM; 457 goto fail; 458 } 459 460 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ 461 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { 462 rc = lpfc_mbox_rsrc_prep(phba, mboxq); 463 if (rc) { 464 rc = -ENOMEM; 465 goto fail_mbox; 466 } 467 dmabuf = mboxq->ctx_buf; 468 memcpy(dmabuf->virt, &phba->fc_fabparam, 469 sizeof(struct serv_parm)); 470 } 471 472 vport->port_state = LPFC_FABRIC_CFG_LINK; 473 if (dmabuf) { 474 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 475 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */ 476 mboxq->ctx_buf = dmabuf; 477 } else { 478 lpfc_reg_vfi(mboxq, vport, 0); 479 } 480 481 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 482 mboxq->vport = vport; 483 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 484 if (rc == MBX_NOT_FINISHED) { 485 rc = -ENXIO; 486 goto fail_mbox; 487 } 488 return 0; 489 490 fail_mbox: 491 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 492 fail: 493 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 494 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 495 "0289 Issue Register VFI failed: Err %d\n", rc); 496 return rc; 497 } 498 499 /** 500 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login 501 * @vport: pointer to a host virtual N_Port data structure. 502 * 503 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for 504 * the @vport. This mailbox command is necessary for SLI4 port only. 505 * 506 * Return code 507 * 0 - successfully issued REG_VFI for @vport 508 * A failure code otherwise. 
509 **/ 510 int 511 lpfc_issue_unreg_vfi(struct lpfc_vport *vport) 512 { 513 struct lpfc_hba *phba = vport->phba; 514 struct Scsi_Host *shost; 515 LPFC_MBOXQ_t *mboxq; 516 int rc; 517 518 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 519 if (!mboxq) { 520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 521 "2556 UNREG_VFI mbox allocation failed" 522 "HBA state x%x\n", phba->pport->port_state); 523 return -ENOMEM; 524 } 525 526 lpfc_unreg_vfi(mboxq, vport); 527 mboxq->vport = vport; 528 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; 529 530 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 531 if (rc == MBX_NOT_FINISHED) { 532 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 533 "2557 UNREG_VFI issue mbox failed rc x%x " 534 "HBA state x%x\n", 535 rc, phba->pport->port_state); 536 mempool_free(mboxq, phba->mbox_mem_pool); 537 return -EIO; 538 } 539 540 shost = lpfc_shost_from_vport(vport); 541 spin_lock_irq(shost->host_lock); 542 vport->fc_flag &= ~FC_VFI_REGISTERED; 543 spin_unlock_irq(shost->host_lock); 544 return 0; 545 } 546 547 /** 548 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 549 * @vport: pointer to a host virtual N_Port data structure. 550 * @sp: pointer to service parameter data structure. 551 * 552 * This routine is called from FLOGI/FDISC completion handler functions. 553 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric 554 * node nodename is changed in the completion service parameter else return 555 * 0. This function also set flag in the vport data structure to delay 556 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit 557 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric 558 * node nodename is changed in the completion service parameter. 559 * 560 * Return code 561 * 0 - FCID and Fabric Nodename and Fabric portname is not changed. 562 * 1 - FCID or Fabric Nodename or Fabric portname is changed. 563 * 564 **/ 565 static uint8_t 566 lpfc_check_clean_addr_bit(struct lpfc_vport *vport, 567 struct serv_parm *sp) 568 { 569 struct lpfc_hba *phba = vport->phba; 570 uint8_t fabric_param_changed = 0; 571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 572 573 if ((vport->fc_prevDID != vport->fc_myDID) || 574 memcmp(&vport->fabric_portname, &sp->portName, 575 sizeof(struct lpfc_name)) || 576 memcmp(&vport->fabric_nodename, &sp->nodeName, 577 sizeof(struct lpfc_name)) || 578 (vport->vport_flag & FAWWPN_PARAM_CHG)) { 579 fabric_param_changed = 1; 580 vport->vport_flag &= ~FAWWPN_PARAM_CHG; 581 } 582 /* 583 * Word 1 Bit 31 in common service parameter is overloaded. 584 * Word 1 Bit 31 in FLOGI request is multiple NPort request 585 * Word 1 Bit 31 in FLOGI response is clean address bit 586 * 587 * If fabric parameter is changed and clean address bit is 588 * cleared delay nport discovery if 589 * - vport->fc_prevDID != 0 (not initial discovery) OR 590 * - lpfc_delay_discovery module parameter is set. 591 */ 592 if (fabric_param_changed && !sp->cmn.clean_address_bit && 593 (vport->fc_prevDID || phba->cfg_delay_discovery)) { 594 spin_lock_irq(shost->host_lock); 595 vport->fc_flag |= FC_DISC_DELAYED; 596 spin_unlock_irq(shost->host_lock); 597 } 598 599 return fabric_param_changed; 600 } 601 602 603 /** 604 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 605 * @vport: pointer to a host virtual N_Port data structure. 606 * @ndlp: pointer to a node-list data structure. 607 * @sp: pointer to service parameter data structure. 
608 * @ulp_word4: command response value 609 * 610 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 611 * function to handle the completion of a Fabric Login (FLOGI) into a fabric 612 * port in a fabric topology. It properly sets up the parameters to the @ndlp 613 * from the IOCB response. It also check the newly assigned N_Port ID to the 614 * @vport against the previously assigned N_Port ID. If it is different from 615 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine 616 * is invoked on all the remaining nodes with the @vport to unregister the 617 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin() 618 * is invoked to register login to the fabric. 619 * 620 * Return code 621 * 0 - Success (currently, always return 0) 622 **/ 623 static int 624 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 625 struct serv_parm *sp, uint32_t ulp_word4) 626 { 627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 628 struct lpfc_hba *phba = vport->phba; 629 struct lpfc_nodelist *np; 630 struct lpfc_nodelist *next_np; 631 uint8_t fabric_param_changed; 632 633 spin_lock_irq(shost->host_lock); 634 vport->fc_flag |= FC_FABRIC; 635 spin_unlock_irq(shost->host_lock); 636 637 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 638 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 639 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 640 641 phba->fc_edtovResol = sp->cmn.edtovResolution; 642 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 643 644 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 645 spin_lock_irq(shost->host_lock); 646 vport->fc_flag |= FC_PUBLIC_LOOP; 647 spin_unlock_irq(shost->host_lock); 648 } 649 650 vport->fc_myDID = ulp_word4 & Mask_DID; 651 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 652 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); 653 ndlp->nlp_class_sup = 0; 654 if (sp->cls1.classValid) 655 ndlp->nlp_class_sup |= FC_COS_CLASS1; 656 if (sp->cls2.classValid) 657 ndlp->nlp_class_sup |= FC_COS_CLASS2; 658 if (sp->cls3.classValid) 659 ndlp->nlp_class_sup |= FC_COS_CLASS3; 660 if (sp->cls4.classValid) 661 ndlp->nlp_class_sup |= FC_COS_CLASS4; 662 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 663 sp->cmn.bbRcvSizeLsb; 664 665 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 666 if (fabric_param_changed) { 667 /* Reset FDMI attribute masks based on config parameter */ 668 if (phba->cfg_enable_SmartSAN || 669 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 670 /* Setup appropriate attribute masks */ 671 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 672 if (phba->cfg_enable_SmartSAN) 673 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 674 else 675 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 676 } else { 677 vport->fdmi_hba_mask = 0; 678 vport->fdmi_port_mask = 0; 679 } 680 681 } 682 memcpy(&vport->fabric_portname, &sp->portName, 683 sizeof(struct lpfc_name)); 684 memcpy(&vport->fabric_nodename, &sp->nodeName, 685 sizeof(struct lpfc_name)); 686 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 687 688 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 689 if (sp->cmn.response_multiple_NPort) { 690 lpfc_printf_vlog(vport, KERN_WARNING, 691 LOG_ELS | LOG_VPORT, 692 "1816 FLOGI NPIV supported, " 693 "response data 0x%x\n", 694 sp->cmn.response_multiple_NPort); 695 spin_lock_irq(&phba->hbalock); 696 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 697 spin_unlock_irq(&phba->hbalock); 698 
} else { 699 /* Because we asked f/w for NPIV it still expects us 700 to call reg_vnpid at least for the physical host */ 701 lpfc_printf_vlog(vport, KERN_WARNING, 702 LOG_ELS | LOG_VPORT, 703 "1817 Fabric does not support NPIV " 704 "- configuring single port mode.\n"); 705 spin_lock_irq(&phba->hbalock); 706 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 707 spin_unlock_irq(&phba->hbalock); 708 } 709 } 710 711 /* 712 * For FC we need to do some special processing because of the SLI 713 * Port's default settings of the Common Service Parameters. 714 */ 715 if ((phba->sli_rev == LPFC_SLI_REV4) && 716 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { 717 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 718 if (fabric_param_changed) 719 lpfc_unregister_fcf_prep(phba); 720 721 /* This should just update the VFI CSPs*/ 722 if (vport->fc_flag & FC_VFI_REGISTERED) 723 lpfc_issue_reg_vfi(vport); 724 } 725 726 if (fabric_param_changed && 727 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 728 729 /* If our NportID changed, we need to ensure all 730 * remaining NPORTs get unreg_login'ed. 731 */ 732 list_for_each_entry_safe(np, next_np, 733 &vport->fc_nodes, nlp_listp) { 734 if ((np->nlp_state != NLP_STE_NPR_NODE) || 735 !(np->nlp_flag & NLP_NPR_ADISC)) 736 continue; 737 spin_lock_irq(&np->lock); 738 np->nlp_flag &= ~NLP_NPR_ADISC; 739 spin_unlock_irq(&np->lock); 740 lpfc_unreg_rpi(vport, np); 741 } 742 lpfc_cleanup_pending_mbox(vport); 743 744 if (phba->sli_rev == LPFC_SLI_REV4) { 745 lpfc_sli4_unreg_all_rpis(vport); 746 lpfc_mbx_unreg_vpi(vport); 747 spin_lock_irq(shost->host_lock); 748 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 749 spin_unlock_irq(shost->host_lock); 750 } 751 752 /* 753 * For SLI3 and SLI4, the VPI needs to be reregistered in 754 * response to this fabric parameter change event. 755 */ 756 spin_lock_irq(shost->host_lock); 757 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 758 spin_unlock_irq(shost->host_lock); 759 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 760 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 761 /* 762 * Driver needs to re-reg VPI in order for f/w 763 * to update the MAC address. 764 */ 765 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 766 lpfc_register_new_vport(phba, vport, ndlp); 767 return 0; 768 } 769 770 if (phba->sli_rev < LPFC_SLI_REV4) { 771 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 772 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 773 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 774 lpfc_register_new_vport(phba, vport, ndlp); 775 else 776 lpfc_issue_fabric_reglogin(vport); 777 } else { 778 ndlp->nlp_type |= NLP_FABRIC; 779 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 780 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && 781 (vport->vpi_state & LPFC_VPI_REGISTERED)) { 782 lpfc_start_fdiscs(phba); 783 lpfc_do_scr_ns_plogi(phba, vport); 784 } else if (vport->fc_flag & FC_VFI_REGISTERED) 785 lpfc_issue_init_vpi(vport); 786 else { 787 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 788 "3135 Need register VFI: (x%x/%x)\n", 789 vport->fc_prevDID, vport->fc_myDID); 790 lpfc_issue_reg_vfi(vport); 791 } 792 } 793 return 0; 794 } 795 796 /** 797 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 798 * @vport: pointer to a host virtual N_Port data structure. 799 * @ndlp: pointer to a node-list data structure. 800 * @sp: pointer to service parameter data structure. 
801 * 802 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 803 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port 804 * in a point-to-point topology. First, the @vport's N_Port Name is compared 805 * with the received N_Port Name: if the @vport's N_Port Name is greater than 806 * the received N_Port Name lexicographically, this node shall assign local 807 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and 808 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, 809 * this node shall just wait for the remote node to issue PLOGI and assign 810 * N_Port IDs. 811 * 812 * Return code 813 * 0 - Success 814 * -ENXIO - Fail 815 **/ 816 static int 817 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 818 struct serv_parm *sp) 819 { 820 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 821 struct lpfc_hba *phba = vport->phba; 822 LPFC_MBOXQ_t *mbox; 823 int rc; 824 825 spin_lock_irq(shost->host_lock); 826 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 827 vport->fc_flag |= FC_PT2PT; 828 spin_unlock_irq(shost->host_lock); 829 830 /* If we are pt2pt with another NPort, force NPIV off! */ 831 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 832 833 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 834 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { 835 lpfc_unregister_fcf_prep(phba); 836 837 spin_lock_irq(shost->host_lock); 838 vport->fc_flag &= ~FC_VFI_REGISTERED; 839 spin_unlock_irq(shost->host_lock); 840 phba->fc_topology_changed = 0; 841 } 842 843 rc = memcmp(&vport->fc_portname, &sp->portName, 844 sizeof(vport->fc_portname)); 845 846 if (rc >= 0) { 847 /* This side will initiate the PLOGI */ 848 spin_lock_irq(shost->host_lock); 849 vport->fc_flag |= FC_PT2PT_PLOGI; 850 spin_unlock_irq(shost->host_lock); 851 852 /* 853 * N_Port ID cannot be 0, set our Id to LocalID 854 * the other side will be RemoteID. 855 */ 856 857 /* not equal */ 858 if (rc) 859 vport->fc_myDID = PT2PT_LocalID; 860 861 /* If not registered with a transport, decrement ndlp reference 862 * count indicating that ndlp can be safely released when other 863 * references are removed. 864 */ 865 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 866 lpfc_nlp_put(ndlp); 867 868 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 869 if (!ndlp) { 870 /* 871 * Cannot find existing Fabric ndlp, so allocate a 872 * new one 873 */ 874 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); 875 if (!ndlp) 876 goto fail; 877 } 878 879 memcpy(&ndlp->nlp_portname, &sp->portName, 880 sizeof(struct lpfc_name)); 881 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 882 sizeof(struct lpfc_name)); 883 /* Set state will put ndlp onto node list if not already done */ 884 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 885 spin_lock_irq(&ndlp->lock); 886 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 887 spin_unlock_irq(&ndlp->lock); 888 889 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 890 if (!mbox) 891 goto fail; 892 893 lpfc_config_link(phba, mbox); 894 895 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 896 mbox->vport = vport; 897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 898 if (rc == MBX_NOT_FINISHED) { 899 mempool_free(mbox, phba->mbox_mem_pool); 900 goto fail; 901 } 902 } else { 903 /* This side will wait for the PLOGI. If not registered with 904 * a transport, decrement node reference count indicating that 905 * ndlp can be released when other references are removed. 
906 */ 907 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 908 lpfc_nlp_put(ndlp); 909 910 /* Start discovery - this should just do CLEAR_LA */ 911 lpfc_disc_start(vport); 912 } 913 914 return 0; 915 fail: 916 return -ENXIO; 917 } 918 919 /** 920 * lpfc_cmpl_els_flogi - Completion callback function for flogi 921 * @phba: pointer to lpfc hba data structure. 922 * @cmdiocb: pointer to lpfc command iocb data structure. 923 * @rspiocb: pointer to lpfc response iocb data structure. 924 * 925 * This routine is the top-level completion callback function for issuing 926 * a Fabric Login (FLOGI) command. If the response IOCB reported error, 927 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If 928 * retry has been made (either immediately or delayed with lpfc_els_retry() 929 * returning 1), the command IOCB will be released and function returned. 930 * If the retry attempt has been given up (possibly reach the maximum 931 * number of retries), one additional decrement of ndlp reference shall be 932 * invoked before going out after releasing the command IOCB. This will 933 * actually release the remote node (Note, lpfc_els_free_iocb() will also 934 * invoke one decrement of ndlp reference count). If no error reported in 935 * the IOCB status, the command Port ID field is used to determine whether 936 * this is a point-to-point topology or a fabric topology: if the Port ID 937 * field is assigned, it is a fabric topology; otherwise, it is a 938 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or 939 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the 940 * specific topology completion conditions. 941 **/ 942 static void 943 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 944 struct lpfc_iocbq *rspiocb) 945 { 946 struct lpfc_vport *vport = cmdiocb->vport; 947 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 948 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 949 IOCB_t *irsp; 950 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 951 struct serv_parm *sp; 952 uint16_t fcf_index; 953 int rc; 954 u32 ulp_status, ulp_word4, tmo; 955 bool flogi_in_retry = false; 956 957 /* Check to see if link went down during discovery */ 958 if (lpfc_els_chk_latt(vport)) { 959 /* One additional decrement on node reference count to 960 * trigger the release of the node 961 */ 962 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 963 lpfc_nlp_put(ndlp); 964 goto out; 965 } 966 967 ulp_status = get_job_ulpstatus(phba, rspiocb); 968 ulp_word4 = get_job_word4(phba, rspiocb); 969 970 if (phba->sli_rev == LPFC_SLI_REV4) { 971 tmo = get_wqe_tmo(cmdiocb); 972 } else { 973 irsp = &rspiocb->iocb; 974 tmo = irsp->ulpTimeout; 975 } 976 977 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 978 "FLOGI cmpl: status:x%x/x%x state:x%x", 979 ulp_status, ulp_word4, 980 vport->port_state); 981 982 if (ulp_status) { 983 /* 984 * In case of FIP mode, perform roundrobin FCF failover 985 * due to new FCF discovery 986 */ 987 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 988 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 989 if (phba->link_state < LPFC_LINK_UP) 990 goto stop_rr_fcf_flogi; 991 if ((phba->fcoe_cvl_eventtag_attn == 992 phba->fcoe_cvl_eventtag) && 993 (ulp_status == IOSTAT_LOCAL_REJECT) && 994 ((ulp_word4 & IOERR_PARAM_MASK) == 995 IOERR_SLI_ABORTED)) 996 goto stop_rr_fcf_flogi; 997 else 998 phba->fcoe_cvl_eventtag_attn = 999 phba->fcoe_cvl_eventtag; 1000 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1001 "2611 FLOGI failed on FCF 
(x%x), " 1002 "status:x%x/x%x, tmo:x%x, perform " 1003 "roundrobin FCF failover\n", 1004 phba->fcf.current_rec.fcf_indx, 1005 ulp_status, ulp_word4, tmo); 1006 lpfc_sli4_set_fcf_flogi_fail(phba, 1007 phba->fcf.current_rec.fcf_indx); 1008 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 1009 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 1010 if (rc) 1011 goto out; 1012 } 1013 1014 stop_rr_fcf_flogi: 1015 /* FLOGI failure */ 1016 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1017 ((ulp_word4 & IOERR_PARAM_MASK) == 1018 IOERR_LOOP_OPEN_FAILURE))) 1019 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1020 "2858 FLOGI failure Status:x%x/x%x TMO" 1021 ":x%x Data x%x x%x\n", 1022 ulp_status, ulp_word4, tmo, 1023 phba->hba_flag, phba->fcf.fcf_flag); 1024 1025 /* Check for retry */ 1026 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1027 /* Address a timing race with dev_loss. If dev_loss 1028 * is active on this FPort node, put the initial ref 1029 * count back to stop premature node release actions. 1030 */ 1031 lpfc_check_nlp_post_devloss(vport, ndlp); 1032 flogi_in_retry = true; 1033 goto out; 1034 } 1035 1036 /* The FLOGI will not be retried. If the FPort node is not 1037 * registered with the SCSI transport, remove the initial 1038 * reference to trigger node release. 1039 */ 1040 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && 1041 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 1042 lpfc_nlp_put(ndlp); 1043 1044 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 1045 "0150 FLOGI failure Status:x%x/x%x " 1046 "xri x%x TMO:x%x refcnt %d\n", 1047 ulp_status, ulp_word4, cmdiocb->sli4_xritag, 1048 tmo, kref_read(&ndlp->kref)); 1049 1050 /* If this is not a loop open failure, bail out */ 1051 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1052 ((ulp_word4 & IOERR_PARAM_MASK) == 1053 IOERR_LOOP_OPEN_FAILURE))) { 1054 /* FLOGI failure */ 1055 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1056 "0100 FLOGI failure Status:x%x/x%x " 1057 "TMO:x%x\n", 1058 ulp_status, ulp_word4, tmo); 1059 goto flogifail; 1060 } 1061 1062 /* FLOGI failed, so there is no fabric */ 1063 spin_lock_irq(shost->host_lock); 1064 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | 1065 FC_PT2PT_NO_NVME); 1066 spin_unlock_irq(shost->host_lock); 1067 1068 /* If private loop, then allow max outstanding els to be 1069 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 1070 * alpa map would take too long otherwise. 1071 */ 1072 if (phba->alpa_map[0] == 0) 1073 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1074 if ((phba->sli_rev == LPFC_SLI_REV4) && 1075 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1076 (vport->fc_prevDID != vport->fc_myDID) || 1077 phba->fc_topology_changed)) { 1078 if (vport->fc_flag & FC_VFI_REGISTERED) { 1079 if (phba->fc_topology_changed) { 1080 lpfc_unregister_fcf_prep(phba); 1081 spin_lock_irq(shost->host_lock); 1082 vport->fc_flag &= ~FC_VFI_REGISTERED; 1083 spin_unlock_irq(shost->host_lock); 1084 phba->fc_topology_changed = 0; 1085 } else { 1086 lpfc_sli4_unreg_all_rpis(vport); 1087 } 1088 } 1089 1090 /* Do not register VFI if the driver aborted FLOGI */ 1091 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 1092 lpfc_issue_reg_vfi(vport); 1093 1094 goto out; 1095 } 1096 goto flogifail; 1097 } 1098 spin_lock_irq(shost->host_lock); 1099 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 1100 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 1101 spin_unlock_irq(shost->host_lock); 1102 1103 /* 1104 * The FLOGI succeeded. Sync the data for the CPU before 1105 * accessing it. 
1106 */ 1107 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1108 if (!prsp) 1109 goto out; 1110 sp = prsp->virt + sizeof(uint32_t); 1111 1112 /* FLOGI completes successfully */ 1113 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1114 "0101 FLOGI completes successfully, I/O tag:x%x " 1115 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n", 1116 cmdiocb->iotag, cmdiocb->sli4_xritag, 1117 ulp_word4, sp->cmn.e_d_tov, 1118 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1119 vport->port_state, vport->fc_flag, 1120 sp->cmn.priority_tagging, kref_read(&ndlp->kref)); 1121 1122 if (sp->cmn.priority_tagging) 1123 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | 1124 LPFC_VMID_TYPE_PRIO); 1125 /* reinitialize the VMID datastructure before returning */ 1126 if (lpfc_is_vmid_enabled(phba)) 1127 lpfc_reinit_vmid(vport); 1128 1129 /* 1130 * Address a timing race with dev_loss. If dev_loss is active on 1131 * this FPort node, put the initial ref count back to stop premature 1132 * node release actions. 1133 */ 1134 lpfc_check_nlp_post_devloss(vport, ndlp); 1135 if (vport->port_state == LPFC_FLOGI) { 1136 /* 1137 * If Common Service Parameters indicate Nport 1138 * we are point to point, if Fport we are Fabric. 1139 */ 1140 if (sp->cmn.fPort) 1141 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, 1142 ulp_word4); 1143 else if (!(phba->hba_flag & HBA_FCOE_MODE)) 1144 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1145 else { 1146 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1147 "2831 FLOGI response with cleared Fabric " 1148 "bit fcf_index 0x%x " 1149 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " 1150 "Fabric Name " 1151 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 1152 phba->fcf.current_rec.fcf_indx, 1153 phba->fcf.current_rec.switch_name[0], 1154 phba->fcf.current_rec.switch_name[1], 1155 phba->fcf.current_rec.switch_name[2], 1156 phba->fcf.current_rec.switch_name[3], 1157 phba->fcf.current_rec.switch_name[4], 1158 phba->fcf.current_rec.switch_name[5], 1159 phba->fcf.current_rec.switch_name[6], 1160 phba->fcf.current_rec.switch_name[7], 1161 phba->fcf.current_rec.fabric_name[0], 1162 phba->fcf.current_rec.fabric_name[1], 1163 phba->fcf.current_rec.fabric_name[2], 1164 phba->fcf.current_rec.fabric_name[3], 1165 phba->fcf.current_rec.fabric_name[4], 1166 phba->fcf.current_rec.fabric_name[5], 1167 phba->fcf.current_rec.fabric_name[6], 1168 phba->fcf.current_rec.fabric_name[7]); 1169 1170 lpfc_nlp_put(ndlp); 1171 spin_lock_irq(&phba->hbalock); 1172 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1173 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1174 spin_unlock_irq(&phba->hbalock); 1175 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1176 goto out; 1177 } 1178 if (!rc) { 1179 /* Mark the FCF discovery process done */ 1180 if (phba->hba_flag & HBA_FIP_SUPPORT) 1181 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 1182 LOG_ELS, 1183 "2769 FLOGI to FCF (x%x) " 1184 "completed successfully\n", 1185 phba->fcf.current_rec.fcf_indx); 1186 spin_lock_irq(&phba->hbalock); 1187 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1188 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1189 spin_unlock_irq(&phba->hbalock); 1190 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1191 goto out; 1192 } 1193 } else if (vport->port_state > LPFC_FLOGI && 1194 vport->fc_flag & FC_PT2PT) { 1195 /* 1196 * In a p2p topology, it is possible that discovery has 1197 * already progressed, and this completion can be ignored. 1198 * Recheck the indicated topology. 
1199 */ 1200 if (!sp->cmn.fPort) 1201 goto out; 1202 } 1203 1204 flogifail: 1205 spin_lock_irq(&phba->hbalock); 1206 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1207 spin_unlock_irq(&phba->hbalock); 1208 1209 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 1210 /* FLOGI failed, so just use loop map to make discovery list */ 1211 lpfc_disc_list_loopmap(vport); 1212 1213 /* Start discovery */ 1214 lpfc_disc_start(vport); 1215 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) || 1216 (((ulp_word4 & IOERR_PARAM_MASK) != 1217 IOERR_SLI_ABORTED) && 1218 ((ulp_word4 & IOERR_PARAM_MASK) != 1219 IOERR_SLI_DOWN))) && 1220 (phba->link_state != LPFC_CLEAR_LA)) { 1221 /* If FLOGI failed enable link interrupt. */ 1222 lpfc_issue_clear_la(phba, vport); 1223 } 1224 out: 1225 if (!flogi_in_retry) 1226 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; 1227 1228 lpfc_els_free_iocb(phba, cmdiocb); 1229 lpfc_nlp_put(ndlp); 1230 } 1231 1232 /** 1233 * lpfc_cmpl_els_link_down - Completion callback function for ELS command 1234 * aborted during a link down 1235 * @phba: pointer to lpfc hba data structure. 1236 * @cmdiocb: pointer to lpfc command iocb data structure. 1237 * @rspiocb: pointer to lpfc response iocb data structure. 1238 * 1239 */ 1240 static void 1241 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1242 struct lpfc_iocbq *rspiocb) 1243 { 1244 uint32_t *pcmd; 1245 uint32_t cmd; 1246 u32 ulp_status, ulp_word4; 1247 1248 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 1249 cmd = *pcmd; 1250 1251 ulp_status = get_job_ulpstatus(phba, rspiocb); 1252 ulp_word4 = get_job_word4(phba, rspiocb); 1253 1254 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1255 "6445 ELS completes after LINK_DOWN: " 1256 " Status %x/%x cmd x%x flg x%x\n", 1257 ulp_status, ulp_word4, cmd, 1258 cmdiocb->cmd_flag); 1259 1260 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { 1261 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 1262 atomic_dec(&phba->fabric_iocb_count); 1263 } 1264 lpfc_els_free_iocb(phba, cmdiocb); 1265 } 1266 1267 /** 1268 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1269 * @vport: pointer to a host virtual N_Port data structure. 1270 * @ndlp: pointer to a node-list data structure. 1271 * @retry: number of retries to the command IOCB. 1272 * 1273 * This routine issues a Fabric Login (FLOGI) Request ELS command 1274 * for a @vport. The initiator service parameters are put into the payload 1275 * of the FLOGI Request IOCB and the top-level callback function pointer 1276 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback 1277 * function field. The lpfc_issue_fabric_iocb routine is invoked to send 1278 * out FLOGI ELS command with one outstanding fabric IOCB at a time. 1279 * 1280 * Note that the ndlp reference count will be incremented by 1 for holding the 1281 * ndlp and the reference to ndlp will be stored into the ndlp field of 1282 * the IOCB for the completion callback function to the FLOGI ELS command. 
1283 * 1284 * Return code 1285 * 0 - successfully issued flogi iocb for @vport 1286 * 1 - failed to issue flogi iocb for @vport 1287 **/ 1288 static int 1289 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1290 uint8_t retry) 1291 { 1292 struct lpfc_hba *phba = vport->phba; 1293 struct serv_parm *sp; 1294 union lpfc_wqe128 *wqe = NULL; 1295 IOCB_t *icmd = NULL; 1296 struct lpfc_iocbq *elsiocb; 1297 struct lpfc_iocbq defer_flogi_acc; 1298 u8 *pcmd, ct; 1299 uint16_t cmdsize; 1300 uint32_t tmo, did; 1301 int rc; 1302 1303 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1304 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1305 ndlp->nlp_DID, ELS_CMD_FLOGI); 1306 1307 if (!elsiocb) 1308 return 1; 1309 1310 wqe = &elsiocb->wqe; 1311 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 1312 icmd = &elsiocb->iocb; 1313 1314 /* For FLOGI request, remainder of payload is service parameters */ 1315 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 1316 pcmd += sizeof(uint32_t); 1317 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1318 sp = (struct serv_parm *) pcmd; 1319 1320 /* Setup CSPs accordingly for Fabric */ 1321 sp->cmn.e_d_tov = 0; 1322 sp->cmn.w2.r_a_tov = 0; 1323 sp->cmn.virtual_fabric_support = 0; 1324 sp->cls1.classValid = 0; 1325 if (sp->cmn.fcphLow < FC_PH3) 1326 sp->cmn.fcphLow = FC_PH3; 1327 if (sp->cmn.fcphHigh < FC_PH3) 1328 sp->cmn.fcphHigh = FC_PH3; 1329 1330 /* Determine if switch supports priority tagging */ 1331 if (phba->cfg_vmid_priority_tagging) { 1332 sp->cmn.priority_tagging = 1; 1333 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ 1334 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) { 1335 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, 1336 sizeof(phba->wwpn)); 1337 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, 1338 sizeof(phba->wwnn)); 1339 } 1340 } 1341 1342 if (phba->sli_rev == LPFC_SLI_REV4) { 1343 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1344 LPFC_SLI_INTF_IF_TYPE_0) { 1345 /* FLOGI needs to be 3 for WQE FCFI */ 1346 ct = SLI4_CT_FCFI; 1347 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 1348 1349 /* Set the fcfi to the fcfi we registered with */ 1350 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 1351 phba->fcf.fcfi); 1352 } 1353 1354 /* Can't do SLI4 class2 without support sequence coalescing */ 1355 sp->cls2.classValid = 0; 1356 sp->cls2.seqDelivery = 0; 1357 } else { 1358 /* Historical, setting sequential-delivery bit for SLI3 */ 1359 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; 1360 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0; 1361 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1362 sp->cmn.request_multiple_Nport = 1; 1363 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1364 icmd->ulpCt_h = 1; 1365 icmd->ulpCt_l = 0; 1366 } else { 1367 sp->cmn.request_multiple_Nport = 0; 1368 } 1369 1370 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1371 icmd->un.elsreq64.myID = 0; 1372 icmd->un.elsreq64.fl = 1; 1373 } 1374 } 1375 1376 tmo = phba->fc_ratov; 1377 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 1378 lpfc_set_disctmo(vport); 1379 phba->fc_ratov = tmo; 1380 1381 phba->fc_stat.elsXmitFLOGI++; 1382 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi; 1383 1384 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1385 "Issue FLOGI: opt:x%x", 1386 phba->sli3_options, 0, 0); 1387 1388 elsiocb->ndlp = lpfc_nlp_get(ndlp); 1389 if (!elsiocb->ndlp) { 1390 lpfc_els_free_iocb(phba, elsiocb); 1391 return 1; 1392 } 1393 1394 /* Avoid race with FLOGI completion and hba_flags. 
*/ 1395 phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1396 1397 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1398 if (rc == IOCB_ERROR) { 1399 phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); 1400 lpfc_els_free_iocb(phba, elsiocb); 1401 lpfc_nlp_put(ndlp); 1402 return 1; 1403 } 1404 1405 /* Clear external loopback plug detected flag */ 1406 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 1407 1408 /* Check for a deferred FLOGI ACC condition */ 1409 if (phba->defer_flogi_acc_flag) { 1410 /* lookup ndlp for received FLOGI */ 1411 ndlp = lpfc_findnode_did(vport, 0); 1412 if (!ndlp) 1413 return 0; 1414 1415 did = vport->fc_myDID; 1416 vport->fc_myDID = Fabric_DID; 1417 1418 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq)); 1419 1420 if (phba->sli_rev == LPFC_SLI_REV4) { 1421 bf_set(wqe_ctxt_tag, 1422 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1423 phba->defer_flogi_acc_rx_id); 1424 bf_set(wqe_rcvoxid, 1425 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1426 phba->defer_flogi_acc_ox_id); 1427 } else { 1428 icmd = &defer_flogi_acc.iocb; 1429 icmd->ulpContext = phba->defer_flogi_acc_rx_id; 1430 icmd->unsli3.rcvsli3.ox_id = 1431 phba->defer_flogi_acc_ox_id; 1432 } 1433 1434 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1435 "3354 Xmit deferred FLOGI ACC: rx_id: x%x," 1436 " ox_id: x%x, hba_flag x%x\n", 1437 phba->defer_flogi_acc_rx_id, 1438 phba->defer_flogi_acc_ox_id, phba->hba_flag); 1439 1440 /* Send deferred FLOGI ACC */ 1441 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, 1442 ndlp, NULL); 1443 1444 phba->defer_flogi_acc_flag = false; 1445 vport->fc_myDID = did; 1446 1447 /* Decrement ndlp reference count to indicate the node can be 1448 * released when other references are removed. 1449 */ 1450 lpfc_nlp_put(ndlp); 1451 } 1452 1453 return 0; 1454 } 1455 1456 /** 1457 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs 1458 * @phba: pointer to lpfc hba data structure. 1459 * 1460 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs 1461 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq 1462 * list and issues an abort IOCB commond on each outstanding IOCB that 1463 * contains a active Fabric_DID ndlp. Note that this function is to issue 1464 * the abort IOCB command on all the outstanding IOCBs, thus when this 1465 * function returns, it does not guarantee all the IOCBs are actually aborted. 1466 * 1467 * Return code 1468 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1469 **/ 1470 int 1471 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1472 { 1473 struct lpfc_sli_ring *pring; 1474 struct lpfc_iocbq *iocb, *next_iocb; 1475 struct lpfc_nodelist *ndlp; 1476 u32 ulp_command; 1477 1478 /* Abort outstanding I/O on NPort <nlp_DID> */ 1479 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1480 "0201 Abort outstanding I/O on NPort x%x\n", 1481 Fabric_DID); 1482 1483 pring = lpfc_phba_elsring(phba); 1484 if (unlikely(!pring)) 1485 return -EIO; 1486 1487 /* 1488 * Check the txcmplq for an iocb that matches the nport the driver is 1489 * searching for. 
1490 */ 1491 spin_lock_irq(&phba->hbalock); 1492 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1493 ulp_command = get_job_cmnd(phba, iocb); 1494 if (ulp_command == CMD_ELS_REQUEST64_CR) { 1495 ndlp = iocb->ndlp; 1496 if (ndlp && ndlp->nlp_DID == Fabric_DID) { 1497 if ((phba->pport->fc_flag & FC_PT2PT) && 1498 !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) 1499 iocb->fabric_cmd_cmpl = 1500 lpfc_ignore_els_cmpl; 1501 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1502 NULL); 1503 } 1504 } 1505 } 1506 /* Make sure HBA is alive */ 1507 lpfc_issue_hb_tmo(phba); 1508 1509 spin_unlock_irq(&phba->hbalock); 1510 1511 return 0; 1512 } 1513 1514 /** 1515 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1516 * @vport: pointer to a host virtual N_Port data structure. 1517 * 1518 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1519 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1520 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1521 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1522 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1523 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1524 * @vport. 1525 * 1526 * Return code 1527 * 0 - failed to issue initial flogi for @vport 1528 * 1 - successfully issued initial flogi for @vport 1529 **/ 1530 int 1531 lpfc_initial_flogi(struct lpfc_vport *vport) 1532 { 1533 struct lpfc_nodelist *ndlp; 1534 1535 vport->port_state = LPFC_FLOGI; 1536 lpfc_set_disctmo(vport); 1537 1538 /* First look for the Fabric ndlp */ 1539 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1540 if (!ndlp) { 1541 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1542 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1543 if (!ndlp) 1544 return 0; 1545 /* Set the node type */ 1546 ndlp->nlp_type |= NLP_FABRIC; 1547 1548 /* Put ndlp onto node list */ 1549 lpfc_enqueue_node(vport, ndlp); 1550 } 1551 1552 /* Reset the Fabric flag, topology change may have happened */ 1553 vport->fc_flag &= ~FC_FABRIC; 1554 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1555 /* A node reference should be retained while registered with a 1556 * transport or dev-loss-evt work is pending. 1557 * Otherwise, decrement node reference to trigger release. 1558 */ 1559 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1560 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1561 lpfc_nlp_put(ndlp); 1562 return 0; 1563 } 1564 return 1; 1565 } 1566 1567 /** 1568 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1569 * @vport: pointer to a host virtual N_Port data structure. 1570 * 1571 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1572 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1573 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1574 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1575 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine 1576 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1577 * @vport. 
1578 * 1579 * Return code 1580 * 0 - failed to issue initial fdisc for @vport 1581 * 1 - successfully issued initial fdisc for @vport 1582 **/ 1583 int 1584 lpfc_initial_fdisc(struct lpfc_vport *vport) 1585 { 1586 struct lpfc_nodelist *ndlp; 1587 1588 /* First look for the Fabric ndlp */ 1589 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1590 if (!ndlp) { 1591 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1592 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1593 if (!ndlp) 1594 return 0; 1595 1596 /* NPIV is only supported in Fabrics. */ 1597 ndlp->nlp_type |= NLP_FABRIC; 1598 1599 /* Put ndlp onto node list */ 1600 lpfc_enqueue_node(vport, ndlp); 1601 } 1602 1603 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1604 /* A node reference should be retained while registered with a 1605 * transport or dev-loss-evt work is pending. 1606 * Otherwise, decrement node reference to trigger release. 1607 */ 1608 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1609 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1610 lpfc_nlp_put(ndlp); 1611 return 0; 1612 } 1613 return 1; 1614 } 1615 1616 /** 1617 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1618 * @vport: pointer to a host virtual N_Port data structure. 1619 * 1620 * This routine checks whether there are more remaining Port Logins 1621 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1622 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1623 * to issue ELS PLOGIs up to the configured discover threads with the 1624 * @vport (@vport->cfg_discovery_threads). The function also decrement 1625 * the @vport's num_disc_node by 1 if it is not already 0. 1626 **/ 1627 void 1628 lpfc_more_plogi(struct lpfc_vport *vport) 1629 { 1630 if (vport->num_disc_nodes) 1631 vport->num_disc_nodes--; 1632 1633 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1634 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1635 "0232 Continue discovery with %d PLOGIs to go " 1636 "Data: x%x x%x x%x\n", 1637 vport->num_disc_nodes, vport->fc_plogi_cnt, 1638 vport->fc_flag, vport->port_state); 1639 /* Check to see if there are more PLOGIs to be sent */ 1640 if (vport->fc_flag & FC_NLP_MORE) 1641 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1642 lpfc_els_disc_plogi(vport); 1643 1644 return; 1645 } 1646 1647 /** 1648 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp 1649 * @phba: pointer to lpfc hba data structure. 1650 * @prsp: pointer to response IOCB payload. 1651 * @ndlp: pointer to a node-list data structure. 1652 * 1653 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1654 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. 1655 * The following cases are considered N_Port confirmed: 1656 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1657 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1658 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1659 * pointer to the @ndlp will be returned. If the WWPN is not confirmed: 1660 * 1) if there is a node on vport list other than the @ndlp with the same 1661 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1662 * on that node to release the RPI associated with the node; 2) if there is 1663 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1664 * into, a new node shall be allocated (or activated). 
In either case, the 1665 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1666 * be released and the new_ndlp shall be put on to the vport node list and 1667 * its pointer returned as the confirmed node. 1668 * 1669 * Note that before the @ndlp got "released", the keepDID from not-matching 1670 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1671 * of the @ndlp. This is because the release of @ndlp is actually to put it 1672 * into an inactive state on the vport node list and the vport node list 1673 * management algorithm does not allow two node with a same DID. 1674 * 1675 * Return code 1676 * pointer to the PLOGI N_Port @ndlp 1677 **/ 1678 static struct lpfc_nodelist * 1679 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1680 struct lpfc_nodelist *ndlp) 1681 { 1682 struct lpfc_vport *vport = ndlp->vport; 1683 struct lpfc_nodelist *new_ndlp; 1684 struct serv_parm *sp; 1685 uint8_t name[sizeof(struct lpfc_name)]; 1686 uint32_t keepDID = 0, keep_nlp_flag = 0; 1687 uint32_t keep_new_nlp_flag = 0; 1688 uint16_t keep_nlp_state; 1689 u32 keep_nlp_fc4_type = 0; 1690 struct lpfc_nvme_rport *keep_nrport = NULL; 1691 unsigned long *active_rrqs_xri_bitmap = NULL; 1692 1693 /* Fabric nodes can have the same WWPN so we don't bother searching 1694 * by WWPN. Just return the ndlp that was given to us. 1695 */ 1696 if (ndlp->nlp_type & NLP_FABRIC) 1697 return ndlp; 1698 1699 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1700 memset(name, 0, sizeof(struct lpfc_name)); 1701 1702 /* Now we find out if the NPort we are logging into, matches the WWPN 1703 * we have for that ndlp. If not, we have some work to do. 1704 */ 1705 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1706 1707 /* return immediately if the WWPN matches ndlp */ 1708 if (!new_ndlp || (new_ndlp == ndlp)) 1709 return ndlp; 1710 1711 /* 1712 * Unregister from backend if not done yet. Could have been skipped 1713 * due to ADISC 1714 */ 1715 lpfc_nlp_unreg_node(vport, new_ndlp); 1716 1717 if (phba->sli_rev == LPFC_SLI_REV4) { 1718 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1719 GFP_KERNEL); 1720 if (active_rrqs_xri_bitmap) 1721 memset(active_rrqs_xri_bitmap, 0, 1722 phba->cfg_rrq_xri_bitmap_sz); 1723 } 1724 1725 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1726 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1727 "new_ndlp x%x x%x x%x\n", 1728 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1729 (new_ndlp ? new_ndlp->nlp_DID : 0), 1730 (new_ndlp ? new_ndlp->nlp_flag : 0), 1731 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1732 1733 keepDID = new_ndlp->nlp_DID; 1734 1735 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) 1736 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, 1737 phba->cfg_rrq_xri_bitmap_sz); 1738 1739 /* At this point in this routine, we know new_ndlp will be 1740 * returned. however, any previous GID_FTs that were done 1741 * would have updated nlp_fc4_type in ndlp, so we must ensure 1742 * new_ndlp has the right value. 
1743 */ 1744 if (vport->fc_flag & FC_FABRIC) { 1745 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1746 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1747 } 1748 1749 lpfc_unreg_rpi(vport, new_ndlp); 1750 new_ndlp->nlp_DID = ndlp->nlp_DID; 1751 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1752 if (phba->sli_rev == LPFC_SLI_REV4) 1753 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1754 ndlp->active_rrqs_xri_bitmap, 1755 phba->cfg_rrq_xri_bitmap_sz); 1756 1757 /* Lock both ndlps */ 1758 spin_lock_irq(&ndlp->lock); 1759 spin_lock_irq(&new_ndlp->lock); 1760 keep_new_nlp_flag = new_ndlp->nlp_flag; 1761 keep_nlp_flag = ndlp->nlp_flag; 1762 new_ndlp->nlp_flag = ndlp->nlp_flag; 1763 1764 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1765 if (keep_new_nlp_flag & NLP_UNREG_INP) 1766 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1767 else 1768 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1769 1770 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1771 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1772 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1773 else 1774 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1775 1776 /* 1777 * Retain the DROPPED flag. This will take care of the init 1778 * refcount when affecting the state change 1779 */ 1780 if (keep_new_nlp_flag & NLP_DROPPED) 1781 new_ndlp->nlp_flag |= NLP_DROPPED; 1782 else 1783 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1784 1785 ndlp->nlp_flag = keep_new_nlp_flag; 1786 1787 /* if ndlp had NLP_UNREG_INP set, keep it */ 1788 if (keep_nlp_flag & NLP_UNREG_INP) 1789 ndlp->nlp_flag |= NLP_UNREG_INP; 1790 else 1791 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1792 1793 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1794 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1795 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1796 else 1797 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1798 1799 /* 1800 * Retain the DROPPED flag. This will take care of the init 1801 * refcount when affecting the state change 1802 */ 1803 if (keep_nlp_flag & NLP_DROPPED) 1804 ndlp->nlp_flag |= NLP_DROPPED; 1805 else 1806 ndlp->nlp_flag &= ~NLP_DROPPED; 1807 1808 spin_unlock_irq(&new_ndlp->lock); 1809 spin_unlock_irq(&ndlp->lock); 1810 1811 /* Set nlp_states accordingly */ 1812 keep_nlp_state = new_ndlp->nlp_state; 1813 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1814 1815 /* interchange the nvme remoteport structs */ 1816 keep_nrport = new_ndlp->nrport; 1817 new_ndlp->nrport = ndlp->nrport; 1818 1819 /* Move this back to NPR state */ 1820 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1821 /* The ndlp doesn't have a portname yet, but does have an 1822 * NPort ID. The new_ndlp portname matches the Rport's 1823 * portname. Reinstantiate the new_ndlp and reset the ndlp. 1824 */ 1825 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1826 "3179 PLOGI confirm NEW: %x %x\n", 1827 new_ndlp->nlp_DID, keepDID); 1828 1829 /* Two ndlps cannot have the same did on the nodelist. 1830 * The KeepDID and keep_nlp_fc4_type need to be swapped 1831 * because ndlp is inflight with no WWPN. 
1832 */ 1833 ndlp->nlp_DID = keepDID; 1834 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1835 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1836 if (phba->sli_rev == LPFC_SLI_REV4 && 1837 active_rrqs_xri_bitmap) 1838 memcpy(ndlp->active_rrqs_xri_bitmap, 1839 active_rrqs_xri_bitmap, 1840 phba->cfg_rrq_xri_bitmap_sz); 1841 1842 } else { 1843 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1844 "3180 PLOGI confirm SWAP: %x %x\n", 1845 new_ndlp->nlp_DID, keepDID); 1846 1847 lpfc_unreg_rpi(vport, ndlp); 1848 1849 /* The ndlp and new_ndlp both have WWPNs but are swapping 1850 * NPort Ids and attributes. 1851 */ 1852 ndlp->nlp_DID = keepDID; 1853 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1854 1855 if (phba->sli_rev == LPFC_SLI_REV4 && 1856 active_rrqs_xri_bitmap) 1857 memcpy(ndlp->active_rrqs_xri_bitmap, 1858 active_rrqs_xri_bitmap, 1859 phba->cfg_rrq_xri_bitmap_sz); 1860 1861 /* Since we are switching over to the new_ndlp, 1862 * reset the old ndlp state 1863 */ 1864 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1865 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1866 keep_nlp_state = NLP_STE_NPR_NODE; 1867 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1868 ndlp->nrport = keep_nrport; 1869 } 1870 1871 /* 1872 * If ndlp is not associated with any rport we can drop it here else 1873 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1874 */ 1875 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1876 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1877 1878 if (phba->sli_rev == LPFC_SLI_REV4 && 1879 active_rrqs_xri_bitmap) 1880 mempool_free(active_rrqs_xri_bitmap, 1881 phba->active_rrq_pool); 1882 1883 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1884 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1885 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1886 new_ndlp->nlp_fc4_type); 1887 1888 return new_ndlp; 1889 } 1890 1891 /** 1892 * lpfc_end_rscn - Check and handle more rscn for a vport 1893 * @vport: pointer to a host virtual N_Port data structure. 1894 * 1895 * This routine checks whether more Registration State Change 1896 * Notifications (RSCNs) came in while the discovery state machine was in 1897 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1898 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1899 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1900 * handling the RSCNs. 1901 **/ 1902 void 1903 lpfc_end_rscn(struct lpfc_vport *vport) 1904 { 1905 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1906 1907 if (vport->fc_flag & FC_RSCN_MODE) { 1908 /* 1909 * Check to see if more RSCNs came in while we were 1910 * processing this one. 1911 */ 1912 if (vport->fc_rscn_id_cnt || 1913 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1914 lpfc_els_handle_rscn(vport); 1915 else { 1916 spin_lock_irq(shost->host_lock); 1917 vport->fc_flag &= ~FC_RSCN_MODE; 1918 spin_unlock_irq(shost->host_lock); 1919 } 1920 } 1921 } 1922 1923 /** 1924 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1925 * @phba: pointer to lpfc hba data structure. 1926 * @cmdiocb: pointer to lpfc command iocb data structure. 1927 * @rspiocb: pointer to lpfc response iocb data structure. 1928 * 1929 * This routine will call the clear rrq function to free the rrq and 1930 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1931 * exist then the clear_rrq is still called because the rrq needs to 1932 * be freed. 
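 *
 * Stripped of logging and error handling, the teardown performed below
 * amounts to the following sketch:
 *
 *	rrq = cmdiocb->context_un.rrq;
 *	lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 *	lpfc_els_free_iocb(phba, cmdiocb);
 *	lpfc_nlp_put(cmdiocb->ndlp);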
1933 **/ 1934 1935 static void 1936 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1937 struct lpfc_iocbq *rspiocb) 1938 { 1939 struct lpfc_vport *vport = cmdiocb->vport; 1940 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 1941 struct lpfc_node_rrq *rrq; 1942 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1943 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1944 1945 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1946 rrq = cmdiocb->context_un.rrq; 1947 cmdiocb->rsp_iocb = rspiocb; 1948 1949 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1950 "RRQ cmpl: status:x%x/x%x did:x%x", 1951 ulp_status, ulp_word4, 1952 get_job_els_rsp64_did(phba, cmdiocb)); 1953 1954 1955 /* rrq completes to NPort <nlp_DID> */ 1956 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1957 "2880 RRQ completes to DID x%x " 1958 "Data: x%x x%x x%x x%x x%x\n", 1959 ndlp->nlp_DID, ulp_status, ulp_word4, 1960 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid); 1961 1962 if (ulp_status) { 1963 /* Check for retry */ 1964 /* RRQ failed Don't print the vport to vport rjts */ 1965 if (ulp_status != IOSTAT_LS_RJT || 1966 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 1967 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 1968 (phba)->pport->cfg_log_verbose & LOG_ELS) 1969 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1970 "2881 RRQ failure DID:%06X Status:" 1971 "x%x/x%x\n", 1972 ndlp->nlp_DID, ulp_status, 1973 ulp_word4); 1974 } 1975 1976 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1977 lpfc_els_free_iocb(phba, cmdiocb); 1978 lpfc_nlp_put(ndlp); 1979 return; 1980 } 1981 /** 1982 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1983 * @phba: pointer to lpfc hba data structure. 1984 * @cmdiocb: pointer to lpfc command iocb data structure. 1985 * @rspiocb: pointer to lpfc response iocb data structure. 1986 * 1987 * This routine is the completion callback function for issuing the Port 1988 * Login (PLOGI) command. For PLOGI completion, there must be an active 1989 * ndlp on the vport node list that matches the remote node ID from the 1990 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1991 * ignored and command IOCB released. The PLOGI response IOCB status is 1992 * checked for error conditions. If there is error status reported, PLOGI 1993 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1994 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1995 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1996 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1997 * there are additional N_Port nodes with the vport that need to perform 1998 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1999 * PLOGIs. 
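 *
 * Ignoring locking, logging and the link-attention check, the decision
 * flow implemented below is roughly:
 *
 *	if (ulp_status) {
 *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *			goto out;		(being retried)
 *		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 *					NLP_EVT_CMPL_PLOGI);
 *	} else {
 *		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 *		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 *					NLP_EVT_CMPL_PLOGI);
 *	}
 *	if (disc && vport->num_disc_nodes)
 *		lpfc_more_plogi(vport);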
2000 **/ 2001 static void 2002 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2003 struct lpfc_iocbq *rspiocb) 2004 { 2005 struct lpfc_vport *vport = cmdiocb->vport; 2006 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2007 IOCB_t *irsp; 2008 struct lpfc_nodelist *ndlp, *free_ndlp; 2009 struct lpfc_dmabuf *prsp; 2010 int disc; 2011 struct serv_parm *sp = NULL; 2012 u32 ulp_status, ulp_word4, did, iotag; 2013 bool release_node = false; 2014 2015 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2016 cmdiocb->rsp_iocb = rspiocb; 2017 2018 ulp_status = get_job_ulpstatus(phba, rspiocb); 2019 ulp_word4 = get_job_word4(phba, rspiocb); 2020 did = get_job_els_rsp64_did(phba, cmdiocb); 2021 2022 if (phba->sli_rev == LPFC_SLI_REV4) { 2023 iotag = get_wqe_reqtag(cmdiocb); 2024 } else { 2025 irsp = &rspiocb->iocb; 2026 iotag = irsp->ulpIoTag; 2027 } 2028 2029 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2030 "PLOGI cmpl: status:x%x/x%x did:x%x", 2031 ulp_status, ulp_word4, did); 2032 2033 ndlp = lpfc_findnode_did(vport, did); 2034 if (!ndlp) { 2035 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2036 "0136 PLOGI completes to NPort x%x " 2037 "with no ndlp. Data: x%x x%x x%x\n", 2038 did, ulp_status, ulp_word4, iotag); 2039 goto out_freeiocb; 2040 } 2041 2042 /* Since ndlp can be freed in the disc state machine, note if this node 2043 * is being used during discovery. 2044 */ 2045 spin_lock_irq(&ndlp->lock); 2046 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2047 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2048 spin_unlock_irq(&ndlp->lock); 2049 2050 /* PLOGI completes to NPort <nlp_DID> */ 2051 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2052 "0102 PLOGI completes to NPort x%06x " 2053 "Data: x%x x%x x%x x%x x%x\n", 2054 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2055 ulp_status, ulp_word4, 2056 disc, vport->num_disc_nodes); 2057 2058 /* Check to see if link went down during discovery */ 2059 if (lpfc_els_chk_latt(vport)) { 2060 spin_lock_irq(&ndlp->lock); 2061 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2062 spin_unlock_irq(&ndlp->lock); 2063 goto out; 2064 } 2065 2066 if (ulp_status) { 2067 /* Check for retry */ 2068 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2069 /* ELS command is being retried */ 2070 if (disc) { 2071 spin_lock_irq(&ndlp->lock); 2072 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2073 spin_unlock_irq(&ndlp->lock); 2074 } 2075 goto out; 2076 } 2077 /* PLOGI failed Don't print the vport to vport rjts */ 2078 if (ulp_status != IOSTAT_LS_RJT || 2079 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2080 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2081 (phba)->pport->cfg_log_verbose & LOG_ELS) 2082 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2083 "2753 PLOGI failure DID:%06X " 2084 "Status:x%x/x%x\n", 2085 ndlp->nlp_DID, ulp_status, 2086 ulp_word4); 2087 2088 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2089 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2090 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2091 NLP_EVT_CMPL_PLOGI); 2092 2093 /* If a PLOGI collision occurred, the node needs to continue 2094 * with the reglogin process. 2095 */ 2096 spin_lock_irq(&ndlp->lock); 2097 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2098 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2099 spin_unlock_irq(&ndlp->lock); 2100 goto out; 2101 } 2102 2103 /* No PLOGI collision and the node is not registered with the 2104 * scsi or nvme transport. It is no longer an active node. Just 2105 * start the device remove process. 
2106 */ 2107 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2108 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2109 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2110 release_node = true; 2111 } 2112 spin_unlock_irq(&ndlp->lock); 2113 2114 if (release_node) 2115 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2116 NLP_EVT_DEVICE_RM); 2117 } else { 2118 /* Good status, call state machine */ 2119 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next, 2120 struct lpfc_dmabuf, list); 2121 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2122 2123 sp = (struct serv_parm *)((u8 *)prsp->virt + 2124 sizeof(u32)); 2125 2126 ndlp->vmid_support = 0; 2127 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2128 (phba->cfg_vmid_priority_tagging && 2129 sp->cmn.priority_tagging)) { 2130 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2131 "4018 app_hdr_support %d tagging %d DID x%x\n", 2132 sp->cmn.app_hdr_support, 2133 sp->cmn.priority_tagging, 2134 ndlp->nlp_DID); 2135 /* if the dest port supports VMID, mark it in ndlp */ 2136 ndlp->vmid_support = 1; 2137 } 2138 2139 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2140 NLP_EVT_CMPL_PLOGI); 2141 } 2142 2143 if (disc && vport->num_disc_nodes) { 2144 /* Check to see if there are more PLOGIs to be sent */ 2145 lpfc_more_plogi(vport); 2146 2147 if (vport->num_disc_nodes == 0) { 2148 spin_lock_irq(shost->host_lock); 2149 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2150 spin_unlock_irq(shost->host_lock); 2151 2152 lpfc_can_disctmo(vport); 2153 lpfc_end_rscn(vport); 2154 } 2155 } 2156 2157 out: 2158 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2159 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2160 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2161 2162 out_freeiocb: 2163 /* Release the reference on the original I/O request. */ 2164 free_ndlp = cmdiocb->ndlp; 2165 2166 lpfc_els_free_iocb(phba, cmdiocb); 2167 lpfc_nlp_put(free_ndlp); 2168 return; 2169 } 2170 2171 /** 2172 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2173 * @vport: pointer to a host virtual N_Port data structure. 2174 * @did: destination port identifier. 2175 * @retry: number of retries to the command IOCB. 2176 * 2177 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2178 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2179 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2180 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2181 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2182 * 2183 * Note that the ndlp reference count will be incremented by 1 for holding 2184 * the ndlp and the reference to ndlp will be stored into the ndlp field 2185 * of the IOCB for the completion callback function to the PLOGI ELS command. 2186 * 2187 * Return code 2188 * 0 - Successfully issued a plogi for @vport 2189 * 1 - failed to issue a plogi for @vport 2190 **/ 2191 int 2192 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2193 { 2194 struct lpfc_hba *phba = vport->phba; 2195 struct serv_parm *sp; 2196 struct lpfc_nodelist *ndlp; 2197 struct lpfc_iocbq *elsiocb; 2198 uint8_t *pcmd; 2199 uint16_t cmdsize; 2200 int ret; 2201 2202 ndlp = lpfc_findnode_did(vport, did); 2203 if (!ndlp) 2204 return 1; 2205 2206 /* Defer the processing of the issue PLOGI until after the 2207 * outstanding UNREG_RPI mbox command completes, unless we 2208 * are going offline. 
This logic does not apply for Fabric DIDs 2209 */ 2210 if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && 2211 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2212 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2213 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2214 "4110 Issue PLOGI x%x deferred " 2215 "on NPort x%x rpi x%x flg x%x Data:" 2216 " x%px\n", 2217 ndlp->nlp_defer_did, ndlp->nlp_DID, 2218 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); 2219 2220 /* We can only defer 1st PLOGI */ 2221 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2222 ndlp->nlp_defer_did = did; 2223 return 0; 2224 } 2225 2226 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2227 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2228 ELS_CMD_PLOGI); 2229 if (!elsiocb) 2230 return 1; 2231 2232 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2233 2234 /* For PLOGI request, remainder of payload is service parameters */ 2235 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2236 pcmd += sizeof(uint32_t); 2237 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2238 sp = (struct serv_parm *) pcmd; 2239 2240 /* 2241 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2242 * to device on remote loops work. 2243 */ 2244 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2245 sp->cmn.altBbCredit = 1; 2246 2247 if (sp->cmn.fcphLow < FC_PH_4_3) 2248 sp->cmn.fcphLow = FC_PH_4_3; 2249 2250 if (sp->cmn.fcphHigh < FC_PH3) 2251 sp->cmn.fcphHigh = FC_PH3; 2252 2253 sp->cmn.valid_vendor_ver_level = 0; 2254 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2255 sp->cmn.bbRcvSizeMsb &= 0xF; 2256 2257 /* Check if the destination port supports VMID */ 2258 ndlp->vmid_support = 0; 2259 if (vport->vmid_priority_tagging) 2260 sp->cmn.priority_tagging = 1; 2261 else if (phba->cfg_vmid_app_header && 2262 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2263 sp->cmn.app_hdr_support = 1; 2264 2265 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2266 "Issue PLOGI: did:x%x", 2267 did, 0, 0); 2268 2269 /* If our firmware supports this feature, convey that 2270 * information to the target using the vendor specific field. 2271 */ 2272 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2273 sp->cmn.valid_vendor_ver_level = 1; 2274 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2275 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2276 } 2277 2278 phba->fc_stat.elsXmitPLOGI++; 2279 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2280 2281 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2282 "Issue PLOGI: did:x%x refcnt %d", 2283 did, kref_read(&ndlp->kref), 0); 2284 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2285 if (!elsiocb->ndlp) { 2286 lpfc_els_free_iocb(phba, elsiocb); 2287 return 1; 2288 } 2289 2290 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2291 if (ret) { 2292 lpfc_els_free_iocb(phba, elsiocb); 2293 lpfc_nlp_put(ndlp); 2294 return 1; 2295 } 2296 2297 return 0; 2298 } 2299 2300 /** 2301 * lpfc_cmpl_els_prli - Completion callback function for prli 2302 * @phba: pointer to lpfc hba data structure. 2303 * @cmdiocb: pointer to lpfc command iocb data structure. 2304 * @rspiocb: pointer to lpfc response iocb data structure. 2305 * 2306 * This routine is the completion callback function for a Process Login 2307 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2308 * status. If there is error status reported, PRLI retry shall be attempted 2309 * by invoking the lpfc_els_retry() routine. 
Otherwise, the state 2310 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2311 * ndlp to mark the PRLI completion. 2312 **/ 2313 static void 2314 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2315 struct lpfc_iocbq *rspiocb) 2316 { 2317 struct lpfc_vport *vport = cmdiocb->vport; 2318 struct lpfc_nodelist *ndlp; 2319 char *mode; 2320 u32 loglevel; 2321 u32 ulp_status; 2322 u32 ulp_word4; 2323 bool release_node = false; 2324 2325 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2326 cmdiocb->rsp_iocb = rspiocb; 2327 2328 ndlp = cmdiocb->ndlp; 2329 2330 ulp_status = get_job_ulpstatus(phba, rspiocb); 2331 ulp_word4 = get_job_word4(phba, rspiocb); 2332 2333 spin_lock_irq(&ndlp->lock); 2334 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2335 2336 /* Driver supports multiple FC4 types. Counters matter. */ 2337 vport->fc_prli_sent--; 2338 ndlp->fc4_prli_sent--; 2339 spin_unlock_irq(&ndlp->lock); 2340 2341 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2342 "PRLI cmpl: status:x%x/x%x did:x%x", 2343 ulp_status, ulp_word4, 2344 ndlp->nlp_DID); 2345 2346 /* PRLI completes to NPort <nlp_DID> */ 2347 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2348 "0103 PRLI completes to NPort x%06x " 2349 "Data: x%x x%x x%x x%x\n", 2350 ndlp->nlp_DID, ulp_status, ulp_word4, 2351 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2352 2353 /* Check to see if link went down during discovery */ 2354 if (lpfc_els_chk_latt(vport)) 2355 goto out; 2356 2357 if (ulp_status) { 2358 /* Check for retry */ 2359 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2360 /* ELS command is being retried */ 2361 goto out; 2362 } 2363 2364 /* If we don't send GFT_ID to Fabric, a PRLI error 2365 * could be expected. 2366 */ 2367 if ((vport->fc_flag & FC_FABRIC) || 2368 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2369 mode = KERN_ERR; 2370 loglevel = LOG_TRACE_EVENT; 2371 } else { 2372 mode = KERN_INFO; 2373 loglevel = LOG_ELS; 2374 } 2375 2376 /* PRLI failed */ 2377 lpfc_printf_vlog(vport, mode, loglevel, 2378 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2379 "data: x%x x%x x%x\n", 2380 ndlp->nlp_DID, ulp_status, 2381 ulp_word4, ndlp->nlp_state, 2382 ndlp->fc4_prli_sent, ndlp->nlp_flag); 2383 2384 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2385 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2386 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2387 NLP_EVT_CMPL_PRLI); 2388 2389 /* The following condition catches an inflight transition 2390 * mismatch typically caused by an RSCN. Skip any 2391 * processing to allow recovery. 2392 */ 2393 if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 2394 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || 2395 (ndlp->nlp_state == NLP_STE_NPR_NODE && 2396 ndlp->nlp_flag & NLP_DELAY_TMO)) { 2397 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 2398 "2784 PRLI cmpl: Allow Node recovery " 2399 "DID x%06x nstate x%x nflag x%x\n", 2400 ndlp->nlp_DID, ndlp->nlp_state, 2401 ndlp->nlp_flag); 2402 goto out; 2403 } 2404 2405 /* 2406 * For P2P topology, retain the node so that PLOGI can be 2407 * attempted on it again. 2408 */ 2409 if (vport->fc_flag & FC_PT2PT) 2410 goto out; 2411 2412 /* As long as this node is not registered with the SCSI 2413 * or NVMe transport and no other PRLIs are outstanding, 2414 * it is no longer an active node. Otherwise devloss 2415 * handles the final cleanup. 
2416 */ 2417 spin_lock_irq(&ndlp->lock); 2418 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2419 !ndlp->fc4_prli_sent) { 2420 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2421 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2422 release_node = true; 2423 } 2424 spin_unlock_irq(&ndlp->lock); 2425 2426 if (release_node) 2427 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2428 NLP_EVT_DEVICE_RM); 2429 } else { 2430 /* Good status, call state machine. However, if another 2431 * PRLI is outstanding, don't call the state machine 2432 * because final disposition to Mapped or Unmapped is 2433 * completed there. 2434 */ 2435 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2436 NLP_EVT_CMPL_PRLI); 2437 } 2438 2439 out: 2440 lpfc_els_free_iocb(phba, cmdiocb); 2441 lpfc_nlp_put(ndlp); 2442 return; 2443 } 2444 2445 /** 2446 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2447 * @vport: pointer to a host virtual N_Port data structure. 2448 * @ndlp: pointer to a node-list data structure. 2449 * @retry: number of retries to the command IOCB. 2450 * 2451 * This routine issues a Process Login (PRLI) ELS command for the 2452 * @vport. The PRLI service parameters are set up in the payload of the 2453 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2454 * is put to the IOCB completion callback func field before invoking the 2455 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2456 * 2457 * Note that the ndlp reference count will be incremented by 1 for holding the 2458 * ndlp and the reference to ndlp will be stored into the ndlp field of 2459 * the IOCB for the completion callback function to the PRLI ELS command. 2460 * 2461 * Return code 2462 * 0 - successfully issued prli iocb command for @vport 2463 * 1 - failed to issue prli iocb command for @vport 2464 **/ 2465 int 2466 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2467 uint8_t retry) 2468 { 2469 int rc = 0; 2470 struct lpfc_hba *phba = vport->phba; 2471 PRLI *npr; 2472 struct lpfc_nvme_prli *npr_nvme; 2473 struct lpfc_iocbq *elsiocb; 2474 uint8_t *pcmd; 2475 uint16_t cmdsize; 2476 u32 local_nlp_type, elscmd; 2477 2478 /* 2479 * If we are in RSCN mode, the FC4 types supported from a 2480 * previous GFT_ID command may not be accurate. So, if we 2481 * are a NVME Initiator, always look for the possibility of 2482 * the remote NPort beng a NVME Target. 2483 */ 2484 if (phba->sli_rev == LPFC_SLI_REV4 && 2485 vport->fc_flag & FC_RSCN_MODE && 2486 vport->nvmei_support) 2487 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2488 local_nlp_type = ndlp->nlp_fc4_type; 2489 2490 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2491 * fields here before any of them can complete. 2492 */ 2493 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2494 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2495 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2496 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2497 ndlp->nvme_fb_size = 0; 2498 2499 send_next_prli: 2500 if (local_nlp_type & NLP_FC4_FCP) { 2501 /* Payload is 4 + 16 = 20 x14 bytes. */ 2502 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2503 elscmd = ELS_CMD_PRLI; 2504 } else if (local_nlp_type & NLP_FC4_NVME) { 2505 /* Payload is 4 + 20 = 24 x18 bytes. 
*/ 2506 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2507 elscmd = ELS_CMD_NVMEPRLI; 2508 } else { 2509 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2510 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2511 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2512 return 1; 2513 } 2514 2515 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2516 * FC4 type, implicitly LOGO. 2517 */ 2518 if (phba->sli_rev == LPFC_SLI_REV3 && 2519 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2520 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2521 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2522 ndlp->nlp_type); 2523 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2524 return 1; 2525 } 2526 2527 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2528 ndlp->nlp_DID, elscmd); 2529 if (!elsiocb) 2530 return 1; 2531 2532 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2533 2534 /* For PRLI request, remainder of payload is service parameters */ 2535 memset(pcmd, 0, cmdsize); 2536 2537 if (local_nlp_type & NLP_FC4_FCP) { 2538 /* Remainder of payload is FCP PRLI parameter page. 2539 * Note: this data structure is defined as 2540 * BE/LE in the structure definition so no 2541 * byte swap call is made. 2542 */ 2543 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2544 pcmd += sizeof(uint32_t); 2545 npr = (PRLI *)pcmd; 2546 2547 /* 2548 * If our firmware version is 3.20 or later, 2549 * set the following bits for FC-TAPE support. 2550 */ 2551 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2552 npr->ConfmComplAllowed = 1; 2553 npr->Retry = 1; 2554 npr->TaskRetryIdReq = 1; 2555 } 2556 npr->estabImagePair = 1; 2557 npr->readXferRdyDis = 1; 2558 if (vport->cfg_first_burst_size) 2559 npr->writeXferRdyDis = 1; 2560 2561 /* For FCP support */ 2562 npr->prliType = PRLI_FCP_TYPE; 2563 npr->initiatorFunc = 1; 2564 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2565 2566 /* Remove FCP type - processed. */ 2567 local_nlp_type &= ~NLP_FC4_FCP; 2568 } else if (local_nlp_type & NLP_FC4_NVME) { 2569 /* Remainder of payload is NVME PRLI parameter page. 2570 * This data structure is the newer definition that 2571 * uses bf macros so a byte swap is required. 2572 */ 2573 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2574 pcmd += sizeof(uint32_t); 2575 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2576 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2577 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2578 if (phba->nsler) { 2579 bf_set(prli_nsler, npr_nvme, 1); 2580 bf_set(prli_conf, npr_nvme, 1); 2581 } 2582 2583 /* Only initiators request first burst. */ 2584 if ((phba->cfg_nvme_enable_fb) && 2585 !phba->nvmet_support) 2586 bf_set(prli_fba, npr_nvme, 1); 2587 2588 if (phba->nvmet_support) { 2589 bf_set(prli_tgt, npr_nvme, 1); 2590 bf_set(prli_disc, npr_nvme, 1); 2591 } else { 2592 bf_set(prli_init, npr_nvme, 1); 2593 bf_set(prli_conf, npr_nvme, 1); 2594 } 2595 2596 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2597 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2598 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2599 2600 /* Remove NVME type - processed. 
*/ 2601 local_nlp_type &= ~NLP_FC4_NVME; 2602 } 2603 2604 phba->fc_stat.elsXmitPRLI++; 2605 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2606 2607 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2608 "Issue PRLI: did:x%x refcnt %d", 2609 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2610 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2611 if (!elsiocb->ndlp) { 2612 lpfc_els_free_iocb(phba, elsiocb); 2613 return 1; 2614 } 2615 2616 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2617 if (rc == IOCB_ERROR) { 2618 lpfc_els_free_iocb(phba, elsiocb); 2619 lpfc_nlp_put(ndlp); 2620 return 1; 2621 } 2622 2623 /* The vport counters are used for lpfc_scan_finished, but 2624 * the ndlp is used to track outstanding PRLIs for different 2625 * FC4 types. 2626 */ 2627 spin_lock_irq(&ndlp->lock); 2628 ndlp->nlp_flag |= NLP_PRLI_SND; 2629 vport->fc_prli_sent++; 2630 ndlp->fc4_prli_sent++; 2631 spin_unlock_irq(&ndlp->lock); 2632 2633 /* The driver supports 2 FC4 types. Make sure 2634 * a PRLI is issued for all types before exiting. 2635 */ 2636 if (phba->sli_rev == LPFC_SLI_REV4 && 2637 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2638 goto send_next_prli; 2639 else 2640 return 0; 2641 } 2642 2643 /** 2644 * lpfc_rscn_disc - Perform rscn discovery for a vport 2645 * @vport: pointer to a host virtual N_Port data structure. 2646 * 2647 * This routine performs Registration State Change Notification (RSCN) 2648 * discovery for a @vport. If the @vport's node port recovery count is not 2649 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2650 * the nodes that need recovery. If none of the PLOGI were needed through 2651 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2652 * invoked to check and handle possible more RSCN came in during the period 2653 * of processing the current ones. 2654 **/ 2655 static void 2656 lpfc_rscn_disc(struct lpfc_vport *vport) 2657 { 2658 lpfc_can_disctmo(vport); 2659 2660 /* RSCN discovery */ 2661 /* go thru NPR nodes and issue ELS PLOGIs */ 2662 if (vport->fc_npr_cnt) 2663 if (lpfc_els_disc_plogi(vport)) 2664 return; 2665 2666 lpfc_end_rscn(vport); 2667 } 2668 2669 /** 2670 * lpfc_adisc_done - Complete the adisc phase of discovery 2671 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2672 * 2673 * This function is called when the final ADISC is completed during discovery. 2674 * This function handles clearing link attention or issuing reg_vpi depending 2675 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2676 * discovery. 2677 * This function is called with no locks held. 2678 **/ 2679 static void 2680 lpfc_adisc_done(struct lpfc_vport *vport) 2681 { 2682 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2683 struct lpfc_hba *phba = vport->phba; 2684 2685 /* 2686 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2687 * and continue discovery. 2688 */ 2689 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2690 !(vport->fc_flag & FC_RSCN_MODE) && 2691 (phba->sli_rev < LPFC_SLI_REV4)) { 2692 2693 /* 2694 * If link is down, clear_la and reg_vpi will be done after 2695 * flogi following a link up event 2696 */ 2697 if (!lpfc_is_link_up(phba)) 2698 return; 2699 2700 /* The ADISCs are complete. Doesn't matter if they 2701 * succeeded or failed because the ADISC completion 2702 * routine guarantees to call the state machine and 2703 * the RPI is either unregistered (failed ADISC response) 2704 * or the RPI is still valid and the node is marked 2705 * mapped for a target. 
The exchanges should be in the 2706 * correct state. This code is specific to SLI3. 2707 */ 2708 lpfc_issue_clear_la(phba, vport); 2709 lpfc_issue_reg_vpi(phba, vport); 2710 return; 2711 } 2712 /* 2713 * For SLI2, we need to set port_state to READY 2714 * and continue discovery. 2715 */ 2716 if (vport->port_state < LPFC_VPORT_READY) { 2717 /* If we get here, there is nothing to ADISC */ 2718 lpfc_issue_clear_la(phba, vport); 2719 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2720 vport->num_disc_nodes = 0; 2721 /* go thru NPR list, issue ELS PLOGIs */ 2722 if (vport->fc_npr_cnt) 2723 lpfc_els_disc_plogi(vport); 2724 if (!vport->num_disc_nodes) { 2725 spin_lock_irq(shost->host_lock); 2726 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2727 spin_unlock_irq(shost->host_lock); 2728 lpfc_can_disctmo(vport); 2729 lpfc_end_rscn(vport); 2730 } 2731 } 2732 vport->port_state = LPFC_VPORT_READY; 2733 } else 2734 lpfc_rscn_disc(vport); 2735 } 2736 2737 /** 2738 * lpfc_more_adisc - Issue more adisc as needed 2739 * @vport: pointer to a host virtual N_Port data structure. 2740 * 2741 * This routine determines whether there are more ndlps on a @vport 2742 * node list need to have Address Discover (ADISC) issued. If so, it will 2743 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2744 * remaining nodes which need to have ADISC sent. 2745 **/ 2746 void 2747 lpfc_more_adisc(struct lpfc_vport *vport) 2748 { 2749 if (vport->num_disc_nodes) 2750 vport->num_disc_nodes--; 2751 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2752 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2753 "0210 Continue discovery with %d ADISCs to go " 2754 "Data: x%x x%x x%x\n", 2755 vport->num_disc_nodes, vport->fc_adisc_cnt, 2756 vport->fc_flag, vport->port_state); 2757 /* Check to see if there are more ADISCs to be sent */ 2758 if (vport->fc_flag & FC_NLP_MORE) { 2759 lpfc_set_disctmo(vport); 2760 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2761 lpfc_els_disc_adisc(vport); 2762 } 2763 if (!vport->num_disc_nodes) 2764 lpfc_adisc_done(vport); 2765 return; 2766 } 2767 2768 /** 2769 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2770 * @phba: pointer to lpfc hba data structure. 2771 * @cmdiocb: pointer to lpfc command iocb data structure. 2772 * @rspiocb: pointer to lpfc response iocb data structure. 2773 * 2774 * This routine is the completion function for issuing the Address Discover 2775 * (ADISC) command. It first checks to see whether link went down during 2776 * the discovery process. If so, the node will be marked as node port 2777 * recovery for issuing discover IOCB by the link attention handler and 2778 * exit. Otherwise, the response status is checked. If error was reported 2779 * in the response status, the ADISC command shall be retried by invoking 2780 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2781 * the response status, the state machine is invoked to set transition 2782 * with respect to NLP_EVT_CMPL_ADISC event. 
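 *
 * When the failed node is neither registered with a transport nor in
 * dev-loss handling, the routine finishes with the release pattern shared
 * by the other ELS completions in this file; condensed (the real code
 * evaluates the flags under ndlp->lock):
 *
 *	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
 *	    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
 *		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 *					NLP_EVT_DEVICE_RM);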
2783 **/ 2784 static void 2785 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2786 struct lpfc_iocbq *rspiocb) 2787 { 2788 struct lpfc_vport *vport = cmdiocb->vport; 2789 IOCB_t *irsp; 2790 struct lpfc_nodelist *ndlp; 2791 int disc; 2792 u32 ulp_status, ulp_word4, tmo; 2793 bool release_node = false; 2794 2795 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2796 cmdiocb->rsp_iocb = rspiocb; 2797 2798 ndlp = cmdiocb->ndlp; 2799 2800 ulp_status = get_job_ulpstatus(phba, rspiocb); 2801 ulp_word4 = get_job_word4(phba, rspiocb); 2802 2803 if (phba->sli_rev == LPFC_SLI_REV4) { 2804 tmo = get_wqe_tmo(cmdiocb); 2805 } else { 2806 irsp = &rspiocb->iocb; 2807 tmo = irsp->ulpTimeout; 2808 } 2809 2810 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2811 "ADISC cmpl: status:x%x/x%x did:x%x", 2812 ulp_status, ulp_word4, 2813 ndlp->nlp_DID); 2814 2815 /* Since ndlp can be freed in the disc state machine, note if this node 2816 * is being used during discovery. 2817 */ 2818 spin_lock_irq(&ndlp->lock); 2819 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2820 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2821 spin_unlock_irq(&ndlp->lock); 2822 /* ADISC completes to NPort <nlp_DID> */ 2823 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2824 "0104 ADISC completes to NPort x%x " 2825 "Data: x%x x%x x%x x%x x%x\n", 2826 ndlp->nlp_DID, ulp_status, ulp_word4, 2827 tmo, disc, vport->num_disc_nodes); 2828 /* Check to see if link went down during discovery */ 2829 if (lpfc_els_chk_latt(vport)) { 2830 spin_lock_irq(&ndlp->lock); 2831 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2832 spin_unlock_irq(&ndlp->lock); 2833 goto out; 2834 } 2835 2836 if (ulp_status) { 2837 /* Check for retry */ 2838 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2839 /* ELS command is being retried */ 2840 if (disc) { 2841 spin_lock_irq(&ndlp->lock); 2842 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2843 spin_unlock_irq(&ndlp->lock); 2844 lpfc_set_disctmo(vport); 2845 } 2846 goto out; 2847 } 2848 /* ADISC failed */ 2849 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2850 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2851 ndlp->nlp_DID, ulp_status, 2852 ulp_word4); 2853 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2854 NLP_EVT_CMPL_ADISC); 2855 2856 /* As long as this node is not registered with the SCSI or NVMe 2857 * transport, it is no longer an active node. Otherwise 2858 * devloss handles the final cleanup. 2859 */ 2860 spin_lock_irq(&ndlp->lock); 2861 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2862 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2863 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2864 release_node = true; 2865 } 2866 spin_unlock_irq(&ndlp->lock); 2867 2868 if (release_node) 2869 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2870 NLP_EVT_DEVICE_RM); 2871 } else 2872 /* Good status, call state machine */ 2873 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2874 NLP_EVT_CMPL_ADISC); 2875 2876 /* Check to see if there are more ADISCs to be sent */ 2877 if (disc && vport->num_disc_nodes) 2878 lpfc_more_adisc(vport); 2879 out: 2880 lpfc_els_free_iocb(phba, cmdiocb); 2881 lpfc_nlp_put(ndlp); 2882 return; 2883 } 2884 2885 /** 2886 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2887 * @vport: pointer to a virtual N_Port data structure. 2888 * @ndlp: pointer to a node-list data structure. 2889 * @retry: number of retries to the command IOCB. 2890 * 2891 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2892 * @vport. 
It prepares the payload of the ADISC ELS command, updates the 2893 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2894 * to issue the ADISC ELS command. 2895 * 2896 * Note that the ndlp reference count will be incremented by 1 for holding the 2897 * ndlp and the reference to ndlp will be stored into the ndlp field of 2898 * the IOCB for the completion callback function to the ADISC ELS command. 2899 * 2900 * Return code 2901 * 0 - successfully issued adisc 2902 * 1 - failed to issue adisc 2903 **/ 2904 int 2905 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2906 uint8_t retry) 2907 { 2908 int rc = 0; 2909 struct lpfc_hba *phba = vport->phba; 2910 ADISC *ap; 2911 struct lpfc_iocbq *elsiocb; 2912 uint8_t *pcmd; 2913 uint16_t cmdsize; 2914 2915 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2916 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2917 ndlp->nlp_DID, ELS_CMD_ADISC); 2918 if (!elsiocb) 2919 return 1; 2920 2921 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2922 2923 /* For ADISC request, remainder of payload is service parameters */ 2924 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2925 pcmd += sizeof(uint32_t); 2926 2927 /* Fill in ADISC payload */ 2928 ap = (ADISC *) pcmd; 2929 ap->hardAL_PA = phba->fc_pref_ALPA; 2930 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2931 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2932 ap->DID = be32_to_cpu(vport->fc_myDID); 2933 2934 phba->fc_stat.elsXmitADISC++; 2935 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; 2936 spin_lock_irq(&ndlp->lock); 2937 ndlp->nlp_flag |= NLP_ADISC_SND; 2938 spin_unlock_irq(&ndlp->lock); 2939 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2940 if (!elsiocb->ndlp) { 2941 lpfc_els_free_iocb(phba, elsiocb); 2942 goto err; 2943 } 2944 2945 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2946 "Issue ADISC: did:x%x refcnt %d", 2947 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2948 2949 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2950 if (rc == IOCB_ERROR) { 2951 lpfc_els_free_iocb(phba, elsiocb); 2952 lpfc_nlp_put(ndlp); 2953 goto err; 2954 } 2955 2956 return 0; 2957 2958 err: 2959 spin_lock_irq(&ndlp->lock); 2960 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2961 spin_unlock_irq(&ndlp->lock); 2962 return 1; 2963 } 2964 2965 /** 2966 * lpfc_cmpl_els_logo - Completion callback function for logo 2967 * @phba: pointer to lpfc hba data structure. 2968 * @cmdiocb: pointer to lpfc command iocb data structure. 2969 * @rspiocb: pointer to lpfc response iocb data structure. 2970 * 2971 * This routine is the completion function for issuing the ELS Logout (LOGO) 2972 * command. If no error status was reported from the LOGO response, the 2973 * state machine of the associated ndlp shall be invoked for transition with 2974 * respect to NLP_EVT_CMPL_LOGO event. 
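 *
 * A failed LOGO is never retried. For a target node whose recovery is not
 * being skipped, the completion instead re-marks the node for discovery
 * and restarts discovery, in essence:
 *
 *	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 *	ndlp->nlp_flag |= NLP_NPR_2B_DISC;	(set under ndlp->lock)
 *	lpfc_els_free_iocb(phba, cmdiocb);
 *	lpfc_nlp_put(ndlp);
 *	lpfc_disc_start(vport);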
2975 **/ 2976 static void 2977 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2978 struct lpfc_iocbq *rspiocb) 2979 { 2980 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2981 struct lpfc_vport *vport = ndlp->vport; 2982 IOCB_t *irsp; 2983 unsigned long flags; 2984 uint32_t skip_recovery = 0; 2985 int wake_up_waiter = 0; 2986 u32 ulp_status; 2987 u32 ulp_word4; 2988 u32 tmo; 2989 2990 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2991 cmdiocb->rsp_iocb = rspiocb; 2992 2993 ulp_status = get_job_ulpstatus(phba, rspiocb); 2994 ulp_word4 = get_job_word4(phba, rspiocb); 2995 2996 if (phba->sli_rev == LPFC_SLI_REV4) { 2997 tmo = get_wqe_tmo(cmdiocb); 2998 } else { 2999 irsp = &rspiocb->iocb; 3000 tmo = irsp->ulpTimeout; 3001 } 3002 3003 spin_lock_irq(&ndlp->lock); 3004 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3005 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 3006 wake_up_waiter = 1; 3007 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 3008 } 3009 spin_unlock_irq(&ndlp->lock); 3010 3011 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3012 "LOGO cmpl: status:x%x/x%x did:x%x", 3013 ulp_status, ulp_word4, 3014 ndlp->nlp_DID); 3015 3016 /* LOGO completes to NPort <nlp_DID> */ 3017 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3018 "0105 LOGO completes to NPort x%x " 3019 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 3020 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 3021 ulp_status, ulp_word4, 3022 tmo, vport->num_disc_nodes); 3023 3024 if (lpfc_els_chk_latt(vport)) { 3025 skip_recovery = 1; 3026 goto out; 3027 } 3028 3029 /* The LOGO will not be retried on failure. A LOGO was 3030 * issued to the remote rport and a ACC or RJT or no Answer are 3031 * all acceptable. Note the failure and move forward with 3032 * discovery. The PLOGI will retry. 3033 */ 3034 if (ulp_status) { 3035 /* LOGO failed */ 3036 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3037 "2756 LOGO failure, No Retry DID:%06X " 3038 "Status:x%x/x%x\n", 3039 ndlp->nlp_DID, ulp_status, 3040 ulp_word4); 3041 3042 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 3043 skip_recovery = 1; 3044 } 3045 3046 /* Call state machine. This will unregister the rpi if needed. */ 3047 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3048 3049 if (skip_recovery) 3050 goto out; 3051 3052 /* The driver sets this flag for an NPIV instance that doesn't want to 3053 * log into the remote port. 3054 */ 3055 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3056 spin_lock_irq(&ndlp->lock); 3057 if (phba->sli_rev == LPFC_SLI_REV4) 3058 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3059 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3060 spin_unlock_irq(&ndlp->lock); 3061 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3062 NLP_EVT_DEVICE_RM); 3063 goto out_rsrc_free; 3064 } 3065 3066 out: 3067 /* At this point, the LOGO processing is complete. NOTE: For a 3068 * pt2pt topology, we are assuming the NPortID will only change 3069 * on link up processing. For a LOGO / PLOGI initiated by the 3070 * Initiator, we are assuming the NPortID is not going to change. 3071 */ 3072 3073 if (wake_up_waiter && ndlp->logo_waitq) 3074 wake_up(ndlp->logo_waitq); 3075 /* 3076 * If the node is a target, the handling attempts to recover the port. 3077 * For any other port type, the rpi is unregistered as an implicit 3078 * LOGO. 
3079 */ 3080 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 3081 skip_recovery == 0) { 3082 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3083 spin_lock_irqsave(&ndlp->lock, flags); 3084 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 3085 spin_unlock_irqrestore(&ndlp->lock, flags); 3086 3087 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3088 "3187 LOGO completes to NPort x%x: Start " 3089 "Recovery Data: x%x x%x x%x x%x\n", 3090 ndlp->nlp_DID, ulp_status, 3091 ulp_word4, tmo, 3092 vport->num_disc_nodes); 3093 3094 lpfc_els_free_iocb(phba, cmdiocb); 3095 lpfc_nlp_put(ndlp); 3096 3097 lpfc_disc_start(vport); 3098 return; 3099 } 3100 3101 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3102 * driver sends a LOGO to the rport to clean up. For fabric and 3103 * initiator ports, clean up the node as long as the node is not 3104 * registered with the transport. 3105 */ 3106 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 3107 spin_lock_irq(&ndlp->lock); 3108 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3109 spin_unlock_irq(&ndlp->lock); 3110 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3111 NLP_EVT_DEVICE_RM); 3112 } 3113 out_rsrc_free: 3114 /* Driver is done with the I/O. */ 3115 lpfc_els_free_iocb(phba, cmdiocb); 3116 lpfc_nlp_put(ndlp); 3117 } 3118 3119 /** 3120 * lpfc_issue_els_logo - Issue a logo to a node on a vport 3121 * @vport: pointer to a virtual N_Port data structure. 3122 * @ndlp: pointer to a node-list data structure. 3123 * @retry: number of retries to the command IOCB. 3124 * 3125 * This routine constructs and issues an ELS Logout (LOGO) iocb command 3126 * to a remote node, referred to by an @ndlp on a @vport. It constructs the 3127 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 3128 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 3129 * 3130 * Note that the ndlp reference count will be incremented by 1 for holding the 3131 * ndlp and the reference to ndlp will be stored into the ndlp field of 3132 * the IOCB for the completion callback function to the LOGO ELS command.
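 *
 * A condensed, purely illustrative call site that follows the RPI rule
 * stated below would look like:
 *
 *	lpfc_unreg_rpi(vport, ndlp);
 *	if (lpfc_issue_els_logo(vport, ndlp, 0))
 *		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 *				 "xxxx LOGO not issued\n");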
3133 * 3134 * Callers of this routine are expected to unregister the RPI first 3135 * 3136 * Return code 3137 * 0 - successfully issued logo 3138 * 1 - failed to issue logo 3139 **/ 3140 int 3141 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3142 uint8_t retry) 3143 { 3144 struct lpfc_hba *phba = vport->phba; 3145 struct lpfc_iocbq *elsiocb; 3146 uint8_t *pcmd; 3147 uint16_t cmdsize; 3148 int rc; 3149 3150 spin_lock_irq(&ndlp->lock); 3151 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3152 spin_unlock_irq(&ndlp->lock); 3153 return 0; 3154 } 3155 spin_unlock_irq(&ndlp->lock); 3156 3157 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3158 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3159 ndlp->nlp_DID, ELS_CMD_LOGO); 3160 if (!elsiocb) 3161 return 1; 3162 3163 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3164 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3165 pcmd += sizeof(uint32_t); 3166 3167 /* Fill in LOGO payload */ 3168 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3169 pcmd += sizeof(uint32_t); 3170 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3171 3172 phba->fc_stat.elsXmitLOGO++; 3173 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3174 spin_lock_irq(&ndlp->lock); 3175 ndlp->nlp_flag |= NLP_LOGO_SND; 3176 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3177 spin_unlock_irq(&ndlp->lock); 3178 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3179 if (!elsiocb->ndlp) { 3180 lpfc_els_free_iocb(phba, elsiocb); 3181 goto err; 3182 } 3183 3184 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3185 "Issue LOGO: did:x%x refcnt %d", 3186 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3187 3188 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3189 if (rc == IOCB_ERROR) { 3190 lpfc_els_free_iocb(phba, elsiocb); 3191 lpfc_nlp_put(ndlp); 3192 goto err; 3193 } 3194 3195 spin_lock_irq(&ndlp->lock); 3196 ndlp->nlp_prev_state = ndlp->nlp_state; 3197 spin_unlock_irq(&ndlp->lock); 3198 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3199 return 0; 3200 3201 err: 3202 spin_lock_irq(&ndlp->lock); 3203 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3204 spin_unlock_irq(&ndlp->lock); 3205 return 1; 3206 } 3207 3208 /** 3209 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3210 * @phba: pointer to lpfc hba data structure. 3211 * @cmdiocb: pointer to lpfc command iocb data structure. 3212 * @rspiocb: pointer to lpfc response iocb data structure. 3213 * 3214 * This routine is a generic completion callback function for ELS commands. 3215 * Specifically, it is the callback function which does not need to perform 3216 * any command specific operations. It is currently used by the ELS command 3217 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3218 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3219 * Other than certain debug loggings, this callback function simply invokes the 3220 * lpfc_els_chk_latt() routine to check whether link went down during the 3221 * discovery process. 
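 *
 * Mirroring the pattern used by the ELS issue routines in this file, an
 * issuing routine installs this callback before posting the IOCB, along
 * the lines of:
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);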
3222 **/ 3223 static void 3224 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3225 struct lpfc_iocbq *rspiocb) 3226 { 3227 struct lpfc_vport *vport = cmdiocb->vport; 3228 struct lpfc_nodelist *free_ndlp; 3229 IOCB_t *irsp; 3230 u32 ulp_status, ulp_word4, tmo, did, iotag; 3231 3232 ulp_status = get_job_ulpstatus(phba, rspiocb); 3233 ulp_word4 = get_job_word4(phba, rspiocb); 3234 did = get_job_els_rsp64_did(phba, cmdiocb); 3235 3236 if (phba->sli_rev == LPFC_SLI_REV4) { 3237 tmo = get_wqe_tmo(cmdiocb); 3238 iotag = get_wqe_reqtag(cmdiocb); 3239 } else { 3240 irsp = &rspiocb->iocb; 3241 tmo = irsp->ulpTimeout; 3242 iotag = irsp->ulpIoTag; 3243 } 3244 3245 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3246 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3247 ulp_status, ulp_word4, did); 3248 3249 /* ELS cmd tag <ulpIoTag> completes */ 3250 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3251 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3252 iotag, ulp_status, ulp_word4, tmo); 3253 3254 /* Check to see if link went down during discovery */ 3255 lpfc_els_chk_latt(vport); 3256 3257 free_ndlp = cmdiocb->ndlp; 3258 3259 lpfc_els_free_iocb(phba, cmdiocb); 3260 lpfc_nlp_put(free_ndlp); 3261 } 3262 3263 /** 3264 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3265 * @vport: pointer to lpfc_vport data structure. 3266 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3267 * 3268 * This routine registers the rpi assigned to the fabric controller 3269 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3270 * state triggering a registration with the SCSI transport. 3271 * 3272 * This routine is single out because the fabric controller node 3273 * does not receive a PLOGI. This routine is consumed by the 3274 * SCR and RDF ELS commands. Callers are expected to qualify 3275 * with SLI4 first. 3276 **/ 3277 static int 3278 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3279 { 3280 int rc = 0; 3281 struct lpfc_hba *phba = vport->phba; 3282 struct lpfc_nodelist *ns_ndlp; 3283 LPFC_MBOXQ_t *mbox; 3284 3285 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3286 return rc; 3287 3288 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3289 if (!ns_ndlp) 3290 return -ENODEV; 3291 3292 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3293 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3294 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3295 ns_ndlp->nlp_state); 3296 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3297 return -ENODEV; 3298 3299 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3300 if (!mbox) { 3301 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3302 "0936 %s: no memory for reg_login " 3303 "Data: x%x x%x x%x x%x\n", __func__, 3304 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3305 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3306 return -ENOMEM; 3307 } 3308 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3309 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3310 if (rc) { 3311 rc = -EACCES; 3312 goto out; 3313 } 3314 3315 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3316 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3317 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3318 if (!mbox->ctx_ndlp) { 3319 rc = -ENOMEM; 3320 goto out; 3321 } 3322 3323 mbox->vport = vport; 3324 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3325 if (rc == MBX_NOT_FINISHED) { 3326 rc = -ENODEV; 3327 lpfc_nlp_put(fc_ndlp); 3328 goto out; 3329 } 3330 /* Success path. Exit. 
*/ 3331 lpfc_nlp_set_state(vport, fc_ndlp, 3332 NLP_STE_REG_LOGIN_ISSUE); 3333 return 0; 3334 3335 out: 3336 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3337 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3338 "0938 %s: failed to format reg_login " 3339 "Data: x%x x%x x%x x%x\n", __func__, 3340 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3341 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3342 return rc; 3343 } 3344 3345 /** 3346 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3347 * @phba: pointer to lpfc hba data structure. 3348 * @cmdiocb: pointer to lpfc command iocb data structure. 3349 * @rspiocb: pointer to lpfc response iocb data structure. 3350 * 3351 * This routine is a generic completion callback function for Discovery ELS cmd. 3352 * Currently used by the ELS command issuing routines for the ELS State Change 3353 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3354 * These commands will be retried once only for ELS timeout errors. 3355 **/ 3356 static void 3357 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3358 struct lpfc_iocbq *rspiocb) 3359 { 3360 struct lpfc_vport *vport = cmdiocb->vport; 3361 IOCB_t *irsp; 3362 struct lpfc_els_rdf_rsp *prdf; 3363 struct lpfc_dmabuf *pcmd, *prsp; 3364 u32 *pdata; 3365 u32 cmd; 3366 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3367 u32 ulp_status, ulp_word4, tmo, did, iotag; 3368 3369 ulp_status = get_job_ulpstatus(phba, rspiocb); 3370 ulp_word4 = get_job_word4(phba, rspiocb); 3371 did = get_job_els_rsp64_did(phba, cmdiocb); 3372 3373 if (phba->sli_rev == LPFC_SLI_REV4) { 3374 tmo = get_wqe_tmo(cmdiocb); 3375 iotag = get_wqe_reqtag(cmdiocb); 3376 } else { 3377 irsp = &rspiocb->iocb; 3378 tmo = irsp->ulpTimeout; 3379 iotag = irsp->ulpIoTag; 3380 } 3381 3382 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3383 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3384 ulp_status, ulp_word4, did); 3385 3386 /* ELS cmd tag <ulpIoTag> completes */ 3387 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3388 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3389 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3390 3391 pcmd = cmdiocb->cmd_dmabuf; 3392 if (!pcmd) 3393 goto out; 3394 3395 pdata = (u32 *)pcmd->virt; 3396 if (!pdata) 3397 goto out; 3398 cmd = *pdata; 3399 3400 /* Only 1 retry for ELS Timeout only */ 3401 if (ulp_status == IOSTAT_LOCAL_REJECT && 3402 ((ulp_word4 & IOERR_PARAM_MASK) == 3403 IOERR_SEQUENCE_TIMEOUT)) { 3404 cmdiocb->retry++; 3405 if (cmdiocb->retry <= 1) { 3406 switch (cmd) { 3407 case ELS_CMD_SCR: 3408 lpfc_issue_els_scr(vport, cmdiocb->retry); 3409 break; 3410 case ELS_CMD_EDC: 3411 lpfc_issue_els_edc(vport, cmdiocb->retry); 3412 break; 3413 case ELS_CMD_RDF: 3414 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3415 break; 3416 } 3417 goto out; 3418 } 3419 phba->fc_stat.elsRetryExceeded++; 3420 } 3421 if (cmd == ELS_CMD_EDC) { 3422 /* must be called before checking uplStatus and returning */ 3423 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3424 return; 3425 } 3426 if (ulp_status) { 3427 /* ELS discovery cmd completes with error */ 3428 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3429 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3430 ulp_status, ulp_word4); 3431 goto out; 3432 } 3433 3434 /* The RDF response doesn't have any impact on the running driver 3435 * but the notification descriptors are dumped here for support. 
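 * (The request side registers the link integrity, delivery, peer congestion
 * and congestion notification tags; see lpfc_issue_els_rdf() below.)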
3436 */ 3437 if (cmd == ELS_CMD_RDF) { 3438 int i; 3439 3440 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3441 if (!prsp) 3442 goto out; 3443 3444 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3445 if (!prdf) 3446 goto out; 3447 3448 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3449 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3450 lpfc_printf_vlog(vport, KERN_INFO, 3451 LOG_ELS | LOG_CGN_MGMT, 3452 "4677 Fabric RDF Notification Grant " 3453 "Data: 0x%08x Reg: %x %x\n", 3454 be32_to_cpu( 3455 prdf->reg_d1.desc_tags[i]), 3456 phba->cgn_reg_signal, 3457 phba->cgn_reg_fpin); 3458 } 3459 3460 out: 3461 /* Check to see if link went down during discovery */ 3462 lpfc_els_chk_latt(vport); 3463 lpfc_els_free_iocb(phba, cmdiocb); 3464 lpfc_nlp_put(ndlp); 3465 return; 3466 } 3467 3468 /** 3469 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3470 * @vport: pointer to a host virtual N_Port data structure. 3471 * @retry: retry counter for the command IOCB. 3472 * 3473 * This routine issues a State Change Request (SCR) to a fabric node 3474 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3475 * first search the @vport node list to find the matching ndlp. If no such 3476 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3477 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3478 * routine is invoked to send the SCR IOCB. 3479 * 3480 * Note that the ndlp reference count will be incremented by 1 for holding the 3481 * ndlp and the reference to ndlp will be stored into the ndlp field of 3482 * the IOCB for the completion callback function to the SCR ELS command. 3483 * 3484 * Return code 3485 * 0 - Successfully issued scr command 3486 * 1 - Failed to issue scr command 3487 **/ 3488 int 3489 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3490 { 3491 int rc = 0; 3492 struct lpfc_hba *phba = vport->phba; 3493 struct lpfc_iocbq *elsiocb; 3494 uint8_t *pcmd; 3495 uint16_t cmdsize; 3496 struct lpfc_nodelist *ndlp; 3497 3498 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3499 3500 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3501 if (!ndlp) { 3502 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3503 if (!ndlp) 3504 return 1; 3505 lpfc_enqueue_node(vport, ndlp); 3506 } 3507 3508 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3509 ndlp->nlp_DID, ELS_CMD_SCR); 3510 if (!elsiocb) 3511 return 1; 3512 3513 if (phba->sli_rev == LPFC_SLI_REV4) { 3514 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3515 if (rc) { 3516 lpfc_els_free_iocb(phba, elsiocb); 3517 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3518 "0937 %s: Failed to reg fc node, rc %d\n", 3519 __func__, rc); 3520 return 1; 3521 } 3522 } 3523 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3524 3525 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3526 pcmd += sizeof(uint32_t); 3527 3528 /* For SCR, remainder of payload is SCR parameter page */ 3529 memset(pcmd, 0, sizeof(SCR)); 3530 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3531 3532 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3533 "Issue SCR: did:x%x", 3534 ndlp->nlp_DID, 0, 0); 3535 3536 phba->fc_stat.elsXmitSCR++; 3537 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3538 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3539 if (!elsiocb->ndlp) { 3540 lpfc_els_free_iocb(phba, elsiocb); 3541 return 1; 3542 } 3543 3544 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3545 "Issue SCR: did:x%x refcnt %d", 3546 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3547 3548 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3549 if 
(rc == IOCB_ERROR) { 3550 lpfc_els_free_iocb(phba, elsiocb); 3551 lpfc_nlp_put(ndlp); 3552 return 1; 3553 } 3554 3555 return 0; 3556 } 3557 3558 /** 3559 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3560 * or the other nport (pt2pt). 3561 * @vport: pointer to a host virtual N_Port data structure. 3562 * @retry: number of retries to the command IOCB. 3563 * 3564 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3565 * when connected to a fabric, or to the remote port when connected 3566 * in point-to-point mode. When sent to the Fabric Controller, it will 3567 * replay the RSCN to registered recipients. 3568 * 3569 * Note that the ndlp reference count will be incremented by 1 for holding the 3570 * ndlp and the reference to ndlp will be stored into the ndlp field of 3571 * the IOCB for the completion callback function to the RSCN ELS command. 3572 * 3573 * Return code 3574 * 0 - Successfully issued RSCN command 3575 * 1 - Failed to issue RSCN command 3576 **/ 3577 int 3578 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3579 { 3580 int rc = 0; 3581 struct lpfc_hba *phba = vport->phba; 3582 struct lpfc_iocbq *elsiocb; 3583 struct lpfc_nodelist *ndlp; 3584 struct { 3585 struct fc_els_rscn rscn; 3586 struct fc_els_rscn_page portid; 3587 } *event; 3588 uint32_t nportid; 3589 uint16_t cmdsize = sizeof(*event); 3590 3591 /* Not supported for private loop */ 3592 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3593 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3594 return 1; 3595 3596 if (vport->fc_flag & FC_PT2PT) { 3597 /* find any mapped nport - that would be the other nport */ 3598 ndlp = lpfc_findnode_mapped(vport); 3599 if (!ndlp) 3600 return 1; 3601 } else { 3602 nportid = FC_FID_FCTRL; 3603 /* find the fabric controller node */ 3604 ndlp = lpfc_findnode_did(vport, nportid); 3605 if (!ndlp) { 3606 /* if one didn't exist, make one */ 3607 ndlp = lpfc_nlp_init(vport, nportid); 3608 if (!ndlp) 3609 return 1; 3610 lpfc_enqueue_node(vport, ndlp); 3611 } 3612 } 3613 3614 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3615 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3616 3617 if (!elsiocb) 3618 return 1; 3619 3620 event = elsiocb->cmd_dmabuf->virt; 3621 3622 event->rscn.rscn_cmd = ELS_RSCN; 3623 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3624 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3625 3626 nportid = vport->fc_myDID; 3627 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3628 event->portid.rscn_page_flags = 0; 3629 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3630 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3631 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3632 3633 phba->fc_stat.elsXmitRSCN++; 3634 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3635 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3636 if (!elsiocb->ndlp) { 3637 lpfc_els_free_iocb(phba, elsiocb); 3638 return 1; 3639 } 3640 3641 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3642 "Issue RSCN: did:x%x", 3643 ndlp->nlp_DID, 0, 0); 3644 3645 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3646 if (rc == IOCB_ERROR) { 3647 lpfc_els_free_iocb(phba, elsiocb); 3648 lpfc_nlp_put(ndlp); 3649 return 1; 3650 } 3651 3652 return 0; 3653 } 3654 3655 /** 3656 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3657 * @vport: pointer to a host virtual N_Port data structure. 3658 * @nportid: N_Port identifier to the remote node. 3659 * @retry: number of retries to the command IOCB. 
3660 * 3661 * This routine issues a Fibre Channel Address Resolution Response 3662 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3663 * is passed into the function. It first search the @vport node list to find 3664 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3665 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3666 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3667 * 3668 * Note that the ndlp reference count will be incremented by 1 for holding the 3669 * ndlp and the reference to ndlp will be stored into the ndlp field of 3670 * the IOCB for the completion callback function to the FARPR ELS command. 3671 * 3672 * Return code 3673 * 0 - Successfully issued farpr command 3674 * 1 - Failed to issue farpr command 3675 **/ 3676 static int 3677 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3678 { 3679 int rc = 0; 3680 struct lpfc_hba *phba = vport->phba; 3681 struct lpfc_iocbq *elsiocb; 3682 FARP *fp; 3683 uint8_t *pcmd; 3684 uint32_t *lp; 3685 uint16_t cmdsize; 3686 struct lpfc_nodelist *ondlp; 3687 struct lpfc_nodelist *ndlp; 3688 3689 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3690 3691 ndlp = lpfc_findnode_did(vport, nportid); 3692 if (!ndlp) { 3693 ndlp = lpfc_nlp_init(vport, nportid); 3694 if (!ndlp) 3695 return 1; 3696 lpfc_enqueue_node(vport, ndlp); 3697 } 3698 3699 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3700 ndlp->nlp_DID, ELS_CMD_FARPR); 3701 if (!elsiocb) 3702 return 1; 3703 3704 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3705 3706 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3707 pcmd += sizeof(uint32_t); 3708 3709 /* Fill in FARPR payload */ 3710 fp = (FARP *) (pcmd); 3711 memset(fp, 0, sizeof(FARP)); 3712 lp = (uint32_t *) pcmd; 3713 *lp++ = be32_to_cpu(nportid); 3714 *lp++ = be32_to_cpu(vport->fc_myDID); 3715 fp->Rflags = 0; 3716 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3717 3718 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3719 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3720 ondlp = lpfc_findnode_did(vport, nportid); 3721 if (ondlp) { 3722 memcpy(&fp->OportName, &ondlp->nlp_portname, 3723 sizeof(struct lpfc_name)); 3724 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3725 sizeof(struct lpfc_name)); 3726 } 3727 3728 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3729 "Issue FARPR: did:x%x", 3730 ndlp->nlp_DID, 0, 0); 3731 3732 phba->fc_stat.elsXmitFARPR++; 3733 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3734 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3735 if (!elsiocb->ndlp) { 3736 lpfc_els_free_iocb(phba, elsiocb); 3737 return 1; 3738 } 3739 3740 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3741 if (rc == IOCB_ERROR) { 3742 /* The additional lpfc_nlp_put will cause the following 3743 * lpfc_els_free_iocb routine to trigger the release of 3744 * the node. 3745 */ 3746 lpfc_els_free_iocb(phba, elsiocb); 3747 lpfc_nlp_put(ndlp); 3748 return 1; 3749 } 3750 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3751 * trigger the release of the node. 3752 */ 3753 /* Don't release reference count as RDF is likely outstanding */ 3754 return 0; 3755 } 3756 3757 /** 3758 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3759 * @vport: pointer to a host virtual N_Port data structure. 3760 * @retry: retry counter for the command IOCB. 
3761 * 3762 * This routine issues an ELS RDF to the Fabric Controller to register 3763 * for diagnostic functions. 3764 * 3765 * Note that the ndlp reference count will be incremented by 1 for holding the 3766 * ndlp and the reference to ndlp will be stored into the ndlp field of 3767 * the IOCB for the completion callback function to the RDF ELS command. 3768 * 3769 * Return code 3770 * 0 - Successfully issued rdf command 3771 * 1 - Failed to issue rdf command 3772 **/ 3773 int 3774 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3775 { 3776 struct lpfc_hba *phba = vport->phba; 3777 struct lpfc_iocbq *elsiocb; 3778 struct lpfc_els_rdf_req *prdf; 3779 struct lpfc_nodelist *ndlp; 3780 uint16_t cmdsize; 3781 int rc; 3782 3783 cmdsize = sizeof(*prdf); 3784 3785 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3786 if (!ndlp) { 3787 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3788 if (!ndlp) 3789 return -ENODEV; 3790 lpfc_enqueue_node(vport, ndlp); 3791 } 3792 3793 /* RDF ELS is not required on an NPIV VN_Port. */ 3794 if (vport->port_type == LPFC_NPIV_PORT) 3795 return -EACCES; 3796 3797 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3798 ndlp->nlp_DID, ELS_CMD_RDF); 3799 if (!elsiocb) 3800 return -ENOMEM; 3801 3802 /* Configure the payload for the supported FPIN events. */ 3803 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3804 memset(prdf, 0, cmdsize); 3805 prdf->rdf.fpin_cmd = ELS_RDF; 3806 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3807 sizeof(struct fc_els_rdf)); 3808 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3809 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3810 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3811 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3812 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3813 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3814 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3815 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3816 3817 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3818 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3819 ndlp->nlp_DID, phba->cgn_reg_signal, 3820 phba->cgn_reg_fpin); 3821 3822 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3823 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3824 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3825 if (!elsiocb->ndlp) { 3826 lpfc_els_free_iocb(phba, elsiocb); 3827 return -EIO; 3828 } 3829 3830 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3831 "Issue RDF: did:x%x refcnt %d", 3832 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3833 3834 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3835 if (rc == IOCB_ERROR) { 3836 lpfc_els_free_iocb(phba, elsiocb); 3837 lpfc_nlp_put(ndlp); 3838 return -EIO; 3839 } 3840 return 0; 3841 } 3842 3843 /** 3844 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3845 * @vport: pointer to a host virtual N_Port data structure. 3846 * @cmdiocb: pointer to lpfc command iocb data structure. 3847 * @ndlp: pointer to a node-list data structure. 3848 * 3849 * A received RDF implies a possible change to fabric supported diagnostic 3850 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3851 * RDF request to reregister for supported diagnostic functions. 
3852 * 3853 * Return code 3854 * 0 - Success 3855 * -EIO - Failed to process received RDF 3856 **/ 3857 static int 3858 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3859 struct lpfc_nodelist *ndlp) 3860 { 3861 /* Send LS_ACC */ 3862 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { 3863 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3864 "1623 Failed to RDF_ACC from x%x for x%x\n", 3865 ndlp->nlp_DID, vport->fc_myDID); 3866 return -EIO; 3867 } 3868 3869 /* Issue new RDF for reregistering */ 3870 if (lpfc_issue_els_rdf(vport, 0)) { 3871 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3872 "2623 Failed to re register RDF for x%x\n", 3873 vport->fc_myDID); 3874 return -EIO; 3875 } 3876 3877 return 0; 3878 } 3879 3880 /** 3881 * lpfc_least_capable_settings - helper function for EDC rsp processing 3882 * @phba: pointer to lpfc hba data structure. 3883 * @pcgd: pointer to congestion detection descriptor in EDC rsp. 3884 * 3885 * This helper routine determines the least capable settings for 3886 * congestion signals and signal frequency, including scale, from the 3887 * congestion detection descriptor in the EDC rsp. The routine 3888 * sets @phba values in preparation for a set_features mailbox. 3889 **/ 3890 static void 3891 lpfc_least_capable_settings(struct lpfc_hba *phba, 3892 struct fc_diag_cg_sig_desc *pcgd) 3893 { 3894 u32 rsp_sig_cap = 0, drv_sig_cap = 0; 3895 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0; 3896 3897 /* Get rsp signal and frequency capabilities. */ 3898 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability); 3899 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count); 3900 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units); 3901 3902 /* If the Fport does not support signals, set FPIN only */ 3903 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED) 3904 goto out_no_support; 3905 3906 /* Apply the xmt scale to the xmt cycle to get the correct frequency. 3907 * Adapter default is 100 milliseconds. Convert all xmt cycle values 3908 * to milliseconds. 3909 */ 3910 switch (rsp_sig_freq_scale) { 3911 case EDC_CG_SIGFREQ_SEC: 3912 rsp_sig_freq_cyc *= MSEC_PER_SEC; 3913 break; 3914 case EDC_CG_SIGFREQ_MSEC: 3915 rsp_sig_freq_cyc = 1; 3916 break; 3917 default: 3918 goto out_no_support; 3919 } 3920 3921 /* Convenient shorthand. */ 3922 drv_sig_cap = phba->cgn_reg_signal; 3923 3924 /* Choose the least capable frequency. */ 3925 if (rsp_sig_freq_cyc > phba->cgn_sig_freq) 3926 phba->cgn_sig_freq = rsp_sig_freq_cyc; 3927 3928 /* There should be some common signal support. Settle on the least 3929 * capable signal and adjust FPIN values. Initialize defaults to ease 3930 * the decision.
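 * Illustrative example: if the F_Port grants WARN_ONLY while the driver
 * registered WARN_ALARM, the result below is WARN_ONLY hardware signals
 * with only the FPIN warning bit cleared, so alarms are still delivered
 * via FPIN.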
3931 */ 3932 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3933 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3934 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3935 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3936 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3937 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3938 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3939 } 3940 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3941 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3942 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3943 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3944 } 3945 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3946 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3947 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3948 } 3949 } 3950 3951 /* We are NOT recording signal frequency in congestion info buffer */ 3952 return; 3953 3954 out_no_support: 3955 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3956 phba->cgn_sig_freq = 0; 3957 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3958 } 3959 3960 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3961 FC_LS_TLV_DTAG_INIT); 3962 3963 /** 3964 * lpfc_cmpl_els_edc - Completion callback function for EDC 3965 * @phba: pointer to lpfc hba data structure. 3966 * @cmdiocb: pointer to lpfc command iocb data structure. 3967 * @rspiocb: pointer to lpfc response iocb data structure. 3968 * 3969 * This routine is the completion callback function for issuing the Exchange 3970 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3971 * notify the FPort of its Congestion and Link Fault capabilities. This 3972 * routine parses the FPort's response and decides on the least common 3973 * values applicable to both FPort and NPort for Warnings and Alarms that 3974 * are communicated via hardware signals. 3975 **/ 3976 static void 3977 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3978 struct lpfc_iocbq *rspiocb) 3979 { 3980 IOCB_t *irsp_iocb; 3981 struct fc_els_edc_resp *edc_rsp; 3982 struct fc_tlv_desc *tlv; 3983 struct fc_diag_cg_sig_desc *pcgd; 3984 struct fc_diag_lnkflt_desc *plnkflt; 3985 struct lpfc_dmabuf *pcmd, *prsp; 3986 const char *dtag_nm; 3987 u32 *pdata, dtag; 3988 int desc_cnt = 0, bytes_remain; 3989 bool rcv_cap_desc = false; 3990 struct lpfc_nodelist *ndlp; 3991 u32 ulp_status, ulp_word4, tmo, did, iotag; 3992 3993 ndlp = cmdiocb->ndlp; 3994 3995 ulp_status = get_job_ulpstatus(phba, rspiocb); 3996 ulp_word4 = get_job_word4(phba, rspiocb); 3997 did = get_job_els_rsp64_did(phba, rspiocb); 3998 3999 if (phba->sli_rev == LPFC_SLI_REV4) { 4000 tmo = get_wqe_tmo(rspiocb); 4001 iotag = get_wqe_reqtag(rspiocb); 4002 } else { 4003 irsp_iocb = &rspiocb->iocb; 4004 tmo = irsp_iocb->ulpTimeout; 4005 iotag = irsp_iocb->ulpIoTag; 4006 } 4007 4008 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4009 "EDC cmpl: status:x%x/x%x did:x%x", 4010 ulp_status, ulp_word4, did); 4011 4012 /* ELS cmd tag <ulpIoTag> completes */ 4013 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4014 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 4015 iotag, ulp_status, ulp_word4, tmo); 4016 4017 pcmd = cmdiocb->cmd_dmabuf; 4018 if (!pcmd) 4019 goto out; 4020 4021 pdata = (u32 *)pcmd->virt; 4022 if (!pdata) 4023 goto out; 4024 4025 /* Need to clear signal values, send features MB and RDF with FPIN. 
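 * Any failure below jumps to the out: label with rcv_cap_desc still false,
 * which forces the FPIN-only fallback registration.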
*/ 4026 if (ulp_status) 4027 goto out; 4028 4029 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 4030 if (!prsp) 4031 goto out; 4032 4033 edc_rsp = prsp->virt; 4034 if (!edc_rsp) 4035 goto out; 4036 4037 /* ELS cmd tag <ulpIoTag> completes */ 4038 lpfc_printf_log(phba, KERN_INFO, 4039 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4040 "4676 Fabric EDC Rsp: " 4041 "0x%02x, 0x%08x\n", 4042 edc_rsp->acc_hdr.la_cmd, 4043 be32_to_cpu(edc_rsp->desc_list_len)); 4044 4045 /* 4046 * Payload length in bytes is the response descriptor list 4047 * length minus the 12 bytes of Link Service Request 4048 * Information descriptor in the reply. 4049 */ 4050 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4051 sizeof(struct fc_els_lsri_desc); 4052 if (bytes_remain <= 0) 4053 goto out; 4054 4055 tlv = edc_rsp->desc; 4056 4057 /* 4058 * cycle through EDC diagnostic descriptors to find the 4059 * congestion signaling capability descriptor 4060 */ 4061 while (bytes_remain) { 4062 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4063 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4064 "6461 Truncated TLV hdr on " 4065 "Diagnostic descriptor[%d]\n", 4066 desc_cnt); 4067 goto out; 4068 } 4069 4070 dtag = be32_to_cpu(tlv->desc_tag); 4071 switch (dtag) { 4072 case ELS_DTAG_LNK_FAULT_CAP: 4073 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4074 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4075 sizeof(struct fc_diag_lnkflt_desc)) { 4076 lpfc_printf_log(phba, KERN_WARNING, 4077 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4078 "6462 Truncated Link Fault Diagnostic " 4079 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4080 desc_cnt, bytes_remain, 4081 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4082 sizeof(struct fc_diag_lnkflt_desc)); 4083 goto out; 4084 } 4085 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4086 lpfc_printf_log(phba, KERN_INFO, 4087 LOG_ELS | LOG_LDS_EVENT, 4088 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4089 "0x%08x 0x%08x 0x%08x\n", 4090 be32_to_cpu(plnkflt->desc_tag), 4091 be32_to_cpu(plnkflt->desc_len), 4092 be32_to_cpu( 4093 plnkflt->degrade_activate_threshold), 4094 be32_to_cpu( 4095 plnkflt->degrade_deactivate_threshold), 4096 be32_to_cpu(plnkflt->fec_degrade_interval)); 4097 break; 4098 case ELS_DTAG_CG_SIGNAL_CAP: 4099 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4100 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4101 sizeof(struct fc_diag_cg_sig_desc)) { 4102 lpfc_printf_log( 4103 phba, KERN_WARNING, LOG_CGN_MGMT, 4104 "6463 Truncated Cgn Signal Diagnostic " 4105 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4106 desc_cnt, bytes_remain, 4107 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4108 sizeof(struct fc_diag_cg_sig_desc)); 4109 goto out; 4110 } 4111 4112 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4113 lpfc_printf_log( 4114 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4115 "4616 CGN Desc Data: 0x%08x 0x%08x " 4116 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4117 be32_to_cpu(pcgd->desc_tag), 4118 be32_to_cpu(pcgd->desc_len), 4119 be32_to_cpu(pcgd->xmt_signal_capability), 4120 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4121 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4122 be32_to_cpu(pcgd->rcv_signal_capability), 4123 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4124 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4125 4126 /* Compare driver and Fport capabilities and choose 4127 * least common. 
4128 */ 4129 lpfc_least_capable_settings(phba, pcgd); 4130 rcv_cap_desc = true; 4131 break; 4132 default: 4133 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4134 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4135 "4919 unknown Diagnostic " 4136 "Descriptor[%d]: tag x%x (%s)\n", 4137 desc_cnt, dtag, dtag_nm); 4138 } 4139 4140 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4141 tlv = fc_tlv_next_desc(tlv); 4142 desc_cnt++; 4143 } 4144 4145 out: 4146 if (!rcv_cap_desc) { 4147 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4148 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4149 phba->cgn_sig_freq = 0; 4150 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4151 "4202 EDC rsp error - sending RDF " 4152 "for FPIN only.\n"); 4153 } 4154 4155 lpfc_config_cgn_signal(phba); 4156 4157 /* Check to see if link went down during discovery */ 4158 lpfc_els_chk_latt(phba->pport); 4159 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4160 "EDC Cmpl: did:x%x refcnt %d", 4161 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4162 lpfc_els_free_iocb(phba, cmdiocb); 4163 lpfc_nlp_put(ndlp); 4164 } 4165 4166 static void 4167 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4168 { 4169 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; 4170 4171 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); 4172 lft->desc_len = cpu_to_be32( 4173 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); 4174 4175 lft->degrade_activate_threshold = 4176 cpu_to_be32(phba->degrade_activate_threshold); 4177 lft->degrade_deactivate_threshold = 4178 cpu_to_be32(phba->degrade_deactivate_threshold); 4179 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); 4180 } 4181 4182 static void 4183 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4184 { 4185 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; 4186 4187 /* We are assuming cgd was zero'ed before calling this routine */ 4188 4189 /* Configure the congestion detection capability */ 4190 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4191 4192 /* Descriptor len doesn't include the tag or len fields. */ 4193 cgd->desc_len = cpu_to_be32( 4194 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4195 4196 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4197 * xmt_signal_frequency.count already set to 0. 4198 * xmt_signal_frequency.units already set to 0. 4199 */ 4200 4201 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4202 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4203 * rcv_signal_frequency.count already set to 0. 4204 * rcv_signal_frequency.units already set to 0. 4205 */ 4206 phba->cgn_sig_freq = 0; 4207 return; 4208 } 4209 switch (phba->cgn_reg_signal) { 4210 case EDC_CG_SIG_WARN_ONLY: 4211 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4212 break; 4213 case EDC_CG_SIG_WARN_ALARM: 4214 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4215 break; 4216 default: 4217 /* rcv_signal_capability left 0 thus no support */ 4218 break; 4219 } 4220 4221 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4222 * the completion we settle on the higher frequency. 
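 * The count is expressed in milliseconds (units = EDC_CG_SIGFREQ_MSEC), so a
 * larger count from the F_Port means a less frequent signal;
 * lpfc_least_capable_settings() raises phba->cgn_sig_freq accordingly on the
 * EDC completion.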
4223 */ 4224 cgd->rcv_signal_frequency.count = 4225 cpu_to_be16(lpfc_fabric_cgn_frequency); 4226 cgd->rcv_signal_frequency.units = 4227 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4228 } 4229 4230 static bool 4231 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4232 { 4233 if (!(phba->lmt & LMT_64Gb)) 4234 return false; 4235 if (phba->sli_rev != LPFC_SLI_REV4) 4236 return false; 4237 4238 if (phba->sli4_hba.conf_trunk) { 4239 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4240 return true; 4241 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4242 return true; 4243 } 4244 return false; 4245 } 4246 4247 /** 4248 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4249 * @vport: pointer to a host virtual N_Port data structure. 4250 * @retry: retry counter for the command iocb. 4251 * 4252 * This routine issues an ELS EDC to the F-Port Controller to communicate 4253 * this N_Port's support of hardware signals in its Congestion 4254 * Capabilities Descriptor. 4255 * 4256 * Note: This routine does not check if one or more signals are 4257 * set in the cgn_reg_signal parameter. The caller makes the 4258 * decision to enforce cgn_reg_signal as nonzero or zero depending 4259 * on the conditions. During Fabric requests, the driver 4260 * requires cgn_reg_signals to be nonzero. But a dynamic request 4261 * to set the congestion mode to OFF from Monitor or Manage 4262 * would correctly issue an EDC with no signals enabled to 4263 * turn off switch functionality and then update the FW. 4264 * 4265 * Return code 4266 * 0 - Successfully issued edc command 4267 * 1 - Failed to issue edc command 4268 **/ 4269 int 4270 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4271 { 4272 struct lpfc_hba *phba = vport->phba; 4273 struct lpfc_iocbq *elsiocb; 4274 struct fc_els_edc *edc_req; 4275 struct fc_tlv_desc *tlv; 4276 u16 cmdsize; 4277 struct lpfc_nodelist *ndlp; 4278 u8 *pcmd = NULL; 4279 u32 cgn_desc_size, lft_desc_size; 4280 int rc; 4281 4282 if (vport->port_type == LPFC_NPIV_PORT) 4283 return -EACCES; 4284 4285 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4286 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4287 return -ENODEV; 4288 4289 cgn_desc_size = (phba->cgn_init_reg_signal) ? 4290 sizeof(struct fc_diag_cg_sig_desc) : 0; 4291 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 4292 sizeof(struct fc_diag_lnkflt_desc) : 0; 4293 cmdsize = cgn_desc_size + lft_desc_size; 4294 4295 /* Skip EDC if no applicable descriptors */ 4296 if (!cmdsize) 4297 goto try_rdf; 4298 4299 cmdsize += sizeof(struct fc_els_edc); 4300 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4301 ndlp->nlp_DID, ELS_CMD_EDC); 4302 if (!elsiocb) 4303 goto try_rdf; 4304 4305 /* Configure the payload for the supported Diagnostics capabilities. 
*/ 4306 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 4307 memset(pcmd, 0, cmdsize); 4308 edc_req = (struct fc_els_edc *)pcmd; 4309 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size); 4310 edc_req->edc_cmd = ELS_EDC; 4311 tlv = edc_req->desc; 4312 4313 if (cgn_desc_size) { 4314 lpfc_format_edc_cgn_desc(phba, tlv); 4315 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 4316 tlv = fc_tlv_next_desc(tlv); 4317 } 4318 4319 if (lft_desc_size) 4320 lpfc_format_edc_lft_desc(phba, tlv); 4321 4322 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4323 "4623 Xmit EDC to remote " 4324 "NPORT x%x reg_sig x%x reg_fpin:x%x\n", 4325 ndlp->nlp_DID, phba->cgn_reg_signal, 4326 phba->cgn_reg_fpin); 4327 4328 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 4329 elsiocb->ndlp = lpfc_nlp_get(ndlp); 4330 if (!elsiocb->ndlp) { 4331 lpfc_els_free_iocb(phba, elsiocb); 4332 return -EIO; 4333 } 4334 4335 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4336 "Issue EDC: did:x%x refcnt %d", 4337 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4338 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4339 if (rc == IOCB_ERROR) { 4340 /* The additional lpfc_nlp_put will cause the following 4341 * lpfc_els_free_iocb routine to trigger the release of 4342 * the node. 4343 */ 4344 lpfc_els_free_iocb(phba, elsiocb); 4345 lpfc_nlp_put(ndlp); 4346 goto try_rdf; 4347 } 4348 return 0; 4349 try_rdf: 4350 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 4351 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4352 rc = lpfc_issue_els_rdf(vport, 0); 4353 return rc; 4354 } 4355 4356 /** 4357 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 4358 * @vport: pointer to a host virtual N_Port data structure. 4359 * @nlp: pointer to a node-list data structure. 4360 * 4361 * This routine cancels the timer with a delayed IOCB-command retry for 4362 * a @vport's @nlp. It stops the timer for the delayed function retry and 4363 * removes the ELS retry event if one is present. In addition, if the 4364 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 4365 * commands are sent for the @vport's nodes that require issuing discovery 4366 * ADISC.
4367 **/ 4368 void 4369 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4370 { 4371 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4372 struct lpfc_work_evt *evtp; 4373 4374 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4375 return; 4376 spin_lock_irq(&nlp->lock); 4377 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4378 spin_unlock_irq(&nlp->lock); 4379 del_timer_sync(&nlp->nlp_delayfunc); 4380 nlp->nlp_last_elscmd = 0; 4381 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4382 list_del_init(&nlp->els_retry_evt.evt_listp); 4383 /* Decrement nlp reference count held for the delayed retry */ 4384 evtp = &nlp->els_retry_evt; 4385 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4386 } 4387 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4388 spin_lock_irq(&nlp->lock); 4389 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4390 spin_unlock_irq(&nlp->lock); 4391 if (vport->num_disc_nodes) { 4392 if (vport->port_state < LPFC_VPORT_READY) { 4393 /* Check if there are more ADISCs to be sent */ 4394 lpfc_more_adisc(vport); 4395 } else { 4396 /* Check if there are more PLOGIs to be sent */ 4397 lpfc_more_plogi(vport); 4398 if (vport->num_disc_nodes == 0) { 4399 spin_lock_irq(shost->host_lock); 4400 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4401 spin_unlock_irq(shost->host_lock); 4402 lpfc_can_disctmo(vport); 4403 lpfc_end_rscn(vport); 4404 } 4405 } 4406 } 4407 } 4408 return; 4409 } 4410 4411 /** 4412 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4413 * @t: pointer to the timer function associated data (ndlp). 4414 * 4415 * This routine is invoked by the ndlp delayed-function timer to check 4416 * whether there is any pending ELS retry event(s) with the node. If not, it 4417 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4418 * adds the delayed events to the HBA work list and invokes the 4419 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4420 * event. Note that lpfc_nlp_get() is called before posting the event to 4421 * the work list to hold reference count of ndlp so that it guarantees the 4422 * reference to ndlp will still be available when the worker thread gets 4423 * to the event associated with the ndlp. 4424 **/ 4425 void 4426 lpfc_els_retry_delay(struct timer_list *t) 4427 { 4428 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4429 struct lpfc_vport *vport = ndlp->vport; 4430 struct lpfc_hba *phba = vport->phba; 4431 unsigned long flags; 4432 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4433 4434 spin_lock_irqsave(&phba->hbalock, flags); 4435 if (!list_empty(&evtp->evt_listp)) { 4436 spin_unlock_irqrestore(&phba->hbalock, flags); 4437 return; 4438 } 4439 4440 /* We need to hold the node by incrementing the reference 4441 * count until the queued work is done 4442 */ 4443 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4444 if (evtp->evt_arg1) { 4445 evtp->evt = LPFC_EVT_ELS_RETRY; 4446 list_add_tail(&evtp->evt_listp, &phba->work_list); 4447 lpfc_worker_wake_up(phba); 4448 } 4449 spin_unlock_irqrestore(&phba->hbalock, flags); 4450 return; 4451 } 4452 4453 /** 4454 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4455 * @ndlp: pointer to a node-list data structure. 4456 * 4457 * This routine is the worker-thread handler for processing the @ndlp delayed 4458 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 4459 * the last ELS command from the associated ndlp and invokes the proper ELS 4460 * function according to the delayed ELS command to retry the command. 4461 **/ 4462 void 4463 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4464 { 4465 struct lpfc_vport *vport = ndlp->vport; 4466 uint32_t cmd, retry; 4467 4468 spin_lock_irq(&ndlp->lock); 4469 cmd = ndlp->nlp_last_elscmd; 4470 ndlp->nlp_last_elscmd = 0; 4471 4472 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4473 spin_unlock_irq(&ndlp->lock); 4474 return; 4475 } 4476 4477 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4478 spin_unlock_irq(&ndlp->lock); 4479 /* 4480 * If a discovery event readded nlp_delayfunc after timer 4481 * firing and before processing the timer, cancel the 4482 * nlp_delayfunc. 4483 */ 4484 del_timer_sync(&ndlp->nlp_delayfunc); 4485 retry = ndlp->nlp_retry; 4486 ndlp->nlp_retry = 0; 4487 4488 switch (cmd) { 4489 case ELS_CMD_FLOGI: 4490 lpfc_issue_els_flogi(vport, ndlp, retry); 4491 break; 4492 case ELS_CMD_PLOGI: 4493 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4494 ndlp->nlp_prev_state = ndlp->nlp_state; 4495 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4496 } 4497 break; 4498 case ELS_CMD_ADISC: 4499 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4500 ndlp->nlp_prev_state = ndlp->nlp_state; 4501 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4502 } 4503 break; 4504 case ELS_CMD_PRLI: 4505 case ELS_CMD_NVMEPRLI: 4506 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4507 ndlp->nlp_prev_state = ndlp->nlp_state; 4508 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4509 } 4510 break; 4511 case ELS_CMD_LOGO: 4512 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4513 ndlp->nlp_prev_state = ndlp->nlp_state; 4514 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4515 } 4516 break; 4517 case ELS_CMD_FDISC: 4518 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4519 lpfc_issue_els_fdisc(vport, ndlp, retry); 4520 break; 4521 } 4522 return; 4523 } 4524 4525 /** 4526 * lpfc_link_reset - Issue link reset 4527 * @vport: pointer to a virtual N_Port data structure. 4528 * 4529 * This routine performs link reset by sending INIT_LINK mailbox command. 4530 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4531 * INIT_LINK mailbox command. 
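 * For example (illustrative of an existing caller in this file),
 * lpfc_els_retry() requests a link reset when a PLOGI to the name server
 * hits IOERR_SEQUENCE_TIMEOUT on its final allowed retry.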
4532 * 4533 * Return code 4534 * 0 - Link reset initiated successfully 4535 * 1 - Failed to initiate link reset 4536 **/ 4537 int 4538 lpfc_link_reset(struct lpfc_vport *vport) 4539 { 4540 struct lpfc_hba *phba = vport->phba; 4541 LPFC_MBOXQ_t *mbox; 4542 uint32_t control; 4543 int rc; 4544 4545 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4546 "2851 Attempt link reset\n"); 4547 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4548 if (!mbox) { 4549 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4550 "2852 Failed to allocate mbox memory"); 4551 return 1; 4552 } 4553 4554 /* Enable Link attention interrupts */ 4555 if (phba->sli_rev <= LPFC_SLI_REV3) { 4556 spin_lock_irq(&phba->hbalock); 4557 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4558 control = readl(phba->HCregaddr); 4559 control |= HC_LAINT_ENA; 4560 writel(control, phba->HCregaddr); 4561 readl(phba->HCregaddr); /* flush */ 4562 spin_unlock_irq(&phba->hbalock); 4563 } 4564 4565 lpfc_init_link(phba, mbox, phba->cfg_topology, 4566 phba->cfg_link_speed); 4567 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4568 mbox->vport = vport; 4569 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4570 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4571 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4572 "2853 Failed to issue INIT_LINK " 4573 "mbox command, rc:x%x\n", rc); 4574 mempool_free(mbox, phba->mbox_mem_pool); 4575 return 1; 4576 } 4577 4578 return 0; 4579 } 4580 4581 /** 4582 * lpfc_els_retry - Make retry decision on an els command iocb 4583 * @phba: pointer to lpfc hba data structure. 4584 * @cmdiocb: pointer to lpfc command iocb data structure. 4585 * @rspiocb: pointer to lpfc response iocb data structure. 4586 * 4587 * This routine makes a retry decision on an ELS command IOCB that has 4588 * failed. The following ELS IOCBs use this function for retrying the command 4589 * when a previously issued command responded with an error status: FLOGI, 4590 * PLOGI, PRLI, ADISC and FDISC. Based on the ELS command type and the 4591 * returned error status, it makes the decision whether a retry shall be 4592 * issued for the command, and whether a retry shall be made immediately or 4593 * delayed. In the former case, the corresponding ELS command issuing-function 4594 * is called to retry the command. In the latter case, the ELS command shall 4595 * be posted to the ndlp delayed event and the delayed function timer set on 4596 * the ndlp for the delayed command issuing. 4597 * 4598 * Return code 4599 * 0 - No retry of els command is made 4600 * 1 - Immediate or delayed retry of els command is made 4601 **/ 4602 static int 4603 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4604 struct lpfc_iocbq *rspiocb) 4605 { 4606 struct lpfc_vport *vport = cmdiocb->vport; 4607 union lpfc_wqe128 *irsp = &rspiocb->wqe; 4608 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 4609 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 4610 uint32_t *elscmd; 4611 struct ls_rjt stat; 4612 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4613 int logerr = 0; 4614 uint32_t cmd = 0; 4615 uint32_t did; 4616 int link_reset = 0, rc; 4617 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 4618 u32 ulp_word4 = get_job_word4(phba, rspiocb); 4619 4620 4621 /* Note: cmd_dmabuf may be 0 for internal driver abort 4622 * of a delayed ELS command.
4623 */ 4624 4625 if (pcmd && pcmd->virt) { 4626 elscmd = (uint32_t *) (pcmd->virt); 4627 cmd = *elscmd++; 4628 } 4629 4630 if (ndlp) 4631 did = ndlp->nlp_DID; 4632 else { 4633 /* We should only hit this case for retrying PLOGI */ 4634 did = get_job_els_rsp64_did(phba, rspiocb); 4635 ndlp = lpfc_findnode_did(vport, did); 4636 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4637 return 0; 4638 } 4639 4640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4641 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4642 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4643 4644 switch (ulp_status) { 4645 case IOSTAT_FCP_RSP_ERROR: 4646 break; 4647 case IOSTAT_REMOTE_STOP: 4648 if (phba->sli_rev == LPFC_SLI_REV4) { 4649 /* This IO was aborted by the target, we don't 4650 * know the rxid and because we did not send the 4651 * ABTS we cannot generate and RRQ. 4652 */ 4653 lpfc_set_rrq_active(phba, ndlp, 4654 cmdiocb->sli4_lxritag, 0, 0); 4655 } 4656 break; 4657 case IOSTAT_LOCAL_REJECT: 4658 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4659 case IOERR_LOOP_OPEN_FAILURE: 4660 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4661 delay = 1000; 4662 retry = 1; 4663 break; 4664 4665 case IOERR_ILLEGAL_COMMAND: 4666 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4667 "0124 Retry illegal cmd x%x " 4668 "retry:x%x delay:x%x\n", 4669 cmd, cmdiocb->retry, delay); 4670 retry = 1; 4671 /* All command's retry policy */ 4672 maxretry = 8; 4673 if (cmdiocb->retry > 2) 4674 delay = 1000; 4675 break; 4676 4677 case IOERR_NO_RESOURCES: 4678 logerr = 1; /* HBA out of resources */ 4679 retry = 1; 4680 if (cmdiocb->retry > 100) 4681 delay = 100; 4682 maxretry = 250; 4683 break; 4684 4685 case IOERR_ILLEGAL_FRAME: 4686 delay = 100; 4687 retry = 1; 4688 break; 4689 4690 case IOERR_INVALID_RPI: 4691 if (cmd == ELS_CMD_PLOGI && 4692 did == NameServer_DID) { 4693 /* Continue forever if plogi to */ 4694 /* the nameserver fails */ 4695 maxretry = 0; 4696 delay = 100; 4697 } else if (cmd == ELS_CMD_PRLI && 4698 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 4699 /* State-command disagreement. The PRLI was 4700 * failed with an invalid rpi meaning there 4701 * some unexpected state change. Don't retry. 4702 */ 4703 maxretry = 0; 4704 retry = 0; 4705 break; 4706 } 4707 retry = 1; 4708 break; 4709 4710 case IOERR_SEQUENCE_TIMEOUT: 4711 if (cmd == ELS_CMD_PLOGI && 4712 did == NameServer_DID && 4713 (cmdiocb->retry + 1) == maxretry) { 4714 /* Reset the Link */ 4715 link_reset = 1; 4716 break; 4717 } 4718 retry = 1; 4719 delay = 100; 4720 break; 4721 case IOERR_SLI_ABORTED: 4722 /* Retry ELS PLOGI command? 4723 * Possibly the rport just wasn't ready. 
4724 */ 4725 if (cmd == ELS_CMD_PLOGI) { 4726 /* No retry if state change */ 4727 if (ndlp && 4728 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4729 goto out_retry; 4730 retry = 1; 4731 maxretry = 2; 4732 } 4733 break; 4734 } 4735 break; 4736 4737 case IOSTAT_NPORT_RJT: 4738 case IOSTAT_FABRIC_RJT: 4739 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4740 retry = 1; 4741 break; 4742 } 4743 break; 4744 4745 case IOSTAT_NPORT_BSY: 4746 case IOSTAT_FABRIC_BSY: 4747 logerr = 1; /* Fabric / Remote NPort out of resources */ 4748 retry = 1; 4749 break; 4750 4751 case IOSTAT_LS_RJT: 4752 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4753 /* Added for Vendor specifc support 4754 * Just keep retrying for these Rsn / Exp codes 4755 */ 4756 if ((vport->fc_flag & FC_PT2PT) && 4757 cmd == ELS_CMD_NVMEPRLI) { 4758 switch (stat.un.b.lsRjtRsnCode) { 4759 case LSRJT_UNABLE_TPC: 4760 case LSRJT_INVALID_CMD: 4761 case LSRJT_LOGICAL_ERR: 4762 case LSRJT_CMD_UNSUPPORTED: 4763 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4764 "0168 NVME PRLI LS_RJT " 4765 "reason %x port doesn't " 4766 "support NVME, disabling NVME\n", 4767 stat.un.b.lsRjtRsnCode); 4768 retry = 0; 4769 vport->fc_flag |= FC_PT2PT_NO_NVME; 4770 goto out_retry; 4771 } 4772 } 4773 switch (stat.un.b.lsRjtRsnCode) { 4774 case LSRJT_UNABLE_TPC: 4775 /* Special case for PRLI LS_RJTs. Recall that lpfc 4776 * uses a single routine to issue both PRLI FC4 types. 4777 * If the PRLI is rejected because that FC4 type 4778 * isn't really supported, don't retry and cause 4779 * multiple transport registrations. Otherwise, parse 4780 * the reason code/reason code explanation and take the 4781 * appropriate action. 4782 */ 4783 lpfc_printf_vlog(vport, KERN_INFO, 4784 LOG_DISCOVERY | LOG_ELS | LOG_NODE, 4785 "0153 ELS cmd x%x LS_RJT by x%x. " 4786 "RsnCode x%x RsnCodeExp x%x\n", 4787 cmd, did, stat.un.b.lsRjtRsnCode, 4788 stat.un.b.lsRjtRsnCodeExp); 4789 4790 switch (stat.un.b.lsRjtRsnCodeExp) { 4791 case LSEXP_CANT_GIVE_DATA: 4792 case LSEXP_CMD_IN_PROGRESS: 4793 if (cmd == ELS_CMD_PLOGI) { 4794 delay = 1000; 4795 maxretry = 48; 4796 } 4797 retry = 1; 4798 break; 4799 case LSEXP_REQ_UNSUPPORTED: 4800 case LSEXP_NO_RSRC_ASSIGN: 4801 /* These explanation codes get no retry. */ 4802 if (cmd == ELS_CMD_PRLI || 4803 cmd == ELS_CMD_NVMEPRLI) 4804 break; 4805 fallthrough; 4806 default: 4807 /* Limit the delay and retry action to a limited 4808 * cmd set. There are other ELS commands where 4809 * a retry is not expected. 4810 */ 4811 if (cmd == ELS_CMD_PLOGI || 4812 cmd == ELS_CMD_PRLI || 4813 cmd == ELS_CMD_NVMEPRLI) { 4814 delay = 1000; 4815 maxretry = lpfc_max_els_tries + 1; 4816 retry = 1; 4817 } 4818 break; 4819 } 4820 4821 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4822 (cmd == ELS_CMD_FDISC) && 4823 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4824 lpfc_printf_vlog(vport, KERN_ERR, 4825 LOG_TRACE_EVENT, 4826 "0125 FDISC Failed (x%x). 
" 4827 "Fabric out of resources\n", 4828 stat.un.lsRjtError); 4829 lpfc_vport_set_state(vport, 4830 FC_VPORT_NO_FABRIC_RSCS); 4831 } 4832 break; 4833 4834 case LSRJT_LOGICAL_BSY: 4835 if ((cmd == ELS_CMD_PLOGI) || 4836 (cmd == ELS_CMD_PRLI) || 4837 (cmd == ELS_CMD_NVMEPRLI)) { 4838 delay = 1000; 4839 maxretry = 48; 4840 } else if (cmd == ELS_CMD_FDISC) { 4841 /* FDISC retry policy */ 4842 maxretry = 48; 4843 if (cmdiocb->retry >= 32) 4844 delay = 1000; 4845 } 4846 retry = 1; 4847 break; 4848 4849 case LSRJT_LOGICAL_ERR: 4850 /* There are some cases where switches return this 4851 * error when they are not ready and should be returning 4852 * Logical Busy. We should delay every time. 4853 */ 4854 if (cmd == ELS_CMD_FDISC && 4855 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4856 maxretry = 3; 4857 delay = 1000; 4858 retry = 1; 4859 } else if (cmd == ELS_CMD_FLOGI && 4860 stat.un.b.lsRjtRsnCodeExp == 4861 LSEXP_NOTHING_MORE) { 4862 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4863 retry = 1; 4864 lpfc_printf_vlog(vport, KERN_ERR, 4865 LOG_TRACE_EVENT, 4866 "0820 FLOGI Failed (x%x). " 4867 "BBCredit Not Supported\n", 4868 stat.un.lsRjtError); 4869 } 4870 break; 4871 4872 case LSRJT_PROTOCOL_ERR: 4873 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4874 (cmd == ELS_CMD_FDISC) && 4875 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4876 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4877 ) { 4878 lpfc_printf_vlog(vport, KERN_ERR, 4879 LOG_TRACE_EVENT, 4880 "0122 FDISC Failed (x%x). " 4881 "Fabric Detected Bad WWN\n", 4882 stat.un.lsRjtError); 4883 lpfc_vport_set_state(vport, 4884 FC_VPORT_FABRIC_REJ_WWN); 4885 } 4886 break; 4887 case LSRJT_VENDOR_UNIQUE: 4888 if ((stat.un.b.vendorUnique == 0x45) && 4889 (cmd == ELS_CMD_FLOGI)) { 4890 goto out_retry; 4891 } 4892 break; 4893 case LSRJT_CMD_UNSUPPORTED: 4894 /* lpfc nvmet returns this type of LS_RJT when it 4895 * receives an FCP PRLI because lpfc nvmet only 4896 * support NVME. ELS request is terminated for FCP4 4897 * on this rport. 4898 */ 4899 if (stat.un.b.lsRjtRsnCodeExp == 4900 LSEXP_REQ_UNSUPPORTED) { 4901 if (cmd == ELS_CMD_PRLI) 4902 goto out_retry; 4903 } 4904 break; 4905 } 4906 break; 4907 4908 case IOSTAT_INTERMED_RSP: 4909 case IOSTAT_BA_RJT: 4910 break; 4911 4912 default: 4913 break; 4914 } 4915 4916 if (link_reset) { 4917 rc = lpfc_link_reset(vport); 4918 if (rc) { 4919 /* Do not give up. Retry PLOGI one more time and attempt 4920 * link reset if PLOGI fails again. 
4921 */ 4922 retry = 1; 4923 delay = 100; 4924 goto out_retry; 4925 } 4926 return 1; 4927 } 4928 4929 if (did == FDMI_DID) 4930 retry = 1; 4931 4932 if ((cmd == ELS_CMD_FLOGI) && 4933 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4934 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4935 /* FLOGI retry policy */ 4936 retry = 1; 4937 /* retry FLOGI forever */ 4938 if (phba->link_flag != LS_LOOPBACK_MODE) 4939 maxretry = 0; 4940 else 4941 maxretry = 2; 4942 4943 if (cmdiocb->retry >= 100) 4944 delay = 5000; 4945 else if (cmdiocb->retry >= 32) 4946 delay = 1000; 4947 } else if ((cmd == ELS_CMD_FDISC) && 4948 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4949 /* retry FDISCs every second up to devloss */ 4950 retry = 1; 4951 maxretry = vport->cfg_devloss_tmo; 4952 delay = 1000; 4953 } 4954 4955 cmdiocb->retry++; 4956 if (maxretry && (cmdiocb->retry >= maxretry)) { 4957 phba->fc_stat.elsRetryExceeded++; 4958 retry = 0; 4959 } 4960 4961 if ((vport->load_flag & FC_UNLOADING) != 0) 4962 retry = 0; 4963 4964 out_retry: 4965 if (retry) { 4966 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4967 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4968 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4969 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4970 "2849 Stop retry ELS command " 4971 "x%x to remote NPORT x%x, " 4972 "Data: x%x x%x\n", cmd, did, 4973 cmdiocb->retry, delay); 4974 return 0; 4975 } 4976 } 4977 4978 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4979 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4980 "0107 Retry ELS command x%x to remote " 4981 "NPORT x%x Data: x%x x%x\n", 4982 cmd, did, cmdiocb->retry, delay); 4983 4984 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4985 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4986 ((ulp_word4 & IOERR_PARAM_MASK) != 4987 IOERR_NO_RESOURCES))) { 4988 /* Don't reset timer for no resources */ 4989 4990 /* If discovery / RSCN timer is running, reset it */ 4991 if (timer_pending(&vport->fc_disctmo) || 4992 (vport->fc_flag & FC_RSCN_MODE)) 4993 lpfc_set_disctmo(vport); 4994 } 4995 4996 phba->fc_stat.elsXmitRetry++; 4997 if (ndlp && delay) { 4998 phba->fc_stat.elsDelayRetry++; 4999 ndlp->nlp_retry = cmdiocb->retry; 5000 5001 /* delay is specified in milliseconds */ 5002 mod_timer(&ndlp->nlp_delayfunc, 5003 jiffies + msecs_to_jiffies(delay)); 5004 spin_lock_irq(&ndlp->lock); 5005 ndlp->nlp_flag |= NLP_DELAY_TMO; 5006 spin_unlock_irq(&ndlp->lock); 5007 5008 ndlp->nlp_prev_state = ndlp->nlp_state; 5009 if ((cmd == ELS_CMD_PRLI) || 5010 (cmd == ELS_CMD_NVMEPRLI)) 5011 lpfc_nlp_set_state(vport, ndlp, 5012 NLP_STE_PRLI_ISSUE); 5013 else if (cmd != ELS_CMD_ADISC) 5014 lpfc_nlp_set_state(vport, ndlp, 5015 NLP_STE_NPR_NODE); 5016 ndlp->nlp_last_elscmd = cmd; 5017 5018 return 1; 5019 } 5020 switch (cmd) { 5021 case ELS_CMD_FLOGI: 5022 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 5023 return 1; 5024 case ELS_CMD_FDISC: 5025 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 5026 return 1; 5027 case ELS_CMD_PLOGI: 5028 if (ndlp) { 5029 ndlp->nlp_prev_state = ndlp->nlp_state; 5030 lpfc_nlp_set_state(vport, ndlp, 5031 NLP_STE_PLOGI_ISSUE); 5032 } 5033 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 5034 return 1; 5035 case ELS_CMD_ADISC: 5036 ndlp->nlp_prev_state = ndlp->nlp_state; 5037 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 5038 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 5039 return 1; 5040 case ELS_CMD_PRLI: 5041 case ELS_CMD_NVMEPRLI: 5042 ndlp->nlp_prev_state = ndlp->nlp_state; 5043 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 5044 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 5045 return 1; 5046 case ELS_CMD_LOGO: 5047 ndlp->nlp_prev_state = ndlp->nlp_state; 5048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 5049 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 5050 return 1; 5051 } 5052 } 5053 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 5054 if (logerr) { 5055 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5056 "0137 No retry ELS command x%x to remote " 5057 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 5058 cmd, did, ulp_status, 5059 ulp_word4); 5060 } 5061 else { 5062 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5063 "0108 No retry ELS command x%x to remote " 5064 "NPORT x%x Retried:%d Error:x%x/%x\n", 5065 cmd, did, cmdiocb->retry, ulp_status, 5066 ulp_word4); 5067 } 5068 return 0; 5069 } 5070 5071 /** 5072 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5073 * @phba: pointer to lpfc hba data structure. 5074 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5075 * 5076 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5077 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5078 * checks to see whether there is a lpfc DMA buffer associated with the 5079 * response of the command IOCB. If so, it will be released before releasing 5080 * the lpfc DMA buffer associated with the IOCB itself. 5081 * 5082 * Return code 5083 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5084 **/ 5085 static int 5086 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5087 { 5088 struct lpfc_dmabuf *buf_ptr; 5089 5090 /* Free the response before processing the command. */ 5091 if (!list_empty(&buf_ptr1->list)) { 5092 list_remove_head(&buf_ptr1->list, buf_ptr, 5093 struct lpfc_dmabuf, 5094 list); 5095 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5096 kfree(buf_ptr); 5097 } 5098 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5099 kfree(buf_ptr1); 5100 return 0; 5101 } 5102 5103 /** 5104 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5105 * @phba: pointer to lpfc hba data structure. 5106 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5107 * 5108 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5109 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5110 * pool. 5111 * 5112 * Return code 5113 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5114 **/ 5115 static int 5116 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5117 { 5118 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5119 kfree(buf_ptr); 5120 return 0; 5121 } 5122 5123 /** 5124 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5125 * @phba: pointer to lpfc hba data structure. 5126 * @elsiocb: pointer to lpfc els command iocb data structure. 5127 * 5128 * This routine frees a command IOCB and its associated resources. The 5129 * command IOCB data structure contains the reference to various associated 5130 * resources, these fields must be set to NULL if the associated reference 5131 * not present: 5132 * cmd_dmabuf - reference to cmd. 5133 * cmd_dmabuf->next - reference to rsp 5134 * rsp_dmabuf - unused 5135 * bpl_dmabuf - reference to bpl 5136 * 5137 * It first properly decrements the reference count held on ndlp for the 5138 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 5139 * set, it invokes the lpfc_els_free_data() routine to release the Direct 5140 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 5141 * adds the DMA buffer the @phba data structure for the delayed release. 5142 * If reference to the Buffer Pointer List (BPL) is present, the 5143 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 5144 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 5145 * invoked to release the IOCB data structure back to @phba IOCBQ list. 5146 * 5147 * Return code 5148 * 0 - Success (currently, always return 0) 5149 **/ 5150 int 5151 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 5152 { 5153 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 5154 5155 /* The I/O iocb is complete. Clear the node and first dmbuf */ 5156 elsiocb->ndlp = NULL; 5157 5158 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ 5159 if (elsiocb->cmd_dmabuf) { 5160 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { 5161 /* Firmware could still be in progress of DMAing 5162 * payload, so don't free data buffer till after 5163 * a hbeat. 5164 */ 5165 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; 5166 buf_ptr = elsiocb->cmd_dmabuf; 5167 elsiocb->cmd_dmabuf = NULL; 5168 if (buf_ptr) { 5169 buf_ptr1 = NULL; 5170 spin_lock_irq(&phba->hbalock); 5171 if (!list_empty(&buf_ptr->list)) { 5172 list_remove_head(&buf_ptr->list, 5173 buf_ptr1, struct lpfc_dmabuf, 5174 list); 5175 INIT_LIST_HEAD(&buf_ptr1->list); 5176 list_add_tail(&buf_ptr1->list, 5177 &phba->elsbuf); 5178 phba->elsbuf_cnt++; 5179 } 5180 INIT_LIST_HEAD(&buf_ptr->list); 5181 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5182 phba->elsbuf_cnt++; 5183 spin_unlock_irq(&phba->hbalock); 5184 } 5185 } else { 5186 buf_ptr1 = elsiocb->cmd_dmabuf; 5187 lpfc_els_free_data(phba, buf_ptr1); 5188 elsiocb->cmd_dmabuf = NULL; 5189 } 5190 } 5191 5192 if (elsiocb->bpl_dmabuf) { 5193 buf_ptr = elsiocb->bpl_dmabuf; 5194 lpfc_els_free_bpl(phba, buf_ptr); 5195 elsiocb->bpl_dmabuf = NULL; 5196 } 5197 lpfc_sli_release_iocbq(phba, elsiocb); 5198 return 0; 5199 } 5200 5201 /** 5202 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5203 * @phba: pointer to lpfc hba data structure. 5204 * @cmdiocb: pointer to lpfc command iocb data structure. 5205 * @rspiocb: pointer to lpfc response iocb data structure. 5206 * 5207 * This routine is the completion callback function to the Logout (LOGO) 5208 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5209 * the completion of the LOGO process. If the node has transitioned to NPR, 5210 * this routine unregisters the RPI if it is still registered. The 5211 * lpfc_els_free_iocb() is invoked to release the IOCB data structure. 
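 *
 * In outline, the completion always finishes by releasing the command
 * resources and dropping the node reference that was taken when the ACC
 * was issued; a simplified sketch of the tail of this routine:
 *
 *      lpfc_els_free_iocb(phba, cmdiocb);
 *      lpfc_nlp_put(ndlp);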
5212 **/ 5213 static void 5214 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5215 struct lpfc_iocbq *rspiocb) 5216 { 5217 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5218 struct lpfc_vport *vport = cmdiocb->vport; 5219 u32 ulp_status, ulp_word4; 5220 5221 ulp_status = get_job_ulpstatus(phba, rspiocb); 5222 ulp_word4 = get_job_word4(phba, rspiocb); 5223 5224 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5225 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5226 ulp_status, ulp_word4, ndlp->nlp_DID); 5227 /* ACC to LOGO completes to NPort <nlp_DID> */ 5228 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5229 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5230 "Data: x%x x%x x%x\n", 5231 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5232 ndlp->nlp_state, ndlp->nlp_rpi); 5233 5234 /* This clause allows the LOGO ACC to complete and free resources 5235 * for the Fabric Domain Controller. It does deliberately skip 5236 * the unreg_rpi and release rpi because some fabrics send RDP 5237 * requests after logging out from the initiator. 5238 */ 5239 if (ndlp->nlp_type & NLP_FABRIC && 5240 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5241 goto out; 5242 5243 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5244 /* If PLOGI is being retried, PLOGI completion will cleanup the 5245 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5246 * progress on nodes discovered from last RSCN. 5247 */ 5248 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5249 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5250 goto out; 5251 5252 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) 5253 lpfc_unreg_rpi(vport, ndlp); 5254 5255 } 5256 out: 5257 /* 5258 * The driver received a LOGO from the rport and has ACK'd it. 5259 * At this point, the driver is done so release the IOCB 5260 */ 5261 lpfc_els_free_iocb(phba, cmdiocb); 5262 lpfc_nlp_put(ndlp); 5263 } 5264 5265 /** 5266 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5267 * @phba: pointer to lpfc hba data structure. 5268 * @pmb: pointer to the driver internal queue element for mailbox command. 5269 * 5270 * This routine is the completion callback function for unregister default 5271 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5272 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5273 * decrements the ndlp reference count held for this completion callback 5274 * function. After that, it invokes the lpfc_drop_node to check 5275 * whether it is appropriate to release the node. 5276 **/ 5277 void 5278 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5279 { 5280 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 5281 u32 mbx_flag = pmb->mbox_flag; 5282 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5283 5284 if (ndlp) { 5285 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5286 "0006 rpi x%x DID:%x flg:%x %d x%px " 5287 "mbx_cmd x%x mbx_flag x%x x%px\n", 5288 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5289 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5290 mbx_flag, pmb); 5291 5292 /* This ends the default/temporary RPI cleanup logic for this 5293 * ndlp and the node and rpi needs to be released. Free the rpi 5294 * first on an UNREG_LOGIN and then release the final 5295 * references. 
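		 * In outline, matching the statements below: the login
		 * bookkeeping flags are cleared under the node lock,
		 * lpfc_nlp_put() drops the reference held for this
		 * completion, and lpfc_drop_node() then decides whether
		 * the node itself can be released.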
5296 */ 5297 spin_lock_irq(&ndlp->lock); 5298 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5299 if (mbx_cmd == MBX_UNREG_LOGIN) 5300 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5301 spin_unlock_irq(&ndlp->lock); 5302 lpfc_nlp_put(ndlp); 5303 lpfc_drop_node(ndlp->vport, ndlp); 5304 } 5305 5306 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 5307 } 5308 5309 /** 5310 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5311 * @phba: pointer to lpfc hba data structure. 5312 * @cmdiocb: pointer to lpfc command iocb data structure. 5313 * @rspiocb: pointer to lpfc response iocb data structure. 5314 * 5315 * This routine is the completion callback function for ELS Response IOCB 5316 * command. In normal case, this callback function just properly sets the 5317 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 5318 * field in the command IOCB is not NULL, the referred mailbox command will 5319 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 5320 * the IOCB. 5321 **/ 5322 static void 5323 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5324 struct lpfc_iocbq *rspiocb) 5325 { 5326 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5327 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5328 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 5329 IOCB_t *irsp; 5330 LPFC_MBOXQ_t *mbox = NULL; 5331 u32 ulp_status, ulp_word4, tmo, did, iotag; 5332 5333 if (!vport) { 5334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5335 "3177 ELS response failed\n"); 5336 goto out; 5337 } 5338 if (cmdiocb->context_un.mbox) 5339 mbox = cmdiocb->context_un.mbox; 5340 5341 ulp_status = get_job_ulpstatus(phba, rspiocb); 5342 ulp_word4 = get_job_word4(phba, rspiocb); 5343 did = get_job_els_rsp64_did(phba, cmdiocb); 5344 5345 if (phba->sli_rev == LPFC_SLI_REV4) { 5346 tmo = get_wqe_tmo(cmdiocb); 5347 iotag = get_wqe_reqtag(cmdiocb); 5348 } else { 5349 irsp = &rspiocb->iocb; 5350 tmo = irsp->ulpTimeout; 5351 iotag = irsp->ulpIoTag; 5352 } 5353 5354 /* Check to see if link went down during discovery */ 5355 if (!ndlp || lpfc_els_chk_latt(vport)) { 5356 if (mbox) 5357 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5358 goto out; 5359 } 5360 5361 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5362 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5363 ulp_status, ulp_word4, did); 5364 /* ELS response tag <ulpIoTag> completes */ 5365 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5366 "0110 ELS response tag x%x completes " 5367 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5368 iotag, ulp_status, ulp_word4, tmo, 5369 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5370 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5371 if (mbox) { 5372 if (ulp_status == 0 5373 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5374 if (!lpfc_unreg_rpi(vport, ndlp) && 5375 (!(vport->fc_flag & FC_PT2PT))) { 5376 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5377 ndlp->nlp_state == 5378 NLP_STE_REG_LOGIN_ISSUE) { 5379 lpfc_printf_vlog(vport, KERN_INFO, 5380 LOG_DISCOVERY, 5381 "0314 PLOGI recov " 5382 "DID x%x " 5383 "Data: x%x x%x x%x\n", 5384 ndlp->nlp_DID, 5385 ndlp->nlp_state, 5386 ndlp->nlp_rpi, 5387 ndlp->nlp_flag); 5388 goto out_free_mbox; 5389 } 5390 } 5391 5392 /* Increment reference count to ndlp to hold the 5393 * reference to ndlp for the callback function. 
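			 * Should lpfc_nlp_get() fail and return NULL here,
			 * the mailbox is released through the out_free_mbox
			 * path below instead of being issued.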
5394 */ 5395 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5396 if (!mbox->ctx_ndlp) 5397 goto out_free_mbox; 5398 5399 mbox->vport = vport; 5400 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5401 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5402 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5403 } 5404 else { 5405 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5406 ndlp->nlp_prev_state = ndlp->nlp_state; 5407 lpfc_nlp_set_state(vport, ndlp, 5408 NLP_STE_REG_LOGIN_ISSUE); 5409 } 5410 5411 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5412 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5413 != MBX_NOT_FINISHED) 5414 goto out; 5415 5416 /* Decrement the ndlp reference count we 5417 * set for this failed mailbox command. 5418 */ 5419 lpfc_nlp_put(ndlp); 5420 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5421 5422 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5423 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5424 "0138 ELS rsp: Cannot issue reg_login for x%x " 5425 "Data: x%x x%x x%x\n", 5426 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5427 ndlp->nlp_rpi); 5428 } 5429 out_free_mbox: 5430 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5431 } 5432 out: 5433 if (ndlp && shost) { 5434 spin_lock_irq(&ndlp->lock); 5435 if (mbox) 5436 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5437 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5438 spin_unlock_irq(&ndlp->lock); 5439 } 5440 5441 /* An SLI4 NPIV instance wants to drop the node at this point under 5442 * these conditions and release the RPI. 5443 */ 5444 if (phba->sli_rev == LPFC_SLI_REV4 && 5445 vport && vport->port_type == LPFC_NPIV_PORT && 5446 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5447 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 5448 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5449 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5450 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5451 spin_lock_irq(&ndlp->lock); 5452 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5453 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5454 spin_unlock_irq(&ndlp->lock); 5455 } 5456 lpfc_drop_node(vport, ndlp); 5457 } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5458 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && 5459 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 5460 /* Drop ndlp if there is no planned or outstanding 5461 * issued PRLI. 5462 * 5463 * In cases when the ndlp is acting as both an initiator 5464 * and target function, let our issued PRLI determine 5465 * the final ndlp kref drop. 5466 */ 5467 lpfc_drop_node(vport, ndlp); 5468 } 5469 } 5470 5471 /* Release the originating I/O reference. */ 5472 lpfc_els_free_iocb(phba, cmdiocb); 5473 lpfc_nlp_put(ndlp); 5474 return; 5475 } 5476 5477 /** 5478 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5479 * @vport: pointer to a host virtual N_Port data structure. 5480 * @flag: the els command code to be accepted. 5481 * @oldiocb: pointer to the original lpfc command iocb data structure. 5482 * @ndlp: pointer to a node-list data structure. 5483 * @mbox: pointer to the driver internal queue element for mailbox command. 5484 * 5485 * This routine prepares and issues an Accept (ACC) response IOCB 5486 * command. It uses the @flag to properly set up the IOCB field for the 5487 * specific ACC response command to be issued and invokes the 5488 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5489 * @mbox pointer is passed in, it will be put into the context_un.mbox 5490 * field of the IOCB for the completion callback function to issue the 5491 * mailbox command to the HBA later when callback is invoked. 
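 *
 * A minimal caller sketch, for illustration only (real callers in the
 * discovery state machine supply their own @oldiocb, @ndlp and optional
 * @mbox):
 *
 *      rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, oldiocb, ndlp, mbox);
 *      if (rc)
 *              handle the failure; the ACC was not sent and no
 *              completion callback will run
 *
 * The @flag values handled below are ELS_CMD_ACC (bare ACC), ELS_CMD_FLOGI
 * and ELS_CMD_PLOGI (ACC carrying service parameters), ELS_CMD_PRLO and
 * ELS_CMD_RDF; any other value is rejected with a return code of 1.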
5492 * 5493 * Note that the ndlp reference count will be incremented by 1 for holding the 5494 * ndlp and the reference to ndlp will be stored into the ndlp field of 5495 * the IOCB for the completion callback function to the corresponding 5496 * response ELS IOCB command. 5497 * 5498 * Return code 5499 * 0 - Successfully issued acc response 5500 * 1 - Failed to issue acc response 5501 **/ 5502 int 5503 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5504 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5505 LPFC_MBOXQ_t *mbox) 5506 { 5507 struct lpfc_hba *phba = vport->phba; 5508 IOCB_t *icmd; 5509 IOCB_t *oldcmd; 5510 union lpfc_wqe128 *wqe; 5511 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5512 struct lpfc_iocbq *elsiocb; 5513 uint8_t *pcmd; 5514 struct serv_parm *sp; 5515 uint16_t cmdsize; 5516 int rc; 5517 ELS_PKT *els_pkt_ptr; 5518 struct fc_els_rdf_resp *rdf_resp; 5519 5520 switch (flag) { 5521 case ELS_CMD_ACC: 5522 cmdsize = sizeof(uint32_t); 5523 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5524 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5525 if (!elsiocb) { 5526 spin_lock_irq(&ndlp->lock); 5527 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5528 spin_unlock_irq(&ndlp->lock); 5529 return 1; 5530 } 5531 5532 if (phba->sli_rev == LPFC_SLI_REV4) { 5533 wqe = &elsiocb->wqe; 5534 /* XRI / rx_id */ 5535 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5536 bf_get(wqe_ctxt_tag, 5537 &oldwqe->xmit_els_rsp.wqe_com)); 5538 5539 /* oxid */ 5540 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5541 bf_get(wqe_rcvoxid, 5542 &oldwqe->xmit_els_rsp.wqe_com)); 5543 } else { 5544 icmd = &elsiocb->iocb; 5545 oldcmd = &oldiocb->iocb; 5546 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5547 icmd->unsli3.rcvsli3.ox_id = 5548 oldcmd->unsli3.rcvsli3.ox_id; 5549 } 5550 5551 pcmd = elsiocb->cmd_dmabuf->virt; 5552 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5553 pcmd += sizeof(uint32_t); 5554 5555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5556 "Issue ACC: did:x%x flg:x%x", 5557 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5558 break; 5559 case ELS_CMD_FLOGI: 5560 case ELS_CMD_PLOGI: 5561 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5562 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5563 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5564 if (!elsiocb) 5565 return 1; 5566 5567 if (phba->sli_rev == LPFC_SLI_REV4) { 5568 wqe = &elsiocb->wqe; 5569 /* XRI / rx_id */ 5570 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5571 bf_get(wqe_ctxt_tag, 5572 &oldwqe->xmit_els_rsp.wqe_com)); 5573 5574 /* oxid */ 5575 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5576 bf_get(wqe_rcvoxid, 5577 &oldwqe->xmit_els_rsp.wqe_com)); 5578 } else { 5579 icmd = &elsiocb->iocb; 5580 oldcmd = &oldiocb->iocb; 5581 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5582 icmd->unsli3.rcvsli3.ox_id = 5583 oldcmd->unsli3.rcvsli3.ox_id; 5584 } 5585 5586 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5587 5588 if (mbox) 5589 elsiocb->context_un.mbox = mbox; 5590 5591 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5592 pcmd += sizeof(uint32_t); 5593 sp = (struct serv_parm *)pcmd; 5594 5595 if (flag == ELS_CMD_FLOGI) { 5596 /* Copy the received service parameters back */ 5597 memcpy(sp, &phba->fc_fabparam, 5598 sizeof(struct serv_parm)); 5599 5600 /* Clear the F_Port bit */ 5601 sp->cmn.fPort = 0; 5602 5603 /* Mark all class service parameters as invalid */ 5604 sp->cls1.classValid = 0; 5605 sp->cls2.classValid = 0; 5606 sp->cls3.classValid = 0; 5607 sp->cls4.classValid = 0; 5608 5609 /* Copy our worldwide names */ 
5610 memcpy(&sp->portName, &vport->fc_sparam.portName, 5611 sizeof(struct lpfc_name)); 5612 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5613 sizeof(struct lpfc_name)); 5614 } else { 5615 memcpy(pcmd, &vport->fc_sparam, 5616 sizeof(struct serv_parm)); 5617 5618 sp->cmn.valid_vendor_ver_level = 0; 5619 memset(sp->un.vendorVersion, 0, 5620 sizeof(sp->un.vendorVersion)); 5621 sp->cmn.bbRcvSizeMsb &= 0xF; 5622 5623 /* If our firmware supports this feature, convey that 5624 * info to the target using the vendor specific field. 5625 */ 5626 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5627 sp->cmn.valid_vendor_ver_level = 1; 5628 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5629 sp->un.vv.flags = 5630 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5631 } 5632 } 5633 5634 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5635 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5636 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5637 break; 5638 case ELS_CMD_PRLO: 5639 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5640 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5641 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5642 if (!elsiocb) 5643 return 1; 5644 5645 if (phba->sli_rev == LPFC_SLI_REV4) { 5646 wqe = &elsiocb->wqe; 5647 /* XRI / rx_id */ 5648 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5649 bf_get(wqe_ctxt_tag, 5650 &oldwqe->xmit_els_rsp.wqe_com)); 5651 5652 /* oxid */ 5653 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5654 bf_get(wqe_rcvoxid, 5655 &oldwqe->xmit_els_rsp.wqe_com)); 5656 } else { 5657 icmd = &elsiocb->iocb; 5658 oldcmd = &oldiocb->iocb; 5659 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5660 icmd->unsli3.rcvsli3.ox_id = 5661 oldcmd->unsli3.rcvsli3.ox_id; 5662 } 5663 5664 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5665 5666 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5667 sizeof(uint32_t) + sizeof(PRLO)); 5668 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5669 els_pkt_ptr = (ELS_PKT *) pcmd; 5670 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5671 5672 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5673 "Issue ACC PRLO: did:x%x flg:x%x", 5674 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5675 break; 5676 case ELS_CMD_RDF: 5677 cmdsize = sizeof(*rdf_resp); 5678 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5679 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5680 if (!elsiocb) 5681 return 1; 5682 5683 if (phba->sli_rev == LPFC_SLI_REV4) { 5684 wqe = &elsiocb->wqe; 5685 /* XRI / rx_id */ 5686 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5687 bf_get(wqe_ctxt_tag, 5688 &oldwqe->xmit_els_rsp.wqe_com)); 5689 5690 /* oxid */ 5691 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5692 bf_get(wqe_rcvoxid, 5693 &oldwqe->xmit_els_rsp.wqe_com)); 5694 } else { 5695 icmd = &elsiocb->iocb; 5696 oldcmd = &oldiocb->iocb; 5697 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5698 icmd->unsli3.rcvsli3.ox_id = 5699 oldcmd->unsli3.rcvsli3.ox_id; 5700 } 5701 5702 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5703 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5704 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5705 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5706 5707 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5708 rdf_resp->desc_list_len = cpu_to_be32(12); 5709 5710 /* FC-LS-5 specifies LS REQ Information descriptor */ 5711 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5712 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5713 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5714 break; 5715 default: 5716 return 1; 5717 } 5718 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5719 spin_lock_irq(&ndlp->lock); 5720 if 
(!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5721 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5722 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5723 spin_unlock_irq(&ndlp->lock); 5724 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; 5725 } else { 5726 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5727 } 5728 5729 phba->fc_stat.elsXmitACC++; 5730 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5731 if (!elsiocb->ndlp) { 5732 lpfc_els_free_iocb(phba, elsiocb); 5733 return 1; 5734 } 5735 5736 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5737 if (rc == IOCB_ERROR) { 5738 lpfc_els_free_iocb(phba, elsiocb); 5739 lpfc_nlp_put(ndlp); 5740 return 1; 5741 } 5742 5743 /* Xmit ELS ACC response tag <ulpIoTag> */ 5744 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5745 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5746 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5747 "RPI: x%x, fc_flag x%x refcnt %d\n", 5748 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5749 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5750 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5751 return 0; 5752 } 5753 5754 /** 5755 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5756 * @vport: pointer to a virtual N_Port data structure. 5757 * @rejectError: reject response to issue 5758 * @oldiocb: pointer to the original lpfc command iocb data structure. 5759 * @ndlp: pointer to a node-list data structure. 5760 * @mbox: pointer to the driver internal queue element for mailbox command. 5761 * 5762 * This routine prepares and issues a Reject (RJT) response IOCB 5763 * command. If a @mbox pointer is passed in, it will be put into the 5764 * context_un.mbox field of the IOCB for the completion callback function 5765 * to issue to the HBA later. 5766 * 5767 * Note that the ndlp reference count will be incremented by 1 for holding the 5768 * ndlp and the reference to ndlp will be stored into the ndlp field of 5769 * the IOCB for the completion callback function to the reject response 5770 * ELS IOCB command.
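 *
 * The LS_RJT payload built here is two 32-bit words, shown only as a
 * reading aid for the code below:
 *
 *      word 0 = ELS_CMD_LS_RJT
 *      word 1 = @rejectError (reason code and explanation as passed in)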
5771 * 5772 * Return code 5773 * 0 - Successfully issued reject response 5774 * 1 - Failed to issue reject response 5775 **/ 5776 int 5777 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5778 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5779 LPFC_MBOXQ_t *mbox) 5780 { 5781 int rc; 5782 struct lpfc_hba *phba = vport->phba; 5783 IOCB_t *icmd; 5784 IOCB_t *oldcmd; 5785 union lpfc_wqe128 *wqe; 5786 struct lpfc_iocbq *elsiocb; 5787 uint8_t *pcmd; 5788 uint16_t cmdsize; 5789 5790 cmdsize = 2 * sizeof(uint32_t); 5791 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5792 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5793 if (!elsiocb) 5794 return 1; 5795 5796 if (phba->sli_rev == LPFC_SLI_REV4) { 5797 wqe = &elsiocb->wqe; 5798 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5799 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5800 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5801 get_job_rcvoxid(phba, oldiocb)); 5802 } else { 5803 icmd = &elsiocb->iocb; 5804 oldcmd = &oldiocb->iocb; 5805 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5806 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5807 } 5808 5809 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5810 5811 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5812 pcmd += sizeof(uint32_t); 5813 *((uint32_t *) (pcmd)) = rejectError; 5814 5815 if (mbox) 5816 elsiocb->context_un.mbox = mbox; 5817 5818 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5819 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5820 "0129 Xmit ELS RJT x%x response tag x%x " 5821 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5822 "rpi x%x\n", 5823 rejectError, elsiocb->iotag, 5824 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5825 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5826 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5827 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5828 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5829 5830 phba->fc_stat.elsXmitLSRJT++; 5831 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5832 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5833 if (!elsiocb->ndlp) { 5834 lpfc_els_free_iocb(phba, elsiocb); 5835 return 1; 5836 } 5837 5838 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5839 * node's assigned RPI gets released provided this node is not already 5840 * registered with the transport. 5841 */ 5842 if (phba->sli_rev == LPFC_SLI_REV4 && 5843 vport->port_type == LPFC_NPIV_PORT && 5844 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5845 spin_lock_irq(&ndlp->lock); 5846 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5847 spin_unlock_irq(&ndlp->lock); 5848 } 5849 5850 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5851 if (rc == IOCB_ERROR) { 5852 lpfc_els_free_iocb(phba, elsiocb); 5853 lpfc_nlp_put(ndlp); 5854 return 1; 5855 } 5856 5857 return 0; 5858 } 5859 5860 /** 5861 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5862 * @vport: pointer to a host virtual N_Port data structure. 5863 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5864 * @ndlp: NPort to where rsp is directed 5865 * 5866 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5867 * this N_Port's support of hardware signals in its Congestion 5868 * Capabilities Descriptor. 
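 *
 * The accept size is derived as in the code below (reading aid only):
 *
 *      cmdsize = sizeof(struct fc_els_edc_resp) +
 *                sizeof(struct fc_diag_cg_sig_desc) +
 *                (lpfc_link_is_lds_capable(phba) ?
 *                 sizeof(struct fc_diag_lnkflt_desc) : 0);
 *
 * that is, a Congestion Signaling descriptor is always returned and a Link
 * Fault descriptor is added only when the link is LDS capable.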
5869 * 5870 * Return code 5871 * 0 - Successfully issued edc rsp command 5872 * 1 - Failed to issue edc rsp command 5873 **/ 5874 static int 5875 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5876 struct lpfc_nodelist *ndlp) 5877 { 5878 struct lpfc_hba *phba = vport->phba; 5879 struct fc_els_edc_resp *edc_rsp; 5880 struct fc_tlv_desc *tlv; 5881 struct lpfc_iocbq *elsiocb; 5882 IOCB_t *icmd, *cmd; 5883 union lpfc_wqe128 *wqe; 5884 u32 cgn_desc_size, lft_desc_size; 5885 u16 cmdsize; 5886 uint8_t *pcmd; 5887 int rc; 5888 5889 cmdsize = sizeof(struct fc_els_edc_resp); 5890 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 5891 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 5892 sizeof(struct fc_diag_lnkflt_desc) : 0; 5893 cmdsize += cgn_desc_size + lft_desc_size; 5894 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5895 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5896 if (!elsiocb) 5897 return 1; 5898 5899 if (phba->sli_rev == LPFC_SLI_REV4) { 5900 wqe = &elsiocb->wqe; 5901 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5902 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5903 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5904 get_job_rcvoxid(phba, cmdiocb)); 5905 } else { 5906 icmd = &elsiocb->iocb; 5907 cmd = &cmdiocb->iocb; 5908 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5909 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5910 } 5911 5912 pcmd = elsiocb->cmd_dmabuf->virt; 5913 memset(pcmd, 0, cmdsize); 5914 5915 edc_rsp = (struct fc_els_edc_resp *)pcmd; 5916 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; 5917 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + 5918 cgn_desc_size + lft_desc_size); 5919 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5920 edc_rsp->lsri.desc_len = cpu_to_be32( 5921 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5922 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; 5923 tlv = edc_rsp->desc; 5924 lpfc_format_edc_cgn_desc(phba, tlv); 5925 tlv = fc_tlv_next_desc(tlv); 5926 if (lft_desc_size) 5927 lpfc_format_edc_lft_desc(phba, tlv); 5928 5929 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5930 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5931 ndlp->nlp_DID, ndlp->nlp_flag, 5932 kref_read(&ndlp->kref)); 5933 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5934 5935 phba->fc_stat.elsXmitACC++; 5936 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5937 if (!elsiocb->ndlp) { 5938 lpfc_els_free_iocb(phba, elsiocb); 5939 return 1; 5940 } 5941 5942 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5943 if (rc == IOCB_ERROR) { 5944 lpfc_els_free_iocb(phba, elsiocb); 5945 lpfc_nlp_put(ndlp); 5946 return 1; 5947 } 5948 5949 /* Xmit ELS ACC response tag <ulpIoTag> */ 5950 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5951 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5952 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5953 "RPI: x%x, fc_flag x%x\n", 5954 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5955 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5956 ndlp->nlp_rpi, vport->fc_flag); 5957 5958 return 0; 5959 } 5960 5961 /** 5962 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5963 * @vport: pointer to a virtual N_Port data structure. 5964 * @oldiocb: pointer to the original lpfc command iocb data structure. 5965 * @ndlp: pointer to a node-list data structure. 5966 * 5967 * This routine prepares and issues an Accept (ACC) response to Address 5968 * Discover (ADISC) ELS command. 
It simply prepares the payload of the IOCB 5969 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5970 * 5971 * Note that the ndlp reference count will be incremented by 1 for holding the 5972 * ndlp and the reference to ndlp will be stored into the ndlp field of 5973 * the IOCB for the completion callback function to the ADISC Accept response 5974 * ELS IOCB command. 5975 * 5976 * Return code 5977 * 0 - Successfully issued acc adisc response 5978 * 1 - Failed to issue adisc acc response 5979 **/ 5980 int 5981 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5982 struct lpfc_nodelist *ndlp) 5983 { 5984 struct lpfc_hba *phba = vport->phba; 5985 ADISC *ap; 5986 IOCB_t *icmd, *oldcmd; 5987 union lpfc_wqe128 *wqe; 5988 struct lpfc_iocbq *elsiocb; 5989 uint8_t *pcmd; 5990 uint16_t cmdsize; 5991 int rc; 5992 u32 ulp_context; 5993 5994 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5995 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5996 ndlp->nlp_DID, ELS_CMD_ACC); 5997 if (!elsiocb) 5998 return 1; 5999 6000 if (phba->sli_rev == LPFC_SLI_REV4) { 6001 wqe = &elsiocb->wqe; 6002 /* XRI / rx_id */ 6003 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6004 get_job_ulpcontext(phba, oldiocb)); 6005 ulp_context = get_job_ulpcontext(phba, elsiocb); 6006 /* oxid */ 6007 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6008 get_job_rcvoxid(phba, oldiocb)); 6009 } else { 6010 icmd = &elsiocb->iocb; 6011 oldcmd = &oldiocb->iocb; 6012 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6013 ulp_context = elsiocb->iocb.ulpContext; 6014 icmd->unsli3.rcvsli3.ox_id = 6015 oldcmd->unsli3.rcvsli3.ox_id; 6016 } 6017 6018 /* Xmit ADISC ACC response tag <ulpIoTag> */ 6019 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6020 "0130 Xmit ADISC ACC response iotag x%x xri: " 6021 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 6022 elsiocb->iotag, ulp_context, 6023 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6024 ndlp->nlp_rpi); 6025 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6026 6027 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6028 pcmd += sizeof(uint32_t); 6029 6030 ap = (ADISC *) (pcmd); 6031 ap->hardAL_PA = phba->fc_pref_ALPA; 6032 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6033 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6034 ap->DID = be32_to_cpu(vport->fc_myDID); 6035 6036 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6037 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 6038 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6039 6040 phba->fc_stat.elsXmitACC++; 6041 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6042 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6043 if (!elsiocb->ndlp) { 6044 lpfc_els_free_iocb(phba, elsiocb); 6045 return 1; 6046 } 6047 6048 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6049 if (rc == IOCB_ERROR) { 6050 lpfc_els_free_iocb(phba, elsiocb); 6051 lpfc_nlp_put(ndlp); 6052 return 1; 6053 } 6054 6055 return 0; 6056 } 6057 6058 /** 6059 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6060 * @vport: pointer to a virtual N_Port data structure. 6061 * @oldiocb: pointer to the original lpfc command iocb data structure. 6062 * @ndlp: pointer to a node-list data structure. 6063 * 6064 * This routine prepares and issues an Accept (ACC) response to Process 6065 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6066 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 
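 *
 * The FC4 type of the incoming PRLI decides whether an FCP or an NVME
 * accept is built. A simplified sketch of how the type is taken from
 * word 1 of the request payload, mirroring the code below:
 *
 *      prli_fc4_req = be32_to_cpu(((uint32_t *)oldiocb->cmd_dmabuf->virt)[1]);
 *      prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
 *      if (prli_fc4_req == PRLI_FCP_TYPE)
 *              build an FCP PRLI accept
 *      else if (prli_fc4_req == PRLI_NVME_TYPE)
 *              build an NVME PRLI accept
 *      else
 *              return 1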
6067 * 6068 * Note that the ndlp reference count will be incremented by 1 for holding the 6069 * ndlp and the reference to ndlp will be stored into the ndlp field of 6070 * the IOCB for the completion callback function to the PRLI Accept response 6071 * ELS IOCB command. 6072 * 6073 * Return code 6074 * 0 - Successfully issued acc prli response 6075 * 1 - Failed to issue acc prli response 6076 **/ 6077 int 6078 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6079 struct lpfc_nodelist *ndlp) 6080 { 6081 struct lpfc_hba *phba = vport->phba; 6082 PRLI *npr; 6083 struct lpfc_nvme_prli *npr_nvme; 6084 lpfc_vpd_t *vpd; 6085 IOCB_t *icmd; 6086 IOCB_t *oldcmd; 6087 union lpfc_wqe128 *wqe; 6088 struct lpfc_iocbq *elsiocb; 6089 uint8_t *pcmd; 6090 uint16_t cmdsize; 6091 uint32_t prli_fc4_req, *req_payload; 6092 struct lpfc_dmabuf *req_buf; 6093 int rc; 6094 u32 elsrspcmd, ulp_context; 6095 6096 /* Need the incoming PRLI payload to determine if the ACC is for an 6097 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6098 */ 6099 req_buf = oldiocb->cmd_dmabuf; 6100 req_payload = (((uint32_t *)req_buf->virt) + 1); 6101 6102 /* PRLI type payload is at byte 3 for FCP or NVME. */ 6103 prli_fc4_req = be32_to_cpu(*req_payload); 6104 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6105 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6106 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6107 prli_fc4_req, *((uint32_t *)req_payload)); 6108 6109 if (prli_fc4_req == PRLI_FCP_TYPE) { 6110 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6111 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6112 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6113 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6114 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6115 } else { 6116 return 1; 6117 } 6118 6119 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6120 ndlp->nlp_DID, elsrspcmd); 6121 if (!elsiocb) 6122 return 1; 6123 6124 if (phba->sli_rev == LPFC_SLI_REV4) { 6125 wqe = &elsiocb->wqe; 6126 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6127 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6128 ulp_context = get_job_ulpcontext(phba, elsiocb); 6129 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6130 get_job_rcvoxid(phba, oldiocb)); 6131 } else { 6132 icmd = &elsiocb->iocb; 6133 oldcmd = &oldiocb->iocb; 6134 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6135 ulp_context = elsiocb->iocb.ulpContext; 6136 icmd->unsli3.rcvsli3.ox_id = 6137 oldcmd->unsli3.rcvsli3.ox_id; 6138 } 6139 6140 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6141 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6142 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6143 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6144 elsiocb->iotag, ulp_context, 6145 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6146 ndlp->nlp_rpi); 6147 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6148 memset(pcmd, 0, cmdsize); 6149 6150 *((uint32_t *)(pcmd)) = elsrspcmd; 6151 pcmd += sizeof(uint32_t); 6152 6153 /* For PRLI, remainder of payload is PRLI parameter page */ 6154 vpd = &phba->vpd; 6155 6156 if (prli_fc4_req == PRLI_FCP_TYPE) { 6157 /* 6158 * If the remote port is a target and our firmware version 6159 * is 3.20 or later, set the following bits for FC-TAPE 6160 * support. 
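		 * The bits referred to are ConfmComplAllowed, Retry and
		 * TaskRetryIdReq, set on the FCP PRLI parameter page just
		 * below.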
6161 */ 6162 npr = (PRLI *) pcmd; 6163 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6164 (vpd->rev.feaLevelHigh >= 0x02)) { 6165 npr->ConfmComplAllowed = 1; 6166 npr->Retry = 1; 6167 npr->TaskRetryIdReq = 1; 6168 } 6169 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6170 6171 /* Set image pair for complementary pairs only. */ 6172 if (ndlp->nlp_type & NLP_FCP_TARGET) 6173 npr->estabImagePair = 1; 6174 else 6175 npr->estabImagePair = 0; 6176 npr->readXferRdyDis = 1; 6177 npr->ConfmComplAllowed = 1; 6178 npr->prliType = PRLI_FCP_TYPE; 6179 npr->initiatorFunc = 1; 6180 6181 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6182 lpfc_printf_vlog(vport, KERN_INFO, 6183 LOG_ELS | LOG_NODE | LOG_DISCOVERY, 6184 "6014 FCP issue PRLI ACC imgpair %d " 6185 "retry %d task %d\n", 6186 npr->estabImagePair, 6187 npr->Retry, npr->TaskRetryIdReq); 6188 6189 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6190 /* Respond with an NVME PRLI Type */ 6191 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6192 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6193 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6194 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6195 if (phba->nvmet_support) { 6196 bf_set(prli_tgt, npr_nvme, 1); 6197 bf_set(prli_disc, npr_nvme, 1); 6198 if (phba->cfg_nvme_enable_fb) { 6199 bf_set(prli_fba, npr_nvme, 1); 6200 6201 /* TBD. Target mode needs to post buffers 6202 * that support the configured first burst 6203 * byte size. 6204 */ 6205 bf_set(prli_fb_sz, npr_nvme, 6206 phba->cfg_nvmet_fb_size); 6207 } 6208 } else { 6209 bf_set(prli_init, npr_nvme, 1); 6210 } 6211 6212 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6213 "6015 NVME issue PRLI ACC word1 x%08x " 6214 "word4 x%08x word5 x%08x flag x%x, " 6215 "fcp_info x%x nlp_type x%x\n", 6216 npr_nvme->word1, npr_nvme->word4, 6217 npr_nvme->word5, ndlp->nlp_flag, 6218 ndlp->nlp_fcp_info, ndlp->nlp_type); 6219 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6220 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6221 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6222 } else 6223 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6224 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6225 prli_fc4_req, ndlp->nlp_fc4_type, 6226 ndlp->nlp_DID); 6227 6228 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6229 "Issue ACC PRLI: did:x%x flg:x%x", 6230 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6231 6232 phba->fc_stat.elsXmitACC++; 6233 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6234 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6235 if (!elsiocb->ndlp) { 6236 lpfc_els_free_iocb(phba, elsiocb); 6237 return 1; 6238 } 6239 6240 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6241 if (rc == IOCB_ERROR) { 6242 lpfc_els_free_iocb(phba, elsiocb); 6243 lpfc_nlp_put(ndlp); 6244 return 1; 6245 } 6246 6247 return 0; 6248 } 6249 6250 /** 6251 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6252 * @vport: pointer to a virtual N_Port data structure. 6253 * @format: rnid command format. 6254 * @oldiocb: pointer to the original lpfc command iocb data structure. 6255 * @ndlp: pointer to a node-list data structure. 6256 * 6257 * This routine issues a Request Node Identification Data (RNID) Accept 6258 * (ACC) response. It constructs the RNID ACC response command according to 6259 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6260 * issue the response. 
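 *
 * The accept length depends on @format, mirroring the sizing done below:
 *
 *      cmdsize = sizeof(uint32_t) + sizeof(uint32_t) +
 *                (2 * sizeof(struct lpfc_name));
 *      if (format)
 *              cmdsize += sizeof(RNID_TOP_DISC);
 *
 * Only RNID_TOPOLOGY_DISC carries topology specific data in the accept;
 * any other non-zero @format is answered with zero common and specific
 * lengths.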
6261 * 6262 * Note that the ndlp reference count will be incremented by 1 for holding the 6263 * ndlp and the reference to ndlp will be stored into the ndlp field of 6264 * the IOCB for the completion callback function. 6265 * 6266 * Return code 6267 * 0 - Successfully issued acc rnid response 6268 * 1 - Failed to issue acc rnid response 6269 **/ 6270 static int 6271 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6272 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6273 { 6274 struct lpfc_hba *phba = vport->phba; 6275 RNID *rn; 6276 IOCB_t *icmd, *oldcmd; 6277 union lpfc_wqe128 *wqe; 6278 struct lpfc_iocbq *elsiocb; 6279 uint8_t *pcmd; 6280 uint16_t cmdsize; 6281 int rc; 6282 u32 ulp_context; 6283 6284 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6285 + (2 * sizeof(struct lpfc_name)); 6286 if (format) 6287 cmdsize += sizeof(RNID_TOP_DISC); 6288 6289 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6290 ndlp->nlp_DID, ELS_CMD_ACC); 6291 if (!elsiocb) 6292 return 1; 6293 6294 if (phba->sli_rev == LPFC_SLI_REV4) { 6295 wqe = &elsiocb->wqe; 6296 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6297 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6298 ulp_context = get_job_ulpcontext(phba, elsiocb); 6299 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6300 get_job_rcvoxid(phba, oldiocb)); 6301 } else { 6302 icmd = &elsiocb->iocb; 6303 oldcmd = &oldiocb->iocb; 6304 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6305 ulp_context = elsiocb->iocb.ulpContext; 6306 icmd->unsli3.rcvsli3.ox_id = 6307 oldcmd->unsli3.rcvsli3.ox_id; 6308 } 6309 6310 /* Xmit RNID ACC response tag <ulpIoTag> */ 6311 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6312 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6313 elsiocb->iotag, ulp_context); 6314 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6315 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6316 pcmd += sizeof(uint32_t); 6317 6318 memset(pcmd, 0, sizeof(RNID)); 6319 rn = (RNID *) (pcmd); 6320 rn->Format = format; 6321 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6322 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6323 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6324 switch (format) { 6325 case 0: 6326 rn->SpecificLen = 0; 6327 break; 6328 case RNID_TOPOLOGY_DISC: 6329 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6330 memcpy(&rn->un.topologyDisc.portName, 6331 &vport->fc_portname, sizeof(struct lpfc_name)); 6332 rn->un.topologyDisc.unitType = RNID_HBA; 6333 rn->un.topologyDisc.physPort = 0; 6334 rn->un.topologyDisc.attachedNodes = 0; 6335 break; 6336 default: 6337 rn->CommonLen = 0; 6338 rn->SpecificLen = 0; 6339 break; 6340 } 6341 6342 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6343 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6344 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6345 6346 phba->fc_stat.elsXmitACC++; 6347 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6348 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6349 if (!elsiocb->ndlp) { 6350 lpfc_els_free_iocb(phba, elsiocb); 6351 return 1; 6352 } 6353 6354 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6355 if (rc == IOCB_ERROR) { 6356 lpfc_els_free_iocb(phba, elsiocb); 6357 lpfc_nlp_put(ndlp); 6358 return 1; 6359 } 6360 6361 return 0; 6362 } 6363 6364 /** 6365 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6366 * @vport: pointer to a virtual N_Port data structure. 6367 * @iocb: pointer to the lpfc command iocb data structure. 6368 * @ndlp: pointer to a node-list data structure. 
6369 * 6370 * Return 6371 **/ 6372 static void 6373 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6374 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6375 { 6376 struct lpfc_hba *phba = vport->phba; 6377 uint8_t *pcmd; 6378 struct RRQ *rrq; 6379 uint16_t rxid; 6380 uint16_t xri; 6381 struct lpfc_node_rrq *prrq; 6382 6383 6384 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6385 pcmd += sizeof(uint32_t); 6386 rrq = (struct RRQ *)pcmd; 6387 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6388 rxid = bf_get(rrq_rxid, rrq); 6389 6390 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6391 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6392 " x%x x%x\n", 6393 be32_to_cpu(bf_get(rrq_did, rrq)), 6394 bf_get(rrq_oxid, rrq), 6395 rxid, 6396 get_wqe_reqtag(iocb), 6397 get_job_ulpcontext(phba, iocb)); 6398 6399 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6400 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6401 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6402 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6403 xri = bf_get(rrq_oxid, rrq); 6404 else 6405 xri = rxid; 6406 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6407 if (prrq) 6408 lpfc_clr_rrq_active(phba, xri, prrq); 6409 return; 6410 } 6411 6412 /** 6413 * lpfc_els_rsp_echo_acc - Issue echo acc response 6414 * @vport: pointer to a virtual N_Port data structure. 6415 * @data: pointer to echo data to return in the accept. 6416 * @oldiocb: pointer to the original lpfc command iocb data structure. 6417 * @ndlp: pointer to a node-list data structure. 6418 * 6419 * Return code 6420 * 0 - Successfully issued acc echo response 6421 * 1 - Failed to issue acc echo response 6422 **/ 6423 static int 6424 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6425 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6426 { 6427 struct lpfc_hba *phba = vport->phba; 6428 IOCB_t *icmd, *oldcmd; 6429 union lpfc_wqe128 *wqe; 6430 struct lpfc_iocbq *elsiocb; 6431 uint8_t *pcmd; 6432 uint16_t cmdsize; 6433 int rc; 6434 u32 ulp_context; 6435 6436 if (phba->sli_rev == LPFC_SLI_REV4) 6437 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6438 else 6439 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6440 6441 /* The accumulated length can exceed the BPL_SIZE. 
For 6442 * now, use this as the limit 6443 */ 6444 if (cmdsize > LPFC_BPL_SIZE) 6445 cmdsize = LPFC_BPL_SIZE; 6446 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6447 ndlp->nlp_DID, ELS_CMD_ACC); 6448 if (!elsiocb) 6449 return 1; 6450 6451 if (phba->sli_rev == LPFC_SLI_REV4) { 6452 wqe = &elsiocb->wqe; 6453 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6454 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6455 ulp_context = get_job_ulpcontext(phba, elsiocb); 6456 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6457 get_job_rcvoxid(phba, oldiocb)); 6458 } else { 6459 icmd = &elsiocb->iocb; 6460 oldcmd = &oldiocb->iocb; 6461 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6462 ulp_context = elsiocb->iocb.ulpContext; 6463 icmd->unsli3.rcvsli3.ox_id = 6464 oldcmd->unsli3.rcvsli3.ox_id; 6465 } 6466 6467 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6468 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6469 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6470 elsiocb->iotag, ulp_context); 6471 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6472 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6473 pcmd += sizeof(uint32_t); 6474 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6475 6476 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6477 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6478 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6479 6480 phba->fc_stat.elsXmitACC++; 6481 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6482 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6483 if (!elsiocb->ndlp) { 6484 lpfc_els_free_iocb(phba, elsiocb); 6485 return 1; 6486 } 6487 6488 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6489 if (rc == IOCB_ERROR) { 6490 lpfc_els_free_iocb(phba, elsiocb); 6491 lpfc_nlp_put(ndlp); 6492 return 1; 6493 } 6494 6495 return 0; 6496 } 6497 6498 /** 6499 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6500 * @vport: pointer to a host virtual N_Port data structure. 6501 * 6502 * This routine issues Address Discover (ADISC) ELS commands to those 6503 * N_Ports which are in node port recovery state and ADISC has not been issued 6504 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6505 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6506 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6507 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6508 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6509 * IOCBs quit for later pick up. On the other hand, after walking through 6510 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6511 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6512 * no more ADISC need to be sent. 6513 * 6514 * Return code 6515 * The number of N_Ports with adisc issued. 
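 *
 * The throttle applied while walking the node list mirrors the code below
 * and is shown here only as a reading aid:
 *
 *      vport->num_disc_nodes++;
 *      if (vport->num_disc_nodes >= vport->cfg_discovery_threads) {
 *              set FC_NLP_MORE and stop the walk; the remaining NPR
 *              nodes are picked up on a later pass
 *      }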
6516 **/ 6517 int 6518 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6519 { 6520 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6521 struct lpfc_nodelist *ndlp, *next_ndlp; 6522 int sentadisc = 0; 6523 6524 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6525 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6526 6527 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6528 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6529 continue; 6530 6531 spin_lock_irq(&ndlp->lock); 6532 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6533 spin_unlock_irq(&ndlp->lock); 6534 6535 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6536 /* This node was marked for ADISC but was not picked 6537 * for discovery. This is possible if the node was 6538 * missing in gidft response. 6539 * 6540 * At time of marking node for ADISC, we skipped unreg 6541 * from backend 6542 */ 6543 lpfc_nlp_unreg_node(vport, ndlp); 6544 lpfc_unreg_rpi(vport, ndlp); 6545 continue; 6546 } 6547 6548 ndlp->nlp_prev_state = ndlp->nlp_state; 6549 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6550 lpfc_issue_els_adisc(vport, ndlp, 0); 6551 sentadisc++; 6552 vport->num_disc_nodes++; 6553 if (vport->num_disc_nodes >= 6554 vport->cfg_discovery_threads) { 6555 spin_lock_irq(shost->host_lock); 6556 vport->fc_flag |= FC_NLP_MORE; 6557 spin_unlock_irq(shost->host_lock); 6558 break; 6559 } 6560 6561 } 6562 if (sentadisc == 0) { 6563 spin_lock_irq(shost->host_lock); 6564 vport->fc_flag &= ~FC_NLP_MORE; 6565 spin_unlock_irq(shost->host_lock); 6566 } 6567 return sentadisc; 6568 } 6569 6570 /** 6571 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6572 * @vport: pointer to a host virtual N_Port data structure. 6573 * 6574 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6575 * which are in node port recovery state, with a @vport. Each time an ELS 6576 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6577 * the per @vport number of discover count (num_disc_nodes) shall be 6578 * incremented. If the num_disc_nodes reaches a pre-configured threshold 6579 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE 6580 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for 6581 * later pick up. On the other hand, after walking through all the ndlps with 6582 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag 6583 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC 6584 * PLOGI need to be sent. 6585 * 6586 * Return code 6587 * The number of N_Ports with plogi issued. 
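 *
 * A node is considered a PLOGI candidate here only when all of the
 * following hold, restating the test in the loop below:
 *
 *      ndlp->nlp_state == NLP_STE_NPR_NODE      node port recovery state
 *      ndlp->nlp_flag & NLP_NPR_2B_DISC         marked to be discovered
 *      !(ndlp->nlp_flag & NLP_DELAY_TMO)        no delayed ELS retry armed
 *      !(ndlp->nlp_flag & NLP_NPR_ADISC)        node is not an ADISC candidate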
6588 **/ 6589 int 6590 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6591 { 6592 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6593 struct lpfc_nodelist *ndlp, *next_ndlp; 6594 int sentplogi = 0; 6595 6596 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6597 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6598 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6599 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6600 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6601 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6602 ndlp->nlp_prev_state = ndlp->nlp_state; 6603 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6604 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6605 sentplogi++; 6606 vport->num_disc_nodes++; 6607 if (vport->num_disc_nodes >= 6608 vport->cfg_discovery_threads) { 6609 spin_lock_irq(shost->host_lock); 6610 vport->fc_flag |= FC_NLP_MORE; 6611 spin_unlock_irq(shost->host_lock); 6612 break; 6613 } 6614 } 6615 } 6616 6617 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6618 "6452 Discover PLOGI %d flag x%x\n", 6619 sentplogi, vport->fc_flag); 6620 6621 if (sentplogi) { 6622 lpfc_set_disctmo(vport); 6623 } 6624 else { 6625 spin_lock_irq(shost->host_lock); 6626 vport->fc_flag &= ~FC_NLP_MORE; 6627 spin_unlock_irq(shost->host_lock); 6628 } 6629 return sentplogi; 6630 } 6631 6632 static uint32_t 6633 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6634 uint32_t word0) 6635 { 6636 6637 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6638 desc->payload.els_req = word0; 6639 desc->length = cpu_to_be32(sizeof(desc->payload)); 6640 6641 return sizeof(struct fc_rdp_link_service_desc); 6642 } 6643 6644 static uint32_t 6645 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6646 uint8_t *page_a0, uint8_t *page_a2) 6647 { 6648 uint16_t wavelength; 6649 uint16_t temperature; 6650 uint16_t rx_power; 6651 uint16_t tx_bias; 6652 uint16_t tx_power; 6653 uint16_t vcc; 6654 uint16_t flag = 0; 6655 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6656 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6657 6658 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6659 6660 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6661 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6662 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6663 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6664 6665 if ((trasn_code_byte4->fc_sw_laser) || 6666 (trasn_code_byte5->fc_sw_laser_sl) || 6667 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6668 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6669 } else if (trasn_code_byte4->fc_lw_laser) { 6670 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6671 page_a0[SSF_WAVELENGTH_B0]; 6672 if (wavelength == SFP_WAVELENGTH_LC1310) 6673 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6674 if (wavelength == SFP_WAVELENGTH_LL1550) 6675 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6676 } 6677 /* check if its SFP+ */ 6678 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6679 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6680 << SFP_FLAG_CT_SHIFT; 6681 6682 /* check if its OPTICAL */ 6683 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6684 SFP_FLAG_IS_OPTICAL_PORT : 0) 6685 << SFP_FLAG_IS_OPTICAL_SHIFT; 6686 6687 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6688 page_a2[SFF_TEMPERATURE_B0]); 6689 vcc = (page_a2[SFF_VCC_B1] << 8 | 6690 page_a2[SFF_VCC_B0]); 6691 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6692 page_a2[SFF_TXPOWER_B0]); 6693 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6694 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6695 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6696 page_a2[SFF_RXPOWER_B0]); 6697 desc->sfp_info.temperature = cpu_to_be16(temperature); 6698 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6699 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6700 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6701 desc->sfp_info.vcc = cpu_to_be16(vcc); 6702 6703 desc->sfp_info.flags = cpu_to_be16(flag); 6704 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6705 6706 return sizeof(struct fc_rdp_sfp_desc); 6707 } 6708 6709 static uint32_t 6710 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6711 READ_LNK_VAR *stat) 6712 { 6713 uint32_t type; 6714 6715 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6716 6717 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6718 6719 desc->info.port_type = cpu_to_be32(type); 6720 6721 desc->info.link_status.link_failure_cnt = 6722 cpu_to_be32(stat->linkFailureCnt); 6723 desc->info.link_status.loss_of_synch_cnt = 6724 cpu_to_be32(stat->lossSyncCnt); 6725 desc->info.link_status.loss_of_signal_cnt = 6726 cpu_to_be32(stat->lossSignalCnt); 6727 desc->info.link_status.primitive_seq_proto_err = 6728 cpu_to_be32(stat->primSeqErrCnt); 6729 desc->info.link_status.invalid_trans_word = 6730 cpu_to_be32(stat->invalidXmitWord); 6731 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6732 6733 desc->length = cpu_to_be32(sizeof(desc->info)); 6734 6735 return sizeof(struct fc_rdp_link_error_status_desc); 6736 } 6737 6738 static uint32_t 6739 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6740 struct lpfc_vport *vport) 6741 { 6742 uint32_t bbCredit; 6743 6744 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6745 6746 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6747 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6748 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6749 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6750 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6751 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6752 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6753 } else { 6754 desc->bbc_info.attached_port_bbc = 0; 6755 } 6756 6757 desc->bbc_info.rtt = 0; 6758 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6759 6760 return sizeof(struct fc_rdp_bbc_desc); 6761 } 6762 6763 static uint32_t 6764 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6765 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6766 { 6767 uint32_t flags = 0; 6768 6769 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6770 6771 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6772 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6773 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6774 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6775 6776 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6777 flags |= RDP_OET_HIGH_ALARM; 6778 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6779 flags |= RDP_OET_LOW_ALARM; 6780 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6781 flags |= RDP_OET_HIGH_WARNING; 6782 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6783 
flags |= RDP_OET_LOW_WARNING; 6784 6785 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6786 desc->oed_info.function_flags = cpu_to_be32(flags); 6787 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6788 return sizeof(struct fc_rdp_oed_sfp_desc); 6789 } 6790 6791 static uint32_t 6792 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6793 struct fc_rdp_oed_sfp_desc *desc, 6794 uint8_t *page_a2) 6795 { 6796 uint32_t flags = 0; 6797 6798 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6799 6800 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6801 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6802 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6803 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6804 6805 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6806 flags |= RDP_OET_HIGH_ALARM; 6807 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6808 flags |= RDP_OET_LOW_ALARM; 6809 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6810 flags |= RDP_OET_HIGH_WARNING; 6811 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6812 flags |= RDP_OET_LOW_WARNING; 6813 6814 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6815 desc->oed_info.function_flags = cpu_to_be32(flags); 6816 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6817 return sizeof(struct fc_rdp_oed_sfp_desc); 6818 } 6819 6820 static uint32_t 6821 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6822 struct fc_rdp_oed_sfp_desc *desc, 6823 uint8_t *page_a2) 6824 { 6825 uint32_t flags = 0; 6826 6827 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6828 6829 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6830 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6831 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6832 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6833 6834 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6835 flags |= RDP_OET_HIGH_ALARM; 6836 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6837 flags |= RDP_OET_LOW_ALARM; 6838 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6839 flags |= RDP_OET_HIGH_WARNING; 6840 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6841 flags |= RDP_OET_LOW_WARNING; 6842 6843 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6844 desc->oed_info.function_flags = cpu_to_be32(flags); 6845 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6846 return sizeof(struct fc_rdp_oed_sfp_desc); 6847 } 6848 6849 static uint32_t 6850 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6851 struct fc_rdp_oed_sfp_desc *desc, 6852 uint8_t *page_a2) 6853 { 6854 uint32_t flags = 0; 6855 6856 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6857 6858 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6859 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6860 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6861 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6862 6863 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6864 flags |= RDP_OET_HIGH_ALARM; 6865 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6866 flags |= RDP_OET_LOW_ALARM; 6867 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6868 flags |= RDP_OET_HIGH_WARNING; 6869 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6870 flags |= RDP_OET_LOW_WARNING; 6871 6872 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6873 desc->oed_info.function_flags = cpu_to_be32(flags); 6874 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6875 return sizeof(struct fc_rdp_oed_sfp_desc); 6876 } 6877 6878 6879 static uint32_t 6880 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6881 struct fc_rdp_oed_sfp_desc *desc, 6882 uint8_t *page_a2) 6883 { 6884 uint32_t flags = 0; 6885 6886 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6887 6888 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6889 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6890 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6891 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6892 6893 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6894 flags |= RDP_OET_HIGH_ALARM; 6895 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6896 flags |= RDP_OET_LOW_ALARM; 6897 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6898 flags |= RDP_OET_HIGH_WARNING; 6899 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6900 flags |= RDP_OET_LOW_WARNING; 6901 6902 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6903 desc->oed_info.function_flags = cpu_to_be32(flags); 6904 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6905 return sizeof(struct fc_rdp_oed_sfp_desc); 6906 } 6907 6908 static uint32_t 6909 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6910 uint8_t *page_a0, struct lpfc_vport *vport) 6911 { 6912 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6913 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6914 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6915 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6916 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6917 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6918 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6919 return sizeof(struct fc_rdp_opd_sfp_desc); 6920 } 6921 6922 static uint32_t 6923 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6924 { 6925 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6926 return 0; 6927 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6928 6929 desc->info.CorrectedBlocks = 6930 cpu_to_be32(stat->fecCorrBlkCount); 6931 desc->info.UncorrectableBlocks = 6932 cpu_to_be32(stat->fecUncorrBlkCount); 6933 6934 desc->length = cpu_to_be32(sizeof(desc->info)); 6935 6936 return sizeof(struct fc_fec_rdp_desc); 6937 } 6938 6939 static uint32_t 6940 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6941 { 6942 uint16_t rdp_cap = 0; 6943 uint16_t rdp_speed; 6944 6945 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6946 6947 switch (phba->fc_linkspeed) { 6948 case LPFC_LINK_SPEED_1GHZ: 6949 rdp_speed = RDP_PS_1GB; 6950 break; 6951 case LPFC_LINK_SPEED_2GHZ: 6952 rdp_speed = RDP_PS_2GB; 6953 break; 6954 case LPFC_LINK_SPEED_4GHZ: 6955 rdp_speed = RDP_PS_4GB; 6956 break; 6957 case LPFC_LINK_SPEED_8GHZ: 6958 rdp_speed = RDP_PS_8GB; 6959 break; 6960 case LPFC_LINK_SPEED_10GHZ: 6961 rdp_speed = RDP_PS_10GB; 6962 break; 6963 case LPFC_LINK_SPEED_16GHZ: 6964 rdp_speed = RDP_PS_16GB; 6965 break; 6966 case LPFC_LINK_SPEED_32GHZ: 6967 rdp_speed = RDP_PS_32GB; 6968 break; 6969 case LPFC_LINK_SPEED_64GHZ: 6970 rdp_speed = RDP_PS_64GB; 6971 break; 6972 case LPFC_LINK_SPEED_128GHZ: 6973 rdp_speed = RDP_PS_128GB; 6974 break; 6975 case LPFC_LINK_SPEED_256GHZ: 6976 rdp_speed = RDP_PS_256GB; 6977 break; 6978 default: 6979 rdp_speed = RDP_PS_UNKNOWN; 6980 break; 6981 } 6982 6983 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6984 6985 if (phba->lmt & LMT_256Gb) 6986 
rdp_cap |= RDP_PS_256GB; 6987 if (phba->lmt & LMT_128Gb) 6988 rdp_cap |= RDP_PS_128GB; 6989 if (phba->lmt & LMT_64Gb) 6990 rdp_cap |= RDP_PS_64GB; 6991 if (phba->lmt & LMT_32Gb) 6992 rdp_cap |= RDP_PS_32GB; 6993 if (phba->lmt & LMT_16Gb) 6994 rdp_cap |= RDP_PS_16GB; 6995 if (phba->lmt & LMT_10Gb) 6996 rdp_cap |= RDP_PS_10GB; 6997 if (phba->lmt & LMT_8Gb) 6998 rdp_cap |= RDP_PS_8GB; 6999 if (phba->lmt & LMT_4Gb) 7000 rdp_cap |= RDP_PS_4GB; 7001 if (phba->lmt & LMT_2Gb) 7002 rdp_cap |= RDP_PS_2GB; 7003 if (phba->lmt & LMT_1Gb) 7004 rdp_cap |= RDP_PS_1GB; 7005 7006 if (rdp_cap == 0) 7007 rdp_cap = RDP_CAP_UNKNOWN; 7008 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 7009 rdp_cap |= RDP_CAP_USER_CONFIGURED; 7010 7011 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 7012 desc->length = cpu_to_be32(sizeof(desc->info)); 7013 return sizeof(struct fc_rdp_port_speed_desc); 7014 } 7015 7016 static uint32_t 7017 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 7018 struct lpfc_vport *vport) 7019 { 7020 7021 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7022 7023 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 7024 sizeof(desc->port_names.wwnn)); 7025 7026 memcpy(desc->port_names.wwpn, &vport->fc_portname, 7027 sizeof(desc->port_names.wwpn)); 7028 7029 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7030 return sizeof(struct fc_rdp_port_name_desc); 7031 } 7032 7033 static uint32_t 7034 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 7035 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7036 { 7037 7038 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7039 if (vport->fc_flag & FC_FABRIC) { 7040 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 7041 sizeof(desc->port_names.wwnn)); 7042 7043 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 7044 sizeof(desc->port_names.wwpn)); 7045 } else { /* Point to Point */ 7046 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 7047 sizeof(desc->port_names.wwnn)); 7048 7049 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 7050 sizeof(desc->port_names.wwpn)); 7051 } 7052 7053 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7054 return sizeof(struct fc_rdp_port_name_desc); 7055 } 7056 7057 static void 7058 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 7059 int status) 7060 { 7061 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 7062 struct lpfc_vport *vport = ndlp->vport; 7063 struct lpfc_iocbq *elsiocb; 7064 struct ulp_bde64 *bpl; 7065 IOCB_t *icmd; 7066 union lpfc_wqe128 *wqe; 7067 uint8_t *pcmd; 7068 struct ls_rjt *stat; 7069 struct fc_rdp_res_frame *rdp_res; 7070 uint32_t cmdsize, len; 7071 uint16_t *flag_ptr; 7072 int rc; 7073 u32 ulp_context; 7074 7075 if (status != SUCCESS) 7076 goto error; 7077 7078 /* This will change once we know the true size of the RDP payload */ 7079 cmdsize = sizeof(struct fc_rdp_res_frame); 7080 7081 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7082 lpfc_max_els_tries, rdp_context->ndlp, 7083 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7084 if (!elsiocb) 7085 goto free_rdp_context; 7086 7087 ulp_context = get_job_ulpcontext(phba, elsiocb); 7088 if (phba->sli_rev == LPFC_SLI_REV4) { 7089 wqe = &elsiocb->wqe; 7090 /* ox-id of the frame */ 7091 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7092 rdp_context->ox_id); 7093 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7094 rdp_context->rx_id); 7095 } else { 7096 icmd = &elsiocb->iocb; 7097 icmd->ulpContext = rdp_context->rx_id; 7098 icmd->unsli3.rcvsli3.ox_id = 
rdp_context->ox_id; 7099 } 7100 7101 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7102 "2171 Xmit RDP response tag x%x xri x%x, " 7103 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7104 elsiocb->iotag, ulp_context, 7105 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7106 ndlp->nlp_rpi); 7107 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7108 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7109 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7110 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7111 7112 /* Update Alarm and Warning */ 7113 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7114 phba->sfp_alarm |= *flag_ptr; 7115 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7116 phba->sfp_warning |= *flag_ptr; 7117 7118 /* For RDP payload */ 7119 len = 8; 7120 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7121 (len + pcmd), ELS_CMD_RDP); 7122 7123 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7124 rdp_context->page_a0, rdp_context->page_a2); 7125 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7126 phba); 7127 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7128 (len + pcmd), &rdp_context->link_stat); 7129 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7130 (len + pcmd), vport); 7131 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7132 (len + pcmd), vport, ndlp); 7133 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7134 &rdp_context->link_stat); 7135 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7136 &rdp_context->link_stat, vport); 7137 len += lpfc_rdp_res_oed_temp_desc(phba, 7138 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7139 rdp_context->page_a2); 7140 len += lpfc_rdp_res_oed_voltage_desc(phba, 7141 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7142 rdp_context->page_a2); 7143 len += lpfc_rdp_res_oed_txbias_desc(phba, 7144 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7145 rdp_context->page_a2); 7146 len += lpfc_rdp_res_oed_txpower_desc(phba, 7147 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7148 rdp_context->page_a2); 7149 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7150 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7151 rdp_context->page_a2); 7152 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 7153 rdp_context->page_a0, vport); 7154 7155 rdp_res->length = cpu_to_be32(len - 8); 7156 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7157 7158 /* Now that we know the true size of the payload, update the BPL */ 7159 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7160 bpl->tus.f.bdeSize = len; 7161 bpl->tus.f.bdeFlags = 0; 7162 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7163 7164 phba->fc_stat.elsXmitACC++; 7165 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7166 if (!elsiocb->ndlp) { 7167 lpfc_els_free_iocb(phba, elsiocb); 7168 goto free_rdp_context; 7169 } 7170 7171 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7172 if (rc == IOCB_ERROR) { 7173 lpfc_els_free_iocb(phba, elsiocb); 7174 lpfc_nlp_put(ndlp); 7175 } 7176 7177 goto free_rdp_context; 7178 7179 error: 7180 cmdsize = 2 * sizeof(uint32_t); 7181 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7182 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7183 if (!elsiocb) 7184 goto free_rdp_context; 7185 7186 if (phba->sli_rev == LPFC_SLI_REV4) { 7187 wqe = &elsiocb->wqe; 7188 /* ox-id of the frame */ 7189 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7190 rdp_context->ox_id); 7191 
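		/* rx_id of the unsolicited RDP exchange, so the LS_RJT is
		 * returned on the same exchange as the failed request.
		 */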
bf_set(wqe_ctxt_tag, 7192 &wqe->xmit_els_rsp.wqe_com, 7193 rdp_context->rx_id); 7194 } else { 7195 icmd = &elsiocb->iocb; 7196 icmd->ulpContext = rdp_context->rx_id; 7197 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7198 } 7199 7200 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7201 7202 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7203 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7204 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7205 7206 phba->fc_stat.elsXmitLSRJT++; 7207 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7208 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7209 if (!elsiocb->ndlp) { 7210 lpfc_els_free_iocb(phba, elsiocb); 7211 goto free_rdp_context; 7212 } 7213 7214 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7215 if (rc == IOCB_ERROR) { 7216 lpfc_els_free_iocb(phba, elsiocb); 7217 lpfc_nlp_put(ndlp); 7218 } 7219 7220 free_rdp_context: 7221 /* This reference put is for the original unsolicited RDP. If the 7222 * prep failed, there is no reference to remove. 7223 */ 7224 lpfc_nlp_put(ndlp); 7225 kfree(rdp_context); 7226 } 7227 7228 static int 7229 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7230 { 7231 LPFC_MBOXQ_t *mbox = NULL; 7232 int rc; 7233 7234 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7235 if (!mbox) { 7236 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7237 "7105 failed to allocate mailbox memory"); 7238 return 1; 7239 } 7240 7241 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7242 goto rdp_fail; 7243 mbox->vport = rdp_context->ndlp->vport; 7244 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7245 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7247 if (rc == MBX_NOT_FINISHED) { 7248 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7249 return 1; 7250 } 7251 7252 return 0; 7253 7254 rdp_fail: 7255 mempool_free(mbox, phba->mbox_mem_pool); 7256 return 1; 7257 } 7258 7259 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, 7260 struct lpfc_rdp_context *rdp_context) 7261 { 7262 LPFC_MBOXQ_t *mbox = NULL; 7263 int rc; 7264 struct lpfc_dmabuf *mp; 7265 struct lpfc_dmabuf *mpsave; 7266 void *virt; 7267 MAILBOX_t *mb; 7268 7269 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7270 if (!mbox) { 7271 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7272 "7205 failed to allocate mailbox memory"); 7273 return 1; 7274 } 7275 7276 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7277 goto sfp_fail; 7278 mp = mbox->ctx_buf; 7279 mpsave = mp; 7280 virt = mp->virt; 7281 if (phba->sli_rev < LPFC_SLI_REV4) { 7282 mb = &mbox->u.mb; 7283 mb->un.varDmp.cv = 1; 7284 mb->un.varDmp.co = 1; 7285 mb->un.varWords[2] = 0; 7286 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; 7287 mb->un.varWords[4] = 0; 7288 mb->un.varWords[5] = 0; 7289 mb->un.varWords[6] = 0; 7290 mb->un.varWords[7] = 0; 7291 mb->un.varWords[8] = 0; 7292 mb->un.varWords[9] = 0; 7293 mb->un.varWords[10] = 0; 7294 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7295 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7296 mbox->mbox_offset_word = 5; 7297 mbox->ctx_buf = virt; 7298 } else { 7299 bf_set(lpfc_mbx_memory_dump_type3_length, 7300 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); 7301 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7302 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7303 } 7304 mbox->vport = phba->pport; 7305 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7306 7307 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7308 if (rc == MBX_NOT_FINISHED) { 
7309 rc = 1; 7310 goto error; 7311 } 7312 7313 if (phba->sli_rev == LPFC_SLI_REV4) 7314 mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); 7315 else 7316 mp = mpsave; 7317 7318 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7319 rc = 1; 7320 goto error; 7321 } 7322 7323 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, 7324 DMP_SFF_PAGE_A0_SIZE); 7325 7326 memset(mbox, 0, sizeof(*mbox)); 7327 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); 7328 INIT_LIST_HEAD(&mp->list); 7329 7330 /* save address for completion */ 7331 mbox->ctx_buf = mp; 7332 mbox->vport = phba->pport; 7333 7334 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 7335 bf_set(lpfc_mbx_memory_dump_type3_type, 7336 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); 7337 bf_set(lpfc_mbx_memory_dump_type3_link, 7338 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); 7339 bf_set(lpfc_mbx_memory_dump_type3_page_no, 7340 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); 7341 if (phba->sli_rev < LPFC_SLI_REV4) { 7342 mb = &mbox->u.mb; 7343 mb->un.varDmp.cv = 1; 7344 mb->un.varDmp.co = 1; 7345 mb->un.varWords[2] = 0; 7346 mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; 7347 mb->un.varWords[4] = 0; 7348 mb->un.varWords[5] = 0; 7349 mb->un.varWords[6] = 0; 7350 mb->un.varWords[7] = 0; 7351 mb->un.varWords[8] = 0; 7352 mb->un.varWords[9] = 0; 7353 mb->un.varWords[10] = 0; 7354 mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7355 mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; 7356 mbox->mbox_offset_word = 5; 7357 mbox->ctx_buf = virt; 7358 } else { 7359 bf_set(lpfc_mbx_memory_dump_type3_length, 7360 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); 7361 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7362 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7363 } 7364 7365 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 7366 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7367 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7368 rc = 1; 7369 goto error; 7370 } 7371 rc = 0; 7372 7373 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, 7374 DMP_SFF_PAGE_A2_SIZE); 7375 7376 error: 7377 mbox->ctx_buf = mpsave; 7378 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7379 7380 return rc; 7381 7382 sfp_fail: 7383 mempool_free(mbox, phba->mbox_mem_pool); 7384 return 1; 7385 } 7386 7387 /* 7388 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 7389 * @vport: pointer to a host virtual N_Port data structure. 7390 * @cmdiocb: pointer to lpfc command iocb data structure. 7391 * @ndlp: pointer to a node-list data structure. 7392 * 7393 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 7394 * IOCB. First, the payload of the unsolicited RDP is checked. 7395 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 7396 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 7397 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 7398 * gather all data and send RDP response. 7399 * 7400 * Return code 7401 * 0 - Sent the acc response 7402 * 1 - Sent the reject response. 
7403 */ 7404 static int 7405 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7406 struct lpfc_nodelist *ndlp) 7407 { 7408 struct lpfc_hba *phba = vport->phba; 7409 struct lpfc_dmabuf *pcmd; 7410 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7411 struct fc_rdp_req_frame *rdp_req; 7412 struct lpfc_rdp_context *rdp_context; 7413 union lpfc_wqe128 *cmd = NULL; 7414 struct ls_rjt stat; 7415 7416 if (phba->sli_rev < LPFC_SLI_REV4 || 7417 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7418 LPFC_SLI_INTF_IF_TYPE_2) { 7419 rjt_err = LSRJT_UNABLE_TPC; 7420 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7421 goto error; 7422 } 7423 7424 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7425 rjt_err = LSRJT_UNABLE_TPC; 7426 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7427 goto error; 7428 } 7429 7430 pcmd = cmdiocb->cmd_dmabuf; 7431 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7432 7433 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7434 "2422 ELS RDP Request " 7435 "dec len %d tag x%x port_id %d len %d\n", 7436 be32_to_cpu(rdp_req->rdp_des_length), 7437 be32_to_cpu(rdp_req->nport_id_desc.tag), 7438 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7439 be32_to_cpu(rdp_req->nport_id_desc.length)); 7440 7441 if (sizeof(struct fc_rdp_nport_desc) != 7442 be32_to_cpu(rdp_req->rdp_des_length)) 7443 goto rjt_logerr; 7444 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7445 goto rjt_logerr; 7446 if (RDP_NPORT_ID_SIZE != 7447 be32_to_cpu(rdp_req->nport_id_desc.length)) 7448 goto rjt_logerr; 7449 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7450 if (!rdp_context) { 7451 rjt_err = LSRJT_UNABLE_TPC; 7452 goto error; 7453 } 7454 7455 cmd = &cmdiocb->wqe; 7456 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7457 if (!rdp_context->ndlp) { 7458 kfree(rdp_context); 7459 rjt_err = LSRJT_UNABLE_TPC; 7460 goto error; 7461 } 7462 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7463 &cmd->xmit_els_rsp.wqe_com); 7464 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7465 &cmd->xmit_els_rsp.wqe_com); 7466 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7467 if (lpfc_get_rdp_info(phba, rdp_context)) { 7468 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7469 "2423 Unable to send mailbox"); 7470 kfree(rdp_context); 7471 rjt_err = LSRJT_UNABLE_TPC; 7472 lpfc_nlp_put(ndlp); 7473 goto error; 7474 } 7475 7476 return 0; 7477 7478 rjt_logerr: 7479 rjt_err = LSRJT_LOGICAL_ERR; 7480 7481 error: 7482 memset(&stat, 0, sizeof(stat)); 7483 stat.un.b.lsRjtRsnCode = rjt_err; 7484 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7485 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7486 return 1; 7487 } 7488 7489 7490 static void 7491 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7492 { 7493 MAILBOX_t *mb; 7494 IOCB_t *icmd; 7495 union lpfc_wqe128 *wqe; 7496 uint8_t *pcmd; 7497 struct lpfc_iocbq *elsiocb; 7498 struct lpfc_nodelist *ndlp; 7499 struct ls_rjt *stat; 7500 union lpfc_sli4_cfg_shdr *shdr; 7501 struct lpfc_lcb_context *lcb_context; 7502 struct fc_lcb_res_frame *lcb_res; 7503 uint32_t cmdsize, shdr_status, shdr_add_status; 7504 int rc; 7505 7506 mb = &pmb->u.mb; 7507 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7508 ndlp = lcb_context->ndlp; 7509 pmb->ctx_ndlp = NULL; 7510 pmb->ctx_buf = NULL; 7511 7512 shdr = (union lpfc_sli4_cfg_shdr *) 7513 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7514 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7515 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7516 7517 
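	/* The mailbox status and the cfg_shdr status/add_status checked below
	 * decide whether an ACC or an LS_RJT is returned to the LCB sender.
	 */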
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7518 "0194 SET_BEACON_CONFIG mailbox " 7519 "completed with status x%x add_status x%x," 7520 " mbx status x%x\n", 7521 shdr_status, shdr_add_status, mb->mbxStatus); 7522 7523 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7524 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7525 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7526 mempool_free(pmb, phba->mbox_mem_pool); 7527 goto error; 7528 } 7529 7530 mempool_free(pmb, phba->mbox_mem_pool); 7531 cmdsize = sizeof(struct fc_lcb_res_frame); 7532 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7533 lpfc_max_els_tries, ndlp, 7534 ndlp->nlp_DID, ELS_CMD_ACC); 7535 7536 /* Decrement the ndlp reference count from previous mbox command */ 7537 lpfc_nlp_put(ndlp); 7538 7539 if (!elsiocb) 7540 goto free_lcb_context; 7541 7542 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7543 7544 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7545 7546 if (phba->sli_rev == LPFC_SLI_REV4) { 7547 wqe = &elsiocb->wqe; 7548 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7549 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7550 lcb_context->ox_id); 7551 } else { 7552 icmd = &elsiocb->iocb; 7553 icmd->ulpContext = lcb_context->rx_id; 7554 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7555 } 7556 7557 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7558 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7559 lcb_res->lcb_sub_command = lcb_context->sub_command; 7560 lcb_res->lcb_type = lcb_context->type; 7561 lcb_res->capability = lcb_context->capability; 7562 lcb_res->lcb_frequency = lcb_context->frequency; 7563 lcb_res->lcb_duration = lcb_context->duration; 7564 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7565 phba->fc_stat.elsXmitACC++; 7566 7567 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7568 if (!elsiocb->ndlp) { 7569 lpfc_els_free_iocb(phba, elsiocb); 7570 goto out; 7571 } 7572 7573 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7574 if (rc == IOCB_ERROR) { 7575 lpfc_els_free_iocb(phba, elsiocb); 7576 lpfc_nlp_put(ndlp); 7577 } 7578 out: 7579 kfree(lcb_context); 7580 return; 7581 7582 error: 7583 cmdsize = sizeof(struct fc_lcb_res_frame); 7584 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7585 lpfc_max_els_tries, ndlp, 7586 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7587 lpfc_nlp_put(ndlp); 7588 if (!elsiocb) 7589 goto free_lcb_context; 7590 7591 if (phba->sli_rev == LPFC_SLI_REV4) { 7592 wqe = &elsiocb->wqe; 7593 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7594 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7595 lcb_context->ox_id); 7596 } else { 7597 icmd = &elsiocb->iocb; 7598 icmd->ulpContext = lcb_context->rx_id; 7599 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7600 } 7601 7602 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7603 7604 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7605 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7606 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7607 7608 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7609 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7610 7611 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7612 phba->fc_stat.elsXmitLSRJT++; 7613 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7614 if (!elsiocb->ndlp) { 7615 lpfc_els_free_iocb(phba, elsiocb); 7616 goto free_lcb_context; 7617 } 7618 7619 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7620 if (rc == IOCB_ERROR) { 7621 lpfc_els_free_iocb(phba, elsiocb); 7622 lpfc_nlp_put(ndlp); 7623 } 7624 free_lcb_context: 7625 kfree(lcb_context); 7626 } 
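
/*
 * Note on the response-issue pattern used by lpfc_els_lcb_rsp() above and by
 * the other ELS response paths in this file: lpfc_nlp_get() takes a node
 * reference on behalf of the response IOCB, and that reference is normally
 * released by the completion handler.  If the IOCB cannot be issued, the
 * reference must be dropped locally.  A minimal sketch of the pattern, taken
 * from the code above (illustrative only, logging trimmed):
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	if (!elsiocb->ndlp) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return;
 *	}
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		lpfc_nlp_put(ndlp);
 *	}
 */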
7627 7628 static int 7629 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7630 struct lpfc_lcb_context *lcb_context, 7631 uint32_t beacon_state) 7632 { 7633 struct lpfc_hba *phba = vport->phba; 7634 union lpfc_sli4_cfg_shdr *cfg_shdr; 7635 LPFC_MBOXQ_t *mbox = NULL; 7636 uint32_t len; 7637 int rc; 7638 7639 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7640 if (!mbox) 7641 return 1; 7642 7643 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7644 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7645 sizeof(struct lpfc_sli4_cfg_mhdr); 7646 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7647 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7648 LPFC_SLI4_MBX_EMBED); 7649 mbox->ctx_ndlp = (void *)lcb_context; 7650 mbox->vport = phba->pport; 7651 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7652 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7653 phba->sli4_hba.physical_port); 7654 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7655 beacon_state); 7656 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7657 7658 /* 7659 * Check bv1s bit before issuing the mailbox 7660 * if bv1s == 1, LCB V1 supported 7661 * else, LCB V0 supported 7662 */ 7663 7664 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7665 /* COMMON_SET_BEACON_CONFIG_V1 */ 7666 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7667 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7668 bf_set(lpfc_mbx_set_beacon_port_type, 7669 &mbox->u.mqe.un.beacon_config, 0); 7670 bf_set(lpfc_mbx_set_beacon_duration_v1, 7671 &mbox->u.mqe.un.beacon_config, 7672 be16_to_cpu(lcb_context->duration)); 7673 } else { 7674 /* COMMON_SET_BEACON_CONFIG_V0 */ 7675 if (be16_to_cpu(lcb_context->duration) != 0) { 7676 mempool_free(mbox, phba->mbox_mem_pool); 7677 return 1; 7678 } 7679 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7680 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7681 bf_set(lpfc_mbx_set_beacon_state, 7682 &mbox->u.mqe.un.beacon_config, beacon_state); 7683 bf_set(lpfc_mbx_set_beacon_port_type, 7684 &mbox->u.mqe.un.beacon_config, 1); 7685 bf_set(lpfc_mbx_set_beacon_duration, 7686 &mbox->u.mqe.un.beacon_config, 7687 be16_to_cpu(lcb_context->duration)); 7688 } 7689 7690 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7691 if (rc == MBX_NOT_FINISHED) { 7692 mempool_free(mbox, phba->mbox_mem_pool); 7693 return 1; 7694 } 7695 7696 return 0; 7697 } 7698 7699 7700 /** 7701 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7702 * @vport: pointer to a host virtual N_Port data structure. 7703 * @cmdiocb: pointer to lpfc command iocb data structure. 7704 * @ndlp: pointer to a node-list data structure. 7705 * 7706 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7707 * First, the payload of the unsolicited LCB is checked. 7708 * Then based on Subcommand beacon will either turn on or off. 7709 * 7710 * Return code 7711 * 0 - Sent the acc response 7712 * 1 - Sent the reject response. 
7713 **/ 7714 static int 7715 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7716 struct lpfc_nodelist *ndlp) 7717 { 7718 struct lpfc_hba *phba = vport->phba; 7719 struct lpfc_dmabuf *pcmd; 7720 uint8_t *lp; 7721 struct fc_lcb_request_frame *beacon; 7722 struct lpfc_lcb_context *lcb_context; 7723 u8 state, rjt_err = 0; 7724 struct ls_rjt stat; 7725 7726 pcmd = cmdiocb->cmd_dmabuf; 7727 lp = (uint8_t *)pcmd->virt; 7728 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7729 7730 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7731 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7732 "type x%x frequency %x duration x%x\n", 7733 lp[0], lp[1], lp[2], 7734 beacon->lcb_command, 7735 beacon->lcb_sub_command, 7736 beacon->lcb_type, 7737 beacon->lcb_frequency, 7738 be16_to_cpu(beacon->lcb_duration)); 7739 7740 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7741 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7742 rjt_err = LSRJT_CMD_UNSUPPORTED; 7743 goto rjt; 7744 } 7745 7746 if (phba->sli_rev < LPFC_SLI_REV4 || 7747 phba->hba_flag & HBA_FCOE_MODE || 7748 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7749 LPFC_SLI_INTF_IF_TYPE_2)) { 7750 rjt_err = LSRJT_CMD_UNSUPPORTED; 7751 goto rjt; 7752 } 7753 7754 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7755 if (!lcb_context) { 7756 rjt_err = LSRJT_UNABLE_TPC; 7757 goto rjt; 7758 } 7759 7760 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7761 lcb_context->sub_command = beacon->lcb_sub_command; 7762 lcb_context->capability = 0; 7763 lcb_context->type = beacon->lcb_type; 7764 lcb_context->frequency = beacon->lcb_frequency; 7765 lcb_context->duration = beacon->lcb_duration; 7766 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7767 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7768 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7769 if (!lcb_context->ndlp) { 7770 rjt_err = LSRJT_UNABLE_TPC; 7771 goto rjt_free; 7772 } 7773 7774 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7775 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7776 "0193 failed to send mail box"); 7777 lpfc_nlp_put(ndlp); 7778 rjt_err = LSRJT_UNABLE_TPC; 7779 goto rjt_free; 7780 } 7781 return 0; 7782 7783 rjt_free: 7784 kfree(lcb_context); 7785 rjt: 7786 memset(&stat, 0, sizeof(stat)); 7787 stat.un.b.lsRjtRsnCode = rjt_err; 7788 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7789 return 1; 7790 } 7791 7792 7793 /** 7794 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7795 * @vport: pointer to a host virtual N_Port data structure. 7796 * 7797 * This routine cleans up any Registration State Change Notification 7798 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7799 * @vport together with the host_lock is used to prevent multiple thread 7800 * trying to access the RSCN array on a same @vport at the same time. 
 **/
void
lpfc_els_flush_rscn(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	int i;

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		spin_unlock_irq(shost->host_lock);
		return;
	}
	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);

	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
		vport->fc_rscn_id_list[i] = NULL;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_rscn_id_cnt = 0;
	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
	spin_unlock_irq(shost->host_lock);
	lpfc_can_disctmo(vport);
	/* Indicate we are done walking this fc_rscn_id_list */
	vport->fc_rscn_flush = 0;
}

/**
 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
 * @vport: pointer to a host virtual N_Port data structure.
 * @did: remote destination port identifier.
 *
 * This routine checks whether there is any pending Registration State
 * Change Notification (RSCN) to a @did on @vport.
 *
 * Return code
 *   Non-zero - The @did matched with a pending rscn
 *   0 - not able to match @did with a pending rscn
 **/
int
lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
{
	D_ID ns_did;
	D_ID rscn_did;
	uint32_t *lp;
	uint32_t payload_len, i;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ns_did.un.word = did;

	/* Never match fabric nodes for RSCNs */
	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
		return 0;

	/* If we are doing a FULL RSCN rediscovery, match everything */
	if (vport->fc_flag & FC_RSCN_DISCOVERY)
		return did;

	spin_lock_irq(shost->host_lock);
	if (vport->fc_rscn_flush) {
		/* Another thread is walking fc_rscn_id_list on this vport */
		spin_unlock_irq(shost->host_lock);
		return 0;
	}
	/* Indicate we are walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 1;
	spin_unlock_irq(shost->host_lock);
	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
		lp = vport->fc_rscn_id_list[i]->virt;
		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
		payload_len -= sizeof(uint32_t);	/* take off word 0 */
		while (payload_len) {
			rscn_did.un.word = be32_to_cpu(*lp++);
			payload_len -= sizeof(uint32_t);
			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
			case RSCN_ADDRESS_FORMAT_PORT:
				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
				    && (ns_did.un.b.area == rscn_did.un.b.area)
				    && (ns_did.un.b.id == rscn_did.un.b.id))
					goto return_did_out;
				break;
			case RSCN_ADDRESS_FORMAT_AREA:
				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
				    && (ns_did.un.b.area == rscn_did.un.b.area))
					goto return_did_out;
				break;
			case RSCN_ADDRESS_FORMAT_DOMAIN:
				if (ns_did.un.b.domain == rscn_did.un.b.domain)
					goto return_did_out;
				break;
			case RSCN_ADDRESS_FORMAT_FABRIC:
				goto return_did_out;
			}
		}
	}
	/* Indicate we are done with walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 0;
	return 0;
return_did_out:
	/* Indicate we are done with walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 0;
	return did;
}

/**
 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine sends the recovery (NLP_EVT_DEVICE_RECOVERY) event to the
 * discovery state machine for each of the @vport's nodes that has a pending
 * RSCN (Registration State Change Notification).
 *
 * Return code
 *   0 - Successful (currently always returns 0)
 **/
static int
lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *n;

	/* Move all nodes affected by pending RSCNs to NPR state. */
	list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
		if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
			continue;

		/* NVME Target mode does not do RSCN Recovery. */
		if (vport->phba->nvmet_support)
			continue;

		/* If we are in the process of doing discovery on this
		 * NPort, let it continue on its own.
		 */
		switch (ndlp->nlp_state) {
		case NLP_STE_PLOGI_ISSUE:
		case NLP_STE_ADISC_ISSUE:
		case NLP_STE_REG_LOGIN_ISSUE:
		case NLP_STE_PRLI_ISSUE:
		case NLP_STE_LOGO_ISSUE:
			continue;
		}

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	}
	return 0;
}

/**
 * lpfc_send_rscn_event - Send an RSCN event to management application
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 *
 * lpfc_send_rscn_event sends an RSCN netlink event to management
 * applications.
 */
static void
lpfc_send_rscn_event(struct lpfc_vport *vport,
		     struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_dmabuf *pcmd;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	uint32_t *payload_ptr;
	uint32_t payload_len;
	struct lpfc_rscn_event_header *rscn_event_data;

	pcmd = cmdiocb->cmd_dmabuf;
	payload_ptr = (uint32_t *) pcmd->virt;
	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);

	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
		payload_len, GFP_KERNEL);
	if (!rscn_event_data) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"0147 Failed to allocate memory for RSCN event\n");
		return;
	}
	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
	rscn_event_data->payload_length = payload_len;
	memcpy(rscn_event_data->rscn_payload, payload_ptr,
		payload_len);

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(struct lpfc_rscn_event_header) + payload_len,
		(char *)rscn_event_data,
		LPFC_NL_VENDOR_ID);

	kfree(rscn_event_data);
}

/**
 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes an unsolicited RSCN (Registration State Change
 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
 * to invoke the fc_host_post_event() routine to the FC transport layer.
If the 8006 * discover state machine is about to begin discovery, it just accepts the 8007 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 8008 * contains N_Port IDs for other vports on this HBA, it just accepts the 8009 * RSCN and ignore processing it. If the state machine is in the recovery 8010 * state, the fc_rscn_id_list of this @vport is walked and the 8011 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 8012 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 8013 * routine is invoked to handle the RSCN event. 8014 * 8015 * Return code 8016 * 0 - Just sent the acc response 8017 * 1 - Sent the acc response and waited for name server completion 8018 **/ 8019 static int 8020 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8021 struct lpfc_nodelist *ndlp) 8022 { 8023 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8024 struct lpfc_hba *phba = vport->phba; 8025 struct lpfc_dmabuf *pcmd; 8026 uint32_t *lp, *datap; 8027 uint32_t payload_len, length, nportid, *cmd; 8028 int rscn_cnt; 8029 int rscn_id = 0, hba_id = 0; 8030 int i, tmo; 8031 8032 pcmd = cmdiocb->cmd_dmabuf; 8033 lp = (uint32_t *) pcmd->virt; 8034 8035 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 8036 payload_len -= sizeof(uint32_t); /* take off word 0 */ 8037 /* RSCN received */ 8038 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8039 "0214 RSCN received Data: x%x x%x x%x x%x\n", 8040 vport->fc_flag, payload_len, *lp, 8041 vport->fc_rscn_id_cnt); 8042 8043 /* Send an RSCN event to the management application */ 8044 lpfc_send_rscn_event(vport, cmdiocb); 8045 8046 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 8047 fc_host_post_event(shost, fc_get_event_number(), 8048 FCH_EVT_RSCN, lp[i]); 8049 8050 /* Check if RSCN is coming from a direct-connected remote NPort */ 8051 if (vport->fc_flag & FC_PT2PT) { 8052 /* If so, just ACC it, no other action needed for now */ 8053 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8054 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 8055 *lp, vport->fc_flag, payload_len); 8056 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8057 8058 /* Check to see if we need to NVME rescan this target 8059 * remoteport. 8060 */ 8061 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 8062 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 8063 lpfc_nvme_rescan_port(vport, ndlp); 8064 return 0; 8065 } 8066 8067 /* If we are about to begin discovery, just ACC the RSCN. 8068 * Discovery processing will satisfy it. 8069 */ 8070 if (vport->port_state <= LPFC_NS_QRY) { 8071 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8072 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 8073 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8074 8075 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8076 return 0; 8077 } 8078 8079 /* If this RSCN just contains NPortIDs for other vports on this HBA, 8080 * just ACC and ignore it. 
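	 * The short-cut applies only when NPIV is enabled and
	 * cfg_peer_port_login is disabled; with peer-port login allowed the
	 * RSCN is processed normally so that logins between local vports are
	 * still driven by discovery.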
8081 */ 8082 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8083 !(vport->cfg_peer_port_login)) { 8084 i = payload_len; 8085 datap = lp; 8086 while (i > 0) { 8087 nportid = *datap++; 8088 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 8089 i -= sizeof(uint32_t); 8090 rscn_id++; 8091 if (lpfc_find_vport_by_did(phba, nportid)) 8092 hba_id++; 8093 } 8094 if (rscn_id == hba_id) { 8095 /* ALL NPortIDs in RSCN are on HBA */ 8096 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8097 "0219 Ignore RSCN " 8098 "Data: x%x x%x x%x x%x\n", 8099 vport->fc_flag, payload_len, 8100 *lp, vport->fc_rscn_id_cnt); 8101 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8102 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 8103 ndlp->nlp_DID, vport->port_state, 8104 ndlp->nlp_flag); 8105 8106 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 8107 ndlp, NULL); 8108 /* Restart disctmo if its already running */ 8109 if (vport->fc_flag & FC_DISC_TMO) { 8110 tmo = ((phba->fc_ratov * 3) + 3); 8111 mod_timer(&vport->fc_disctmo, 8112 jiffies + 8113 msecs_to_jiffies(1000 * tmo)); 8114 } 8115 return 0; 8116 } 8117 } 8118 8119 spin_lock_irq(shost->host_lock); 8120 if (vport->fc_rscn_flush) { 8121 /* Another thread is walking fc_rscn_id_list on this vport */ 8122 vport->fc_flag |= FC_RSCN_DISCOVERY; 8123 spin_unlock_irq(shost->host_lock); 8124 /* Send back ACC */ 8125 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8126 return 0; 8127 } 8128 /* Indicate we are walking fc_rscn_id_list on this vport */ 8129 vport->fc_rscn_flush = 1; 8130 spin_unlock_irq(shost->host_lock); 8131 /* Get the array count after successfully have the token */ 8132 rscn_cnt = vport->fc_rscn_id_cnt; 8133 /* If we are already processing an RSCN, save the received 8134 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 8135 */ 8136 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 8137 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8138 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 8139 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8140 8141 spin_lock_irq(shost->host_lock); 8142 vport->fc_flag |= FC_RSCN_DEFERRED; 8143 8144 /* Restart disctmo if its already running */ 8145 if (vport->fc_flag & FC_DISC_TMO) { 8146 tmo = ((phba->fc_ratov * 3) + 3); 8147 mod_timer(&vport->fc_disctmo, 8148 jiffies + msecs_to_jiffies(1000 * tmo)); 8149 } 8150 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 8151 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 8152 vport->fc_flag |= FC_RSCN_MODE; 8153 spin_unlock_irq(shost->host_lock); 8154 if (rscn_cnt) { 8155 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 8156 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 8157 } 8158 if ((rscn_cnt) && 8159 (payload_len + length <= LPFC_BPL_SIZE)) { 8160 *cmd &= ELS_CMD_MASK; 8161 *cmd |= cpu_to_be32(payload_len + length); 8162 memcpy(((uint8_t *)cmd) + length, lp, 8163 payload_len); 8164 } else { 8165 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 8166 vport->fc_rscn_id_cnt++; 8167 /* If we zero, cmdiocb->cmd_dmabuf, the calling 8168 * routine will not try to free it. 
8169 */ 8170 cmdiocb->cmd_dmabuf = NULL; 8171 } 8172 /* Deferred RSCN */ 8173 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8174 "0235 Deferred RSCN " 8175 "Data: x%x x%x x%x\n", 8176 vport->fc_rscn_id_cnt, vport->fc_flag, 8177 vport->port_state); 8178 } else { 8179 vport->fc_flag |= FC_RSCN_DISCOVERY; 8180 spin_unlock_irq(shost->host_lock); 8181 /* ReDiscovery RSCN */ 8182 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8183 "0234 ReDiscovery RSCN " 8184 "Data: x%x x%x x%x\n", 8185 vport->fc_rscn_id_cnt, vport->fc_flag, 8186 vport->port_state); 8187 } 8188 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8189 vport->fc_rscn_flush = 0; 8190 /* Send back ACC */ 8191 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8192 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8193 lpfc_rscn_recovery_check(vport); 8194 return 0; 8195 } 8196 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8197 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 8198 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8199 8200 spin_lock_irq(shost->host_lock); 8201 vport->fc_flag |= FC_RSCN_MODE; 8202 spin_unlock_irq(shost->host_lock); 8203 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 8204 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8205 vport->fc_rscn_flush = 0; 8206 /* 8207 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will 8208 * not try to free it. 8209 */ 8210 cmdiocb->cmd_dmabuf = NULL; 8211 lpfc_set_disctmo(vport); 8212 /* Send back ACC */ 8213 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8214 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8215 lpfc_rscn_recovery_check(vport); 8216 return lpfc_els_handle_rscn(vport); 8217 } 8218 8219 /** 8220 * lpfc_els_handle_rscn - Handle rscn for a vport 8221 * @vport: pointer to a host virtual N_Port data structure. 8222 * 8223 * This routine handles the Registration State Configuration Notification 8224 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 8225 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 8226 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 8227 * NameServer shall be issued. If CT command to the NameServer fails to be 8228 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 8229 * RSCN activities with the @vport. 8230 * 8231 * Return code 8232 * 0 - Cleaned up rscn on the @vport 8233 * 1 - Wait for plogi to name server before proceed 8234 **/ 8235 int 8236 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8237 { 8238 struct lpfc_nodelist *ndlp; 8239 struct lpfc_hba *phba = vport->phba; 8240 8241 /* Ignore RSCN if the port is being torn down. */ 8242 if (vport->load_flag & FC_UNLOADING) { 8243 lpfc_els_flush_rscn(vport); 8244 return 0; 8245 } 8246 8247 /* Start timer for RSCN processing */ 8248 lpfc_set_disctmo(vport); 8249 8250 /* RSCN processed */ 8251 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8252 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 8253 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8254 vport->port_state, vport->num_disc_nodes, 8255 vport->gidft_inp); 8256 8257 /* To process RSCN, first compare RSCN data with NameServer */ 8258 vport->fc_ns_retry = 0; 8259 vport->num_disc_nodes = 0; 8260 8261 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8262 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8263 /* Good ndlp, issue CT Request to NameServer. Need to 8264 * know how many gidfts were issued. 
If none, then just
		 * flush the RSCN.  Otherwise, the outstanding requests
		 * need to complete.
		 */
		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
			if (lpfc_issue_gidft(vport) > 0)
				return 1;
		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
			if (lpfc_issue_gidpt(vport) > 0)
				return 1;
		} else {
			return 1;
		}
	} else {
		/* Nameserver login in question.  Revalidate. */
		if (ndlp) {
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		} else {
			ndlp = lpfc_nlp_init(vport, NameServer_DID);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		}
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
		/* Wait for NameServer login cmpl before we can
		 * continue
		 */
		return 1;
	}

	lpfc_els_flush_rscn(vport);
	return 0;
}

/**
 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
 * point topology. As an unsolicited FLOGI should not be received in a loop
 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
 * lpfc_check_sparm() routine is invoked to check the parameters in the
 * unsolicited FLOGI. If the parameter validation fails, the routine
 * lpfc_els_rsp_reject() shall be called with reject reason code set to
 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
 * FLOGI shall be compared with the Port WWN of the @vport to determine which
 * port will initiate PLOGI. The party with the higher lexicographical value
 * shall have higher priority (as the winning port); it initiates PLOGI and
 * communicates Port_IDs (Addresses) for both nodes in the PLOGI. The result
 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
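 *
 * Outcome of the Port WWN comparison (summary of the memcmp() performed
 * below; PT2PT_LocalID and PT2PT_RemoteID are the driver's point-to-point
 * local/remote IDs):
 *
 *   rc == 0 - the FLOGI came from this port itself (external loopback plug
 *             detected); no point-to-point login is attempted.
 *   rc >  0 - the local WWPN is higher; fc_myDID is set to PT2PT_LocalID and
 *             FC_PT2PT_PLOGI is set so this port initiates the PLOGI.
 *   rc <  0 - the remote WWPN is higher; fc_myDID is set to PT2PT_RemoteID
 *             and this port waits for the remote PLOGI.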
8323 * 8324 * Return code 8325 * 0 - Successfully processed the unsolicited flogi 8326 * 1 - Failed to process the unsolicited flogi 8327 **/ 8328 static int 8329 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8330 struct lpfc_nodelist *ndlp) 8331 { 8332 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8333 struct lpfc_hba *phba = vport->phba; 8334 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8335 uint32_t *lp = (uint32_t *) pcmd->virt; 8336 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8337 struct serv_parm *sp; 8338 LPFC_MBOXQ_t *mbox; 8339 uint32_t cmd, did; 8340 int rc; 8341 uint32_t fc_flag = 0; 8342 uint32_t port_state = 0; 8343 8344 /* Clear external loopback plug detected flag */ 8345 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8346 8347 cmd = *lp++; 8348 sp = (struct serv_parm *) lp; 8349 8350 /* FLOGI received */ 8351 8352 lpfc_set_disctmo(vport); 8353 8354 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8355 /* We should never receive a FLOGI in loop mode, ignore it */ 8356 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8357 8358 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8359 Loop Mode */ 8360 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8361 "0113 An FLOGI ELS command x%x was " 8362 "received from DID x%x in Loop Mode\n", 8363 cmd, did); 8364 return 1; 8365 } 8366 8367 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8368 8369 /* 8370 * If our portname is greater than the remote portname, 8371 * then we initiate Nport login. 8372 */ 8373 8374 rc = memcmp(&vport->fc_portname, &sp->portName, 8375 sizeof(struct lpfc_name)); 8376 8377 if (!rc) { 8378 if (phba->sli_rev < LPFC_SLI_REV4) { 8379 mbox = mempool_alloc(phba->mbox_mem_pool, 8380 GFP_KERNEL); 8381 if (!mbox) 8382 return 1; 8383 lpfc_linkdown(phba); 8384 lpfc_init_link(phba, mbox, 8385 phba->cfg_topology, 8386 phba->cfg_link_speed); 8387 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8388 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8389 mbox->vport = vport; 8390 rc = lpfc_sli_issue_mbox(phba, mbox, 8391 MBX_NOWAIT); 8392 lpfc_set_loopback_flag(phba); 8393 if (rc == MBX_NOT_FINISHED) 8394 mempool_free(mbox, phba->mbox_mem_pool); 8395 return 1; 8396 } 8397 8398 /* External loopback plug insertion detected */ 8399 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8400 8401 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8402 "1119 External Loopback plug detected\n"); 8403 8404 /* abort the flogi coming back to ourselves 8405 * due to external loopback on the port. 8406 */ 8407 lpfc_els_abort_flogi(phba); 8408 return 0; 8409 8410 } else if (rc > 0) { /* greater than */ 8411 spin_lock_irq(shost->host_lock); 8412 vport->fc_flag |= FC_PT2PT_PLOGI; 8413 spin_unlock_irq(shost->host_lock); 8414 8415 /* If we have the high WWPN we can assign our own 8416 * myDID; otherwise, we have to WAIT for a PLOGI 8417 * from the remote NPort to find out what it 8418 * will be. 8419 */ 8420 vport->fc_myDID = PT2PT_LocalID; 8421 } else { 8422 vport->fc_myDID = PT2PT_RemoteID; 8423 } 8424 8425 /* 8426 * The vport state should go to LPFC_FLOGI only 8427 * AFTER we issue a FLOGI, not receive one. 8428 */ 8429 spin_lock_irq(shost->host_lock); 8430 fc_flag = vport->fc_flag; 8431 port_state = vport->port_state; 8432 vport->fc_flag |= FC_PT2PT; 8433 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8434 8435 /* Acking an unsol FLOGI. Count 1 for link bounce 8436 * work-around. 
8437 */ 8438 vport->rcv_flogi_cnt++; 8439 spin_unlock_irq(shost->host_lock); 8440 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8441 "3311 Rcv Flogi PS x%x new PS x%x " 8442 "fc_flag x%x new fc_flag x%x\n", 8443 port_state, vport->port_state, 8444 fc_flag, vport->fc_flag); 8445 8446 /* 8447 * We temporarily set fc_myDID to make it look like we are 8448 * a Fabric. This is done just so we end up with the right 8449 * did / sid on the FLOGI ACC rsp. 8450 */ 8451 did = vport->fc_myDID; 8452 vport->fc_myDID = Fabric_DID; 8453 8454 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8455 8456 /* Defer ACC response until AFTER we issue a FLOGI */ 8457 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8458 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8459 &wqe->xmit_els_rsp.wqe_com); 8460 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8461 &wqe->xmit_els_rsp.wqe_com); 8462 8463 vport->fc_myDID = did; 8464 8465 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8466 "3344 Deferring FLOGI ACC: rx_id: x%x," 8467 " ox_id: x%x, hba_flag x%x\n", 8468 phba->defer_flogi_acc_rx_id, 8469 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8470 8471 phba->defer_flogi_acc_flag = true; 8472 8473 return 0; 8474 } 8475 8476 /* Send back ACC */ 8477 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8478 8479 /* Now lets put fc_myDID back to what its supposed to be */ 8480 vport->fc_myDID = did; 8481 8482 return 0; 8483 } 8484 8485 /** 8486 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8487 * @vport: pointer to a host virtual N_Port data structure. 8488 * @cmdiocb: pointer to lpfc command iocb data structure. 8489 * @ndlp: pointer to a node-list data structure. 8490 * 8491 * This routine processes Request Node Identification Data (RNID) IOCB 8492 * received as an ELS unsolicited event. Only when the RNID specified format 8493 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8494 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8495 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8496 * rejected by invoking the lpfc_els_rsp_reject() routine. 8497 * 8498 * Return code 8499 * 0 - Successfully processed rnid iocb (currently always return 0) 8500 **/ 8501 static int 8502 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8503 struct lpfc_nodelist *ndlp) 8504 { 8505 struct lpfc_dmabuf *pcmd; 8506 uint32_t *lp; 8507 RNID *rn; 8508 struct ls_rjt stat; 8509 8510 pcmd = cmdiocb->cmd_dmabuf; 8511 lp = (uint32_t *) pcmd->virt; 8512 8513 lp++; 8514 rn = (RNID *) lp; 8515 8516 /* RNID received */ 8517 8518 switch (rn->Format) { 8519 case 0: 8520 case RNID_TOPOLOGY_DISC: 8521 /* Send back ACC */ 8522 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8523 break; 8524 default: 8525 /* Reject this request because format not supported */ 8526 stat.un.b.lsRjtRsvd0 = 0; 8527 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8528 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8529 stat.un.b.vendorUnique = 0; 8530 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8531 NULL); 8532 } 8533 return 0; 8534 } 8535 8536 /** 8537 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8538 * @vport: pointer to a host virtual N_Port data structure. 8539 * @cmdiocb: pointer to lpfc command iocb data structure. 8540 * @ndlp: pointer to a node-list data structure. 
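 *
 * This routine processes an ECHO IOCB received as an ELS unsolicited event.
 * It accepts the ECHO by sending the received payload (everything after the
 * ELS command word) back to the sender via lpfc_els_rsp_echo_acc().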
8541 * 8542 * Return code 8543 * 0 - Successfully processed echo iocb (currently always return 0) 8544 **/ 8545 static int 8546 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8547 struct lpfc_nodelist *ndlp) 8548 { 8549 uint8_t *pcmd; 8550 8551 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8552 8553 /* skip over first word of echo command to find echo data */ 8554 pcmd += sizeof(uint32_t); 8555 8556 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8557 return 0; 8558 } 8559 8560 /** 8561 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8562 * @vport: pointer to a host virtual N_Port data structure. 8563 * @cmdiocb: pointer to lpfc command iocb data structure. 8564 * @ndlp: pointer to a node-list data structure. 8565 * 8566 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8567 * received as an ELS unsolicited event. Currently, this function just invokes 8568 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8569 * 8570 * Return code 8571 * 0 - Successfully processed lirr iocb (currently always return 0) 8572 **/ 8573 static int 8574 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8575 struct lpfc_nodelist *ndlp) 8576 { 8577 struct ls_rjt stat; 8578 8579 /* For now, unconditionally reject this command */ 8580 stat.un.b.lsRjtRsvd0 = 0; 8581 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8582 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8583 stat.un.b.vendorUnique = 0; 8584 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8585 return 0; 8586 } 8587 8588 /** 8589 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8590 * @vport: pointer to a host virtual N_Port data structure. 8591 * @cmdiocb: pointer to lpfc command iocb data structure. 8592 * @ndlp: pointer to a node-list data structure. 8593 * 8594 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8595 * received as an ELS unsolicited event. A request to RRQ shall only 8596 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8597 * Nx_Port N_Port_ID of the target Exchange is the same as the 8598 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8599 * not accepted, an LS_RJT with reason code "Unable to perform 8600 * command request" and reason code explanation "Invalid Originator 8601 * S_ID" shall be returned. For now, we just unconditionally accept 8602 * RRQ from the target. 8603 **/ 8604 static void 8605 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8606 struct lpfc_nodelist *ndlp) 8607 { 8608 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8609 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8610 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8611 } 8612 8613 /** 8614 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8615 * @phba: pointer to lpfc hba data structure. 8616 * @pmb: pointer to the driver internal queue element for mailbox command. 8617 * 8618 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8619 * mailbox command. This callback function is to actually send the Accept 8620 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8621 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8622 * mailbox command, constructs the RLS response with the link statistics 8623 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8624 * response to the RLS. 
8625 * 8626 * Note that the ndlp reference count will be incremented by 1 for holding the 8627 * ndlp and the reference to ndlp will be stored into the ndlp field of 8628 * the IOCB for the completion callback function to the RLS Accept Response 8629 * ELS IOCB command. 8630 * 8631 **/ 8632 static void 8633 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8634 { 8635 int rc = 0; 8636 MAILBOX_t *mb; 8637 IOCB_t *icmd; 8638 union lpfc_wqe128 *wqe; 8639 struct RLS_RSP *rls_rsp; 8640 uint8_t *pcmd; 8641 struct lpfc_iocbq *elsiocb; 8642 struct lpfc_nodelist *ndlp; 8643 uint16_t oxid; 8644 uint16_t rxid; 8645 uint32_t cmdsize; 8646 u32 ulp_context; 8647 8648 mb = &pmb->u.mb; 8649 8650 ndlp = pmb->ctx_ndlp; 8651 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8652 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8653 pmb->ctx_buf = NULL; 8654 pmb->ctx_ndlp = NULL; 8655 8656 if (mb->mbxStatus) { 8657 mempool_free(pmb, phba->mbox_mem_pool); 8658 return; 8659 } 8660 8661 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8662 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8663 lpfc_max_els_tries, ndlp, 8664 ndlp->nlp_DID, ELS_CMD_ACC); 8665 8666 /* Decrement the ndlp reference count from previous mbox command */ 8667 lpfc_nlp_put(ndlp); 8668 8669 if (!elsiocb) { 8670 mempool_free(pmb, phba->mbox_mem_pool); 8671 return; 8672 } 8673 8674 ulp_context = get_job_ulpcontext(phba, elsiocb); 8675 if (phba->sli_rev == LPFC_SLI_REV4) { 8676 wqe = &elsiocb->wqe; 8677 /* Xri / rx_id */ 8678 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8679 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8680 } else { 8681 icmd = &elsiocb->iocb; 8682 icmd->ulpContext = rxid; 8683 icmd->unsli3.rcvsli3.ox_id = oxid; 8684 } 8685 8686 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8687 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8688 pcmd += sizeof(uint32_t); /* Skip past command */ 8689 rls_rsp = (struct RLS_RSP *)pcmd; 8690 8691 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8692 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8693 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8694 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8695 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8696 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8697 mempool_free(pmb, phba->mbox_mem_pool); 8698 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8699 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8700 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8701 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8702 elsiocb->iotag, ulp_context, 8703 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8704 ndlp->nlp_rpi); 8705 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8706 phba->fc_stat.elsXmitACC++; 8707 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8708 if (!elsiocb->ndlp) { 8709 lpfc_els_free_iocb(phba, elsiocb); 8710 return; 8711 } 8712 8713 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8714 if (rc == IOCB_ERROR) { 8715 lpfc_els_free_iocb(phba, elsiocb); 8716 lpfc_nlp_put(ndlp); 8717 } 8718 return; 8719 } 8720 8721 /** 8722 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8723 * @vport: pointer to a host virtual N_Port data structure. 8724 * @cmdiocb: pointer to lpfc command iocb data structure. 8725 * @ndlp: pointer to a node-list data structure. 8726 * 8727 * This routine processes Read Link Status (RLS) IOCB received as an 8728 * ELS unsolicited event. 
It first checks the remote port state. If the 8729 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8730 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8731 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 8732 * to read the HBA link statistics. The lpfc_els_rsp_rls_acc() routine, 8733 * set as the completion callback of the MBX_READ_LNK_STAT mailbox command, 8734 * actually sends out the RLS Accept (ACC) response. 8735 * 8736 * Return codes 8737 * 0 - Successfully processed rls iocb (currently always return 0) 8738 **/ 8739 static int 8740 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8741 struct lpfc_nodelist *ndlp) 8742 { 8743 struct lpfc_hba *phba = vport->phba; 8744 LPFC_MBOXQ_t *mbox; 8745 struct ls_rjt stat; 8746 u32 ctx = get_job_ulpcontext(phba, cmdiocb); 8747 u32 ox_id = get_job_rcvoxid(phba, cmdiocb); 8748 8749 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8750 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8751 /* reject the unsolicited RLS request and done with it */ 8752 goto reject_out; 8753 8754 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 8755 if (mbox) { 8756 lpfc_read_lnk_stat(phba, mbox); 8757 mbox->ctx_buf = (void *)((unsigned long) 8758 (ox_id << 16 | ctx)); 8759 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 8760 if (!mbox->ctx_ndlp) 8761 goto node_err; 8762 mbox->vport = vport; 8763 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 8764 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8765 != MBX_NOT_FINISHED) 8766 /* Mbox completion will send ELS Response */ 8767 return 0; 8768 /* Decrement reference count used for the failed mbox 8769 * command. 8770 */ 8771 lpfc_nlp_put(ndlp); 8772 node_err: 8773 mempool_free(mbox, phba->mbox_mem_pool); 8774 } 8775 reject_out: 8776 /* issue rejection response */ 8777 stat.un.b.lsRjtRsvd0 = 0; 8778 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8779 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8780 stat.un.b.vendorUnique = 0; 8781 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8782 return 0; 8783 } 8784 8785 /** 8786 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 8787 * @vport: pointer to a host virtual N_Port data structure. 8788 * @cmdiocb: pointer to lpfc command iocb data structure. 8789 * @ndlp: pointer to a node-list data structure. 8790 * 8791 * This routine processes Read Timeout Value (RTV) IOCB received as an 8792 * ELS unsolicited event. It first checks the remote port state. If the 8793 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8794 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 8795 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout 8796 * Value (RTV) unsolicited IOCB event. 8797 * 8798 * Note that the ndlp reference count will be incremented by 1 for holding the 8799 * ndlp and the reference to ndlp will be stored into the ndlp field of 8800 * the IOCB for the completion callback function to the RTV Accept Response 8801 * ELS IOCB command.
8802 * 8803 * Return codes 8804 * 0 - Successfully processed rtv iocb (currently always return 0) 8805 **/ 8806 static int 8807 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8808 struct lpfc_nodelist *ndlp) 8809 { 8810 int rc = 0; 8811 IOCB_t *icmd; 8812 union lpfc_wqe128 *wqe; 8813 struct lpfc_hba *phba = vport->phba; 8814 struct ls_rjt stat; 8815 struct RTV_RSP *rtv_rsp; 8816 uint8_t *pcmd; 8817 struct lpfc_iocbq *elsiocb; 8818 uint32_t cmdsize; 8819 u32 ulp_context; 8820 8821 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8822 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 8823 /* reject the unsolicited RTV request and done with it */ 8824 goto reject_out; 8825 8826 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 8827 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8828 lpfc_max_els_tries, ndlp, 8829 ndlp->nlp_DID, ELS_CMD_ACC); 8830 8831 if (!elsiocb) 8832 return 1; 8833 8834 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8835 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8836 pcmd += sizeof(uint32_t); /* Skip past command */ 8837 8838 ulp_context = get_job_ulpcontext(phba, elsiocb); 8839 /* use the command's xri in the response */ 8840 if (phba->sli_rev == LPFC_SLI_REV4) { 8841 wqe = &elsiocb->wqe; 8842 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 8843 get_job_ulpcontext(phba, cmdiocb)); 8844 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8845 get_job_rcvoxid(phba, cmdiocb)); 8846 } else { 8847 icmd = &elsiocb->iocb; 8848 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb); 8849 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb); 8850 } 8851 8852 rtv_rsp = (struct RTV_RSP *)pcmd; 8853 8854 /* populate RTV payload */ 8855 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 8856 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 8857 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 8858 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 8859 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 8860 8861 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 8862 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8863 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 8864 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 8865 "Data: x%x x%x x%x\n", 8866 elsiocb->iotag, ulp_context, 8867 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8868 ndlp->nlp_rpi, 8869 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 8870 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8871 phba->fc_stat.elsXmitACC++; 8872 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8873 if (!elsiocb->ndlp) { 8874 lpfc_els_free_iocb(phba, elsiocb); 8875 return 0; 8876 } 8877 8878 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8879 if (rc == IOCB_ERROR) { 8880 lpfc_els_free_iocb(phba, elsiocb); 8881 lpfc_nlp_put(ndlp); 8882 } 8883 return 0; 8884 8885 reject_out: 8886 /* issue rejection response */ 8887 stat.un.b.lsRjtRsvd0 = 0; 8888 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8889 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8890 stat.un.b.vendorUnique = 0; 8891 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8892 return 0; 8893 } 8894 8895 /* lpfc_issue_els_rrq - Issue an rrq els command 8896 * @vport: pointer to a host virtual N_Port data structure. 8897 * @ndlp: pointer to a node-list data structure. 8898 * @did: DID of the target. 8899 * @rrq: Pointer to the rrq struct. 8900 * 8901 * Build an ELS RRQ command and send it to the target. If the issue_iocb is 8902 * successful, the completion handler will clear the RRQ.
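 *
 * Note that the ndlp reference count is incremented and the reference is
 * stored in the RRQ ELS IOCB before the IOCB is issued; on an issue
 * failure the reference is released and the IOCB is freed.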
8903 * 8904 * Return codes 8905 * 0 - Successfully sent rrq els iocb. 8906 * 1 - Failed to send rrq els iocb. 8907 **/ 8908 static int 8909 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8910 uint32_t did, struct lpfc_node_rrq *rrq) 8911 { 8912 struct lpfc_hba *phba = vport->phba; 8913 struct RRQ *els_rrq; 8914 struct lpfc_iocbq *elsiocb; 8915 uint8_t *pcmd; 8916 uint16_t cmdsize; 8917 int ret; 8918 8919 if (!ndlp) 8920 return 1; 8921 8922 /* If ndlp is not NULL, we will bump the reference count on it */ 8923 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8924 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8925 ELS_CMD_RRQ); 8926 if (!elsiocb) 8927 return 1; 8928 8929 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8930 8931 /* For RRQ request, remainder of payload is Exchange IDs */ 8932 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8933 pcmd += sizeof(uint32_t); 8934 els_rrq = (struct RRQ *) pcmd; 8935 8936 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8937 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8938 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8939 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8940 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8941 8942 8943 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8944 "Issue RRQ: did:x%x", 8945 did, rrq->xritag, rrq->rxid); 8946 elsiocb->context_un.rrq = rrq; 8947 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8948 8949 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8950 if (!elsiocb->ndlp) 8951 goto io_err; 8952 8953 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8954 if (ret == IOCB_ERROR) { 8955 lpfc_nlp_put(ndlp); 8956 goto io_err; 8957 } 8958 return 0; 8959 8960 io_err: 8961 lpfc_els_free_iocb(phba, elsiocb); 8962 return 1; 8963 } 8964 8965 /** 8966 * lpfc_send_rrq - Sends ELS RRQ if needed. 8967 * @phba: pointer to lpfc hba data structure. 8968 * @rrq: pointer to the active rrq. 8969 * 8970 * This routine will call the lpfc_issue_els_rrq if the rrq is 8971 * still active for the xri. If this function returns a failure then 8972 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8973 * 8974 * Returns 0 Success. 8975 * 1 Failure. 8976 **/ 8977 int 8978 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8979 { 8980 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8981 rrq->nlp_DID); 8982 if (!ndlp) 8983 return 1; 8984 8985 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8986 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8987 rrq->nlp_DID, rrq); 8988 else 8989 return 1; 8990 } 8991 8992 /** 8993 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8994 * @vport: pointer to a host virtual N_Port data structure. 8995 * @cmdsize: size of the ELS command. 8996 * @oldiocb: pointer to the original lpfc command iocb data structure. 8997 * @ndlp: pointer to a node-list data structure. 8998 * 8999 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 9000 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 9001 * 9002 * Note that the ndlp reference count will be incremented by 1 for holding the 9003 * ndlp and the reference to ndlp will be stored into the ndlp field of 9004 * the IOCB for the completion callback function to the RPL Accept Response 9005 * ELS command. 
9006 * 9007 * Return code 9008 * 0 - Successfully issued ACC RPL ELS command 9009 * 1 - Failed to issue ACC RPL ELS command 9010 **/ 9011 static int 9012 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 9013 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 9014 { 9015 int rc = 0; 9016 struct lpfc_hba *phba = vport->phba; 9017 IOCB_t *icmd; 9018 union lpfc_wqe128 *wqe; 9019 RPL_RSP rpl_rsp; 9020 struct lpfc_iocbq *elsiocb; 9021 uint8_t *pcmd; 9022 u32 ulp_context; 9023 9024 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 9025 ndlp->nlp_DID, ELS_CMD_ACC); 9026 9027 if (!elsiocb) 9028 return 1; 9029 9030 ulp_context = get_job_ulpcontext(phba, elsiocb); 9031 if (phba->sli_rev == LPFC_SLI_REV4) { 9032 wqe = &elsiocb->wqe; 9033 /* Xri / rx_id */ 9034 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 9035 get_job_ulpcontext(phba, oldiocb)); 9036 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9037 get_job_rcvoxid(phba, oldiocb)); 9038 } else { 9039 icmd = &elsiocb->iocb; 9040 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 9041 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 9042 } 9043 9044 pcmd = elsiocb->cmd_dmabuf->virt; 9045 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 9046 pcmd += sizeof(uint16_t); 9047 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 9048 pcmd += sizeof(uint16_t); 9049 9050 /* Setup the RPL ACC payload */ 9051 rpl_rsp.listLen = be32_to_cpu(1); 9052 rpl_rsp.index = 0; 9053 rpl_rsp.port_num_blk.portNum = 0; 9054 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 9055 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 9056 sizeof(struct lpfc_name)); 9057 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 9058 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 9059 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9060 "0120 Xmit ELS RPL ACC response tag x%x " 9061 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 9062 "rpi x%x\n", 9063 elsiocb->iotag, ulp_context, 9064 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 9065 ndlp->nlp_rpi); 9066 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 9067 phba->fc_stat.elsXmitACC++; 9068 elsiocb->ndlp = lpfc_nlp_get(ndlp); 9069 if (!elsiocb->ndlp) { 9070 lpfc_els_free_iocb(phba, elsiocb); 9071 return 1; 9072 } 9073 9074 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9075 if (rc == IOCB_ERROR) { 9076 lpfc_els_free_iocb(phba, elsiocb); 9077 lpfc_nlp_put(ndlp); 9078 return 1; 9079 } 9080 9081 return 0; 9082 } 9083 9084 /** 9085 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 9086 * @vport: pointer to a host virtual N_Port data structure. 9087 * @cmdiocb: pointer to lpfc command iocb data structure. 9088 * @ndlp: pointer to a node-list data structure. 9089 * 9090 * This routine processes Read Port List (RPL) IOCB received as an ELS 9091 * unsolicited event. It first checks the remote port state. If the remote 9092 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 9093 * invokes the lpfc_els_rsp_reject() routine to send reject response. 9094 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 9095 * to accept the RPL. 
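 *
 * Note that the driver reports a port list of one: the RPL Accept carries
 * a single entry describing the @vport itself (its N_Port_ID and PortName).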
9096 * 9097 * Return code 9098 * 0 - Successfully processed rpl iocb (currently always return 0) 9099 **/ 9100 static int 9101 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9102 struct lpfc_nodelist *ndlp) 9103 { 9104 struct lpfc_dmabuf *pcmd; 9105 uint32_t *lp; 9106 uint32_t maxsize; 9107 uint16_t cmdsize; 9108 RPL *rpl; 9109 struct ls_rjt stat; 9110 9111 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 9112 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 9113 /* issue rejection response */ 9114 stat.un.b.lsRjtRsvd0 = 0; 9115 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 9116 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 9117 stat.un.b.vendorUnique = 0; 9118 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 9119 NULL); 9120 /* rejected the unsolicited RPL request and done with it */ 9121 return 0; 9122 } 9123 9124 pcmd = cmdiocb->cmd_dmabuf; 9125 lp = (uint32_t *) pcmd->virt; 9126 rpl = (RPL *) (lp + 1); 9127 maxsize = be32_to_cpu(rpl->maxsize); 9128 9129 /* We support only one port */ 9130 if ((rpl->index == 0) && 9131 ((maxsize == 0) || 9132 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 9133 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 9134 } else { 9135 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 9136 } 9137 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 9138 9139 return 0; 9140 } 9141 9142 /** 9143 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 9144 * @vport: pointer to a virtual N_Port data structure. 9145 * @cmdiocb: pointer to lpfc command iocb data structure. 9146 * @ndlp: pointer to a node-list data structure. 9147 * 9148 * This routine processes Fibre Channel Address Resolution Protocol 9149 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 9150 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 9151 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 9152 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 9153 * remote PortName is compared against the FC PortName stored in the @vport 9154 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 9155 * compared against the FC NodeName stored in the @vport data structure. 9156 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 9157 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 9158 * invoked to send out FARP Response to the remote node. Before sending the 9159 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 9160 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 9161 * routine is invoked to log into the remote port first. 
9162 * 9163 * Return code 9164 * 0 - Either the FARP Match Mode not supported or successfully processed 9165 **/ 9166 static int 9167 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9168 struct lpfc_nodelist *ndlp) 9169 { 9170 struct lpfc_dmabuf *pcmd; 9171 uint32_t *lp; 9172 FARP *fp; 9173 uint32_t cnt, did; 9174 9175 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9176 pcmd = cmdiocb->cmd_dmabuf; 9177 lp = (uint32_t *) pcmd->virt; 9178 9179 lp++; 9180 fp = (FARP *) lp; 9181 /* FARP-REQ received from DID <did> */ 9182 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9183 "0601 FARP-REQ received from DID x%x\n", did); 9184 /* We will only support match on WWPN or WWNN */ 9185 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 9186 return 0; 9187 } 9188 9189 cnt = 0; 9190 /* If this FARP command is searching for my portname */ 9191 if (fp->Mflags & FARP_MATCH_PORT) { 9192 if (memcmp(&fp->RportName, &vport->fc_portname, 9193 sizeof(struct lpfc_name)) == 0) 9194 cnt = 1; 9195 } 9196 9197 /* If this FARP command is searching for my nodename */ 9198 if (fp->Mflags & FARP_MATCH_NODE) { 9199 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9200 sizeof(struct lpfc_name)) == 0) 9201 cnt = 1; 9202 } 9203 9204 if (cnt) { 9205 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9206 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9207 /* Log back into the node before sending the FARP. */ 9208 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9209 ndlp->nlp_prev_state = ndlp->nlp_state; 9210 lpfc_nlp_set_state(vport, ndlp, 9211 NLP_STE_PLOGI_ISSUE); 9212 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9213 } 9214 9215 /* Send a FARP response to that node */ 9216 if (fp->Rflags & FARP_REQUEST_FARPR) 9217 lpfc_issue_els_farpr(vport, did, 0); 9218 } 9219 } 9220 return 0; 9221 } 9222 9223 /** 9224 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9225 * @vport: pointer to a host virtual N_Port data structure. 9226 * @cmdiocb: pointer to lpfc command iocb data structure. 9227 * @ndlp: pointer to a node-list data structure. 9228 * 9229 * This routine processes Fibre Channel Address Resolution Protocol 9230 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9231 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9232 * the FARP response request. 9233 * 9234 * Return code 9235 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9236 **/ 9237 static int 9238 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9239 struct lpfc_nodelist *ndlp) 9240 { 9241 uint32_t did; 9242 9243 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9244 9245 /* FARP-RSP received from DID <did> */ 9246 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9247 "0600 FARP-RSP received from DID x%x\n", did); 9248 /* ACCEPT the Farp resp request */ 9249 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9250 9251 return 0; 9252 } 9253 9254 /** 9255 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9256 * @vport: pointer to a host virtual N_Port data structure. 9257 * @cmdiocb: pointer to lpfc command iocb data structure. 9258 * @fan_ndlp: pointer to a node-list data structure. 9259 * 9260 * This routine processes a Fabric Address Notification (FAN) IOCB 9261 * command received as an ELS unsolicited event. The FAN ELS command will 9262 * only be processed on a physical port (i.e., the @vport represents the 9263 * physical port). 
The fabric NodeName and PortName from the FAN IOCB are 9264 * compared against those in the phba data structure. If any of those is 9265 * different, the lpfc_initial_flogi() routine is invoked to initialize 9266 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, 9267 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 9268 * is invoked to register login to the fabric. 9269 * 9270 * Return code 9271 * 0 - Successfully processed fan iocb (currently always return 0). 9272 **/ 9273 static int 9274 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9275 struct lpfc_nodelist *fan_ndlp) 9276 { 9277 struct lpfc_hba *phba = vport->phba; 9278 uint32_t *lp; 9279 FAN *fp; 9280 9281 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 9282 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 9283 fp = (FAN *) ++lp; 9284 /* FAN received; Fan does not have a reply sequence */ 9285 if ((vport == phba->pport) && 9286 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 9287 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 9288 sizeof(struct lpfc_name))) || 9289 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 9290 sizeof(struct lpfc_name)))) { 9291 /* This port has switched fabrics. FLOGI is required */ 9292 lpfc_issue_init_vfi(vport); 9293 } else { 9294 /* FAN verified - skip FLOGI */ 9295 vport->fc_myDID = vport->fc_prevDID; 9296 if (phba->sli_rev < LPFC_SLI_REV4) 9297 lpfc_issue_fabric_reglogin(vport); 9298 else { 9299 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9300 "3138 Need register VFI: (x%x/%x)\n", 9301 vport->fc_prevDID, vport->fc_myDID); 9302 lpfc_issue_reg_vfi(vport); 9303 } 9304 } 9305 } 9306 return 0; 9307 } 9308 9309 /** 9310 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb 9311 * @vport: pointer to a host virtual N_Port data structure. 9312 * @cmdiocb: pointer to lpfc command iocb data structure. 9313 * @ndlp: pointer to a node-list data structure. 
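 *
 * This routine processes an Exchange Diagnostic Capabilities (EDC) ELS
 * request received as an unsolicited event. It walks the diagnostic
 * descriptor TLVs in the request payload, logging the Link Fault
 * capability descriptor and adopting the least capable congestion
 * signaling settings from the Congestion Signaling capability descriptor.
 * It then sends an EDC Accept (ACC) response by invoking the
 * lpfc_issue_els_edc_rsp() routine and reconfigures congestion signaling
 * via lpfc_config_cgn_signal().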
9314 * 9315 * Return code 9316 * 0 - Successfully processed echo iocb (currently always return 0) 9317 **/ 9318 static int 9319 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9320 struct lpfc_nodelist *ndlp) 9321 { 9322 struct lpfc_hba *phba = vport->phba; 9323 struct fc_els_edc *edc_req; 9324 struct fc_tlv_desc *tlv; 9325 uint8_t *payload; 9326 uint32_t *ptr, dtag; 9327 const char *dtag_nm; 9328 int desc_cnt = 0, bytes_remain; 9329 struct fc_diag_lnkflt_desc *plnkflt; 9330 9331 payload = cmdiocb->cmd_dmabuf->virt; 9332 9333 edc_req = (struct fc_els_edc *)payload; 9334 bytes_remain = be32_to_cpu(edc_req->desc_len); 9335 9336 ptr = (uint32_t *)payload; 9337 lpfc_printf_vlog(vport, KERN_INFO, 9338 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9339 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9340 bytes_remain, be32_to_cpu(*ptr), 9341 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9342 9343 /* No signal support unless there is a congestion descriptor */ 9344 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9345 phba->cgn_sig_freq = 0; 9346 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9347 9348 if (bytes_remain <= 0) 9349 goto out; 9350 9351 tlv = edc_req->desc; 9352 9353 /* 9354 * cycle through EDC diagnostic descriptors to find the 9355 * congestion signaling capability descriptor 9356 */ 9357 while (bytes_remain) { 9358 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9359 lpfc_printf_log(phba, KERN_WARNING, 9360 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9361 "6464 Truncated TLV hdr on " 9362 "Diagnostic descriptor[%d]\n", 9363 desc_cnt); 9364 goto out; 9365 } 9366 9367 dtag = be32_to_cpu(tlv->desc_tag); 9368 switch (dtag) { 9369 case ELS_DTAG_LNK_FAULT_CAP: 9370 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9371 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9372 sizeof(struct fc_diag_lnkflt_desc)) { 9373 lpfc_printf_log(phba, KERN_WARNING, 9374 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9375 "6465 Truncated Link Fault Diagnostic " 9376 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9377 desc_cnt, bytes_remain, 9378 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9379 sizeof(struct fc_diag_lnkflt_desc)); 9380 goto out; 9381 } 9382 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 9383 lpfc_printf_log(phba, KERN_INFO, 9384 LOG_ELS | LOG_LDS_EVENT, 9385 "4626 Link Fault Desc Data: x%08x len x%x " 9386 "da x%x dd x%x interval x%x\n", 9387 be32_to_cpu(plnkflt->desc_tag), 9388 be32_to_cpu(plnkflt->desc_len), 9389 be32_to_cpu( 9390 plnkflt->degrade_activate_threshold), 9391 be32_to_cpu( 9392 plnkflt->degrade_deactivate_threshold), 9393 be32_to_cpu(plnkflt->fec_degrade_interval)); 9394 break; 9395 case ELS_DTAG_CG_SIGNAL_CAP: 9396 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9397 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9398 sizeof(struct fc_diag_cg_sig_desc)) { 9399 lpfc_printf_log( 9400 phba, KERN_WARNING, LOG_CGN_MGMT, 9401 "6466 Truncated cgn signal Diagnostic " 9402 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9403 desc_cnt, bytes_remain, 9404 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9405 sizeof(struct fc_diag_cg_sig_desc)); 9406 goto out; 9407 } 9408 9409 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9410 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9411 9412 /* We start negotiation with lpfc_fabric_cgn_frequency. 9413 * When we process the EDC, we will settle on the 9414 * higher frequency. 
9415 */ 9416 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 9417 9418 lpfc_least_capable_settings( 9419 phba, (struct fc_diag_cg_sig_desc *)tlv); 9420 break; 9421 default: 9422 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9423 lpfc_printf_log(phba, KERN_WARNING, 9424 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9425 "6467 unknown Diagnostic " 9426 "Descriptor[%d]: tag x%x (%s)\n", 9427 desc_cnt, dtag, dtag_nm); 9428 } 9429 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9430 tlv = fc_tlv_next_desc(tlv); 9431 desc_cnt++; 9432 } 9433 out: 9434 /* Need to send back an ACC */ 9435 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp); 9436 9437 lpfc_config_cgn_signal(phba); 9438 return 0; 9439 } 9440 9441 /** 9442 * lpfc_els_timeout - Handler funciton to the els timer 9443 * @t: timer context used to obtain the vport. 9444 * 9445 * This routine is invoked by the ELS timer after timeout. It posts the ELS 9446 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 9447 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 9448 * up the worker thread. It is for the worker thread to invoke the routine 9449 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 9450 **/ 9451 void 9452 lpfc_els_timeout(struct timer_list *t) 9453 { 9454 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 9455 struct lpfc_hba *phba = vport->phba; 9456 uint32_t tmo_posted; 9457 unsigned long iflag; 9458 9459 spin_lock_irqsave(&vport->work_port_lock, iflag); 9460 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 9461 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9462 vport->work_port_events |= WORKER_ELS_TMO; 9463 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 9464 9465 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 9466 lpfc_worker_wake_up(phba); 9467 return; 9468 } 9469 9470 9471 /** 9472 * lpfc_els_timeout_handler - Process an els timeout event 9473 * @vport: pointer to a virtual N_Port data structure. 9474 * 9475 * This routine is the actual handler function that processes an ELS timeout 9476 * event. It walks the ELS ring to get and abort all the IOCBs (except the 9477 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 9478 * invoking the lpfc_sli_issue_abort_iotag() routine. 
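 *
 * IOCBs whose driver timeout has not yet expired only have their remaining
 * timeout decremented and are revisited on the next timer pop. If the
 * transmit completion queue is still not empty afterwards, the ELS timer
 * is re-armed.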
9479 **/ 9480 void 9481 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9482 { 9483 struct lpfc_hba *phba = vport->phba; 9484 struct lpfc_sli_ring *pring; 9485 struct lpfc_iocbq *tmp_iocb, *piocb; 9486 IOCB_t *cmd = NULL; 9487 struct lpfc_dmabuf *pcmd; 9488 uint32_t els_command = 0; 9489 uint32_t timeout; 9490 uint32_t remote_ID = 0xffffffff; 9491 LIST_HEAD(abort_list); 9492 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9493 9494 9495 timeout = (uint32_t)(phba->fc_ratov << 1); 9496 9497 pring = lpfc_phba_elsring(phba); 9498 if (unlikely(!pring)) 9499 return; 9500 9501 if (phba->pport->load_flag & FC_UNLOADING) 9502 return; 9503 9504 spin_lock_irq(&phba->hbalock); 9505 if (phba->sli_rev == LPFC_SLI_REV4) 9506 spin_lock(&pring->ring_lock); 9507 9508 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9509 ulp_command = get_job_cmnd(phba, piocb); 9510 ulp_context = get_job_ulpcontext(phba, piocb); 9511 did = get_job_els_rsp64_did(phba, piocb); 9512 9513 if (phba->sli_rev == LPFC_SLI_REV4) { 9514 iotag = get_wqe_reqtag(piocb); 9515 } else { 9516 cmd = &piocb->iocb; 9517 iotag = cmd->ulpIoTag; 9518 } 9519 9520 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9521 ulp_command == CMD_ABORT_XRI_CX || 9522 ulp_command == CMD_ABORT_XRI_CN || 9523 ulp_command == CMD_CLOSE_XRI_CN) 9524 continue; 9525 9526 if (piocb->vport != vport) 9527 continue; 9528 9529 pcmd = piocb->cmd_dmabuf; 9530 if (pcmd) 9531 els_command = *(uint32_t *) (pcmd->virt); 9532 9533 if (els_command == ELS_CMD_FARP || 9534 els_command == ELS_CMD_FARPR || 9535 els_command == ELS_CMD_FDISC) 9536 continue; 9537 9538 if (piocb->drvrTimeout > 0) { 9539 if (piocb->drvrTimeout >= timeout) 9540 piocb->drvrTimeout -= timeout; 9541 else 9542 piocb->drvrTimeout = 0; 9543 continue; 9544 } 9545 9546 remote_ID = 0xffffffff; 9547 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9548 remote_ID = did; 9549 } else { 9550 struct lpfc_nodelist *ndlp; 9551 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9552 if (ndlp) 9553 remote_ID = ndlp->nlp_DID; 9554 } 9555 list_add_tail(&piocb->dlist, &abort_list); 9556 } 9557 if (phba->sli_rev == LPFC_SLI_REV4) 9558 spin_unlock(&pring->ring_lock); 9559 spin_unlock_irq(&phba->hbalock); 9560 9561 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9562 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9563 "0127 ELS timeout Data: x%x x%x x%x " 9564 "x%x\n", els_command, 9565 remote_ID, ulp_command, iotag); 9566 9567 spin_lock_irq(&phba->hbalock); 9568 list_del_init(&piocb->dlist); 9569 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9570 spin_unlock_irq(&phba->hbalock); 9571 } 9572 9573 /* Make sure HBA is alive */ 9574 lpfc_issue_hb_tmo(phba); 9575 9576 if (!list_empty(&pring->txcmplq)) 9577 if (!(phba->pport->load_flag & FC_UNLOADING)) 9578 mod_timer(&vport->els_tmofunc, 9579 jiffies + msecs_to_jiffies(1000 * timeout)); 9580 } 9581 9582 /** 9583 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9584 * @vport: pointer to a host virtual N_Port data structure. 9585 * 9586 * This routine is used to clean up all the outstanding ELS commands on a 9587 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9588 * routine. After that, it walks the ELS transmit queue to remove all the 9589 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9590 * the IOCBs with a non-NULL completion callback function, the callback 9591 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9592 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9593 * callback function, the IOCB will simply be released. Finally, it walks 9594 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9595 * completion queue IOCB that is associated with the @vport and is not 9596 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9597 * part of the discovery state machine) out to HBA by invoking the 9598 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9599 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9600 * the IOCBs are aborted when this function returns. 9601 **/ 9602 void 9603 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9604 { 9605 LIST_HEAD(abort_list); 9606 LIST_HEAD(cancel_list); 9607 struct lpfc_hba *phba = vport->phba; 9608 struct lpfc_sli_ring *pring; 9609 struct lpfc_iocbq *tmp_iocb, *piocb; 9610 u32 ulp_command; 9611 unsigned long iflags = 0; 9612 bool mbx_tmo_err; 9613 9614 lpfc_fabric_abort_vport(vport); 9615 9616 /* 9617 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9618 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9619 * ultimately grabs the ring_lock, the driver must splice the list into 9620 * a working list and release the locks before calling the abort. 9621 */ 9622 spin_lock_irqsave(&phba->hbalock, iflags); 9623 pring = lpfc_phba_elsring(phba); 9624 9625 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9626 if (unlikely(!pring)) { 9627 spin_unlock_irqrestore(&phba->hbalock, iflags); 9628 return; 9629 } 9630 9631 if (phba->sli_rev == LPFC_SLI_REV4) 9632 spin_lock(&pring->ring_lock); 9633 9634 mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); 9635 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9636 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9637 if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err) 9638 continue; 9639 9640 if (piocb->vport != vport) 9641 continue; 9642 9643 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) 9644 continue; 9645 9646 /* On the ELS ring we can have ELS_REQUESTs or 9647 * GEN_REQUESTs waiting for a response. 9648 */ 9649 ulp_command = get_job_cmnd(phba, piocb); 9650 if (ulp_command == CMD_ELS_REQUEST64_CR) { 9651 list_add_tail(&piocb->dlist, &abort_list); 9652 9653 /* If the link is down when flushing ELS commands 9654 * the firmware will not complete them till after 9655 * the link comes back up. This may confuse 9656 * discovery for the new link up, so we need to 9657 * change the compl routine to just clean up the iocb 9658 * and avoid any retry logic. 9659 */ 9660 if (phba->link_state == LPFC_LINK_DOWN) 9661 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9662 } else if (ulp_command == CMD_GEN_REQUEST64_CR || 9663 mbx_tmo_err) 9664 list_add_tail(&piocb->dlist, &abort_list); 9665 } 9666 9667 if (phba->sli_rev == LPFC_SLI_REV4) 9668 spin_unlock(&pring->ring_lock); 9669 spin_unlock_irqrestore(&phba->hbalock, iflags); 9670 9671 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 9672 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9673 spin_lock_irqsave(&phba->hbalock, iflags); 9674 list_del_init(&piocb->dlist); 9675 if (mbx_tmo_err) 9676 list_move_tail(&piocb->list, &cancel_list); 9677 else 9678 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9679 9680 spin_unlock_irqrestore(&phba->hbalock, iflags); 9681 } 9682 if (!list_empty(&cancel_list)) 9683 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, 9684 IOERR_SLI_ABORTED); 9685 else 9686 /* Make sure HBA is alive */ 9687 lpfc_issue_hb_tmo(phba); 9688 9689 if (!list_empty(&abort_list)) 9690 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9691 "3387 abort list for txq not empty\n"); 9692 INIT_LIST_HEAD(&abort_list); 9693 9694 spin_lock_irqsave(&phba->hbalock, iflags); 9695 if (phba->sli_rev == LPFC_SLI_REV4) 9696 spin_lock(&pring->ring_lock); 9697 9698 /* No need to abort the txq list, 9699 * just queue them up for lpfc_sli_cancel_iocbs 9700 */ 9701 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9702 ulp_command = get_job_cmnd(phba, piocb); 9703 9704 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9705 continue; 9706 9707 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9708 if (ulp_command == CMD_QUE_RING_BUF_CN || 9709 ulp_command == CMD_QUE_RING_BUF64_CN || 9710 ulp_command == CMD_CLOSE_XRI_CN || 9711 ulp_command == CMD_ABORT_XRI_CN || 9712 ulp_command == CMD_ABORT_XRI_CX) 9713 continue; 9714 9715 if (piocb->vport != vport) 9716 continue; 9717 9718 list_del_init(&piocb->list); 9719 list_add_tail(&piocb->list, &abort_list); 9720 } 9721 9722 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9723 if (vport == phba->pport) { 9724 list_for_each_entry_safe(piocb, tmp_iocb, 9725 &phba->fabric_iocb_list, list) { 9726 list_del_init(&piocb->list); 9727 list_add_tail(&piocb->list, &abort_list); 9728 } 9729 } 9730 9731 if (phba->sli_rev == LPFC_SLI_REV4) 9732 spin_unlock(&pring->ring_lock); 9733 spin_unlock_irqrestore(&phba->hbalock, iflags); 9734 9735 /* Cancel all the IOCBs from the completions list */ 9736 lpfc_sli_cancel_iocbs(phba, &abort_list, 9737 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9738 9739 return; 9740 } 9741 9742 /** 9743 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9744 * @phba: pointer to lpfc hba data structure. 9745 * 9746 * This routine is used to clean up all the outstanding ELS commands on a 9747 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9748 * routine. After that, it walks the ELS transmit queue to remove all the 9749 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9750 * the IOCBs with the completion callback function associated, the callback 9751 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9752 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9753 * callback function associated, the IOCB will simply be released. Finally, 9754 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9755 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9756 * management plane IOCBs that are not part of the discovery state machine) 9757 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 
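 *
 * This is done by walking the HBA's port list under the port_list_lock
 * and invoking the lpfc_els_flush_cmd() routine for each vport on the
 * list.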
9758 **/ 9759 void 9760 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9761 { 9762 struct lpfc_vport *vport; 9763 9764 spin_lock_irq(&phba->port_list_lock); 9765 list_for_each_entry(vport, &phba->port_list, listentry) 9766 lpfc_els_flush_cmd(vport); 9767 spin_unlock_irq(&phba->port_list_lock); 9768 9769 return; 9770 } 9771 9772 /** 9773 * lpfc_send_els_failure_event - Posts an ELS command failure event 9774 * @phba: Pointer to hba context object. 9775 * @cmdiocbp: Pointer to command iocb which reported error. 9776 * @rspiocbp: Pointer to response iocb which reported error. 9777 * 9778 * This function sends an event when there is an ELS command 9779 * failure. 9780 **/ 9781 void 9782 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9783 struct lpfc_iocbq *cmdiocbp, 9784 struct lpfc_iocbq *rspiocbp) 9785 { 9786 struct lpfc_vport *vport = cmdiocbp->vport; 9787 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9788 struct lpfc_lsrjt_event lsrjt_event; 9789 struct lpfc_fabric_event_header fabric_event; 9790 struct ls_rjt stat; 9791 struct lpfc_nodelist *ndlp; 9792 uint32_t *pcmd; 9793 u32 ulp_status, ulp_word4; 9794 9795 ndlp = cmdiocbp->ndlp; 9796 if (!ndlp) 9797 return; 9798 9799 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9800 ulp_word4 = get_job_word4(phba, rspiocbp); 9801 9802 if (ulp_status == IOSTAT_LS_RJT) { 9803 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9804 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9805 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9806 sizeof(struct lpfc_name)); 9807 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9808 sizeof(struct lpfc_name)); 9809 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9810 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9811 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9812 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9813 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9814 fc_host_post_vendor_event(shost, 9815 fc_get_event_number(), 9816 sizeof(lsrjt_event), 9817 (char *)&lsrjt_event, 9818 LPFC_NL_VENDOR_ID); 9819 return; 9820 } 9821 if (ulp_status == IOSTAT_NPORT_BSY || 9822 ulp_status == IOSTAT_FABRIC_BSY) { 9823 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9824 if (ulp_status == IOSTAT_NPORT_BSY) 9825 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9826 else 9827 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9828 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9829 sizeof(struct lpfc_name)); 9830 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9831 sizeof(struct lpfc_name)); 9832 fc_host_post_vendor_event(shost, 9833 fc_get_event_number(), 9834 sizeof(fabric_event), 9835 (char *)&fabric_event, 9836 LPFC_NL_VENDOR_ID); 9837 return; 9838 } 9839 9840 } 9841 9842 /** 9843 * lpfc_send_els_event - Posts unsolicited els event 9844 * @vport: Pointer to vport object. 9845 * @ndlp: Pointer FC node object. 9846 * @payload: ELS command code type. 9847 * 9848 * This function posts an event when there is an incoming 9849 * unsolicited ELS command. 
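 *
 * Only PLOGI, PRLO, ADISC and LOGO commands are posted as vendor events;
 * for a LOGO, the WWPN carried in the LOGO payload is included in the
 * event data. All other ELS commands are ignored.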
9850 **/ 9851 static void 9852 lpfc_send_els_event(struct lpfc_vport *vport, 9853 struct lpfc_nodelist *ndlp, 9854 uint32_t *payload) 9855 { 9856 struct lpfc_els_event_header *els_data = NULL; 9857 struct lpfc_logo_event *logo_data = NULL; 9858 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9859 9860 if (*payload == ELS_CMD_LOGO) { 9861 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9862 if (!logo_data) { 9863 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9864 "0148 Failed to allocate memory " 9865 "for LOGO event\n"); 9866 return; 9867 } 9868 els_data = &logo_data->header; 9869 } else { 9870 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9871 GFP_KERNEL); 9872 if (!els_data) { 9873 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9874 "0149 Failed to allocate memory " 9875 "for ELS event\n"); 9876 return; 9877 } 9878 } 9879 els_data->event_type = FC_REG_ELS_EVENT; 9880 switch (*payload) { 9881 case ELS_CMD_PLOGI: 9882 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9883 break; 9884 case ELS_CMD_PRLO: 9885 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9886 break; 9887 case ELS_CMD_ADISC: 9888 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9889 break; 9890 case ELS_CMD_LOGO: 9891 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9892 /* Copy the WWPN in the LOGO payload */ 9893 memcpy(logo_data->logo_wwpn, &payload[2], 9894 sizeof(struct lpfc_name)); 9895 break; 9896 default: 9897 kfree(els_data); 9898 return; 9899 } 9900 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9901 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9902 if (*payload == ELS_CMD_LOGO) { 9903 fc_host_post_vendor_event(shost, 9904 fc_get_event_number(), 9905 sizeof(struct lpfc_logo_event), 9906 (char *)logo_data, 9907 LPFC_NL_VENDOR_ID); 9908 kfree(logo_data); 9909 } else { 9910 fc_host_post_vendor_event(shost, 9911 fc_get_event_number(), 9912 sizeof(struct lpfc_els_event_header), 9913 (char *)els_data, 9914 LPFC_NL_VENDOR_ID); 9915 kfree(els_data); 9916 } 9917 9918 return; 9919 } 9920 9921 9922 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9923 FC_FPIN_LI_EVT_TYPES_INIT); 9924 9925 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9926 FC_FPIN_DELI_EVT_TYPES_INIT); 9927 9928 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9929 FC_FPIN_CONGN_EVT_TYPES_INIT); 9930 9931 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9932 fc_fpin_congn_severity_types, 9933 FC_FPIN_CONGN_SEVERITY_INIT); 9934 9935 9936 /** 9937 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9938 * @phba: Pointer to phba object. 9939 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9940 * @cnt: count of WWPNs in FPIN payload 9941 * 9942 * This routine is called by LI and PC descriptors. 
9943 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9944 */ 9945 static void 9946 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9947 { 9948 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9949 __be64 wwn; 9950 u64 wwpn; 9951 int i, len; 9952 int line = 0; 9953 int wcnt = 0; 9954 bool endit = false; 9955 9956 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9957 for (i = 0; i < cnt; i++) { 9958 /* Are we on the last WWPN */ 9959 if (i == (cnt - 1)) 9960 endit = true; 9961 9962 /* Extract the next WWPN from the payload */ 9963 wwn = *wwnlist++; 9964 wwpn = be64_to_cpu(wwn); 9965 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9966 " %016llx", wwpn); 9967 9968 /* Log a message if we are on the last WWPN 9969 * or if we hit the max allowed per message. 9970 */ 9971 wcnt++; 9972 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9973 buf[len] = 0; 9974 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9975 "4686 %s\n", buf); 9976 9977 /* Check if we reached the last WWPN */ 9978 if (endit) 9979 return; 9980 9981 /* Limit the number of log message displayed per FPIN */ 9982 line++; 9983 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9984 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9985 "4687 %d WWPNs Truncated\n", 9986 cnt - i - 1); 9987 return; 9988 } 9989 9990 /* Start over with next log message */ 9991 wcnt = 0; 9992 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9993 "Additional WWPNs:"); 9994 } 9995 } 9996 } 9997 9998 /** 9999 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 10000 * @phba: Pointer to phba object. 10001 * @tlv: Pointer to the Link Integrity Notification Descriptor. 10002 * 10003 * This function processes a Link Integrity FPIN event by logging a message. 10004 **/ 10005 static void 10006 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10007 { 10008 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 10009 const char *li_evt_str; 10010 u32 li_evt, cnt; 10011 10012 li_evt = be16_to_cpu(li->event_type); 10013 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 10014 cnt = be32_to_cpu(li->pname_count); 10015 10016 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10017 "4680 FPIN Link Integrity %s (x%x) " 10018 "Detecting PN x%016llx Attached PN x%016llx " 10019 "Duration %d mSecs Count %d Port Cnt %d\n", 10020 li_evt_str, li_evt, 10021 be64_to_cpu(li->detecting_wwpn), 10022 be64_to_cpu(li->attached_wwpn), 10023 be32_to_cpu(li->event_threshold), 10024 be32_to_cpu(li->event_count), cnt); 10025 10026 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 10027 } 10028 10029 /** 10030 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 10031 * @phba: Pointer to hba object. 10032 * @tlv: Pointer to the Delivery Notification Descriptor TLV 10033 * 10034 * This function processes a Delivery FPIN event by logging a message. 
10035 **/ 10036 static void 10037 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10038 { 10039 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 10040 const char *del_rsn_str; 10041 u32 del_rsn; 10042 __be32 *frame; 10043 10044 del_rsn = be16_to_cpu(del->deli_reason_code); 10045 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 10046 10047 /* Skip over desc_tag/desc_len header to payload */ 10048 frame = (__be32 *)(del + 1); 10049 10050 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10051 "4681 FPIN Delivery %s (x%x) " 10052 "Detecting PN x%016llx Attached PN x%016llx " 10053 "DiscHdr0 x%08x " 10054 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 10055 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 10056 del_rsn_str, del_rsn, 10057 be64_to_cpu(del->detecting_wwpn), 10058 be64_to_cpu(del->attached_wwpn), 10059 be32_to_cpu(frame[0]), 10060 be32_to_cpu(frame[1]), 10061 be32_to_cpu(frame[2]), 10062 be32_to_cpu(frame[3]), 10063 be32_to_cpu(frame[4]), 10064 be32_to_cpu(frame[5])); 10065 } 10066 10067 /** 10068 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 10069 * @phba: Pointer to hba object. 10070 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 10071 * 10072 * This function processes a Peer Congestion FPIN event by logging a message. 10073 **/ 10074 static void 10075 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10076 { 10077 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 10078 const char *pc_evt_str; 10079 u32 pc_evt, cnt; 10080 10081 pc_evt = be16_to_cpu(pc->event_type); 10082 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 10083 cnt = be32_to_cpu(pc->pname_count); 10084 10085 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 10086 "4684 FPIN Peer Congestion %s (x%x) " 10087 "Duration %d mSecs " 10088 "Detecting PN x%016llx Attached PN x%016llx " 10089 "Impacted Port Cnt %d\n", 10090 pc_evt_str, pc_evt, 10091 be32_to_cpu(pc->event_period), 10092 be64_to_cpu(pc->detecting_wwpn), 10093 be64_to_cpu(pc->attached_wwpn), 10094 cnt); 10095 10096 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 10097 } 10098 10099 /** 10100 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 10101 * @phba: Pointer to hba object. 10102 * @tlv: Pointer to the Congestion Notification Descriptor TLV 10103 * 10104 * This function processes an FPIN Congestion Notifiction. The notification 10105 * could be an Alarm or Warning. This routine feeds that data into driver's 10106 * running congestion algorithm. It also processes the FPIN by 10107 * logging a message. It returns 1 to indicate deliver this message 10108 * to the upper layer or 0 to indicate don't deliver it. 10109 **/ 10110 static int 10111 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10112 { 10113 struct lpfc_cgn_info *cp; 10114 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 10115 const char *cgn_evt_str; 10116 u32 cgn_evt; 10117 const char *cgn_sev_str; 10118 u32 cgn_sev; 10119 uint16_t value; 10120 u32 crc; 10121 bool nm_log = false; 10122 int rc = 1; 10123 10124 cgn_evt = be16_to_cpu(cgn->event_type); 10125 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 10126 cgn_sev = cgn->severity; 10127 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 10128 10129 /* The driver only takes action on a Credit Stall or Oversubscription 10130 * event type to engage the IO algorithm. The driver prints an 10131 * unmaskable message only for Lost Credit and Credit Stall. 
10132 * TODO: Still need to have definition of host action on clear, 10133 * lost credit and device specific event types. 10134 */ 10135 switch (cgn_evt) { 10136 case FPIN_CONGN_LOST_CREDIT: 10137 nm_log = true; 10138 break; 10139 case FPIN_CONGN_CREDIT_STALL: 10140 nm_log = true; 10141 fallthrough; 10142 case FPIN_CONGN_OVERSUBSCRIPTION: 10143 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 10144 nm_log = false; 10145 switch (cgn_sev) { 10146 case FPIN_CONGN_SEVERITY_ERROR: 10147 /* Take action here for an Alarm event */ 10148 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10149 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 10150 /* Track of alarm cnt for SYNC_WQE */ 10151 atomic_inc(&phba->cgn_sync_alarm_cnt); 10152 } 10153 /* Track alarm cnt for cgn_info regardless 10154 * of whether CMF is configured for Signals 10155 * or FPINs. 10156 */ 10157 atomic_inc(&phba->cgn_fabric_alarm_cnt); 10158 goto cleanup; 10159 } 10160 break; 10161 case FPIN_CONGN_SEVERITY_WARNING: 10162 /* Take action here for a Warning event */ 10163 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10164 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 10165 /* Track of warning cnt for SYNC_WQE */ 10166 atomic_inc(&phba->cgn_sync_warn_cnt); 10167 } 10168 /* Track warning cnt and freq for cgn_info 10169 * regardless of whether CMF is configured for 10170 * Signals or FPINs. 10171 */ 10172 atomic_inc(&phba->cgn_fabric_warn_cnt); 10173 cleanup: 10174 /* Save frequency in ms */ 10175 phba->cgn_fpin_frequency = 10176 be32_to_cpu(cgn->event_period); 10177 value = phba->cgn_fpin_frequency; 10178 if (phba->cgn_i) { 10179 cp = (struct lpfc_cgn_info *) 10180 phba->cgn_i->virt; 10181 cp->cgn_alarm_freq = 10182 cpu_to_le16(value); 10183 cp->cgn_warn_freq = 10184 cpu_to_le16(value); 10185 crc = lpfc_cgn_calc_crc32 10186 (cp, 10187 LPFC_CGN_INFO_SZ, 10188 LPFC_CGN_CRC32_SEED); 10189 cp->cgn_info_crc = cpu_to_le32(crc); 10190 } 10191 10192 /* Don't deliver to upper layer since 10193 * driver took action on this tlv. 10194 */ 10195 rc = 0; 10196 } 10197 break; 10198 } 10199 break; 10200 } 10201 10202 /* Change the log level to unmaskable for the following event types. */ 10203 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 10204 LOG_CGN_MGMT | LOG_ELS, 10205 "4683 FPIN CONGESTION %s type %s (x%x) Event " 10206 "Duration %d mSecs\n", 10207 cgn_sev_str, cgn_evt_str, cgn_evt, 10208 be32_to_cpu(cgn->event_period)); 10209 return rc; 10210 } 10211 10212 void 10213 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10214 { 10215 struct lpfc_hba *phba = vport->phba; 10216 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10217 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10218 const char *dtag_nm; 10219 int desc_cnt = 0, bytes_remain, cnt; 10220 u32 dtag, deliver = 0; 10221 int len; 10222 10223 /* FPINs handled only if we are in the right discovery state */ 10224 if (vport->port_state < LPFC_DISC_AUTH) 10225 return; 10226 10227 /* make sure there is the full fpin header */ 10228 if (fpin_length < sizeof(struct fc_els_fpin)) 10229 return; 10230 10231 /* Sanity check descriptor length. The desc_len value does not 10232 * include space for the ELS command and the desc_len fields. 
10233 */ 10234 len = be32_to_cpu(fpin->desc_len); 10235 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10236 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10237 "4671 Bad ELS FPIN length %d: %d\n", 10238 len, fpin_length); 10239 return; 10240 } 10241 10242 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10243 first_tlv = tlv; 10244 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10245 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10246 10247 /* process each descriptor separately */ 10248 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10249 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10250 dtag = be32_to_cpu(tlv->desc_tag); 10251 switch (dtag) { 10252 case ELS_DTAG_LNK_INTEGRITY: 10253 lpfc_els_rcv_fpin_li(phba, tlv); 10254 deliver = 1; 10255 break; 10256 case ELS_DTAG_DELIVERY: 10257 lpfc_els_rcv_fpin_del(phba, tlv); 10258 deliver = 1; 10259 break; 10260 case ELS_DTAG_PEER_CONGEST: 10261 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10262 deliver = 1; 10263 break; 10264 case ELS_DTAG_CONGESTION: 10265 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10266 break; 10267 default: 10268 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10269 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10270 "4678 unknown FPIN descriptor[%d]: " 10271 "tag x%x (%s)\n", 10272 desc_cnt, dtag, dtag_nm); 10273 10274 /* If descriptor is bad, drop the rest of the data */ 10275 return; 10276 } 10277 lpfc_cgn_update_stat(phba, dtag); 10278 cnt = be32_to_cpu(tlv->desc_len); 10279 10280 /* Sanity check descriptor length. The desc_len value does not 10281 * include space for the desc_tag and the desc_len fields. 10282 */ 10283 len -= (cnt + sizeof(struct fc_tlv_desc)); 10284 if (len < 0) { 10285 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10286 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10287 "4672 Bad FPIN descriptor TLV length " 10288 "%d: %d %d %s\n", 10289 cnt, len, fpin_length, dtag_nm); 10290 return; 10291 } 10292 10293 current_tlv = tlv; 10294 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10295 tlv = fc_tlv_next_desc(tlv); 10296 10297 /* Format payload such that the FPIN delivered to the 10298 * upper layer is a single descriptor FPIN. 10299 */ 10300 if (desc_cnt) 10301 memcpy(first_tlv, current_tlv, 10302 (cnt + sizeof(struct fc_els_fpin))); 10303 10304 /* Adjust the length so that it only reflects a 10305 * single descriptor FPIN. 10306 */ 10307 fpin_length = cnt + sizeof(struct fc_els_fpin); 10308 fpin->desc_len = cpu_to_be32(fpin_length); 10309 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10310 10311 /* Send every descriptor individually to the upper layer */ 10312 if (deliver) 10313 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10314 fpin_length, (char *)fpin, 0); 10315 desc_cnt++; 10316 } 10317 } 10318 10319 /** 10320 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10321 * @phba: pointer to lpfc hba data structure. 10322 * @pring: pointer to a SLI ring. 10323 * @vport: pointer to a host virtual N_Port data structure. 10324 * @elsiocb: pointer to lpfc els command iocb data structure. 10325 * 10326 * This routine is used for processing the IOCB associated with a unsolicited 10327 * event. It first determines whether there is an existing ndlp that matches 10328 * the DID from the unsolicited IOCB. If not, it will create a new one with 10329 * the DID from the unsolicited IOCB. 
The ELS command from the unsolicited 10330 * IOCB is then used to invoke the proper routine and to set up proper state 10331 * of the discovery state machine. 10332 **/ 10333 static void 10334 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10335 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10336 { 10337 struct lpfc_nodelist *ndlp; 10338 struct ls_rjt stat; 10339 u32 *payload, payload_len; 10340 u32 cmd = 0, did = 0, newnode, status = 0; 10341 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10342 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10343 LPFC_MBOXQ_t *mbox; 10344 10345 if (!vport || !elsiocb->cmd_dmabuf) 10346 goto dropit; 10347 10348 newnode = 0; 10349 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10350 payload = elsiocb->cmd_dmabuf->virt; 10351 if (phba->sli_rev == LPFC_SLI_REV4) 10352 payload_len = wcqe_cmpl->total_data_placed; 10353 else 10354 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10355 status = get_job_ulpstatus(phba, elsiocb); 10356 cmd = *payload; 10357 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10358 lpfc_sli3_post_buffer(phba, pring, 1); 10359 10360 did = get_job_els_rsp64_did(phba, elsiocb); 10361 if (status) { 10362 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10363 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10364 status, get_job_word4(phba, elsiocb), did); 10365 goto dropit; 10366 } 10367 10368 /* Check to see if link went down during discovery */ 10369 if (lpfc_els_chk_latt(vport)) 10370 goto dropit; 10371 10372 /* Ignore traffic received during vport shutdown. */ 10373 if (vport->load_flag & FC_UNLOADING) 10374 goto dropit; 10375 10376 /* If NPort discovery is delayed drop incoming ELS */ 10377 if ((vport->fc_flag & FC_DISC_DELAYED) && 10378 (cmd != ELS_CMD_PLOGI)) 10379 goto dropit; 10380 10381 ndlp = lpfc_findnode_did(vport, did); 10382 if (!ndlp) { 10383 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10384 ndlp = lpfc_nlp_init(vport, did); 10385 if (!ndlp) 10386 goto dropit; 10387 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10388 newnode = 1; 10389 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10390 ndlp->nlp_type |= NLP_FABRIC; 10391 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10392 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10393 newnode = 1; 10394 } 10395 10396 phba->fc_stat.elsRcvFrame++; 10397 10398 /* 10399 * Do not process any unsolicited ELS commands 10400 * if the ndlp is in DEV_LOSS 10401 */ 10402 spin_lock_irq(&ndlp->lock); 10403 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10404 spin_unlock_irq(&ndlp->lock); 10405 if (newnode) 10406 lpfc_nlp_put(ndlp); 10407 goto dropit; 10408 } 10409 spin_unlock_irq(&ndlp->lock); 10410 10411 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10412 if (!elsiocb->ndlp) 10413 goto dropit; 10414 elsiocb->vport = vport; 10415 10416 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10417 cmd &= ELS_CMD_MASK; 10418 } 10419 /* ELS command <elsCmd> received from NPORT <did> */ 10420 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10421 "0112 ELS command x%x received from NPORT x%x " 10422 "refcnt %d Data: x%x x%x x%x x%x\n", 10423 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10424 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10425 10426 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10427 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10428 (cmd != ELS_CMD_FLOGI) && 10429 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 10430 rjt_err = LSRJT_LOGICAL_BSY; 10431 rjt_exp = LSEXP_NOTHING_MORE; 10432 goto lsrjt; 10433 
} 10434 10435 switch (cmd) { 10436 case ELS_CMD_PLOGI: 10437 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10438 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 10439 did, vport->port_state, ndlp->nlp_flag); 10440 10441 phba->fc_stat.elsRcvPLOGI++; 10442 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10443 if (phba->sli_rev == LPFC_SLI_REV4 && 10444 (phba->pport->fc_flag & FC_PT2PT)) { 10445 vport->fc_prevDID = vport->fc_myDID; 10446 /* Our DID needs to be updated before registering 10447 * the vfi. This is done in lpfc_rcv_plogi but 10448 * that is called after the reg_vfi. 10449 */ 10450 vport->fc_myDID = 10451 bf_get(els_rsp64_sid, 10452 &elsiocb->wqe.xmit_els_rsp); 10453 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10454 "3312 Remote port assigned DID x%x " 10455 "%x\n", vport->fc_myDID, 10456 vport->fc_prevDID); 10457 } 10458 10459 lpfc_send_els_event(vport, ndlp, payload); 10460 10461 /* If Nport discovery is delayed, reject PLOGIs */ 10462 if (vport->fc_flag & FC_DISC_DELAYED) { 10463 rjt_err = LSRJT_UNABLE_TPC; 10464 rjt_exp = LSEXP_NOTHING_MORE; 10465 break; 10466 } 10467 10468 if (vport->port_state < LPFC_DISC_AUTH) { 10469 if (!(phba->pport->fc_flag & FC_PT2PT) || 10470 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 10471 rjt_err = LSRJT_UNABLE_TPC; 10472 rjt_exp = LSEXP_NOTHING_MORE; 10473 break; 10474 } 10475 } 10476 10477 spin_lock_irq(&ndlp->lock); 10478 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 10479 spin_unlock_irq(&ndlp->lock); 10480 10481 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10482 NLP_EVT_RCV_PLOGI); 10483 10484 break; 10485 case ELS_CMD_FLOGI: 10486 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10487 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 10488 did, vport->port_state, ndlp->nlp_flag); 10489 10490 phba->fc_stat.elsRcvFLOGI++; 10491 10492 /* If the driver believes fabric discovery is done and is ready, 10493 * bounce the link. There is some discrepancy.
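 * A pt2pt peer is sending another FLOGI after our FLOGI/discovery has
 * already completed, so reject it with LS_RJT and reinitialize the link.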
10494 */ 10495 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10496 vport->fc_flag & FC_PT2PT && 10497 vport->rcv_flogi_cnt >= 1) { 10498 rjt_err = LSRJT_LOGICAL_BSY; 10499 rjt_exp = LSEXP_NOTHING_MORE; 10500 init_link++; 10501 goto lsrjt; 10502 } 10503 10504 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10505 /* retain node if our response is deferred */ 10506 if (phba->defer_flogi_acc_flag) 10507 break; 10508 if (newnode) 10509 lpfc_disc_state_machine(vport, ndlp, NULL, 10510 NLP_EVT_DEVICE_RM); 10511 break; 10512 case ELS_CMD_LOGO: 10513 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10514 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10515 did, vport->port_state, ndlp->nlp_flag); 10516 10517 phba->fc_stat.elsRcvLOGO++; 10518 lpfc_send_els_event(vport, ndlp, payload); 10519 if (vport->port_state < LPFC_DISC_AUTH) { 10520 rjt_err = LSRJT_UNABLE_TPC; 10521 rjt_exp = LSEXP_NOTHING_MORE; 10522 break; 10523 } 10524 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10525 if (newnode) 10526 lpfc_disc_state_machine(vport, ndlp, NULL, 10527 NLP_EVT_DEVICE_RM); 10528 break; 10529 case ELS_CMD_PRLO: 10530 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10531 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10532 did, vport->port_state, ndlp->nlp_flag); 10533 10534 phba->fc_stat.elsRcvPRLO++; 10535 lpfc_send_els_event(vport, ndlp, payload); 10536 if (vport->port_state < LPFC_DISC_AUTH) { 10537 rjt_err = LSRJT_UNABLE_TPC; 10538 rjt_exp = LSEXP_NOTHING_MORE; 10539 break; 10540 } 10541 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10542 break; 10543 case ELS_CMD_LCB: 10544 phba->fc_stat.elsRcvLCB++; 10545 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10546 break; 10547 case ELS_CMD_RDP: 10548 phba->fc_stat.elsRcvRDP++; 10549 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10550 break; 10551 case ELS_CMD_RSCN: 10552 phba->fc_stat.elsRcvRSCN++; 10553 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10554 if (newnode) 10555 lpfc_disc_state_machine(vport, ndlp, NULL, 10556 NLP_EVT_DEVICE_RM); 10557 break; 10558 case ELS_CMD_ADISC: 10559 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10560 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10561 did, vport->port_state, ndlp->nlp_flag); 10562 10563 lpfc_send_els_event(vport, ndlp, payload); 10564 phba->fc_stat.elsRcvADISC++; 10565 if (vport->port_state < LPFC_DISC_AUTH) { 10566 rjt_err = LSRJT_UNABLE_TPC; 10567 rjt_exp = LSEXP_NOTHING_MORE; 10568 break; 10569 } 10570 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10571 NLP_EVT_RCV_ADISC); 10572 break; 10573 case ELS_CMD_PDISC: 10574 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10575 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10576 did, vport->port_state, ndlp->nlp_flag); 10577 10578 phba->fc_stat.elsRcvPDISC++; 10579 if (vport->port_state < LPFC_DISC_AUTH) { 10580 rjt_err = LSRJT_UNABLE_TPC; 10581 rjt_exp = LSEXP_NOTHING_MORE; 10582 break; 10583 } 10584 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10585 NLP_EVT_RCV_PDISC); 10586 break; 10587 case ELS_CMD_FARPR: 10588 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10589 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10590 did, vport->port_state, ndlp->nlp_flag); 10591 10592 phba->fc_stat.elsRcvFARPR++; 10593 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10594 break; 10595 case ELS_CMD_FARP: 10596 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10597 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10598 did, vport->port_state, ndlp->nlp_flag); 10599 10600 phba->fc_stat.elsRcvFARP++; 10601 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10602 break; 10603 case ELS_CMD_FAN: 10604 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10605 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10606 did, vport->port_state, ndlp->nlp_flag); 10607 10608 phba->fc_stat.elsRcvFAN++; 10609 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10610 break; 10611 case ELS_CMD_PRLI: 10612 case ELS_CMD_NVMEPRLI: 10613 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10614 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10615 did, vport->port_state, ndlp->nlp_flag); 10616 10617 phba->fc_stat.elsRcvPRLI++; 10618 if ((vport->port_state < LPFC_DISC_AUTH) && 10619 (vport->fc_flag & FC_FABRIC)) { 10620 rjt_err = LSRJT_UNABLE_TPC; 10621 rjt_exp = LSEXP_NOTHING_MORE; 10622 break; 10623 } 10624 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10625 break; 10626 case ELS_CMD_LIRR: 10627 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10628 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10629 did, vport->port_state, ndlp->nlp_flag); 10630 10631 phba->fc_stat.elsRcvLIRR++; 10632 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10633 if (newnode) 10634 lpfc_disc_state_machine(vport, ndlp, NULL, 10635 NLP_EVT_DEVICE_RM); 10636 break; 10637 case ELS_CMD_RLS: 10638 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10639 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10640 did, vport->port_state, ndlp->nlp_flag); 10641 10642 phba->fc_stat.elsRcvRLS++; 10643 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10644 if (newnode) 10645 lpfc_disc_state_machine(vport, ndlp, NULL, 10646 NLP_EVT_DEVICE_RM); 10647 break; 10648 case ELS_CMD_RPL: 10649 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10650 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10651 did, vport->port_state, ndlp->nlp_flag); 10652 10653 phba->fc_stat.elsRcvRPL++; 10654 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10655 if (newnode) 10656 lpfc_disc_state_machine(vport, ndlp, NULL, 10657 NLP_EVT_DEVICE_RM); 10658 break; 10659 case ELS_CMD_RNID: 10660 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10661 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10662 did, vport->port_state, ndlp->nlp_flag); 10663 10664 phba->fc_stat.elsRcvRNID++; 10665 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10666 if (newnode) 10667 lpfc_disc_state_machine(vport, ndlp, NULL, 10668 NLP_EVT_DEVICE_RM); 10669 break; 10670 case ELS_CMD_RTV: 10671 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10672 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10673 did, vport->port_state, ndlp->nlp_flag); 10674 phba->fc_stat.elsRcvRTV++; 10675 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10676 if (newnode) 10677 lpfc_disc_state_machine(vport, ndlp, NULL, 10678 NLP_EVT_DEVICE_RM); 10679 break; 10680 case ELS_CMD_RRQ: 10681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10682 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10683 did, vport->port_state, ndlp->nlp_flag); 10684 10685 phba->fc_stat.elsRcvRRQ++; 10686 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10687 if (newnode) 10688 lpfc_disc_state_machine(vport, ndlp, NULL, 10689 NLP_EVT_DEVICE_RM); 10690 break; 10691 case ELS_CMD_ECHO: 10692 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10693 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10694 did, vport->port_state, ndlp->nlp_flag); 10695 10696 phba->fc_stat.elsRcvECHO++; 10697 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10698 if (newnode) 10699 lpfc_disc_state_machine(vport, ndlp, NULL, 10700 NLP_EVT_DEVICE_RM); 10701 break; 10702 case ELS_CMD_REC: 10703 /* receive this due to exchange closed */ 10704 rjt_err = LSRJT_UNABLE_TPC; 10705 rjt_exp = LSEXP_INVALID_OX_RX; 10706 break; 10707 case ELS_CMD_FPIN: 10708 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 
10709 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10710 did, vport->port_state, ndlp->nlp_flag); 10711 10712 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10713 payload_len); 10714 10715 /* There are no replies, so no rjt codes */ 10716 break; 10717 case ELS_CMD_EDC: 10718 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10719 break; 10720 case ELS_CMD_RDF: 10721 phba->fc_stat.elsRcvRDF++; 10722 /* Accept RDF only from fabric controller */ 10723 if (did != Fabric_Cntl_DID) { 10724 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10725 "1115 Received RDF from invalid DID " 10726 "x%x\n", did); 10727 rjt_err = LSRJT_PROTOCOL_ERR; 10728 rjt_exp = LSEXP_NOTHING_MORE; 10729 goto lsrjt; 10730 } 10731 10732 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10733 break; 10734 default: 10735 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10736 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10737 cmd, did, vport->port_state); 10738 10739 /* Unsupported ELS command, reject */ 10740 rjt_err = LSRJT_CMD_UNSUPPORTED; 10741 rjt_exp = LSEXP_NOTHING_MORE; 10742 10743 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10744 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10745 "0115 Unknown ELS command x%x " 10746 "received from NPORT x%x\n", cmd, did); 10747 if (newnode) 10748 lpfc_disc_state_machine(vport, ndlp, NULL, 10749 NLP_EVT_DEVICE_RM); 10750 break; 10751 } 10752 10753 lsrjt: 10754 /* check if need to LS_RJT received ELS cmd */ 10755 if (rjt_err) { 10756 memset(&stat, 0, sizeof(stat)); 10757 stat.un.b.lsRjtRsnCode = rjt_err; 10758 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10759 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10760 NULL); 10761 /* Remove the reference from above for new nodes. */ 10762 if (newnode) 10763 lpfc_disc_state_machine(vport, ndlp, NULL, 10764 NLP_EVT_DEVICE_RM); 10765 } 10766 10767 /* Release the reference on this elsiocb, not the ndlp. */ 10768 lpfc_nlp_put(elsiocb->ndlp); 10769 elsiocb->ndlp = NULL; 10770 10771 /* Special case. Driver received an unsolicited command that 10772 * unsupportable given the driver's current state. Reset the 10773 * link and start over. 10774 */ 10775 if (init_link) { 10776 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10777 if (!mbox) 10778 return; 10779 lpfc_linkdown(phba); 10780 lpfc_init_link(phba, mbox, 10781 phba->cfg_topology, 10782 phba->cfg_link_speed); 10783 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10784 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10785 mbox->vport = vport; 10786 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10787 MBX_NOT_FINISHED) 10788 mempool_free(mbox, phba->mbox_mem_pool); 10789 } 10790 10791 return; 10792 10793 dropit: 10794 if (vport && !(vport->load_flag & FC_UNLOADING)) 10795 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10796 "0111 Dropping received ELS cmd " 10797 "Data: x%x x%x x%x x%x\n", 10798 cmd, status, get_job_word4(phba, elsiocb), did); 10799 10800 phba->fc_stat.elsRcvDrop++; 10801 } 10802 10803 /** 10804 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10805 * @phba: pointer to lpfc hba data structure. 10806 * @pring: pointer to a SLI ring. 10807 * @elsiocb: pointer to lpfc els iocb data structure. 10808 * 10809 * This routine is used to process an unsolicited event received from a SLI 10810 * (Service Level Interface) ring. 
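 * For SLI3 with NPIV enabled, the destination vport is first resolved from
 * the VPI carried in the receive IOCB.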
The actual processing of the data buffer 10811 * associated with the unsolicited event is done by invoking the routine 10812 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10813 * SLI ring on which the unsolicited event was received. 10814 **/ 10815 void 10816 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10817 struct lpfc_iocbq *elsiocb) 10818 { 10819 struct lpfc_vport *vport = elsiocb->vport; 10820 u32 ulp_command, status, parameter, bde_count = 0; 10821 IOCB_t *icmd; 10822 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10823 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10824 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10825 dma_addr_t paddr; 10826 10827 elsiocb->cmd_dmabuf = NULL; 10828 elsiocb->rsp_dmabuf = NULL; 10829 elsiocb->bpl_dmabuf = NULL; 10830 10831 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10832 ulp_command = get_job_cmnd(phba, elsiocb); 10833 status = get_job_ulpstatus(phba, elsiocb); 10834 parameter = get_job_word4(phba, elsiocb); 10835 if (phba->sli_rev == LPFC_SLI_REV4) 10836 bde_count = wcqe_cmpl->word3; 10837 else 10838 bde_count = elsiocb->iocb.ulpBdeCount; 10839 10840 if (status == IOSTAT_NEED_BUFFER) { 10841 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10842 } else if (status == IOSTAT_LOCAL_REJECT && 10843 (parameter & IOERR_PARAM_MASK) == 10844 IOERR_RCV_BUFFER_WAITING) { 10845 phba->fc_stat.NoRcvBuf++; 10846 /* Not enough posted buffers; Try posting more buffers */ 10847 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10848 lpfc_sli3_post_buffer(phba, pring, 0); 10849 return; 10850 } 10851 10852 if (phba->sli_rev == LPFC_SLI_REV3) { 10853 icmd = &elsiocb->iocb; 10854 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10855 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10856 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10857 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10858 vport = phba->pport; 10859 else 10860 vport = lpfc_find_vport_by_vpid(phba, 10861 icmd->unsli3.rcvsli3.vpi); 10862 } 10863 } 10864 10865 /* If there are no BDEs associated 10866 * with this IOCB, there is nothing to do. 10867 */ 10868 if (bde_count == 0) 10869 return; 10870 10871 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10872 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10873 elsiocb->cmd_dmabuf = bdeBuf1; 10874 if (bde_count == 2) 10875 elsiocb->bpl_dmabuf = bdeBuf2; 10876 } else { 10877 icmd = &elsiocb->iocb; 10878 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10879 icmd->un.cont64[0].addrLow); 10880 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10881 paddr); 10882 if (bde_count == 2) { 10883 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10884 icmd->un.cont64[1].addrLow); 10885 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10886 pring, 10887 paddr); 10888 } 10889 } 10890 10891 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10892 /* 10893 * The different unsolicited event handlers would tell us 10894 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10895 */ 10896 if (elsiocb->cmd_dmabuf) { 10897 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10898 elsiocb->cmd_dmabuf = NULL; 10899 } 10900 10901 if (elsiocb->bpl_dmabuf) { 10902 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10903 elsiocb->bpl_dmabuf = NULL; 10904 } 10905 10906 } 10907 10908 static void 10909 lpfc_start_fdmi(struct lpfc_vport *vport) 10910 { 10911 struct lpfc_nodelist *ndlp; 10912 10913 /* If this is the first time, allocate an ndlp and initialize 10914 * it. 
Otherwise, make sure the node is enabled and then do the 10915 * login. 10916 */ 10917 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10918 if (!ndlp) { 10919 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10920 if (ndlp) { 10921 ndlp->nlp_type |= NLP_FABRIC; 10922 } else { 10923 return; 10924 } 10925 } 10926 10927 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10928 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10929 } 10930 10931 /** 10932 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10933 * @phba: pointer to lpfc hba data structure. 10934 * @vport: pointer to a virtual N_Port data structure. 10935 * 10936 * This routine issues a Port Login (PLOGI) to the Name Server with 10937 * State Change Request (SCR) for a @vport. This routine will create an 10938 * ndlp for the Name Server associated to the @vport if such node does 10939 * not already exist. The PLOGI to Name Server is issued by invoking the 10940 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10941 * (FDMI) is configured to the @vport, a FDMI node will be created and 10942 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 10943 **/ 10944 void 10945 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10946 { 10947 struct lpfc_nodelist *ndlp; 10948 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10949 10950 /* 10951 * If lpfc_delay_discovery parameter is set and the clean address 10952 * bit is cleared and fc fabric parameters chenged, delay FC NPort 10953 * discovery. 10954 */ 10955 spin_lock_irq(shost->host_lock); 10956 if (vport->fc_flag & FC_DISC_DELAYED) { 10957 spin_unlock_irq(shost->host_lock); 10958 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10959 "3334 Delay fc port discovery for %d secs\n", 10960 phba->fc_ratov); 10961 mod_timer(&vport->delayed_disc_tmo, 10962 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10963 return; 10964 } 10965 spin_unlock_irq(shost->host_lock); 10966 10967 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10968 if (!ndlp) { 10969 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10970 if (!ndlp) { 10971 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10972 lpfc_disc_start(vport); 10973 return; 10974 } 10975 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10976 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10977 "0251 NameServer login: no memory\n"); 10978 return; 10979 } 10980 } 10981 10982 ndlp->nlp_type |= NLP_FABRIC; 10983 10984 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10985 10986 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10987 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10988 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10989 "0252 Cannot issue NameServer login\n"); 10990 return; 10991 } 10992 10993 if ((phba->cfg_enable_SmartSAN || 10994 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10995 (vport->load_flag & FC_ALLOW_FDMI)) 10996 lpfc_start_fdmi(vport); 10997 } 10998 10999 /** 11000 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 11001 * @phba: pointer to lpfc hba data structure. 11002 * @pmb: pointer to the driver internal queue element for mailbox command. 11003 * 11004 * This routine is the completion callback function to register new vport 11005 * mailbox command. 
If the new vport mailbox command completes successfully, 11006 * the fabric registration login shall be performed on physical port (the 11007 * new vport created is actually a physical port, with VPI 0) or the port 11008 * login to Name Server for State Change Request (SCR) will be performed 11009 * on virtual port (real virtual port, with VPI greater than 0). 11010 **/ 11011 static void 11012 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 11013 { 11014 struct lpfc_vport *vport = pmb->vport; 11015 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11016 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 11017 MAILBOX_t *mb = &pmb->u.mb; 11018 int rc; 11019 11020 spin_lock_irq(shost->host_lock); 11021 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 11022 spin_unlock_irq(shost->host_lock); 11023 11024 if (mb->mbxStatus) { 11025 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11026 "0915 Register VPI failed : Status: x%x" 11027 " upd bit: x%x \n", mb->mbxStatus, 11028 mb->un.varRegVpi.upd); 11029 if (phba->sli_rev == LPFC_SLI_REV4 && 11030 mb->un.varRegVpi.upd) 11031 goto mbox_err_exit ; 11032 11033 switch (mb->mbxStatus) { 11034 case 0x11: /* unsupported feature */ 11035 case 0x9603: /* max_vpi exceeded */ 11036 case 0x9602: /* Link event since CLEAR_LA */ 11037 /* giving up on vport registration */ 11038 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11039 spin_lock_irq(shost->host_lock); 11040 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 11041 spin_unlock_irq(shost->host_lock); 11042 lpfc_can_disctmo(vport); 11043 break; 11044 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 11045 case 0x20: 11046 spin_lock_irq(shost->host_lock); 11047 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11048 spin_unlock_irq(shost->host_lock); 11049 lpfc_init_vpi(phba, pmb, vport->vpi); 11050 pmb->vport = vport; 11051 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 11052 rc = lpfc_sli_issue_mbox(phba, pmb, 11053 MBX_NOWAIT); 11054 if (rc == MBX_NOT_FINISHED) { 11055 lpfc_printf_vlog(vport, KERN_ERR, 11056 LOG_TRACE_EVENT, 11057 "2732 Failed to issue INIT_VPI" 11058 " mailbox command\n"); 11059 } else { 11060 lpfc_nlp_put(ndlp); 11061 return; 11062 } 11063 fallthrough; 11064 default: 11065 /* Try to recover from this error */ 11066 if (phba->sli_rev == LPFC_SLI_REV4) 11067 lpfc_sli4_unreg_all_rpis(vport); 11068 lpfc_mbx_unreg_vpi(vport); 11069 spin_lock_irq(shost->host_lock); 11070 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11071 spin_unlock_irq(shost->host_lock); 11072 if (mb->mbxStatus == MBX_NOT_FINISHED) 11073 break; 11074 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 11075 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 11076 if (phba->sli_rev == LPFC_SLI_REV4) 11077 lpfc_issue_init_vfi(vport); 11078 else 11079 lpfc_initial_flogi(vport); 11080 } else { 11081 lpfc_initial_fdisc(vport); 11082 } 11083 break; 11084 } 11085 } else { 11086 spin_lock_irq(shost->host_lock); 11087 vport->vpi_state |= LPFC_VPI_REGISTERED; 11088 spin_unlock_irq(shost->host_lock); 11089 if (vport == phba->pport) { 11090 if (phba->sli_rev < LPFC_SLI_REV4) 11091 lpfc_issue_fabric_reglogin(vport); 11092 else { 11093 /* 11094 * If the physical port is instantiated using 11095 * FDISC, do not start vport discovery. 
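 * In that case only the NameServer PLOGI below is issued.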
11096 */ 11097 if (vport->port_state != LPFC_FDISC) 11098 lpfc_start_fdiscs(phba); 11099 lpfc_do_scr_ns_plogi(phba, vport); 11100 } 11101 } else { 11102 lpfc_do_scr_ns_plogi(phba, vport); 11103 } 11104 } 11105 mbox_err_exit: 11106 /* Now, we decrement the ndlp reference count held for this 11107 * callback function 11108 */ 11109 lpfc_nlp_put(ndlp); 11110 11111 mempool_free(pmb, phba->mbox_mem_pool); 11112 return; 11113 } 11114 11115 /** 11116 * lpfc_register_new_vport - Register a new vport with a HBA 11117 * @phba: pointer to lpfc hba data structure. 11118 * @vport: pointer to a host virtual N_Port data structure. 11119 * @ndlp: pointer to a node-list data structure. 11120 * 11121 * This routine registers the @vport as a new virtual port with a HBA. 11122 * It is done through a registering vpi mailbox command. 11123 **/ 11124 void 11125 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 11126 struct lpfc_nodelist *ndlp) 11127 { 11128 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11129 LPFC_MBOXQ_t *mbox; 11130 11131 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11132 if (mbox) { 11133 lpfc_reg_vpi(vport, mbox); 11134 mbox->vport = vport; 11135 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 11136 if (!mbox->ctx_ndlp) { 11137 mempool_free(mbox, phba->mbox_mem_pool); 11138 goto mbox_err_exit; 11139 } 11140 11141 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 11142 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 11143 == MBX_NOT_FINISHED) { 11144 /* mailbox command not success, decrement ndlp 11145 * reference count for this command 11146 */ 11147 lpfc_nlp_put(ndlp); 11148 mempool_free(mbox, phba->mbox_mem_pool); 11149 11150 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11151 "0253 Register VPI: Can't send mbox\n"); 11152 goto mbox_err_exit; 11153 } 11154 } else { 11155 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11156 "0254 Register VPI: no memory\n"); 11157 goto mbox_err_exit; 11158 } 11159 return; 11160 11161 mbox_err_exit: 11162 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11163 spin_lock_irq(shost->host_lock); 11164 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 11165 spin_unlock_irq(shost->host_lock); 11166 return; 11167 } 11168 11169 /** 11170 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 11171 * @phba: pointer to lpfc hba data structure. 11172 * 11173 * This routine cancels the retry delay timers to all the vports. 11174 **/ 11175 void 11176 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 11177 { 11178 struct lpfc_vport **vports; 11179 struct lpfc_nodelist *ndlp; 11180 uint32_t link_state; 11181 int i; 11182 11183 /* Treat this failure as linkdown for all vports */ 11184 link_state = phba->link_state; 11185 lpfc_linkdown(phba); 11186 phba->link_state = link_state; 11187 11188 vports = lpfc_create_vport_work_array(phba); 11189 11190 if (vports) { 11191 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11192 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 11193 if (ndlp) 11194 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 11195 lpfc_els_flush_cmd(vports[i]); 11196 } 11197 lpfc_destroy_vport_work_array(phba, vports); 11198 } 11199 } 11200 11201 /** 11202 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 11203 * @phba: pointer to lpfc hba data structure. 11204 * 11205 * This routine abort all pending discovery commands and 11206 * start a timer to retry FLOGI for the physical port 11207 * discovery. 
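 *
 * The retry is scheduled on the fabric ndlp's delay timer with a one
 * second delay and nlp_last_elscmd set to ELS_CMD_FLOGI.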
11208 **/ 11209 void 11210 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 11211 { 11212 struct lpfc_nodelist *ndlp; 11213 11214 /* Cancel all the vport retry delay timers */ 11215 lpfc_cancel_all_vport_retry_delay_timer(phba); 11216 11217 /* If the fabric requires FLOGI, then re-instantiate the physical login */ 11218 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11219 if (!ndlp) 11220 return; 11221 11222 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 11223 spin_lock_irq(&ndlp->lock); 11224 ndlp->nlp_flag |= NLP_DELAY_TMO; 11225 spin_unlock_irq(&ndlp->lock); 11226 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 11227 phba->pport->port_state = LPFC_FLOGI; 11228 return; 11229 } 11230 11231 /** 11232 * lpfc_fabric_login_reqd - Check if FLOGI required. 11233 * @phba: pointer to lpfc hba data structure. 11234 * @cmdiocb: pointer to FDISC command iocb. 11235 * @rspiocb: pointer to FDISC response iocb. 11236 * 11237 * This routine checks if a FLOGI is required for FDISC 11238 * to succeed. 11239 **/ 11240 static int 11241 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 11242 struct lpfc_iocbq *cmdiocb, 11243 struct lpfc_iocbq *rspiocb) 11244 { 11245 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11246 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11247 11248 if (ulp_status != IOSTAT_FABRIC_RJT || 11249 ulp_word4 != RJT_LOGIN_REQUIRED) 11250 return 0; 11251 else 11252 return 1; 11253 } 11254 11255 /** 11256 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 11257 * @phba: pointer to lpfc hba data structure. 11258 * @cmdiocb: pointer to lpfc command iocb data structure. 11259 * @rspiocb: pointer to lpfc response iocb data structure. 11260 * 11261 * This routine is the completion callback function to a Fabric Discover 11262 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 11263 * single threaded, each FDISC completion callback function will reset 11264 * the discovery timer for all vports such that the timers will not 11265 * time out unnecessarily. The function checks the FDISC IOCB status. If an error is 11266 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the 11267 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID 11268 * assigned to the vport has been changed with the completion of the FDISC 11269 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 11270 * are unregistered from the HBA, and then the lpfc_register_new_vport() 11271 * routine is invoked to register the new vport with the HBA. Otherwise, the 11272 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 11273 * Server for State Change Request (SCR). 11274 **/ 11275 static void 11276 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11277 struct lpfc_iocbq *rspiocb) 11278 { 11279 struct lpfc_vport *vport = cmdiocb->vport; 11280 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11281 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 11282 struct lpfc_nodelist *np; 11283 struct lpfc_nodelist *next_np; 11284 struct lpfc_iocbq *piocb; 11285 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 11286 struct serv_parm *sp; 11287 uint8_t fabric_param_changed; 11288 u32 ulp_status, ulp_word4; 11289 11290 ulp_status = get_job_ulpstatus(phba, rspiocb); 11291 ulp_word4 = get_job_word4(phba, rspiocb); 11292 11293 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11294 "0123 FDISC completes. 
x%x/x%x prevDID: x%x\n", 11295 ulp_status, ulp_word4, 11296 vport->fc_prevDID); 11297 /* Since all FDISCs are being single threaded, we 11298 * must reset the discovery timer for ALL vports 11299 * waiting to send FDISC when one completes. 11300 */ 11301 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 11302 lpfc_set_disctmo(piocb->vport); 11303 } 11304 11305 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11306 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 11307 ulp_status, ulp_word4, vport->fc_prevDID); 11308 11309 if (ulp_status) { 11310 11311 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 11312 lpfc_retry_pport_discovery(phba); 11313 goto out; 11314 } 11315 11316 /* Check for retry */ 11317 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 11318 goto out; 11319 /* FDISC failed */ 11320 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11321 "0126 FDISC failed. (x%x/x%x)\n", 11322 ulp_status, ulp_word4); 11323 goto fdisc_failed; 11324 } 11325 11326 lpfc_check_nlp_post_devloss(vport, ndlp); 11327 11328 spin_lock_irq(shost->host_lock); 11329 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 11330 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 11331 vport->fc_flag |= FC_FABRIC; 11332 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11333 vport->fc_flag |= FC_PUBLIC_LOOP; 11334 spin_unlock_irq(shost->host_lock); 11335 11336 vport->fc_myDID = ulp_word4 & Mask_DID; 11337 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11338 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11339 if (!prsp) 11340 goto out; 11341 sp = prsp->virt + sizeof(uint32_t); 11342 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11343 memcpy(&vport->fabric_portname, &sp->portName, 11344 sizeof(struct lpfc_name)); 11345 memcpy(&vport->fabric_nodename, &sp->nodeName, 11346 sizeof(struct lpfc_name)); 11347 if (fabric_param_changed && 11348 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11349 /* If our NportID changed, we need to ensure all 11350 * remaining NPORTs get unreg_login'ed so we can 11351 * issue unreg_vpi. 11352 */ 11353 list_for_each_entry_safe(np, next_np, 11354 &vport->fc_nodes, nlp_listp) { 11355 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11356 !(np->nlp_flag & NLP_NPR_ADISC)) 11357 continue; 11358 spin_lock_irq(&ndlp->lock); 11359 np->nlp_flag &= ~NLP_NPR_ADISC; 11360 spin_unlock_irq(&ndlp->lock); 11361 lpfc_unreg_rpi(vport, np); 11362 } 11363 lpfc_cleanup_pending_mbox(vport); 11364 11365 if (phba->sli_rev == LPFC_SLI_REV4) 11366 lpfc_sli4_unreg_all_rpis(vport); 11367 11368 lpfc_mbx_unreg_vpi(vport); 11369 spin_lock_irq(shost->host_lock); 11370 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 11371 if (phba->sli_rev == LPFC_SLI_REV4) 11372 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 11373 else 11374 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 11375 spin_unlock_irq(shost->host_lock); 11376 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11377 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 11378 /* 11379 * Driver needs to re-reg VPI in order for f/w 11380 * to update the MAC address. 11381 */ 11382 lpfc_register_new_vport(phba, vport, ndlp); 11383 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11384 goto out; 11385 } 11386 11387 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 11388 lpfc_issue_init_vpi(vport); 11389 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 11390 lpfc_register_new_vport(phba, vport, ndlp); 11391 else 11392 lpfc_do_scr_ns_plogi(phba, vport); 11393 11394 /* The FDISC completed successfully. Move the fabric ndlp to 11395 * UNMAPPED state and register with the transport. 
11396 */ 11397 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11398 goto out; 11399 11400 fdisc_failed: 11401 if (vport->fc_vport && 11402 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11403 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11404 /* Cancel discovery timer */ 11405 lpfc_can_disctmo(vport); 11406 out: 11407 lpfc_els_free_iocb(phba, cmdiocb); 11408 lpfc_nlp_put(ndlp); 11409 } 11410 11411 /** 11412 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11413 * @vport: pointer to a virtual N_Port data structure. 11414 * @ndlp: pointer to a node-list data structure. 11415 * @retry: number of retries to the command IOCB. 11416 * 11417 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11418 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 11419 * routine to issue the IOCB, which makes sure only one outstanding fabric 11420 * IOCB will be sent off HBA at any given time. 11421 * 11422 * Note that the ndlp reference count will be incremented by 1 for holding the 11423 * ndlp and the reference to ndlp will be stored into the ndlp field of 11424 * the IOCB for the completion callback function to the FDISC ELS command. 11425 * 11426 * Return code 11427 * 0 - Successfully issued fdisc iocb command 11428 * 1 - Failed to issue fdisc iocb command 11429 **/ 11430 static int 11431 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11432 uint8_t retry) 11433 { 11434 struct lpfc_hba *phba = vport->phba; 11435 IOCB_t *icmd; 11436 union lpfc_wqe128 *wqe = NULL; 11437 struct lpfc_iocbq *elsiocb; 11438 struct serv_parm *sp; 11439 uint8_t *pcmd; 11440 uint16_t cmdsize; 11441 int did = ndlp->nlp_DID; 11442 int rc; 11443 11444 vport->port_state = LPFC_FDISC; 11445 vport->fc_myDID = 0; 11446 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11447 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11448 ELS_CMD_FDISC); 11449 if (!elsiocb) { 11450 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11451 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11452 "0255 Issue FDISC: no IOCB\n"); 11453 return 1; 11454 } 11455 11456 if (phba->sli_rev == LPFC_SLI_REV4) { 11457 wqe = &elsiocb->wqe; 11458 bf_set(els_req64_sid, &wqe->els_req, 0); 11459 bf_set(els_req64_sp, &wqe->els_req, 1); 11460 } else { 11461 icmd = &elsiocb->iocb; 11462 icmd->un.elsreq64.myID = 0; 11463 icmd->un.elsreq64.fl = 1; 11464 icmd->ulpCt_h = 1; 11465 icmd->ulpCt_l = 0; 11466 } 11467 11468 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11469 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11470 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11471 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11472 sp = (struct serv_parm *) pcmd; 11473 /* Setup CSPs accordingly for Fabric */ 11474 sp->cmn.e_d_tov = 0; 11475 sp->cmn.w2.r_a_tov = 0; 11476 sp->cmn.virtual_fabric_support = 0; 11477 sp->cls1.classValid = 0; 11478 sp->cls2.seqDelivery = 1; 11479 sp->cls3.seqDelivery = 1; 11480 11481 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11482 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11483 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11484 pcmd += sizeof(uint32_t); /* Port Name */ 11485 memcpy(pcmd, &vport->fc_portname, 8); 11486 pcmd += sizeof(uint32_t); /* Node Name */ 11487 pcmd += sizeof(uint32_t); /* Node Name */ 11488 memcpy(pcmd, &vport->fc_nodename, 8); 11489 sp->cmn.valid_vendor_ver_level = 0; 11490 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11491 lpfc_set_disctmo(vport); 11492 11493 phba->fc_stat.elsXmitFDISC++; 
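/* The completion handler resets the discovery timer for every vport
 * still waiting to send its FDISC.
 */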
11494 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11495 11496 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11497 "Issue FDISC: did:x%x", 11498 did, 0, 0); 11499 11500 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11501 if (!elsiocb->ndlp) 11502 goto err_out; 11503 11504 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11505 if (rc == IOCB_ERROR) { 11506 lpfc_nlp_put(ndlp); 11507 goto err_out; 11508 } 11509 11510 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11511 return 0; 11512 11513 err_out: 11514 lpfc_els_free_iocb(phba, elsiocb); 11515 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11516 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11517 "0256 Issue FDISC: Cannot send IOCB\n"); 11518 return 1; 11519 } 11520 11521 /** 11522 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 11523 * @phba: pointer to lpfc hba data structure. 11524 * @cmdiocb: pointer to lpfc command iocb data structure. 11525 * @rspiocb: pointer to lpfc response iocb data structure. 11526 * 11527 * This routine is the completion callback function to the issuing of a LOGO 11528 * ELS command off a vport. It frees the command IOCB and then decrement the 11529 * reference count held on ndlp for this completion function, indicating that 11530 * the reference to the ndlp is no long needed. Note that the 11531 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 11532 * callback function and an additional explicit ndlp reference decrementation 11533 * will trigger the actual release of the ndlp. 11534 **/ 11535 static void 11536 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11537 struct lpfc_iocbq *rspiocb) 11538 { 11539 struct lpfc_vport *vport = cmdiocb->vport; 11540 IOCB_t *irsp; 11541 struct lpfc_nodelist *ndlp; 11542 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11543 u32 ulp_status, ulp_word4, did, tmo; 11544 11545 ndlp = cmdiocb->ndlp; 11546 11547 ulp_status = get_job_ulpstatus(phba, rspiocb); 11548 ulp_word4 = get_job_word4(phba, rspiocb); 11549 11550 if (phba->sli_rev == LPFC_SLI_REV4) { 11551 did = get_job_els_rsp64_did(phba, cmdiocb); 11552 tmo = get_wqe_tmo(cmdiocb); 11553 } else { 11554 irsp = &rspiocb->iocb; 11555 did = get_job_els_rsp64_did(phba, rspiocb); 11556 tmo = irsp->ulpTimeout; 11557 } 11558 11559 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11560 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 11561 ulp_status, ulp_word4, did); 11562 11563 /* NPIV LOGO completes to NPort <nlp_DID> */ 11564 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11565 "2928 NPIV LOGO completes to NPort x%x " 11566 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 11567 ndlp->nlp_DID, ulp_status, ulp_word4, 11568 tmo, vport->num_disc_nodes, 11569 kref_read(&ndlp->kref), ndlp->nlp_flag, 11570 ndlp->fc4_xpt_flags); 11571 11572 if (ulp_status == IOSTAT_SUCCESS) { 11573 spin_lock_irq(shost->host_lock); 11574 vport->fc_flag &= ~FC_NDISC_ACTIVE; 11575 vport->fc_flag &= ~FC_FABRIC; 11576 spin_unlock_irq(shost->host_lock); 11577 lpfc_can_disctmo(vport); 11578 } 11579 11580 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 11581 /* Wake up lpfc_vport_delete if waiting...*/ 11582 if (ndlp->logo_waitq) 11583 wake_up(ndlp->logo_waitq); 11584 spin_lock_irq(&ndlp->lock); 11585 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); 11586 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 11587 spin_unlock_irq(&ndlp->lock); 11588 } 11589 11590 /* Safe to release resources now. 
*/ 11591 lpfc_els_free_iocb(phba, cmdiocb); 11592 lpfc_nlp_put(ndlp); 11593 } 11594 11595 /** 11596 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11597 * @vport: pointer to a virtual N_Port data structure. 11598 * @ndlp: pointer to a node-list data structure. 11599 * 11600 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11601 * 11602 * Note that the ndlp reference count will be incremented by 1 for holding the 11603 * ndlp and the reference to ndlp will be stored into the ndlp field of 11604 * the IOCB for the completion callback function to the LOGO ELS command. 11605 * 11606 * Return codes 11607 * 0 - Successfully issued logo off the @vport 11608 * 1 - Failed to issue logo off the @vport 11609 **/ 11610 int 11611 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11612 { 11613 int rc = 0; 11614 struct lpfc_hba *phba = vport->phba; 11615 struct lpfc_iocbq *elsiocb; 11616 uint8_t *pcmd; 11617 uint16_t cmdsize; 11618 11619 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11620 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11621 ELS_CMD_LOGO); 11622 if (!elsiocb) 11623 return 1; 11624 11625 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11626 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11627 pcmd += sizeof(uint32_t); 11628 11629 /* Fill in LOGO payload */ 11630 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11631 pcmd += sizeof(uint32_t); 11632 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11633 11634 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11635 "Issue LOGO npiv did:x%x flg:x%x", 11636 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11637 11638 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11639 spin_lock_irq(&ndlp->lock); 11640 ndlp->nlp_flag |= NLP_LOGO_SND; 11641 spin_unlock_irq(&ndlp->lock); 11642 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11643 if (!elsiocb->ndlp) { 11644 lpfc_els_free_iocb(phba, elsiocb); 11645 goto err; 11646 } 11647 11648 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11649 if (rc == IOCB_ERROR) { 11650 lpfc_els_free_iocb(phba, elsiocb); 11651 lpfc_nlp_put(ndlp); 11652 goto err; 11653 } 11654 return 0; 11655 11656 err: 11657 spin_lock_irq(&ndlp->lock); 11658 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11659 spin_unlock_irq(&ndlp->lock); 11660 return 1; 11661 } 11662 11663 /** 11664 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11665 * @t: timer context used to obtain the lpfc hba. 11666 * 11667 * This routine is invoked by the fabric iocb block timer after 11668 * timeout. It posts the fabric iocb block timeout event by setting the 11669 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11670 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11671 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11672 * posted event WORKER_FABRIC_BLOCK_TMO. 
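 * The timer itself is armed by lpfc_block_fabric_iocbs() with a 100ms
 * timeout.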
11673 **/ 11674 void 11675 lpfc_fabric_block_timeout(struct timer_list *t) 11676 { 11677 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11678 unsigned long iflags; 11679 uint32_t tmo_posted; 11680 11681 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11682 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11683 if (!tmo_posted) 11684 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11685 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11686 11687 if (!tmo_posted) 11688 lpfc_worker_wake_up(phba); 11689 return; 11690 } 11691 11692 /** 11693 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11694 * @phba: pointer to lpfc hba data structure. 11695 * 11696 * This routine issues one fabric iocb from the driver internal list to 11697 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11698 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11699 * remove one pending fabric iocb from the driver internal list and invokes 11700 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 11701 **/ 11702 static void 11703 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11704 { 11705 struct lpfc_iocbq *iocb; 11706 unsigned long iflags; 11707 int ret; 11708 11709 repeat: 11710 iocb = NULL; 11711 spin_lock_irqsave(&phba->hbalock, iflags); 11712 /* Post any pending iocb to the SLI layer */ 11713 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11714 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11715 list); 11716 if (iocb) 11717 /* Increment fabric iocb count to hold the position */ 11718 atomic_inc(&phba->fabric_iocb_count); 11719 } 11720 spin_unlock_irqrestore(&phba->hbalock, iflags); 11721 if (iocb) { 11722 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11723 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11724 iocb->cmd_flag |= LPFC_IO_FABRIC; 11725 11726 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11727 "Fabric sched1: ste:x%x", 11728 iocb->vport->port_state, 0, 0); 11729 11730 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11731 11732 if (ret == IOCB_ERROR) { 11733 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11734 iocb->fabric_cmd_cmpl = NULL; 11735 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11736 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11737 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11738 iocb->cmd_cmpl(phba, iocb, iocb); 11739 11740 atomic_dec(&phba->fabric_iocb_count); 11741 goto repeat; 11742 } 11743 } 11744 } 11745 11746 /** 11747 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11748 * @phba: pointer to lpfc hba data structure. 11749 * 11750 * This routine unblocks the issuing fabric iocb command. The function 11751 * will clear the fabric iocb block bit and then invoke the routine 11752 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11753 * from the driver internal fabric iocb list. 11754 **/ 11755 void 11756 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11757 { 11758 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11759 11760 lpfc_resume_fabric_iocbs(phba); 11761 return; 11762 } 11763 11764 /** 11765 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11766 * @phba: pointer to lpfc hba data structure. 11767 * 11768 * This routine blocks the issuing fabric iocb for a specified amount of 11769 * time (currently 100 ms). This is done by set the fabric iocb block bit 11770 * and set up a timeout timer for 100ms. 
When the block bit is set, no more 11771 * fabric iocb will be issued out of the HBA. 11772 **/ 11773 static void 11774 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11775 { 11776 int blocked; 11777 11778 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11779 /* Start a timer to unblock fabric iocbs after 100ms */ 11780 if (!blocked) 11781 mod_timer(&phba->fabric_block_timer, 11782 jiffies + msecs_to_jiffies(100)); 11783 11784 return; 11785 } 11786 11787 /** 11788 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11789 * @phba: pointer to lpfc hba data structure. 11790 * @cmdiocb: pointer to lpfc command iocb data structure. 11791 * @rspiocb: pointer to lpfc response iocb data structure. 11792 * 11793 * This routine is the callback function that is put to the fabric iocb's 11794 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11795 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback 11796 * function first restores and invokes the original iocb's callback function 11797 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 11798 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 11799 **/ 11800 static void 11801 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11802 struct lpfc_iocbq *rspiocb) 11803 { 11804 struct ls_rjt stat; 11805 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 11806 u32 ulp_word4 = get_job_word4(phba, rspiocb); 11807 11808 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 11809 11810 switch (ulp_status) { 11811 case IOSTAT_NPORT_RJT: 11812 case IOSTAT_FABRIC_RJT: 11813 if (ulp_word4 & RJT_UNAVAIL_TEMP) 11814 lpfc_block_fabric_iocbs(phba); 11815 break; 11816 11817 case IOSTAT_NPORT_BSY: 11818 case IOSTAT_FABRIC_BSY: 11819 lpfc_block_fabric_iocbs(phba); 11820 break; 11821 11822 case IOSTAT_LS_RJT: 11823 stat.un.ls_rjt_error_be = 11824 cpu_to_be32(ulp_word4); 11825 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 11826 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 11827 lpfc_block_fabric_iocbs(phba); 11828 break; 11829 } 11830 11831 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 11832 11833 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl; 11834 cmdiocb->fabric_cmd_cmpl = NULL; 11835 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 11836 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb); 11837 11838 atomic_dec(&phba->fabric_iocb_count); 11839 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 11840 /* Post any pending iocbs to HBA */ 11841 lpfc_resume_fabric_iocbs(phba); 11842 } 11843 } 11844 11845 /** 11846 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 11847 * @phba: pointer to lpfc hba data structure. 11848 * @iocb: pointer to lpfc command iocb data structure. 11849 * 11850 * This routine is used as the top-level API for issuing a fabric iocb command 11851 * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver 11852 * function makes sure that only one fabric bound iocb will be outstanding at 11853 * any given time. As such, this function will first check to see whether there 11854 * is already an outstanding fabric iocb on the wire. If so, it will put the 11855 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 11856 * issued later. Otherwise, it will issue the iocb on the wire and update the 11857 * fabric iocb count it indicate that there is one fabric iocb on the wire. 
11858 * 11859 * Note, this implementation has a potential sending out fabric IOCBs out of 11860 * order. The problem is caused by the construction of the "ready" boolen does 11861 * not include the condition that the internal fabric IOCB list is empty. As 11862 * such, it is possible a fabric IOCB issued by this routine might be "jump" 11863 * ahead of the fabric IOCBs in the internal list. 11864 * 11865 * Return code 11866 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11867 * IOCB_ERROR - failed to issue fabric iocb 11868 **/ 11869 static int 11870 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11871 { 11872 unsigned long iflags; 11873 int ready; 11874 int ret; 11875 11876 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11877 11878 spin_lock_irqsave(&phba->hbalock, iflags); 11879 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11880 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11881 11882 if (ready) 11883 /* Increment fabric iocb count to hold the position */ 11884 atomic_inc(&phba->fabric_iocb_count); 11885 spin_unlock_irqrestore(&phba->hbalock, iflags); 11886 if (ready) { 11887 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11888 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11889 iocb->cmd_flag |= LPFC_IO_FABRIC; 11890 11891 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11892 "Fabric sched2: ste:x%x", 11893 iocb->vport->port_state, 0, 0); 11894 11895 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11896 11897 if (ret == IOCB_ERROR) { 11898 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11899 iocb->fabric_cmd_cmpl = NULL; 11900 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11901 atomic_dec(&phba->fabric_iocb_count); 11902 } 11903 } else { 11904 spin_lock_irqsave(&phba->hbalock, iflags); 11905 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11906 spin_unlock_irqrestore(&phba->hbalock, iflags); 11907 ret = IOCB_SUCCESS; 11908 } 11909 return ret; 11910 } 11911 11912 /** 11913 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11914 * @vport: pointer to a virtual N_Port data structure. 11915 * 11916 * This routine aborts all the IOCBs associated with a @vport from the 11917 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11918 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11919 * list, removes each IOCB associated with the @vport off the list, set the 11920 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11921 * associated with the IOCB. 11922 **/ 11923 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11924 { 11925 LIST_HEAD(completions); 11926 struct lpfc_hba *phba = vport->phba; 11927 struct lpfc_iocbq *tmp_iocb, *piocb; 11928 11929 spin_lock_irq(&phba->hbalock); 11930 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11931 list) { 11932 11933 if (piocb->vport != vport) 11934 continue; 11935 11936 list_move_tail(&piocb->list, &completions); 11937 } 11938 spin_unlock_irq(&phba->hbalock); 11939 11940 /* Cancel all the IOCBs from the completions list */ 11941 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11942 IOERR_SLI_ABORTED); 11943 } 11944 11945 /** 11946 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11947 * @ndlp: pointer to a node-list data structure. 11948 * 11949 * This routine aborts all the IOCBs associated with an @ndlp from the 11950 * driver internal fabric IOCB list. 
11951 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11952 * list, removes each IOCB associated with the @ndlp off the list, sets the
11953 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11954 * associated with the IOCB.
11955 **/
11956 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
11957 {
11958 LIST_HEAD(completions);
11959 struct lpfc_hba *phba = ndlp->phba;
11960 struct lpfc_iocbq *tmp_iocb, *piocb;
11961 struct lpfc_sli_ring *pring;
11962
11963 pring = lpfc_phba_elsring(phba);
11964
11965 if (unlikely(!pring))
11966 return;
11967
11968 spin_lock_irq(&phba->hbalock);
11969 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11970 list) {
11971 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
11972
11973 list_move_tail(&piocb->list, &completions);
11974 }
11975 }
11976 spin_unlock_irq(&phba->hbalock);
11977
11978 /* Cancel all the IOCBs from the completions list */
11979 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11980 IOERR_SLI_ABORTED);
11981 }
11982
11983 /**
11984 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
11985 * @phba: pointer to lpfc hba data structure.
11986 *
11987 * This routine aborts all the IOCBs currently on the driver internal
11988 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
11989 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
11990 * list, removes IOCBs off the list, sets the status field to
11991 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
11992 * the IOCB.
11993 **/
11994 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
11995 {
11996 LIST_HEAD(completions);
11997
11998 spin_lock_irq(&phba->hbalock);
11999 list_splice_init(&phba->fabric_iocb_list, &completions);
12000 spin_unlock_irq(&phba->hbalock);
12001
12002 /* Cancel all the IOCBs from the completions list */
12003 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12004 IOERR_SLI_ABORTED);
12005 }
12006
12007 /**
12008 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
12009 * @vport: pointer to lpfc vport data structure.
12010 *
12011 * This routine is invoked by the vport cleanup for deletions and the cleanup
12012 * for an ndlp on removal.
12013 **/
12014 void
12015 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
12016 {
12017 struct lpfc_hba *phba = vport->phba;
12018 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
12019 struct lpfc_nodelist *ndlp = NULL;
12020 unsigned long iflag = 0;
12021
12022 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
12023 list_for_each_entry_safe(sglq_entry, sglq_next,
12024 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
12025 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
12026 lpfc_nlp_put(sglq_entry->ndlp);
12027 ndlp = sglq_entry->ndlp;
12028 sglq_entry->ndlp = NULL;
12029
12030 /* If the xri on the abts_els_sgl list is for the Fport
12031 * node and the vport is unloading, the xri aborted wcqe
12032 * likely isn't coming back. Just release the sgl.
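* The ndlp reference held by this sglq entry was already dropped by the
* lpfc_nlp_put() call above, so only the sgl itself needs to be put back
* on the els sgl list here.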
12033 */ 12034 if ((vport->load_flag & FC_UNLOADING) && 12035 ndlp->nlp_DID == Fabric_DID) { 12036 list_del(&sglq_entry->list); 12037 sglq_entry->state = SGL_FREED; 12038 list_add_tail(&sglq_entry->list, 12039 &phba->sli4_hba.lpfc_els_sgl_list); 12040 } 12041 } 12042 } 12043 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 12044 return; 12045 } 12046 12047 /** 12048 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 12049 * @phba: pointer to lpfc hba data structure. 12050 * @axri: pointer to the els xri abort wcqe structure. 12051 * 12052 * This routine is invoked by the worker thread to process a SLI4 slow-path 12053 * ELS aborted xri. 12054 **/ 12055 void 12056 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 12057 struct sli4_wcqe_xri_aborted *axri) 12058 { 12059 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 12060 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 12061 uint16_t lxri = 0; 12062 12063 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 12064 unsigned long iflag = 0; 12065 struct lpfc_nodelist *ndlp; 12066 struct lpfc_sli_ring *pring; 12067 12068 pring = lpfc_phba_elsring(phba); 12069 12070 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 12071 list_for_each_entry_safe(sglq_entry, sglq_next, 12072 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 12073 if (sglq_entry->sli4_xritag == xri) { 12074 list_del(&sglq_entry->list); 12075 ndlp = sglq_entry->ndlp; 12076 sglq_entry->ndlp = NULL; 12077 list_add_tail(&sglq_entry->list, 12078 &phba->sli4_hba.lpfc_els_sgl_list); 12079 sglq_entry->state = SGL_FREED; 12080 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 12081 iflag); 12082 12083 if (ndlp) { 12084 lpfc_set_rrq_active(phba, ndlp, 12085 sglq_entry->sli4_lxritag, 12086 rxid, 1); 12087 lpfc_nlp_put(ndlp); 12088 } 12089 12090 /* Check if TXQ queue needs to be serviced */ 12091 if (pring && !list_empty(&pring->txq)) 12092 lpfc_worker_wake_up(phba); 12093 return; 12094 } 12095 } 12096 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 12097 lxri = lpfc_sli4_xri_inrange(phba, xri); 12098 if (lxri == NO_XRI) 12099 return; 12100 12101 spin_lock_irqsave(&phba->hbalock, iflag); 12102 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 12103 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 12104 spin_unlock_irqrestore(&phba->hbalock, iflag); 12105 return; 12106 } 12107 sglq_entry->state = SGL_XRI_ABORTED; 12108 spin_unlock_irqrestore(&phba->hbalock, iflag); 12109 return; 12110 } 12111 12112 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 12113 * @vport: pointer to virtual port object. 12114 * @ndlp: nodelist pointer for the impacted node. 12115 * 12116 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 12117 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 12118 * the driver is required to send a LOGO to the remote node before it 12119 * attempts to recover its login to the remote node. 12120 */ 12121 void 12122 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 12123 struct lpfc_nodelist *ndlp) 12124 { 12125 struct Scsi_Host *shost; 12126 struct lpfc_hba *phba; 12127 unsigned long flags = 0; 12128 12129 shost = lpfc_shost_from_vport(vport); 12130 phba = vport->phba; 12131 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 12132 lpfc_printf_log(phba, KERN_INFO, 12133 LOG_SLI, "3093 No rport recovery needed. 
" 12134 "rport in state 0x%x\n", ndlp->nlp_state); 12135 return; 12136 } 12137 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12138 "3094 Start rport recovery on shost id 0x%x " 12139 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 12140 "flags 0x%x\n", 12141 shost->host_no, ndlp->nlp_DID, 12142 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 12143 ndlp->nlp_flag); 12144 /* 12145 * The rport is not responding. Remove the FCP-2 flag to prevent 12146 * an ADISC in the follow-up recovery code. 12147 */ 12148 spin_lock_irqsave(&ndlp->lock, flags); 12149 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 12150 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 12151 spin_unlock_irqrestore(&ndlp->lock, flags); 12152 lpfc_unreg_rpi(vport, ndlp); 12153 } 12154 12155 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 12156 { 12157 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 12158 } 12159 12160 static void 12161 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 12162 { 12163 u32 i; 12164 12165 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 12166 return; 12167 12168 for (i = min; i <= max; i++) 12169 set_bit(i, vport->vmid_priority_range); 12170 } 12171 12172 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 12173 { 12174 set_bit(ctcl_vmid, vport->vmid_priority_range); 12175 } 12176 12177 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 12178 { 12179 u32 i; 12180 12181 i = find_first_bit(vport->vmid_priority_range, 12182 LPFC_VMID_MAX_PRIORITY_RANGE); 12183 12184 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 12185 return 0; 12186 12187 clear_bit(i, vport->vmid_priority_range); 12188 return i; 12189 } 12190 12191 #define MAX_PRIORITY_DESC 255 12192 12193 static void 12194 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12195 struct lpfc_iocbq *rspiocb) 12196 { 12197 struct lpfc_vport *vport = cmdiocb->vport; 12198 struct priority_range_desc *desc; 12199 struct lpfc_dmabuf *prsp = NULL; 12200 struct lpfc_vmid_priority_range *vmid_range = NULL; 12201 u32 *data; 12202 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; 12203 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12204 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12205 u8 *pcmd, max_desc; 12206 u32 len, i; 12207 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 12208 12209 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12210 if (!prsp) 12211 goto out; 12212 12213 pcmd = prsp->virt; 12214 data = (u32 *)pcmd; 12215 if (data[0] == ELS_CMD_LS_RJT) { 12216 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12217 "3277 QFPA LS_RJT x%x x%x\n", 12218 data[0], data[1]); 12219 goto out; 12220 } 12221 if (ulp_status) { 12222 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 12223 "6529 QFPA failed with status x%x x%x\n", 12224 ulp_status, ulp_word4); 12225 goto out; 12226 } 12227 12228 if (!vport->qfpa_res) { 12229 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 12230 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 12231 GFP_KERNEL); 12232 if (!vport->qfpa_res) 12233 goto out; 12234 } 12235 12236 len = *((u32 *)(pcmd + 4)); 12237 len = be32_to_cpu(len); 12238 memcpy(vport->qfpa_res, pcmd, len + 8); 12239 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 12240 12241 desc = (struct priority_range_desc *)(pcmd + 8); 12242 vmid_range = vport->vmid_priority.vmid_range; 12243 if (!vmid_range) { 12244 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 12245 GFP_KERNEL); 12246 if (!vmid_range) { 12247 kfree(vport->qfpa_res); 12248 goto out; 12249 } 12250 
vport->vmid_priority.vmid_range = vmid_range; 12251 } 12252 vport->vmid_priority.num_descriptors = len; 12253 12254 for (i = 0; i < len; i++, vmid_range++, desc++) { 12255 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 12256 "6539 vmid values low=%d, high=%d, qos=%d, " 12257 "local ve id=%d\n", desc->lo_range, 12258 desc->hi_range, desc->qos_priority, 12259 desc->local_ve_id); 12260 12261 vmid_range->low = desc->lo_range << 1; 12262 if (desc->local_ve_id == QFPA_ODD_ONLY) 12263 vmid_range->low++; 12264 if (desc->qos_priority) 12265 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; 12266 vmid_range->qos = desc->qos_priority; 12267 12268 vmid_range->high = desc->hi_range << 1; 12269 if ((desc->local_ve_id == QFPA_ODD_ONLY) || 12270 (desc->local_ve_id == QFPA_EVEN_ODD)) 12271 vmid_range->high++; 12272 } 12273 lpfc_init_cs_ctl_bitmap(vport); 12274 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { 12275 lpfc_vmid_set_cs_ctl_range(vport, 12276 vport->vmid_priority.vmid_range[i].low, 12277 vport->vmid_priority.vmid_range[i].high); 12278 } 12279 12280 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; 12281 out: 12282 lpfc_els_free_iocb(phba, cmdiocb); 12283 lpfc_nlp_put(ndlp); 12284 } 12285 12286 int lpfc_issue_els_qfpa(struct lpfc_vport *vport) 12287 { 12288 struct lpfc_hba *phba = vport->phba; 12289 struct lpfc_nodelist *ndlp; 12290 struct lpfc_iocbq *elsiocb; 12291 u8 *pcmd; 12292 int ret; 12293 12294 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 12295 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12296 return -ENXIO; 12297 12298 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, 12299 ndlp->nlp_DID, ELS_CMD_QFPA); 12300 if (!elsiocb) 12301 return -ENOMEM; 12302 12303 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 12304 12305 *((u32 *)(pcmd)) = ELS_CMD_QFPA; 12306 pcmd += 4; 12307 12308 elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa; 12309 12310 elsiocb->ndlp = lpfc_nlp_get(ndlp); 12311 if (!elsiocb->ndlp) { 12312 lpfc_els_free_iocb(vport->phba, elsiocb); 12313 return -ENXIO; 12314 } 12315 12316 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); 12317 if (ret != IOCB_SUCCESS) { 12318 lpfc_els_free_iocb(phba, elsiocb); 12319 lpfc_nlp_put(ndlp); 12320 return -EIO; 12321 } 12322 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; 12323 return 0; 12324 } 12325 12326 int 12327 lpfc_vmid_uvem(struct lpfc_vport *vport, 12328 struct lpfc_vmid *vmid, bool instantiated) 12329 { 12330 struct lpfc_vem_id_desc *vem_id_desc; 12331 struct lpfc_nodelist *ndlp; 12332 struct lpfc_iocbq *elsiocb; 12333 struct instantiated_ve_desc *inst_desc; 12334 struct lpfc_vmid_context *vmid_context; 12335 u8 *pcmd; 12336 u32 *len; 12337 int ret = 0; 12338 12339 ndlp = lpfc_findnode_did(vport, Fabric_DID); 12340 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12341 return -ENXIO; 12342 12343 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); 12344 if (!vmid_context) 12345 return -ENOMEM; 12346 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, 12347 ndlp, Fabric_DID, ELS_CMD_UVEM); 12348 if (!elsiocb) 12349 goto out; 12350 12351 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 12352 "3427 Host vmid %s %d\n", 12353 vmid->host_vmid, instantiated); 12354 vmid_context->vmp = vmid; 12355 vmid_context->nlp = ndlp; 12356 vmid_context->instantiated = instantiated; 12357 elsiocb->vmid_tag.vmid_context = vmid_context; 12358 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 12359 12360 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) 12361 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, 12362 
LPFC_COMPRESS_VMID_SIZE); 12363 12364 *((u32 *)(pcmd)) = ELS_CMD_UVEM; 12365 len = (u32 *)(pcmd + 4); 12366 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); 12367 12368 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); 12369 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); 12370 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); 12371 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, 12372 LPFC_COMPRESS_VMID_SIZE); 12373 12374 inst_desc = (struct instantiated_ve_desc *)(pcmd + 32); 12375 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 12376 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); 12377 memcpy(inst_desc->global_vem_id, vmid->host_vmid, 12378 LPFC_COMPRESS_VMID_SIZE); 12379 12380 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); 12381 bf_set(lpfc_instantiated_local_id, inst_desc, 12382 vmid->un.cs_ctl_vmid); 12383 if (instantiated) { 12384 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 12385 } else { 12386 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); 12387 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); 12388 } 12389 inst_desc->word6 = cpu_to_be32(inst_desc->word6); 12390 12391 elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem; 12392 12393 elsiocb->ndlp = lpfc_nlp_get(ndlp); 12394 if (!elsiocb->ndlp) { 12395 lpfc_els_free_iocb(vport->phba, elsiocb); 12396 goto out; 12397 } 12398 12399 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); 12400 if (ret != IOCB_SUCCESS) { 12401 lpfc_els_free_iocb(vport->phba, elsiocb); 12402 lpfc_nlp_put(ndlp); 12403 goto out; 12404 } 12405 12406 return 0; 12407 out: 12408 kfree(vmid_context); 12409 return -EIO; 12410 } 12411 12412 static void 12413 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, 12414 struct lpfc_iocbq *rspiocb) 12415 { 12416 struct lpfc_vport *vport = icmdiocb->vport; 12417 struct lpfc_dmabuf *prsp = NULL; 12418 struct lpfc_vmid_context *vmid_context = 12419 icmdiocb->vmid_tag.vmid_context; 12420 struct lpfc_nodelist *ndlp = icmdiocb->ndlp; 12421 u8 *pcmd; 12422 u32 *data; 12423 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12424 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12425 struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf; 12426 struct lpfc_vmid *vmid; 12427 12428 vmid = vmid_context->vmp; 12429 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12430 ndlp = NULL; 12431 12432 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12433 if (!prsp) 12434 goto out; 12435 pcmd = prsp->virt; 12436 data = (u32 *)pcmd; 12437 if (data[0] == ELS_CMD_LS_RJT) { 12438 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12439 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); 12440 goto out; 12441 } 12442 if (ulp_status) { 12443 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12444 "4533 UVEM error status %x: %x\n", 12445 ulp_status, ulp_word4); 12446 goto out; 12447 } 12448 spin_lock(&phba->hbalock); 12449 /* Set IN USE flag */ 12450 vport->vmid_flag |= LPFC_VMID_IN_USE; 12451 phba->pport->vmid_flag |= LPFC_VMID_IN_USE; 12452 spin_unlock(&phba->hbalock); 12453 12454 if (vmid_context->instantiated) { 12455 write_lock(&vport->vmid_lock); 12456 vmid->flag |= LPFC_VMID_REGISTERED; 12457 vmid->flag &= ~LPFC_VMID_REQ_REGISTER; 12458 write_unlock(&vport->vmid_lock); 12459 } 12460 12461 out: 12462 kfree(vmid_context); 12463 lpfc_els_free_iocb(phba, icmdiocb); 12464 lpfc_nlp_put(ndlp); 12465 } 12466