1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 /* See Fibre Channel protocol T11 FC-LS for details */ 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 30 #include <scsi/scsi.h> 31 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_host.h> 33 #include <scsi/scsi_transport_fc.h> 34 #include <uapi/scsi/fc/fc_fs.h> 35 #include <uapi/scsi/fc/fc_els.h> 36 37 #include "lpfc_hw4.h" 38 #include "lpfc_hw.h" 39 #include "lpfc_sli.h" 40 #include "lpfc_sli4.h" 41 #include "lpfc_nl.h" 42 #include "lpfc_disc.h" 43 #include "lpfc_scsi.h" 44 #include "lpfc.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_crtn.h" 47 #include "lpfc_vport.h" 48 #include "lpfc_debugfs.h" 49 50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 51 struct lpfc_iocbq *); 52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 53 struct lpfc_iocbq *); 54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 56 struct lpfc_nodelist *ndlp, uint8_t retry); 57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 58 struct lpfc_iocbq *iocb); 59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, 60 struct lpfc_iocbq *cmdiocb, 61 struct lpfc_iocbq *rspiocb); 62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, 63 struct lpfc_iocbq *); 64 65 static int lpfc_max_els_tries = 3; 66 67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); 68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); 69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); 70 71 /** 72 * lpfc_els_chk_latt - Check host link attention event for a vport 73 * @vport: pointer to a host virtual N_Port data structure. 74 * 75 * This routine checks whether there is an outstanding host link 76 * attention event during the discovery process with the @vport. It is done 77 * by reading the HBA's Host Attention (HA) register. 
If there are any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
 * state is LPFC_VPORT_READY, the request for checking host link attention
 * events will be ignored and a return code shall indicate that no host link
 * attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	set_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

static bool lpfc_is_els_acc_rsp(struct lpfc_dmabuf *buf)
{
	struct fc_els_ls_acc *rsp = buf->virt;

	if (rsp && rsp->la_cmd == ELS_LS_ACC)
		return true;
	return false;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expect_rsp: flag indicating whether response is expected.
 * @cmd_size: size of the ELS command.
 * @retry: number of retries to the command when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed
 * into the routine, so that the discovery state machine can issue an
 * Extended Link Service (ELS) command. It is a generic lpfc-IOCB
 * allocation and preparation routine used by all the discovery state
 * machine routines; the ELS command-specific fields are set up later by
 * the individual discovery state machine routines after this routine has
 * allocated and prepared a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs) and allocates buffers for both the
 * command payload and the response payload (if a response is expected).
 * The reference count on the ndlp is incremented by 1 and the reference
 * to the ndlp is put into the ndlp field of the IOCB data structure for
 * this IOCB to hold the ndlp reference for the command's callback function
 * to access later.
163 * 164 * Return code 165 * Pointer to the newly allocated/prepared els iocb data structure 166 * NULL - when els iocb data structure allocation/preparation failed 167 **/ 168 struct lpfc_iocbq * 169 lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, 170 u16 cmd_size, u8 retry, 171 struct lpfc_nodelist *ndlp, u32 did, 172 u32 elscmd) 173 { 174 struct lpfc_hba *phba = vport->phba; 175 struct lpfc_iocbq *elsiocb; 176 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp; 177 struct ulp_bde64_le *bpl; 178 u32 timeout = 0; 179 180 if (!lpfc_is_link_up(phba)) 181 return NULL; 182 183 /* Allocate buffer for command iocb */ 184 elsiocb = lpfc_sli_get_iocbq(phba); 185 if (!elsiocb) 186 return NULL; 187 188 /* 189 * If this command is for fabric controller and HBA running 190 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 191 */ 192 if (did == Fabric_DID && 193 test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && 194 (elscmd == ELS_CMD_FLOGI || 195 elscmd == ELS_CMD_FDISC || 196 elscmd == ELS_CMD_LOGO)) 197 switch (elscmd) { 198 case ELS_CMD_FLOGI: 199 elsiocb->cmd_flag |= 200 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 201 & LPFC_FIP_ELS_ID_MASK); 202 break; 203 case ELS_CMD_FDISC: 204 elsiocb->cmd_flag |= 205 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 206 & LPFC_FIP_ELS_ID_MASK); 207 break; 208 case ELS_CMD_LOGO: 209 elsiocb->cmd_flag |= 210 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 211 & LPFC_FIP_ELS_ID_MASK); 212 break; 213 } 214 else 215 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; 216 217 /* fill in BDEs for command */ 218 /* Allocate buffer for command payload */ 219 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); 220 if (pcmd) 221 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 222 if (!pcmd || !pcmd->virt) 223 goto els_iocb_free_pcmb_exit; 224 225 INIT_LIST_HEAD(&pcmd->list); 226 227 /* Allocate buffer for response payload */ 228 if (expect_rsp) { 229 prsp = kmalloc(sizeof(*prsp), GFP_KERNEL); 230 if (prsp) 231 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 232 &prsp->phys); 233 if (!prsp || !prsp->virt) 234 goto els_iocb_free_prsp_exit; 235 INIT_LIST_HEAD(&prsp->list); 236 } else { 237 prsp = NULL; 238 } 239 240 /* Allocate buffer for Buffer ptr list */ 241 pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL); 242 if (pbuflist) 243 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 244 &pbuflist->phys); 245 if (!pbuflist || !pbuflist->virt) 246 goto els_iocb_free_pbuf_exit; 247 248 INIT_LIST_HEAD(&pbuflist->list); 249 250 if (expect_rsp) { 251 switch (elscmd) { 252 case ELS_CMD_FLOGI: 253 timeout = FF_DEF_RATOV * 2; 254 break; 255 case ELS_CMD_LOGO: 256 timeout = phba->fc_ratov; 257 break; 258 default: 259 timeout = phba->fc_ratov * 2; 260 } 261 262 /* Fill SGE for the num bde count */ 263 elsiocb->num_bdes = 2; 264 } 265 266 if (phba->sli_rev == LPFC_SLI_REV4) 267 bmp = pcmd; 268 else 269 bmp = pbuflist; 270 271 lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did, 272 elscmd, timeout, expect_rsp); 273 274 bpl = (struct ulp_bde64_le *)pbuflist->virt; 275 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys)); 276 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys)); 277 bpl->type_size = cpu_to_le32(cmd_size); 278 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 279 280 if (expect_rsp) { 281 bpl++; 282 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys)); 283 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys)); 284 bpl->type_size = cpu_to_le32(FCELSSIZE); 285 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); 286 } 287 288 elsiocb->cmd_dmabuf = pcmd; 
289 elsiocb->bpl_dmabuf = pbuflist; 290 elsiocb->retry = retry; 291 elsiocb->vport = vport; 292 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 293 294 if (prsp) 295 list_add(&prsp->list, &pcmd->list); 296 if (expect_rsp) { 297 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 298 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 299 "0116 Xmit ELS command x%x to remote " 300 "NPORT x%x I/O tag: x%x, port state:x%x " 301 "rpi x%x fc_flag:x%lx\n", 302 elscmd, did, elsiocb->iotag, 303 vport->port_state, ndlp->nlp_rpi, 304 vport->fc_flag); 305 } else { 306 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 307 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 308 "0117 Xmit ELS response x%x to remote " 309 "NPORT x%x I/O tag: x%x, size: x%x " 310 "port_state x%x rpi x%x fc_flag x%lx\n", 311 elscmd, ndlp->nlp_DID, elsiocb->iotag, 312 cmd_size, vport->port_state, 313 ndlp->nlp_rpi, vport->fc_flag); 314 } 315 316 return elsiocb; 317 318 els_iocb_free_pbuf_exit: 319 if (expect_rsp) 320 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 321 kfree(pbuflist); 322 323 els_iocb_free_prsp_exit: 324 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 325 kfree(prsp); 326 327 els_iocb_free_pcmb_exit: 328 kfree(pcmd); 329 lpfc_sli_release_iocbq(phba, elsiocb); 330 return NULL; 331 } 332 333 /** 334 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport 335 * @vport: pointer to a host virtual N_Port data structure. 336 * 337 * This routine issues a fabric registration login for a @vport. An 338 * active ndlp node with Fabric_DID must already exist for this @vport. 339 * The routine invokes two mailbox commands to carry out fabric registration 340 * login through the HBA firmware: the first mailbox command requests the 341 * HBA to perform link configuration for the @vport; and the second mailbox 342 * command requests the HBA to perform the actual fabric registration login 343 * with the @vport. 344 * 345 * Return code 346 * 0 - successfully issued fabric registration login for @vport 347 * -ENXIO -- failed to issue fabric registration login for @vport 348 **/ 349 int 350 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 351 { 352 struct lpfc_hba *phba = vport->phba; 353 LPFC_MBOXQ_t *mbox; 354 struct lpfc_nodelist *ndlp; 355 struct serv_parm *sp; 356 int rc; 357 int err = 0; 358 359 sp = &phba->fc_fabparam; 360 ndlp = lpfc_findnode_did(vport, Fabric_DID); 361 if (!ndlp) { 362 err = 1; 363 goto fail; 364 } 365 366 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 367 if (!mbox) { 368 err = 2; 369 goto fail; 370 } 371 372 vport->port_state = LPFC_FABRIC_CFG_LINK; 373 lpfc_config_link(phba, mbox); 374 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 375 mbox->vport = vport; 376 377 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 378 if (rc == MBX_NOT_FINISHED) { 379 err = 3; 380 goto fail_free_mbox; 381 } 382 383 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 384 if (!mbox) { 385 err = 4; 386 goto fail; 387 } 388 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 389 ndlp->nlp_rpi); 390 if (rc) { 391 err = 5; 392 goto fail_free_mbox; 393 } 394 395 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 396 mbox->vport = vport; 397 /* increment the reference count on ndlp to hold reference 398 * for the callback routine. 
399 */ 400 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 401 if (!mbox->ctx_ndlp) { 402 err = 6; 403 goto fail_free_mbox; 404 } 405 406 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 407 if (rc == MBX_NOT_FINISHED) { 408 err = 7; 409 goto fail_issue_reg_login; 410 } 411 412 return 0; 413 414 fail_issue_reg_login: 415 /* decrement the reference count on ndlp just incremented 416 * for the failed mbox command. 417 */ 418 lpfc_nlp_put(ndlp); 419 fail_free_mbox: 420 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 421 fail: 422 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 423 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 424 "0249 Cannot issue Register Fabric login: Err %d\n", 425 err); 426 return -ENXIO; 427 } 428 429 /** 430 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login 431 * @vport: pointer to a host virtual N_Port data structure. 432 * 433 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 434 * the @vport. This mailbox command is necessary for SLI4 port only. 435 * 436 * Return code 437 * 0 - successfully issued REG_VFI for @vport 438 * A failure code otherwise. 439 **/ 440 int 441 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 442 { 443 struct lpfc_hba *phba = vport->phba; 444 LPFC_MBOXQ_t *mboxq = NULL; 445 struct lpfc_nodelist *ndlp; 446 struct lpfc_dmabuf *dmabuf = NULL; 447 int rc = 0; 448 449 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ 450 if ((phba->sli_rev == LPFC_SLI_REV4) && 451 !(phba->link_flag & LS_LOOPBACK_MODE) && 452 !test_bit(FC_PT2PT, &vport->fc_flag)) { 453 ndlp = lpfc_findnode_did(vport, Fabric_DID); 454 if (!ndlp) { 455 rc = -ENODEV; 456 goto fail; 457 } 458 } 459 460 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 461 if (!mboxq) { 462 rc = -ENOMEM; 463 goto fail; 464 } 465 466 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ 467 if (test_bit(FC_FABRIC, &vport->fc_flag) || 468 test_bit(FC_PT2PT, &vport->fc_flag)) { 469 rc = lpfc_mbox_rsrc_prep(phba, mboxq); 470 if (rc) { 471 rc = -ENOMEM; 472 goto fail_mbox; 473 } 474 dmabuf = mboxq->ctx_buf; 475 memcpy(dmabuf->virt, &phba->fc_fabparam, 476 sizeof(struct serv_parm)); 477 } 478 479 vport->port_state = LPFC_FABRIC_CFG_LINK; 480 if (dmabuf) { 481 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 482 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */ 483 mboxq->ctx_buf = dmabuf; 484 } else { 485 lpfc_reg_vfi(mboxq, vport, 0); 486 } 487 488 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 489 mboxq->vport = vport; 490 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 491 if (rc == MBX_NOT_FINISHED) { 492 rc = -ENXIO; 493 goto fail_mbox; 494 } 495 return 0; 496 497 fail_mbox: 498 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 499 fail: 500 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 501 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 502 "0289 Issue Register VFI failed: Err %d\n", rc); 503 return rc; 504 } 505 506 /** 507 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login 508 * @vport: pointer to a host virtual N_Port data structure. 509 * 510 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for 511 * the @vport. This mailbox command is necessary for SLI4 port only. 512 * 513 * Return code 514 * 0 - successfully issued REG_VFI for @vport 515 * A failure code otherwise. 
516 **/ 517 int 518 lpfc_issue_unreg_vfi(struct lpfc_vport *vport) 519 { 520 struct lpfc_hba *phba = vport->phba; 521 LPFC_MBOXQ_t *mboxq; 522 int rc; 523 524 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 525 if (!mboxq) { 526 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 527 "2556 UNREG_VFI mbox allocation failed" 528 "HBA state x%x\n", phba->pport->port_state); 529 return -ENOMEM; 530 } 531 532 lpfc_unreg_vfi(mboxq, vport); 533 mboxq->vport = vport; 534 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; 535 536 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 537 if (rc == MBX_NOT_FINISHED) { 538 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 539 "2557 UNREG_VFI issue mbox failed rc x%x " 540 "HBA state x%x\n", 541 rc, phba->pport->port_state); 542 mempool_free(mboxq, phba->mbox_mem_pool); 543 return -EIO; 544 } 545 546 clear_bit(FC_VFI_REGISTERED, &vport->fc_flag); 547 return 0; 548 } 549 550 /** 551 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 552 * @vport: pointer to a host virtual N_Port data structure. 553 * @sp: pointer to service parameter data structure. 554 * 555 * This routine is called from FLOGI/FDISC completion handler functions. 556 * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric 557 * node nodename is changed in the completion service parameter else return 558 * 0. This function also set flag in the vport data structure to delay 559 * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit 560 * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric 561 * node nodename is changed in the completion service parameter. 562 * 563 * Return code 564 * 0 - FCID and Fabric Nodename and Fabric portname is not changed. 565 * 1 - FCID or Fabric Nodename or Fabric portname is changed. 566 * 567 **/ 568 static uint8_t 569 lpfc_check_clean_addr_bit(struct lpfc_vport *vport, 570 struct serv_parm *sp) 571 { 572 struct lpfc_hba *phba = vport->phba; 573 uint8_t fabric_param_changed = 0; 574 575 if ((vport->fc_prevDID != vport->fc_myDID) || 576 memcmp(&vport->fabric_portname, &sp->portName, 577 sizeof(struct lpfc_name)) || 578 memcmp(&vport->fabric_nodename, &sp->nodeName, 579 sizeof(struct lpfc_name)) || 580 (vport->vport_flag & FAWWPN_PARAM_CHG)) { 581 fabric_param_changed = 1; 582 vport->vport_flag &= ~FAWWPN_PARAM_CHG; 583 } 584 /* 585 * Word 1 Bit 31 in common service parameter is overloaded. 586 * Word 1 Bit 31 in FLOGI request is multiple NPort request 587 * Word 1 Bit 31 in FLOGI response is clean address bit 588 * 589 * If fabric parameter is changed and clean address bit is 590 * cleared delay nport discovery if 591 * - vport->fc_prevDID != 0 (not initial discovery) OR 592 * - lpfc_delay_discovery module parameter is set. 593 */ 594 if (fabric_param_changed && !sp->cmn.clean_address_bit && 595 (vport->fc_prevDID || phba->cfg_delay_discovery)) 596 set_bit(FC_DISC_DELAYED, &vport->fc_flag); 597 598 return fabric_param_changed; 599 } 600 601 602 /** 603 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 604 * @vport: pointer to a host virtual N_Port data structure. 605 * @ndlp: pointer to a node-list data structure. 606 * @sp: pointer to service parameter data structure. 607 * @ulp_word4: command response value 608 * 609 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 610 * function to handle the completion of a Fabric Login (FLOGI) into a fabric 611 * port in a fabric topology. 
It properly sets up the parameters of the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID of the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always returns 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, uint32_t ulp_word4)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	set_bit(FC_FABRIC, &vport->fc_flag);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
		set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);

	vport->fc_myDID = ulp_word4 & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}
	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV, it still expects us
			 * to call reg_vnpid at least for the physical host.
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do
some special processing because of the SLI 706 * Port's default settings of the Common Service Parameters. 707 */ 708 if ((phba->sli_rev == LPFC_SLI_REV4) && 709 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { 710 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 711 if (fabric_param_changed) 712 lpfc_unregister_fcf_prep(phba); 713 714 /* This should just update the VFI CSPs*/ 715 if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) 716 lpfc_issue_reg_vfi(vport); 717 } 718 719 if (fabric_param_changed && 720 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) { 721 722 /* If our NportID changed, we need to ensure all 723 * remaining NPORTs get unreg_login'ed. 724 */ 725 list_for_each_entry_safe(np, next_np, 726 &vport->fc_nodes, nlp_listp) { 727 if ((np->nlp_state != NLP_STE_NPR_NODE) || 728 !(np->nlp_flag & NLP_NPR_ADISC)) 729 continue; 730 spin_lock_irq(&np->lock); 731 np->nlp_flag &= ~NLP_NPR_ADISC; 732 spin_unlock_irq(&np->lock); 733 lpfc_unreg_rpi(vport, np); 734 } 735 lpfc_cleanup_pending_mbox(vport); 736 737 if (phba->sli_rev == LPFC_SLI_REV4) { 738 lpfc_sli4_unreg_all_rpis(vport); 739 lpfc_mbx_unreg_vpi(vport); 740 set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag); 741 } 742 743 /* 744 * For SLI3 and SLI4, the VPI needs to be reregistered in 745 * response to this fabric parameter change event. 746 */ 747 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 748 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 749 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) { 750 /* 751 * Driver needs to re-reg VPI in order for f/w 752 * to update the MAC address. 753 */ 754 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 755 lpfc_register_new_vport(phba, vport, ndlp); 756 return 0; 757 } 758 759 if (phba->sli_rev < LPFC_SLI_REV4) { 760 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 761 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 762 test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) 763 lpfc_register_new_vport(phba, vport, ndlp); 764 else 765 lpfc_issue_fabric_reglogin(vport); 766 } else { 767 ndlp->nlp_type |= NLP_FABRIC; 768 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 769 if ((!test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) && 770 (vport->vpi_state & LPFC_VPI_REGISTERED)) { 771 lpfc_start_fdiscs(phba); 772 lpfc_do_scr_ns_plogi(phba, vport); 773 } else if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) 774 lpfc_issue_init_vpi(vport); 775 else { 776 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 777 "3135 Need register VFI: (x%x/%x)\n", 778 vport->fc_prevDID, vport->fc_myDID); 779 lpfc_issue_reg_vfi(vport); 780 } 781 } 782 return 0; 783 } 784 785 /** 786 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 787 * @vport: pointer to a host virtual N_Port data structure. 788 * @ndlp: pointer to a node-list data structure. 789 * @sp: pointer to service parameter data structure. 790 * 791 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 792 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port 793 * in a point-to-point topology. First, the @vport's N_Port Name is compared 794 * with the received N_Port Name: if the @vport's N_Port Name is greater than 795 * the received N_Port Name lexicographically, this node shall assign local 796 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and 797 * will send out Port Login (PLOGI) with the N_Port IDs assigned. 
Otherwise, 798 * this node shall just wait for the remote node to issue PLOGI and assign 799 * N_Port IDs. 800 * 801 * Return code 802 * 0 - Success 803 * -ENXIO - Fail 804 **/ 805 static int 806 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 807 struct serv_parm *sp) 808 { 809 struct lpfc_hba *phba = vport->phba; 810 LPFC_MBOXQ_t *mbox; 811 int rc; 812 813 clear_bit(FC_FABRIC, &vport->fc_flag); 814 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 815 set_bit(FC_PT2PT, &vport->fc_flag); 816 817 /* If we are pt2pt with another NPort, force NPIV off! */ 818 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 819 820 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 821 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { 822 lpfc_unregister_fcf_prep(phba); 823 clear_bit(FC_VFI_REGISTERED, &vport->fc_flag); 824 phba->fc_topology_changed = 0; 825 } 826 827 rc = memcmp(&vport->fc_portname, &sp->portName, 828 sizeof(vport->fc_portname)); 829 830 if (rc >= 0) { 831 /* This side will initiate the PLOGI */ 832 set_bit(FC_PT2PT_PLOGI, &vport->fc_flag); 833 834 /* 835 * N_Port ID cannot be 0, set our Id to LocalID 836 * the other side will be RemoteID. 837 */ 838 839 /* not equal */ 840 if (rc) 841 vport->fc_myDID = PT2PT_LocalID; 842 843 /* If not registered with a transport, decrement ndlp reference 844 * count indicating that ndlp can be safely released when other 845 * references are removed. 846 */ 847 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 848 lpfc_nlp_put(ndlp); 849 850 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 851 if (!ndlp) { 852 /* 853 * Cannot find existing Fabric ndlp, so allocate a 854 * new one 855 */ 856 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); 857 if (!ndlp) 858 goto fail; 859 } 860 861 memcpy(&ndlp->nlp_portname, &sp->portName, 862 sizeof(struct lpfc_name)); 863 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 864 sizeof(struct lpfc_name)); 865 /* Set state will put ndlp onto node list if not already done */ 866 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 867 spin_lock_irq(&ndlp->lock); 868 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 869 spin_unlock_irq(&ndlp->lock); 870 871 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 872 if (!mbox) 873 goto fail; 874 875 lpfc_config_link(phba, mbox); 876 877 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 878 mbox->vport = vport; 879 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 880 if (rc == MBX_NOT_FINISHED) { 881 mempool_free(mbox, phba->mbox_mem_pool); 882 goto fail; 883 } 884 } else { 885 /* This side will wait for the PLOGI. If not registered with 886 * a transport, decrement node reference count indicating that 887 * ndlp can be released when other references are removed. 888 */ 889 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 890 lpfc_nlp_put(ndlp); 891 892 /* Start discovery - this should just do CLEAR_LA */ 893 lpfc_disc_start(vport); 894 } 895 896 return 0; 897 fail: 898 return -ENXIO; 899 } 900 901 /** 902 * lpfc_cmpl_els_flogi - Completion callback function for flogi 903 * @phba: pointer to lpfc hba data structure. 904 * @cmdiocb: pointer to lpfc command iocb data structure. 905 * @rspiocb: pointer to lpfc response iocb data structure. 906 * 907 * This routine is the top-level completion callback function for issuing 908 * a Fabric Login (FLOGI) command. If the response IOCB reported error, 909 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. 
If 910 * retry has been made (either immediately or delayed with lpfc_els_retry() 911 * returning 1), the command IOCB will be released and function returned. 912 * If the retry attempt has been given up (possibly reach the maximum 913 * number of retries), one additional decrement of ndlp reference shall be 914 * invoked before going out after releasing the command IOCB. This will 915 * actually release the remote node (Note, lpfc_els_free_iocb() will also 916 * invoke one decrement of ndlp reference count). If no error reported in 917 * the IOCB status, the command Port ID field is used to determine whether 918 * this is a point-to-point topology or a fabric topology: if the Port ID 919 * field is assigned, it is a fabric topology; otherwise, it is a 920 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or 921 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the 922 * specific topology completion conditions. 923 **/ 924 static void 925 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 926 struct lpfc_iocbq *rspiocb) 927 { 928 struct lpfc_vport *vport = cmdiocb->vport; 929 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 930 IOCB_t *irsp; 931 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp; 932 struct serv_parm *sp; 933 uint16_t fcf_index; 934 int rc; 935 u32 ulp_status, ulp_word4, tmo; 936 bool flogi_in_retry = false; 937 938 /* Check to see if link went down during discovery */ 939 if (lpfc_els_chk_latt(vport)) { 940 /* One additional decrement on node reference count to 941 * trigger the release of the node 942 */ 943 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 944 lpfc_nlp_put(ndlp); 945 goto out; 946 } 947 948 ulp_status = get_job_ulpstatus(phba, rspiocb); 949 ulp_word4 = get_job_word4(phba, rspiocb); 950 951 if (phba->sli_rev == LPFC_SLI_REV4) { 952 tmo = get_wqe_tmo(cmdiocb); 953 } else { 954 irsp = &rspiocb->iocb; 955 tmo = irsp->ulpTimeout; 956 } 957 958 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 959 "FLOGI cmpl: status:x%x/x%x state:x%x", 960 ulp_status, ulp_word4, 961 vport->port_state); 962 963 if (ulp_status) { 964 /* 965 * In case of FIP mode, perform roundrobin FCF failover 966 * due to new FCF discovery 967 */ 968 if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && 969 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 970 if (phba->link_state < LPFC_LINK_UP) 971 goto stop_rr_fcf_flogi; 972 if ((phba->fcoe_cvl_eventtag_attn == 973 phba->fcoe_cvl_eventtag) && 974 (ulp_status == IOSTAT_LOCAL_REJECT) && 975 ((ulp_word4 & IOERR_PARAM_MASK) == 976 IOERR_SLI_ABORTED)) 977 goto stop_rr_fcf_flogi; 978 else 979 phba->fcoe_cvl_eventtag_attn = 980 phba->fcoe_cvl_eventtag; 981 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 982 "2611 FLOGI failed on FCF (x%x), " 983 "status:x%x/x%x, tmo:x%x, perform " 984 "roundrobin FCF failover\n", 985 phba->fcf.current_rec.fcf_indx, 986 ulp_status, ulp_word4, tmo); 987 lpfc_sli4_set_fcf_flogi_fail(phba, 988 phba->fcf.current_rec.fcf_indx); 989 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 990 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 991 if (rc) 992 goto out; 993 } 994 995 stop_rr_fcf_flogi: 996 /* FLOGI failure */ 997 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 998 ((ulp_word4 & IOERR_PARAM_MASK) == 999 IOERR_LOOP_OPEN_FAILURE))) 1000 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1001 "2858 FLOGI failure Status:x%x/x%x TMO" 1002 ":x%x Data x%lx x%x\n", 1003 ulp_status, ulp_word4, tmo, 1004 phba->hba_flag, phba->fcf.fcf_flag); 1005 1006 /* Check for retry */ 1007 if 
(lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1008 /* Address a timing race with dev_loss. If dev_loss 1009 * is active on this FPort node, put the initial ref 1010 * count back to stop premature node release actions. 1011 */ 1012 lpfc_check_nlp_post_devloss(vport, ndlp); 1013 flogi_in_retry = true; 1014 goto out; 1015 } 1016 1017 /* The FLOGI will not be retried. If the FPort node is not 1018 * registered with the SCSI transport, remove the initial 1019 * reference to trigger node release. 1020 */ 1021 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && 1022 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) 1023 lpfc_nlp_put(ndlp); 1024 1025 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 1026 "0150 FLOGI failure Status:x%x/x%x " 1027 "xri x%x TMO:x%x refcnt %d\n", 1028 ulp_status, ulp_word4, cmdiocb->sli4_xritag, 1029 tmo, kref_read(&ndlp->kref)); 1030 1031 /* If this is not a loop open failure, bail out */ 1032 if (!(ulp_status == IOSTAT_LOCAL_REJECT && 1033 ((ulp_word4 & IOERR_PARAM_MASK) == 1034 IOERR_LOOP_OPEN_FAILURE))) { 1035 /* FLOGI failure */ 1036 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1037 "0100 FLOGI failure Status:x%x/x%x " 1038 "TMO:x%x\n", 1039 ulp_status, ulp_word4, tmo); 1040 goto flogifail; 1041 } 1042 1043 /* FLOGI failed, so there is no fabric */ 1044 clear_bit(FC_FABRIC, &vport->fc_flag); 1045 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 1046 clear_bit(FC_PT2PT_NO_NVME, &vport->fc_flag); 1047 1048 /* If private loop, then allow max outstanding els to be 1049 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 1050 * alpa map would take too long otherwise. 1051 */ 1052 if (phba->alpa_map[0] == 0) 1053 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1054 if ((phba->sli_rev == LPFC_SLI_REV4) && 1055 (!test_bit(FC_VFI_REGISTERED, &vport->fc_flag) || 1056 (vport->fc_prevDID != vport->fc_myDID) || 1057 phba->fc_topology_changed)) { 1058 if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) { 1059 if (phba->fc_topology_changed) { 1060 lpfc_unregister_fcf_prep(phba); 1061 clear_bit(FC_VFI_REGISTERED, 1062 &vport->fc_flag); 1063 phba->fc_topology_changed = 0; 1064 } else { 1065 lpfc_sli4_unreg_all_rpis(vport); 1066 } 1067 } 1068 1069 /* Do not register VFI if the driver aborted FLOGI */ 1070 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 1071 lpfc_issue_reg_vfi(vport); 1072 1073 goto out; 1074 } 1075 goto flogifail; 1076 } 1077 clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag); 1078 clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag); 1079 1080 /* 1081 * The FLOGI succeeded. Sync the data for the CPU before 1082 * accessing it. 1083 */ 1084 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1085 if (!prsp) 1086 goto out; 1087 if (!lpfc_is_els_acc_rsp(prsp)) 1088 goto out; 1089 sp = prsp->virt + sizeof(uint32_t); 1090 1091 /* FLOGI completes successfully */ 1092 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1093 "0101 FLOGI completes successfully, I/O tag:x%x " 1094 "xri x%x Data: x%x x%x x%x x%x x%x x%lx x%x %d\n", 1095 cmdiocb->iotag, cmdiocb->sli4_xritag, 1096 ulp_word4, sp->cmn.e_d_tov, 1097 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1098 vport->port_state, vport->fc_flag, 1099 sp->cmn.priority_tagging, kref_read(&ndlp->kref)); 1100 1101 /* reinitialize the VMID datastructure before returning */ 1102 if (lpfc_is_vmid_enabled(phba)) 1103 lpfc_reinit_vmid(vport); 1104 if (sp->cmn.priority_tagging) 1105 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | 1106 LPFC_VMID_TYPE_PRIO); 1107 1108 /* 1109 * Address a timing race with dev_loss. 
If dev_loss is active on 1110 * this FPort node, put the initial ref count back to stop premature 1111 * node release actions. 1112 */ 1113 lpfc_check_nlp_post_devloss(vport, ndlp); 1114 if (vport->port_state == LPFC_FLOGI) { 1115 /* 1116 * If Common Service Parameters indicate Nport 1117 * we are point to point, if Fport we are Fabric. 1118 */ 1119 if (sp->cmn.fPort) 1120 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, 1121 ulp_word4); 1122 else if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) 1123 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1124 else { 1125 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1126 "2831 FLOGI response with cleared Fabric " 1127 "bit fcf_index 0x%x " 1128 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " 1129 "Fabric Name " 1130 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 1131 phba->fcf.current_rec.fcf_indx, 1132 phba->fcf.current_rec.switch_name[0], 1133 phba->fcf.current_rec.switch_name[1], 1134 phba->fcf.current_rec.switch_name[2], 1135 phba->fcf.current_rec.switch_name[3], 1136 phba->fcf.current_rec.switch_name[4], 1137 phba->fcf.current_rec.switch_name[5], 1138 phba->fcf.current_rec.switch_name[6], 1139 phba->fcf.current_rec.switch_name[7], 1140 phba->fcf.current_rec.fabric_name[0], 1141 phba->fcf.current_rec.fabric_name[1], 1142 phba->fcf.current_rec.fabric_name[2], 1143 phba->fcf.current_rec.fabric_name[3], 1144 phba->fcf.current_rec.fabric_name[4], 1145 phba->fcf.current_rec.fabric_name[5], 1146 phba->fcf.current_rec.fabric_name[6], 1147 phba->fcf.current_rec.fabric_name[7]); 1148 1149 lpfc_nlp_put(ndlp); 1150 spin_lock_irq(&phba->hbalock); 1151 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1152 spin_unlock_irq(&phba->hbalock); 1153 clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1154 clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag); 1155 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1156 goto out; 1157 } 1158 if (!rc) { 1159 /* Mark the FCF discovery process done */ 1160 if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) 1161 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 1162 LOG_ELS, 1163 "2769 FLOGI to FCF (x%x) " 1164 "completed successfully\n", 1165 phba->fcf.current_rec.fcf_indx); 1166 spin_lock_irq(&phba->hbalock); 1167 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1168 spin_unlock_irq(&phba->hbalock); 1169 clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1170 clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag); 1171 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1172 goto out; 1173 } 1174 } else if (vport->port_state > LPFC_FLOGI && 1175 test_bit(FC_PT2PT, &vport->fc_flag)) { 1176 /* 1177 * In a p2p topology, it is possible that discovery has 1178 * already progressed, and this completion can be ignored. 1179 * Recheck the indicated topology. 1180 */ 1181 if (!sp->cmn.fPort) 1182 goto out; 1183 } 1184 1185 flogifail: 1186 spin_lock_irq(&phba->hbalock); 1187 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1188 spin_unlock_irq(&phba->hbalock); 1189 1190 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 1191 /* FLOGI failed, so just use loop map to make discovery list */ 1192 lpfc_disc_list_loopmap(vport); 1193 1194 /* Start discovery */ 1195 lpfc_disc_start(vport); 1196 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) || 1197 (((ulp_word4 & IOERR_PARAM_MASK) != 1198 IOERR_SLI_ABORTED) && 1199 ((ulp_word4 & IOERR_PARAM_MASK) != 1200 IOERR_SLI_DOWN))) && 1201 (phba->link_state != LPFC_CLEAR_LA)) { 1202 /* If FLOGI failed enable link interrupt. 
*/ 1203 lpfc_issue_clear_la(phba, vport); 1204 } 1205 out: 1206 if (!flogi_in_retry) 1207 clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); 1208 1209 lpfc_els_free_iocb(phba, cmdiocb); 1210 lpfc_nlp_put(ndlp); 1211 } 1212 1213 /** 1214 * lpfc_cmpl_els_link_down - Completion callback function for ELS command 1215 * aborted during a link down 1216 * @phba: pointer to lpfc hba data structure. 1217 * @cmdiocb: pointer to lpfc command iocb data structure. 1218 * @rspiocb: pointer to lpfc response iocb data structure. 1219 * 1220 */ 1221 static void 1222 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1223 struct lpfc_iocbq *rspiocb) 1224 { 1225 uint32_t *pcmd; 1226 uint32_t cmd; 1227 u32 ulp_status, ulp_word4; 1228 1229 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt; 1230 cmd = *pcmd; 1231 1232 ulp_status = get_job_ulpstatus(phba, rspiocb); 1233 ulp_word4 = get_job_word4(phba, rspiocb); 1234 1235 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1236 "6445 ELS completes after LINK_DOWN: " 1237 " Status %x/%x cmd x%x flg x%x\n", 1238 ulp_status, ulp_word4, cmd, 1239 cmdiocb->cmd_flag); 1240 1241 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) { 1242 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC; 1243 atomic_dec(&phba->fabric_iocb_count); 1244 } 1245 lpfc_els_free_iocb(phba, cmdiocb); 1246 } 1247 1248 /** 1249 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1250 * @vport: pointer to a host virtual N_Port data structure. 1251 * @ndlp: pointer to a node-list data structure. 1252 * @retry: number of retries to the command IOCB. 1253 * 1254 * This routine issues a Fabric Login (FLOGI) Request ELS command 1255 * for a @vport. The initiator service parameters are put into the payload 1256 * of the FLOGI Request IOCB and the top-level callback function pointer 1257 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback 1258 * function field. The lpfc_issue_fabric_iocb routine is invoked to send 1259 * out FLOGI ELS command with one outstanding fabric IOCB at a time. 1260 * 1261 * Note that the ndlp reference count will be incremented by 1 for holding the 1262 * ndlp and the reference to ndlp will be stored into the ndlp field of 1263 * the IOCB for the completion callback function to the FLOGI ELS command. 
1264 * 1265 * Return code 1266 * 0 - successfully issued flogi iocb for @vport 1267 * 1 - failed to issue flogi iocb for @vport 1268 **/ 1269 static int 1270 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1271 uint8_t retry) 1272 { 1273 struct lpfc_hba *phba = vport->phba; 1274 struct serv_parm *sp; 1275 union lpfc_wqe128 *wqe = NULL; 1276 IOCB_t *icmd = NULL; 1277 struct lpfc_iocbq *elsiocb; 1278 struct lpfc_iocbq defer_flogi_acc; 1279 u8 *pcmd, ct; 1280 uint16_t cmdsize; 1281 uint32_t tmo, did; 1282 int rc; 1283 1284 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1285 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1286 ndlp->nlp_DID, ELS_CMD_FLOGI); 1287 1288 if (!elsiocb) 1289 return 1; 1290 1291 wqe = &elsiocb->wqe; 1292 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 1293 icmd = &elsiocb->iocb; 1294 1295 /* For FLOGI request, remainder of payload is service parameters */ 1296 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 1297 pcmd += sizeof(uint32_t); 1298 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1299 sp = (struct serv_parm *) pcmd; 1300 1301 /* Setup CSPs accordingly for Fabric */ 1302 sp->cmn.e_d_tov = 0; 1303 sp->cmn.w2.r_a_tov = 0; 1304 sp->cmn.virtual_fabric_support = 0; 1305 sp->cls1.classValid = 0; 1306 if (sp->cmn.fcphLow < FC_PH3) 1307 sp->cmn.fcphLow = FC_PH3; 1308 if (sp->cmn.fcphHigh < FC_PH3) 1309 sp->cmn.fcphHigh = FC_PH3; 1310 1311 /* Determine if switch supports priority tagging */ 1312 if (phba->cfg_vmid_priority_tagging) { 1313 sp->cmn.priority_tagging = 1; 1314 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ 1315 if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0, 1316 sizeof(vport->lpfc_vmid_host_uuid))) { 1317 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, 1318 sizeof(phba->wwpn)); 1319 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, 1320 sizeof(phba->wwnn)); 1321 } 1322 } 1323 1324 if (phba->sli_rev == LPFC_SLI_REV4) { 1325 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1326 LPFC_SLI_INTF_IF_TYPE_0) { 1327 /* FLOGI needs to be 3 for WQE FCFI */ 1328 ct = SLI4_CT_FCFI; 1329 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 1330 1331 /* Set the fcfi to the fcfi we registered with */ 1332 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 1333 phba->fcf.fcfi); 1334 } 1335 1336 /* Can't do SLI4 class2 without support sequence coalescing */ 1337 sp->cls2.classValid = 0; 1338 sp->cls2.seqDelivery = 0; 1339 } else { 1340 /* Historical, setting sequential-delivery bit for SLI3 */ 1341 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; 1342 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 
1 : 0; 1343 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1344 sp->cmn.request_multiple_Nport = 1; 1345 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1346 icmd->ulpCt_h = 1; 1347 icmd->ulpCt_l = 0; 1348 } else { 1349 sp->cmn.request_multiple_Nport = 0; 1350 } 1351 1352 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1353 icmd->un.elsreq64.myID = 0; 1354 icmd->un.elsreq64.fl = 1; 1355 } 1356 } 1357 1358 tmo = phba->fc_ratov; 1359 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 1360 lpfc_set_disctmo(vport); 1361 phba->fc_ratov = tmo; 1362 1363 phba->fc_stat.elsXmitFLOGI++; 1364 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi; 1365 1366 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1367 "Issue FLOGI: opt:x%x", 1368 phba->sli3_options, 0, 0); 1369 1370 elsiocb->ndlp = lpfc_nlp_get(ndlp); 1371 if (!elsiocb->ndlp) { 1372 lpfc_els_free_iocb(phba, elsiocb); 1373 return 1; 1374 } 1375 1376 /* Avoid race with FLOGI completion and hba_flags. */ 1377 set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); 1378 set_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); 1379 1380 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1381 if (rc == IOCB_ERROR) { 1382 clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); 1383 clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); 1384 lpfc_els_free_iocb(phba, elsiocb); 1385 lpfc_nlp_put(ndlp); 1386 return 1; 1387 } 1388 1389 /* Clear external loopback plug detected flag */ 1390 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 1391 1392 /* Check for a deferred FLOGI ACC condition */ 1393 if (phba->defer_flogi_acc_flag) { 1394 /* lookup ndlp for received FLOGI */ 1395 ndlp = lpfc_findnode_did(vport, 0); 1396 if (!ndlp) 1397 return 0; 1398 1399 did = vport->fc_myDID; 1400 vport->fc_myDID = Fabric_DID; 1401 1402 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq)); 1403 1404 if (phba->sli_rev == LPFC_SLI_REV4) { 1405 bf_set(wqe_ctxt_tag, 1406 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1407 phba->defer_flogi_acc_rx_id); 1408 bf_set(wqe_rcvoxid, 1409 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com, 1410 phba->defer_flogi_acc_ox_id); 1411 } else { 1412 icmd = &defer_flogi_acc.iocb; 1413 icmd->ulpContext = phba->defer_flogi_acc_rx_id; 1414 icmd->unsli3.rcvsli3.ox_id = 1415 phba->defer_flogi_acc_ox_id; 1416 } 1417 1418 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1419 "3354 Xmit deferred FLOGI ACC: rx_id: x%x," 1420 " ox_id: x%x, hba_flag x%lx\n", 1421 phba->defer_flogi_acc_rx_id, 1422 phba->defer_flogi_acc_ox_id, phba->hba_flag); 1423 1424 /* Send deferred FLOGI ACC */ 1425 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, 1426 ndlp, NULL); 1427 1428 phba->defer_flogi_acc_flag = false; 1429 vport->fc_myDID = did; 1430 1431 /* Decrement ndlp reference count to indicate the node can be 1432 * released when other references are removed. 1433 */ 1434 lpfc_nlp_put(ndlp); 1435 } 1436 1437 return 0; 1438 } 1439 1440 /** 1441 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs 1442 * @phba: pointer to lpfc hba data structure. 1443 * 1444 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs 1445 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq 1446 * list and issues an abort IOCB commond on each outstanding IOCB that 1447 * contains a active Fabric_DID ndlp. Note that this function is to issue 1448 * the abort IOCB command on all the outstanding IOCBs, thus when this 1449 * function returns, it does not guarantee all the IOCBs are actually aborted. 
1450 * 1451 * Return code 1452 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1453 **/ 1454 int 1455 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1456 { 1457 struct lpfc_sli_ring *pring; 1458 struct lpfc_iocbq *iocb, *next_iocb; 1459 struct lpfc_nodelist *ndlp; 1460 u32 ulp_command; 1461 1462 /* Abort outstanding I/O on NPort <nlp_DID> */ 1463 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1464 "0201 Abort outstanding I/O on NPort x%x\n", 1465 Fabric_DID); 1466 1467 pring = lpfc_phba_elsring(phba); 1468 if (unlikely(!pring)) 1469 return -EIO; 1470 1471 /* 1472 * Check the txcmplq for an iocb that matches the nport the driver is 1473 * searching for. 1474 */ 1475 spin_lock_irq(&phba->hbalock); 1476 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1477 ulp_command = get_job_cmnd(phba, iocb); 1478 if (ulp_command == CMD_ELS_REQUEST64_CR) { 1479 ndlp = iocb->ndlp; 1480 if (ndlp && ndlp->nlp_DID == Fabric_DID) { 1481 if (test_bit(FC_PT2PT, &phba->pport->fc_flag) && 1482 !test_bit(FC_PT2PT_PLOGI, 1483 &phba->pport->fc_flag)) 1484 iocb->fabric_cmd_cmpl = 1485 lpfc_ignore_els_cmpl; 1486 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1487 NULL); 1488 } 1489 } 1490 } 1491 /* Make sure HBA is alive */ 1492 lpfc_issue_hb_tmo(phba); 1493 1494 spin_unlock_irq(&phba->hbalock); 1495 1496 return 0; 1497 } 1498 1499 /** 1500 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1501 * @vport: pointer to a host virtual N_Port data structure. 1502 * 1503 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1504 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1505 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1506 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1507 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1508 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1509 * @vport. 1510 * 1511 * Return code 1512 * 0 - failed to issue initial flogi for @vport 1513 * 1 - successfully issued initial flogi for @vport 1514 **/ 1515 int 1516 lpfc_initial_flogi(struct lpfc_vport *vport) 1517 { 1518 struct lpfc_nodelist *ndlp; 1519 1520 vport->port_state = LPFC_FLOGI; 1521 lpfc_set_disctmo(vport); 1522 1523 /* First look for the Fabric ndlp */ 1524 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1525 if (!ndlp) { 1526 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1527 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1528 if (!ndlp) 1529 return 0; 1530 /* Set the node type */ 1531 ndlp->nlp_type |= NLP_FABRIC; 1532 1533 /* Put ndlp onto node list */ 1534 lpfc_enqueue_node(vport, ndlp); 1535 } 1536 1537 /* Reset the Fabric flag, topology change may have happened */ 1538 clear_bit(FC_FABRIC, &vport->fc_flag); 1539 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1540 /* A node reference should be retained while registered with a 1541 * transport or dev-loss-evt work is pending. 1542 * Otherwise, decrement node reference to trigger release. 1543 */ 1544 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 1545 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 1546 lpfc_nlp_put(ndlp); 1547 return 0; 1548 } 1549 return 1; 1550 } 1551 1552 /** 1553 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1554 * @vport: pointer to a host virtual N_Port data structure. 1555 * 1556 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1557 * specified. 
It first searches for the ndlp with the Fabric_DID (0xfffffe) on
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it onto the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * and issue ELS PLOGIs, up to the number of discovery threads configured
 * for the @vport (@vport->cfg_discovery_threads). The function also
 * decrements the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%lx x%x\n",
			 vport->num_disc_nodes,
			 atomic_read(&vport->fc_plogi_cnt),
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (test_bit(FC_NLP_MORE, &vport->fc_flag))
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and
 * matches the WWPN of the N_Port logged into; 3) The @ndlp is not on the
 * vport list but does not have a WWPN assigned either. If the WWPN is
 * confirmed, the pointer to the @ndlp will be returned.
If the WWPN is not confirmed: 1646 * 1) if there is a node on vport list other than the @ndlp with the same 1647 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1648 * on that node to release the RPI associated with the node; 2) if there is 1649 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1650 * into, a new node shall be allocated (or activated). In either case, the 1651 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1652 * be released and the new_ndlp shall be put on to the vport node list and 1653 * its pointer returned as the confirmed node. 1654 * 1655 * Note that before the @ndlp got "released", the keepDID from not-matching 1656 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1657 * of the @ndlp. This is because the release of @ndlp is actually to put it 1658 * into an inactive state on the vport node list and the vport node list 1659 * management algorithm does not allow two node with a same DID. 1660 * 1661 * Return code 1662 * pointer to the PLOGI N_Port @ndlp 1663 **/ 1664 static struct lpfc_nodelist * 1665 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1666 struct lpfc_nodelist *ndlp) 1667 { 1668 struct lpfc_vport *vport = ndlp->vport; 1669 struct lpfc_nodelist *new_ndlp; 1670 struct serv_parm *sp; 1671 uint8_t name[sizeof(struct lpfc_name)]; 1672 uint32_t keepDID = 0, keep_nlp_flag = 0; 1673 int rc; 1674 uint32_t keep_new_nlp_flag = 0; 1675 uint16_t keep_nlp_state; 1676 u32 keep_nlp_fc4_type = 0; 1677 struct lpfc_nvme_rport *keep_nrport = NULL; 1678 unsigned long *active_rrqs_xri_bitmap = NULL; 1679 1680 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1681 memset(name, 0, sizeof(struct lpfc_name)); 1682 1683 /* Now we find out if the NPort we are logging into, matches the WWPN 1684 * we have for that ndlp. If not, we have some work to do. 1685 */ 1686 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1687 1688 /* return immediately if the WWPN matches ndlp */ 1689 if (new_ndlp == ndlp) 1690 return ndlp; 1691 1692 if (phba->sli_rev == LPFC_SLI_REV4) { 1693 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1694 GFP_KERNEL); 1695 if (active_rrqs_xri_bitmap) 1696 memset(active_rrqs_xri_bitmap, 0, 1697 phba->cfg_rrq_xri_bitmap_sz); 1698 } 1699 1700 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1701 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1702 "new_ndlp x%x x%x x%x\n", 1703 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1704 (new_ndlp ? new_ndlp->nlp_DID : 0), 1705 (new_ndlp ? new_ndlp->nlp_flag : 0), 1706 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1707 1708 if (!new_ndlp) { 1709 rc = memcmp(&ndlp->nlp_portname, name, 1710 sizeof(struct lpfc_name)); 1711 if (!rc) { 1712 if (active_rrqs_xri_bitmap) 1713 mempool_free(active_rrqs_xri_bitmap, 1714 phba->active_rrq_pool); 1715 return ndlp; 1716 } 1717 new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID); 1718 if (!new_ndlp) { 1719 if (active_rrqs_xri_bitmap) 1720 mempool_free(active_rrqs_xri_bitmap, 1721 phba->active_rrq_pool); 1722 return ndlp; 1723 } 1724 } else { 1725 if (phba->sli_rev == LPFC_SLI_REV4 && 1726 active_rrqs_xri_bitmap) 1727 memcpy(active_rrqs_xri_bitmap, 1728 new_ndlp->active_rrqs_xri_bitmap, 1729 phba->cfg_rrq_xri_bitmap_sz); 1730 1731 /* 1732 * Unregister from backend if not done yet. 
Could have been 1733 * skipped due to ADISC 1734 */ 1735 lpfc_nlp_unreg_node(vport, new_ndlp); 1736 } 1737 1738 keepDID = new_ndlp->nlp_DID; 1739 1740 /* At this point in this routine, we know new_ndlp will be 1741 * returned. however, any previous GID_FTs that were done 1742 * would have updated nlp_fc4_type in ndlp, so we must ensure 1743 * new_ndlp has the right value. 1744 */ 1745 if (test_bit(FC_FABRIC, &vport->fc_flag)) { 1746 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1747 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1748 } 1749 1750 lpfc_unreg_rpi(vport, new_ndlp); 1751 new_ndlp->nlp_DID = ndlp->nlp_DID; 1752 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1753 if (phba->sli_rev == LPFC_SLI_REV4) 1754 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1755 ndlp->active_rrqs_xri_bitmap, 1756 phba->cfg_rrq_xri_bitmap_sz); 1757 1758 /* Lock both ndlps */ 1759 spin_lock_irq(&ndlp->lock); 1760 spin_lock_irq(&new_ndlp->lock); 1761 keep_new_nlp_flag = new_ndlp->nlp_flag; 1762 keep_nlp_flag = ndlp->nlp_flag; 1763 new_ndlp->nlp_flag = ndlp->nlp_flag; 1764 1765 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1766 if (keep_new_nlp_flag & NLP_UNREG_INP) 1767 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1768 else 1769 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1770 1771 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1772 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1773 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1774 else 1775 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1776 1777 /* 1778 * Retain the DROPPED flag. This will take care of the init 1779 * refcount when affecting the state change 1780 */ 1781 if (keep_new_nlp_flag & NLP_DROPPED) 1782 new_ndlp->nlp_flag |= NLP_DROPPED; 1783 else 1784 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1785 1786 ndlp->nlp_flag = keep_new_nlp_flag; 1787 1788 /* if ndlp had NLP_UNREG_INP set, keep it */ 1789 if (keep_nlp_flag & NLP_UNREG_INP) 1790 ndlp->nlp_flag |= NLP_UNREG_INP; 1791 else 1792 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1793 1794 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1795 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1796 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1797 else 1798 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1799 1800 /* 1801 * Retain the DROPPED flag. This will take care of the init 1802 * refcount when affecting the state change 1803 */ 1804 if (keep_nlp_flag & NLP_DROPPED) 1805 ndlp->nlp_flag |= NLP_DROPPED; 1806 else 1807 ndlp->nlp_flag &= ~NLP_DROPPED; 1808 1809 spin_unlock_irq(&new_ndlp->lock); 1810 spin_unlock_irq(&ndlp->lock); 1811 1812 /* Set nlp_states accordingly */ 1813 keep_nlp_state = new_ndlp->nlp_state; 1814 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1815 1816 /* interchange the nvme remoteport structs */ 1817 keep_nrport = new_ndlp->nrport; 1818 new_ndlp->nrport = ndlp->nrport; 1819 1820 /* Move this back to NPR state */ 1821 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1822 /* The ndlp doesn't have a portname yet, but does have an 1823 * NPort ID. The new_ndlp portname matches the Rport's 1824 * portname. Reinstantiate the new_ndlp and reset the ndlp. 1825 */ 1826 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1827 "3179 PLOGI confirm NEW: %x %x\n", 1828 new_ndlp->nlp_DID, keepDID); 1829 1830 /* Two ndlps cannot have the same did on the nodelist. 1831 * The KeepDID and keep_nlp_fc4_type need to be swapped 1832 * because ndlp is inflight with no WWPN. 
1833 */ 1834 ndlp->nlp_DID = keepDID; 1835 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1836 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1837 if (phba->sli_rev == LPFC_SLI_REV4 && 1838 active_rrqs_xri_bitmap) 1839 memcpy(ndlp->active_rrqs_xri_bitmap, 1840 active_rrqs_xri_bitmap, 1841 phba->cfg_rrq_xri_bitmap_sz); 1842 1843 } else { 1844 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1845 "3180 PLOGI confirm SWAP: %x %x\n", 1846 new_ndlp->nlp_DID, keepDID); 1847 1848 lpfc_unreg_rpi(vport, ndlp); 1849 1850 /* The ndlp and new_ndlp both have WWPNs but are swapping 1851 * NPort Ids and attributes. 1852 */ 1853 ndlp->nlp_DID = keepDID; 1854 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1855 1856 if (phba->sli_rev == LPFC_SLI_REV4 && 1857 active_rrqs_xri_bitmap) 1858 memcpy(ndlp->active_rrqs_xri_bitmap, 1859 active_rrqs_xri_bitmap, 1860 phba->cfg_rrq_xri_bitmap_sz); 1861 1862 /* Since we are switching over to the new_ndlp, 1863 * reset the old ndlp state 1864 */ 1865 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1866 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1867 keep_nlp_state = NLP_STE_NPR_NODE; 1868 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1869 ndlp->nrport = keep_nrport; 1870 } 1871 1872 /* 1873 * If ndlp is not associated with any rport we can drop it here else 1874 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1875 */ 1876 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1877 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1878 1879 if (phba->sli_rev == LPFC_SLI_REV4 && 1880 active_rrqs_xri_bitmap) 1881 mempool_free(active_rrqs_xri_bitmap, 1882 phba->active_rrq_pool); 1883 1884 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1885 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1886 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1887 new_ndlp->nlp_fc4_type); 1888 1889 return new_ndlp; 1890 } 1891 1892 /** 1893 * lpfc_end_rscn - Check and handle more rscn for a vport 1894 * @vport: pointer to a host virtual N_Port data structure. 1895 * 1896 * This routine checks whether more Registration State Change 1897 * Notifications (RSCNs) came in while the discovery state machine was in 1898 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1899 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1900 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1901 * handling the RSCNs. 1902 **/ 1903 void 1904 lpfc_end_rscn(struct lpfc_vport *vport) 1905 { 1906 1907 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { 1908 /* 1909 * Check to see if more RSCNs came in while we were 1910 * processing this one. 1911 */ 1912 if (vport->fc_rscn_id_cnt || 1913 test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) 1914 lpfc_els_handle_rscn(vport); 1915 else 1916 clear_bit(FC_RSCN_MODE, &vport->fc_flag); 1917 } 1918 } 1919 1920 /** 1921 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1922 * @phba: pointer to lpfc hba data structure. 1923 * @cmdiocb: pointer to lpfc command iocb data structure. 1924 * @rspiocb: pointer to lpfc response iocb data structure. 1925 * 1926 * This routine will call the clear rrq function to free the rrq and 1927 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1928 * exist then the clear_rrq is still called because the rrq needs to 1929 * be freed. 
1930 **/ 1931 1932 static void 1933 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1934 struct lpfc_iocbq *rspiocb) 1935 { 1936 struct lpfc_vport *vport = cmdiocb->vport; 1937 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 1938 struct lpfc_node_rrq *rrq; 1939 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1940 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1941 1942 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1943 rrq = cmdiocb->context_un.rrq; 1944 cmdiocb->rsp_iocb = rspiocb; 1945 1946 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1947 "RRQ cmpl: status:x%x/x%x did:x%x", 1948 ulp_status, ulp_word4, 1949 get_job_els_rsp64_did(phba, cmdiocb)); 1950 1951 1952 /* rrq completes to NPort <nlp_DID> */ 1953 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1954 "2880 RRQ completes to DID x%x " 1955 "Data: x%x x%x x%x x%x x%x\n", 1956 ndlp->nlp_DID, ulp_status, ulp_word4, 1957 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid); 1958 1959 if (ulp_status) { 1960 /* Check for retry */ 1961 /* RRQ failed Don't print the vport to vport rjts */ 1962 if (ulp_status != IOSTAT_LS_RJT || 1963 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 1964 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 1965 (phba)->pport->cfg_log_verbose & LOG_ELS) 1966 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1967 "2881 RRQ failure DID:%06X Status:" 1968 "x%x/x%x\n", 1969 ndlp->nlp_DID, ulp_status, 1970 ulp_word4); 1971 } 1972 1973 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1974 lpfc_els_free_iocb(phba, cmdiocb); 1975 lpfc_nlp_put(ndlp); 1976 return; 1977 } 1978 /** 1979 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1980 * @phba: pointer to lpfc hba data structure. 1981 * @cmdiocb: pointer to lpfc command iocb data structure. 1982 * @rspiocb: pointer to lpfc response iocb data structure. 1983 * 1984 * This routine is the completion callback function for issuing the Port 1985 * Login (PLOGI) command. For PLOGI completion, there must be an active 1986 * ndlp on the vport node list that matches the remote node ID from the 1987 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1988 * ignored and command IOCB released. The PLOGI response IOCB status is 1989 * checked for error conditions. If there is error status reported, PLOGI 1990 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1991 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1992 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1993 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1994 * there are additional N_Port nodes with the vport that need to perform 1995 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1996 * PLOGIs. 
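 *
 * Illustrative sketch (not part of the driver): the "don't print vport to
 * vport rejects" filter used below keys off the LS_RJT reason code carried
 * in the upper half of the word4 value, so a hypothetical helper that
 * mirrors that test could read:
 *
 *	static bool example_quiet_plogi_rjt(u32 ulp_status, u32 ulp_word4)
 *	{
 *		return ulp_status == IOSTAT_LS_RJT &&
 *		       ((ulp_word4 >> 16) == LSRJT_INVALID_CMD ||
 *			(ulp_word4 >> 16) == LSRJT_UNABLE_TPC);
 *	}
 *
 * example_quiet_plogi_rjt() is hypothetical; the constants and the shift are
 * exactly the ones already used by the completion code below.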
1997 **/ 1998 static void 1999 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2000 struct lpfc_iocbq *rspiocb) 2001 { 2002 struct lpfc_vport *vport = cmdiocb->vport; 2003 IOCB_t *irsp; 2004 struct lpfc_nodelist *ndlp, *free_ndlp; 2005 struct lpfc_dmabuf *prsp; 2006 int disc; 2007 struct serv_parm *sp = NULL; 2008 u32 ulp_status, ulp_word4, did, iotag; 2009 bool release_node = false; 2010 2011 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2012 cmdiocb->rsp_iocb = rspiocb; 2013 2014 ulp_status = get_job_ulpstatus(phba, rspiocb); 2015 ulp_word4 = get_job_word4(phba, rspiocb); 2016 did = get_job_els_rsp64_did(phba, cmdiocb); 2017 2018 if (phba->sli_rev == LPFC_SLI_REV4) { 2019 iotag = get_wqe_reqtag(cmdiocb); 2020 } else { 2021 irsp = &rspiocb->iocb; 2022 iotag = irsp->ulpIoTag; 2023 } 2024 2025 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2026 "PLOGI cmpl: status:x%x/x%x did:x%x", 2027 ulp_status, ulp_word4, did); 2028 2029 ndlp = lpfc_findnode_did(vport, did); 2030 if (!ndlp) { 2031 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2032 "0136 PLOGI completes to NPort x%x " 2033 "with no ndlp. Data: x%x x%x x%x\n", 2034 did, ulp_status, ulp_word4, iotag); 2035 goto out_freeiocb; 2036 } 2037 2038 /* Since ndlp can be freed in the disc state machine, note if this node 2039 * is being used during discovery. 2040 */ 2041 spin_lock_irq(&ndlp->lock); 2042 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2043 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2044 spin_unlock_irq(&ndlp->lock); 2045 2046 /* PLOGI completes to NPort <nlp_DID> */ 2047 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2048 "0102 PLOGI completes to NPort x%06x " 2049 "IoTag x%x Data: x%x x%x x%x x%x x%x\n", 2050 ndlp->nlp_DID, iotag, 2051 ndlp->nlp_fc4_type, 2052 ulp_status, ulp_word4, 2053 disc, vport->num_disc_nodes); 2054 2055 /* Check to see if link went down during discovery */ 2056 if (lpfc_els_chk_latt(vport)) { 2057 spin_lock_irq(&ndlp->lock); 2058 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2059 spin_unlock_irq(&ndlp->lock); 2060 goto out; 2061 } 2062 2063 if (ulp_status) { 2064 /* Check for retry */ 2065 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2066 /* ELS command is being retried */ 2067 if (disc) { 2068 spin_lock_irq(&ndlp->lock); 2069 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2070 spin_unlock_irq(&ndlp->lock); 2071 } 2072 goto out; 2073 } 2074 /* PLOGI failed Don't print the vport to vport rjts */ 2075 if (ulp_status != IOSTAT_LS_RJT || 2076 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2077 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2078 (phba)->pport->cfg_log_verbose & LOG_ELS) 2079 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2080 "2753 PLOGI failure DID:%06X " 2081 "Status:x%x/x%x\n", 2082 ndlp->nlp_DID, ulp_status, 2083 ulp_word4); 2084 2085 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2086 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2087 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2088 NLP_EVT_CMPL_PLOGI); 2089 2090 /* If a PLOGI collision occurred, the node needs to continue 2091 * with the reglogin process. 2092 */ 2093 spin_lock_irq(&ndlp->lock); 2094 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2095 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2096 spin_unlock_irq(&ndlp->lock); 2097 goto out; 2098 } 2099 2100 /* No PLOGI collision and the node is not registered with the 2101 * scsi or nvme transport. It is no longer an active node. Just 2102 * start the device remove process. 
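 * The decision is latched in release_node while the node lock is held, and
 * the DEVICE_RM event is only posted after the lock is dropped, presumably
 * so the discovery state machine is not entered with ndlp->lock held.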
2103 */ 2104 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2105 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2106 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2107 release_node = true; 2108 } 2109 spin_unlock_irq(&ndlp->lock); 2110 2111 if (release_node) 2112 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2113 NLP_EVT_DEVICE_RM); 2114 } else { 2115 /* Good status, call state machine */ 2116 prsp = list_get_first(&cmdiocb->cmd_dmabuf->list, 2117 struct lpfc_dmabuf, list); 2118 if (!prsp) 2119 goto out; 2120 if (!lpfc_is_els_acc_rsp(prsp)) 2121 goto out; 2122 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2123 2124 sp = (struct serv_parm *)((u8 *)prsp->virt + 2125 sizeof(u32)); 2126 2127 ndlp->vmid_support = 0; 2128 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2129 (phba->cfg_vmid_priority_tagging && 2130 sp->cmn.priority_tagging)) { 2131 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2132 "4018 app_hdr_support %d tagging %d DID x%x\n", 2133 sp->cmn.app_hdr_support, 2134 sp->cmn.priority_tagging, 2135 ndlp->nlp_DID); 2136 /* if the dest port supports VMID, mark it in ndlp */ 2137 ndlp->vmid_support = 1; 2138 } 2139 2140 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2141 NLP_EVT_CMPL_PLOGI); 2142 } 2143 2144 if (disc && vport->num_disc_nodes) { 2145 /* Check to see if there are more PLOGIs to be sent */ 2146 lpfc_more_plogi(vport); 2147 2148 if (vport->num_disc_nodes == 0) { 2149 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 2150 2151 lpfc_can_disctmo(vport); 2152 lpfc_end_rscn(vport); 2153 } 2154 } 2155 2156 out: 2157 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2158 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2159 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2160 2161 out_freeiocb: 2162 /* Release the reference on the original I/O request. */ 2163 free_ndlp = cmdiocb->ndlp; 2164 2165 lpfc_els_free_iocb(phba, cmdiocb); 2166 lpfc_nlp_put(free_ndlp); 2167 return; 2168 } 2169 2170 /** 2171 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2172 * @vport: pointer to a host virtual N_Port data structure. 2173 * @did: destination port identifier. 2174 * @retry: number of retries to the command IOCB. 2175 * 2176 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2177 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2178 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2179 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2180 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2181 * 2182 * Note that the ndlp reference count will be incremented by 1 for holding 2183 * the ndlp and the reference to ndlp will be stored into the ndlp field 2184 * of the IOCB for the completion callback function to the PLOGI ELS command. 2185 * 2186 * Return code 2187 * 0 - Successfully issued a plogi for @vport 2188 * 1 - failed to issue a plogi for @vport 2189 **/ 2190 int 2191 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2192 { 2193 struct lpfc_hba *phba = vport->phba; 2194 struct serv_parm *sp; 2195 struct lpfc_nodelist *ndlp; 2196 struct lpfc_iocbq *elsiocb; 2197 uint8_t *pcmd; 2198 uint16_t cmdsize; 2199 int ret; 2200 2201 ndlp = lpfc_findnode_did(vport, did); 2202 if (!ndlp) 2203 return 1; 2204 2205 /* Defer the processing of the issue PLOGI until after the 2206 * outstanding UNREG_RPI mbox command completes, unless we 2207 * are going offline. 
This logic does not apply for Fabric DIDs 2208 */ 2209 if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && 2210 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2211 !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { 2212 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2213 "4110 Issue PLOGI x%x deferred " 2214 "on NPort x%x rpi x%x flg x%x Data:" 2215 " x%px\n", 2216 ndlp->nlp_defer_did, ndlp->nlp_DID, 2217 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); 2218 2219 /* We can only defer 1st PLOGI */ 2220 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2221 ndlp->nlp_defer_did = did; 2222 return 0; 2223 } 2224 2225 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2226 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2227 ELS_CMD_PLOGI); 2228 if (!elsiocb) 2229 return 1; 2230 2231 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2232 2233 /* For PLOGI request, remainder of payload is service parameters */ 2234 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2235 pcmd += sizeof(uint32_t); 2236 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2237 sp = (struct serv_parm *) pcmd; 2238 2239 /* 2240 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2241 * to device on remote loops work. 2242 */ 2243 if (test_bit(FC_FABRIC, &vport->fc_flag) && 2244 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag)) 2245 sp->cmn.altBbCredit = 1; 2246 2247 if (sp->cmn.fcphLow < FC_PH_4_3) 2248 sp->cmn.fcphLow = FC_PH_4_3; 2249 2250 if (sp->cmn.fcphHigh < FC_PH3) 2251 sp->cmn.fcphHigh = FC_PH3; 2252 2253 sp->cmn.valid_vendor_ver_level = 0; 2254 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2255 sp->cmn.bbRcvSizeMsb &= 0xF; 2256 2257 /* Check if the destination port supports VMID */ 2258 ndlp->vmid_support = 0; 2259 if (vport->vmid_priority_tagging) 2260 sp->cmn.priority_tagging = 1; 2261 else if (phba->cfg_vmid_app_header && 2262 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2263 sp->cmn.app_hdr_support = 1; 2264 2265 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2266 "Issue PLOGI: did:x%x", 2267 did, 0, 0); 2268 2269 /* If our firmware supports this feature, convey that 2270 * information to the target using the vendor specific field. 2271 */ 2272 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2273 sp->cmn.valid_vendor_ver_level = 1; 2274 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2275 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2276 } 2277 2278 phba->fc_stat.elsXmitPLOGI++; 2279 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2280 2281 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2282 "Issue PLOGI: did:x%x refcnt %d", 2283 did, kref_read(&ndlp->kref), 0); 2284 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2285 if (!elsiocb->ndlp) { 2286 lpfc_els_free_iocb(phba, elsiocb); 2287 return 1; 2288 } 2289 2290 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2291 if (ret) { 2292 lpfc_els_free_iocb(phba, elsiocb); 2293 lpfc_nlp_put(ndlp); 2294 return 1; 2295 } 2296 2297 return 0; 2298 } 2299 2300 /** 2301 * lpfc_cmpl_els_prli - Completion callback function for prli 2302 * @phba: pointer to lpfc hba data structure. 2303 * @cmdiocb: pointer to lpfc command iocb data structure. 2304 * @rspiocb: pointer to lpfc response iocb data structure. 2305 * 2306 * This routine is the completion callback function for a Process Login 2307 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2308 * status. If there is error status reported, PRLI retry shall be attempted 2309 * by invoking the lpfc_els_retry() routine. 
Otherwise, the state 2310 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2311 * ndlp to mark the PRLI completion. 2312 **/ 2313 static void 2314 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2315 struct lpfc_iocbq *rspiocb) 2316 { 2317 struct lpfc_vport *vport = cmdiocb->vport; 2318 struct lpfc_nodelist *ndlp; 2319 char *mode; 2320 u32 loglevel; 2321 u32 ulp_status; 2322 u32 ulp_word4; 2323 bool release_node = false; 2324 2325 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2326 cmdiocb->rsp_iocb = rspiocb; 2327 2328 ndlp = cmdiocb->ndlp; 2329 2330 ulp_status = get_job_ulpstatus(phba, rspiocb); 2331 ulp_word4 = get_job_word4(phba, rspiocb); 2332 2333 spin_lock_irq(&ndlp->lock); 2334 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2335 2336 /* Driver supports multiple FC4 types. Counters matter. */ 2337 vport->fc_prli_sent--; 2338 ndlp->fc4_prli_sent--; 2339 spin_unlock_irq(&ndlp->lock); 2340 2341 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2342 "PRLI cmpl: status:x%x/x%x did:x%x", 2343 ulp_status, ulp_word4, 2344 ndlp->nlp_DID); 2345 2346 /* PRLI completes to NPort <nlp_DID> */ 2347 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2348 "0103 PRLI completes to NPort x%06x " 2349 "Data: x%x x%x x%x x%x x%x\n", 2350 ndlp->nlp_DID, ulp_status, ulp_word4, 2351 vport->num_disc_nodes, ndlp->fc4_prli_sent, 2352 ndlp->fc4_xpt_flags); 2353 2354 /* Check to see if link went down during discovery */ 2355 if (lpfc_els_chk_latt(vport)) 2356 goto out; 2357 2358 if (ulp_status) { 2359 /* Check for retry */ 2360 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2361 /* ELS command is being retried */ 2362 goto out; 2363 } 2364 2365 /* If we don't send GFT_ID to Fabric, a PRLI error 2366 * could be expected. 2367 */ 2368 if (test_bit(FC_FABRIC, &vport->fc_flag) || 2369 vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) { 2370 mode = KERN_ERR; 2371 loglevel = LOG_TRACE_EVENT; 2372 } else { 2373 mode = KERN_INFO; 2374 loglevel = LOG_ELS; 2375 } 2376 2377 /* PRLI failed */ 2378 lpfc_printf_vlog(vport, mode, loglevel, 2379 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2380 "data: x%x x%x x%x\n", 2381 ndlp->nlp_DID, ulp_status, 2382 ulp_word4, ndlp->nlp_state, 2383 ndlp->fc4_prli_sent, ndlp->nlp_flag); 2384 2385 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2386 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2387 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2388 NLP_EVT_CMPL_PRLI); 2389 2390 /* The following condition catches an inflight transition 2391 * mismatch typically caused by an RSCN. Skip any 2392 * processing to allow recovery. 2393 */ 2394 if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 2395 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || 2396 (ndlp->nlp_state == NLP_STE_NPR_NODE && 2397 ndlp->nlp_flag & NLP_DELAY_TMO)) { 2398 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 2399 "2784 PRLI cmpl: Allow Node recovery " 2400 "DID x%06x nstate x%x nflag x%x\n", 2401 ndlp->nlp_DID, ndlp->nlp_state, 2402 ndlp->nlp_flag); 2403 goto out; 2404 } 2405 2406 /* 2407 * For P2P topology, retain the node so that PLOGI can be 2408 * attempted on it again. 2409 */ 2410 if (test_bit(FC_PT2PT, &vport->fc_flag)) 2411 goto out; 2412 2413 /* As long as this node is not registered with the SCSI 2414 * or NVMe transport and no other PRLIs are outstanding, 2415 * it is no longer an active node. Otherwise devloss 2416 * handles the final cleanup. 
2417 */ 2418 spin_lock_irq(&ndlp->lock); 2419 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2420 !ndlp->fc4_prli_sent) { 2421 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2422 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2423 release_node = true; 2424 } 2425 spin_unlock_irq(&ndlp->lock); 2426 2427 if (release_node) 2428 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2429 NLP_EVT_DEVICE_RM); 2430 } else { 2431 /* Good status, call state machine. However, if another 2432 * PRLI is outstanding, don't call the state machine 2433 * because final disposition to Mapped or Unmapped is 2434 * completed there. 2435 */ 2436 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2437 NLP_EVT_CMPL_PRLI); 2438 } 2439 2440 out: 2441 lpfc_els_free_iocb(phba, cmdiocb); 2442 lpfc_nlp_put(ndlp); 2443 return; 2444 } 2445 2446 /** 2447 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2448 * @vport: pointer to a host virtual N_Port data structure. 2449 * @ndlp: pointer to a node-list data structure. 2450 * @retry: number of retries to the command IOCB. 2451 * 2452 * This routine issues a Process Login (PRLI) ELS command for the 2453 * @vport. The PRLI service parameters are set up in the payload of the 2454 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2455 * is put to the IOCB completion callback func field before invoking the 2456 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2457 * 2458 * Note that the ndlp reference count will be incremented by 1 for holding the 2459 * ndlp and the reference to ndlp will be stored into the ndlp field of 2460 * the IOCB for the completion callback function to the PRLI ELS command. 2461 * 2462 * Return code 2463 * 0 - successfully issued prli iocb command for @vport 2464 * 1 - failed to issue prli iocb command for @vport 2465 **/ 2466 int 2467 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2468 uint8_t retry) 2469 { 2470 int rc = 0; 2471 struct lpfc_hba *phba = vport->phba; 2472 PRLI *npr; 2473 struct lpfc_nvme_prli *npr_nvme; 2474 struct lpfc_iocbq *elsiocb; 2475 uint8_t *pcmd; 2476 uint16_t cmdsize; 2477 u32 local_nlp_type, elscmd; 2478 2479 /* 2480 * If we are in RSCN mode, the FC4 types supported from a 2481 * previous GFT_ID command may not be accurate. So, if we 2482 * are a NVME Initiator, always look for the possibility of 2483 * the remote NPort beng a NVME Target. 2484 */ 2485 if (phba->sli_rev == LPFC_SLI_REV4 && 2486 test_bit(FC_RSCN_MODE, &vport->fc_flag) && 2487 vport->nvmei_support) 2488 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2489 local_nlp_type = ndlp->nlp_fc4_type; 2490 2491 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2492 * fields here before any of them can complete. 2493 */ 2494 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2495 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2496 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2497 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2498 ndlp->nvme_fb_size = 0; 2499 2500 send_next_prli: 2501 if (local_nlp_type & NLP_FC4_FCP) { 2502 /* Payload is 4 + 16 = 20 x14 bytes. */ 2503 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2504 elscmd = ELS_CMD_PRLI; 2505 } else if (local_nlp_type & NLP_FC4_NVME) { 2506 /* Payload is 4 + 20 = 24 x18 bytes. 
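 * (0x18 = 24 decimal: the 4-byte ELS command word followed by the 20-byte
 * NVMe PRLI parameter page, mirroring the 4 + 16 = 20 (0x14) FCP case above.)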
*/ 2507 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2508 elscmd = ELS_CMD_NVMEPRLI; 2509 } else { 2510 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2511 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2512 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2513 return 1; 2514 } 2515 2516 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2517 * FC4 type, implicitly LOGO. 2518 */ 2519 if (phba->sli_rev == LPFC_SLI_REV3 && 2520 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2521 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2522 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2523 ndlp->nlp_type); 2524 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2525 return 1; 2526 } 2527 2528 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2529 ndlp->nlp_DID, elscmd); 2530 if (!elsiocb) 2531 return 1; 2532 2533 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2534 2535 /* For PRLI request, remainder of payload is service parameters */ 2536 memset(pcmd, 0, cmdsize); 2537 2538 if (local_nlp_type & NLP_FC4_FCP) { 2539 /* Remainder of payload is FCP PRLI parameter page. 2540 * Note: this data structure is defined as 2541 * BE/LE in the structure definition so no 2542 * byte swap call is made. 2543 */ 2544 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2545 pcmd += sizeof(uint32_t); 2546 npr = (PRLI *)pcmd; 2547 2548 /* 2549 * If our firmware version is 3.20 or later, 2550 * set the following bits for FC-TAPE support. 2551 */ 2552 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2553 npr->ConfmComplAllowed = 1; 2554 npr->Retry = 1; 2555 npr->TaskRetryIdReq = 1; 2556 } 2557 npr->estabImagePair = 1; 2558 npr->readXferRdyDis = 1; 2559 if (vport->cfg_first_burst_size) 2560 npr->writeXferRdyDis = 1; 2561 2562 /* For FCP support */ 2563 npr->prliType = PRLI_FCP_TYPE; 2564 npr->initiatorFunc = 1; 2565 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2566 2567 /* Remove FCP type - processed. */ 2568 local_nlp_type &= ~NLP_FC4_FCP; 2569 } else if (local_nlp_type & NLP_FC4_NVME) { 2570 /* Remainder of payload is NVME PRLI parameter page. 2571 * This data structure is the newer definition that 2572 * uses bf macros so a byte swap is required. 2573 */ 2574 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2575 pcmd += sizeof(uint32_t); 2576 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2577 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2578 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2579 if (phba->nsler) { 2580 bf_set(prli_nsler, npr_nvme, 1); 2581 bf_set(prli_conf, npr_nvme, 1); 2582 } 2583 2584 /* Only initiators request first burst. */ 2585 if ((phba->cfg_nvme_enable_fb) && 2586 !phba->nvmet_support) 2587 bf_set(prli_fba, npr_nvme, 1); 2588 2589 if (phba->nvmet_support) { 2590 bf_set(prli_tgt, npr_nvme, 1); 2591 bf_set(prli_disc, npr_nvme, 1); 2592 } else { 2593 bf_set(prli_init, npr_nvme, 1); 2594 bf_set(prli_conf, npr_nvme, 1); 2595 } 2596 2597 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2598 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2599 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2600 2601 /* Remove NVME type - processed. 
*/ 2602 local_nlp_type &= ~NLP_FC4_NVME; 2603 } 2604 2605 phba->fc_stat.elsXmitPRLI++; 2606 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2607 2608 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2609 "Issue PRLI: did:x%x refcnt %d", 2610 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2611 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2612 if (!elsiocb->ndlp) { 2613 lpfc_els_free_iocb(phba, elsiocb); 2614 return 1; 2615 } 2616 2617 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2618 if (rc == IOCB_ERROR) { 2619 lpfc_els_free_iocb(phba, elsiocb); 2620 lpfc_nlp_put(ndlp); 2621 return 1; 2622 } 2623 2624 /* The vport counters are used for lpfc_scan_finished, but 2625 * the ndlp is used to track outstanding PRLIs for different 2626 * FC4 types. 2627 */ 2628 spin_lock_irq(&ndlp->lock); 2629 ndlp->nlp_flag |= NLP_PRLI_SND; 2630 vport->fc_prli_sent++; 2631 ndlp->fc4_prli_sent++; 2632 spin_unlock_irq(&ndlp->lock); 2633 2634 /* The driver supports 2 FC4 types. Make sure 2635 * a PRLI is issued for all types before exiting. 2636 */ 2637 if (phba->sli_rev == LPFC_SLI_REV4 && 2638 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2639 goto send_next_prli; 2640 else 2641 return 0; 2642 } 2643 2644 /** 2645 * lpfc_rscn_disc - Perform rscn discovery for a vport 2646 * @vport: pointer to a host virtual N_Port data structure. 2647 * 2648 * This routine performs Registration State Change Notification (RSCN) 2649 * discovery for a @vport. If the @vport's node port recovery count is not 2650 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2651 * the nodes that need recovery. If none of the PLOGI were needed through 2652 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2653 * invoked to check and handle possible more RSCN came in during the period 2654 * of processing the current ones. 2655 **/ 2656 static void 2657 lpfc_rscn_disc(struct lpfc_vport *vport) 2658 { 2659 lpfc_can_disctmo(vport); 2660 2661 /* RSCN discovery */ 2662 /* go thru NPR nodes and issue ELS PLOGIs */ 2663 if (atomic_read(&vport->fc_npr_cnt)) 2664 if (lpfc_els_disc_plogi(vport)) 2665 return; 2666 2667 lpfc_end_rscn(vport); 2668 } 2669 2670 /** 2671 * lpfc_adisc_done - Complete the adisc phase of discovery 2672 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2673 * 2674 * This function is called when the final ADISC is completed during discovery. 2675 * This function handles clearing link attention or issuing reg_vpi depending 2676 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2677 * discovery. 2678 * This function is called with no locks held. 2679 **/ 2680 static void 2681 lpfc_adisc_done(struct lpfc_vport *vport) 2682 { 2683 struct lpfc_hba *phba = vport->phba; 2684 2685 /* 2686 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2687 * and continue discovery. 2688 */ 2689 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2690 !test_bit(FC_RSCN_MODE, &vport->fc_flag) && 2691 (phba->sli_rev < LPFC_SLI_REV4)) { 2692 2693 /* 2694 * If link is down, clear_la and reg_vpi will be done after 2695 * flogi following a link up event 2696 */ 2697 if (!lpfc_is_link_up(phba)) 2698 return; 2699 2700 /* The ADISCs are complete. Doesn't matter if they 2701 * succeeded or failed because the ADISC completion 2702 * routine guarantees to call the state machine and 2703 * the RPI is either unregistered (failed ADISC response) 2704 * or the RPI is still valid and the node is marked 2705 * mapped for a target. 
The exchanges should be in the 2706 * correct state. This code is specific to SLI3. 2707 */ 2708 lpfc_issue_clear_la(phba, vport); 2709 lpfc_issue_reg_vpi(phba, vport); 2710 return; 2711 } 2712 /* 2713 * For SLI2, we need to set port_state to READY 2714 * and continue discovery. 2715 */ 2716 if (vport->port_state < LPFC_VPORT_READY) { 2717 /* If we get here, there is nothing to ADISC */ 2718 lpfc_issue_clear_la(phba, vport); 2719 if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) { 2720 vport->num_disc_nodes = 0; 2721 /* go thru NPR list, issue ELS PLOGIs */ 2722 if (atomic_read(&vport->fc_npr_cnt)) 2723 lpfc_els_disc_plogi(vport); 2724 if (!vport->num_disc_nodes) { 2725 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 2726 lpfc_can_disctmo(vport); 2727 lpfc_end_rscn(vport); 2728 } 2729 } 2730 vport->port_state = LPFC_VPORT_READY; 2731 } else 2732 lpfc_rscn_disc(vport); 2733 } 2734 2735 /** 2736 * lpfc_more_adisc - Issue more adisc as needed 2737 * @vport: pointer to a host virtual N_Port data structure. 2738 * 2739 * This routine determines whether there are more ndlps on a @vport 2740 * node list need to have Address Discover (ADISC) issued. If so, it will 2741 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2742 * remaining nodes which need to have ADISC sent. 2743 **/ 2744 void 2745 lpfc_more_adisc(struct lpfc_vport *vport) 2746 { 2747 if (vport->num_disc_nodes) 2748 vport->num_disc_nodes--; 2749 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2750 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2751 "0210 Continue discovery with %d ADISCs to go " 2752 "Data: x%x x%lx x%x\n", 2753 vport->num_disc_nodes, 2754 atomic_read(&vport->fc_adisc_cnt), 2755 vport->fc_flag, vport->port_state); 2756 /* Check to see if there are more ADISCs to be sent */ 2757 if (test_bit(FC_NLP_MORE, &vport->fc_flag)) { 2758 lpfc_set_disctmo(vport); 2759 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2760 lpfc_els_disc_adisc(vport); 2761 } 2762 if (!vport->num_disc_nodes) 2763 lpfc_adisc_done(vport); 2764 return; 2765 } 2766 2767 /** 2768 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2769 * @phba: pointer to lpfc hba data structure. 2770 * @cmdiocb: pointer to lpfc command iocb data structure. 2771 * @rspiocb: pointer to lpfc response iocb data structure. 2772 * 2773 * This routine is the completion function for issuing the Address Discover 2774 * (ADISC) command. It first checks to see whether link went down during 2775 * the discovery process. If so, the node will be marked as node port 2776 * recovery for issuing discover IOCB by the link attention handler and 2777 * exit. Otherwise, the response status is checked. If error was reported 2778 * in the response status, the ADISC command shall be retried by invoking 2779 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2780 * the response status, the state machine is invoked to set transition 2781 * with respect to NLP_EVT_CMPL_ADISC event. 
2782 **/ 2783 static void 2784 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2785 struct lpfc_iocbq *rspiocb) 2786 { 2787 struct lpfc_vport *vport = cmdiocb->vport; 2788 IOCB_t *irsp; 2789 struct lpfc_nodelist *ndlp; 2790 int disc; 2791 u32 ulp_status, ulp_word4, tmo, iotag; 2792 bool release_node = false; 2793 2794 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2795 cmdiocb->rsp_iocb = rspiocb; 2796 2797 ndlp = cmdiocb->ndlp; 2798 2799 ulp_status = get_job_ulpstatus(phba, rspiocb); 2800 ulp_word4 = get_job_word4(phba, rspiocb); 2801 2802 if (phba->sli_rev == LPFC_SLI_REV4) { 2803 tmo = get_wqe_tmo(cmdiocb); 2804 iotag = get_wqe_reqtag(cmdiocb); 2805 } else { 2806 irsp = &rspiocb->iocb; 2807 tmo = irsp->ulpTimeout; 2808 iotag = irsp->ulpIoTag; 2809 } 2810 2811 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2812 "ADISC cmpl: status:x%x/x%x did:x%x", 2813 ulp_status, ulp_word4, 2814 ndlp->nlp_DID); 2815 2816 /* Since ndlp can be freed in the disc state machine, note if this node 2817 * is being used during discovery. 2818 */ 2819 spin_lock_irq(&ndlp->lock); 2820 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2821 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2822 spin_unlock_irq(&ndlp->lock); 2823 /* ADISC completes to NPort <nlp_DID> */ 2824 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2825 "0104 ADISC completes to NPort x%x " 2826 "IoTag x%x Data: x%x x%x x%x x%x x%x\n", 2827 ndlp->nlp_DID, iotag, 2828 ulp_status, ulp_word4, 2829 tmo, disc, vport->num_disc_nodes); 2830 2831 /* Check to see if link went down during discovery */ 2832 if (lpfc_els_chk_latt(vport)) { 2833 spin_lock_irq(&ndlp->lock); 2834 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2835 spin_unlock_irq(&ndlp->lock); 2836 goto out; 2837 } 2838 2839 if (ulp_status) { 2840 /* Check for retry */ 2841 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2842 /* ELS command is being retried */ 2843 if (disc) { 2844 spin_lock_irq(&ndlp->lock); 2845 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2846 spin_unlock_irq(&ndlp->lock); 2847 lpfc_set_disctmo(vport); 2848 } 2849 goto out; 2850 } 2851 /* ADISC failed */ 2852 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2853 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2854 ndlp->nlp_DID, ulp_status, 2855 ulp_word4); 2856 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2857 NLP_EVT_CMPL_ADISC); 2858 2859 /* As long as this node is not registered with the SCSI or NVMe 2860 * transport, it is no longer an active node. Otherwise 2861 * devloss handles the final cleanup. 2862 */ 2863 spin_lock_irq(&ndlp->lock); 2864 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2865 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2866 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2867 release_node = true; 2868 } 2869 spin_unlock_irq(&ndlp->lock); 2870 2871 if (release_node) 2872 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2873 NLP_EVT_DEVICE_RM); 2874 } else 2875 /* Good status, call state machine */ 2876 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2877 NLP_EVT_CMPL_ADISC); 2878 2879 /* Check to see if there are more ADISCs to be sent */ 2880 if (disc && vport->num_disc_nodes) 2881 lpfc_more_adisc(vport); 2882 out: 2883 lpfc_els_free_iocb(phba, cmdiocb); 2884 lpfc_nlp_put(ndlp); 2885 return; 2886 } 2887 2888 /** 2889 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2890 * @vport: pointer to a virtual N_Port data structure. 2891 * @ndlp: pointer to a node-list data structure. 2892 * @retry: number of retries to the command IOCB. 
2893 * 2894 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2895 * @vport. It prepares the payload of the ADISC ELS command, updates the 2896 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2897 * to issue the ADISC ELS command. 2898 * 2899 * Note that the ndlp reference count will be incremented by 1 for holding the 2900 * ndlp and the reference to ndlp will be stored into the ndlp field of 2901 * the IOCB for the completion callback function to the ADISC ELS command. 2902 * 2903 * Return code 2904 * 0 - successfully issued adisc 2905 * 1 - failed to issue adisc 2906 **/ 2907 int 2908 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2909 uint8_t retry) 2910 { 2911 int rc = 0; 2912 struct lpfc_hba *phba = vport->phba; 2913 ADISC *ap; 2914 struct lpfc_iocbq *elsiocb; 2915 uint8_t *pcmd; 2916 uint16_t cmdsize; 2917 2918 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2919 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2920 ndlp->nlp_DID, ELS_CMD_ADISC); 2921 if (!elsiocb) 2922 return 1; 2923 2924 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2925 2926 /* For ADISC request, remainder of payload is service parameters */ 2927 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2928 pcmd += sizeof(uint32_t); 2929 2930 /* Fill in ADISC payload */ 2931 ap = (ADISC *) pcmd; 2932 ap->hardAL_PA = phba->fc_pref_ALPA; 2933 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2934 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2935 ap->DID = be32_to_cpu(vport->fc_myDID); 2936 2937 phba->fc_stat.elsXmitADISC++; 2938 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; 2939 spin_lock_irq(&ndlp->lock); 2940 ndlp->nlp_flag |= NLP_ADISC_SND; 2941 spin_unlock_irq(&ndlp->lock); 2942 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2943 if (!elsiocb->ndlp) { 2944 lpfc_els_free_iocb(phba, elsiocb); 2945 goto err; 2946 } 2947 2948 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2949 "Issue ADISC: did:x%x refcnt %d", 2950 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2951 2952 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2953 if (rc == IOCB_ERROR) { 2954 lpfc_els_free_iocb(phba, elsiocb); 2955 lpfc_nlp_put(ndlp); 2956 goto err; 2957 } 2958 2959 return 0; 2960 2961 err: 2962 spin_lock_irq(&ndlp->lock); 2963 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2964 spin_unlock_irq(&ndlp->lock); 2965 return 1; 2966 } 2967 2968 /** 2969 * lpfc_cmpl_els_logo - Completion callback function for logo 2970 * @phba: pointer to lpfc hba data structure. 2971 * @cmdiocb: pointer to lpfc command iocb data structure. 2972 * @rspiocb: pointer to lpfc response iocb data structure. 2973 * 2974 * This routine is the completion function for issuing the ELS Logout (LOGO) 2975 * command. If no error status was reported from the LOGO response, the 2976 * state machine of the associated ndlp shall be invoked for transition with 2977 * respect to NLP_EVT_CMPL_LOGO event. 
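 *
 * Illustrative sketch (not from the driver): because this completion wakes
 * ndlp->logo_waitq when NLP_WAIT_FOR_LOGO is set in save_flags, a
 * hypothetical caller that must block until the LOGO finishes could do:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *
 *	ndlp->logo_waitq = &wq;
 *	spin_lock_irq(&ndlp->lock);
 *	ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
 *	spin_unlock_irq(&ndlp->lock);
 *	if (!lpfc_issue_els_logo(vport, ndlp, 0))
 *		wait_event_timeout(wq,
 *				   !(ndlp->save_flags & NLP_WAIT_FOR_LOGO),
 *				   msecs_to_jiffies(30000));
 *
 * The on-stack wait queue and the 30 second bound above are assumptions made
 * for the sketch, not requirements imposed by this routine.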
2978 **/ 2979 static void 2980 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2981 struct lpfc_iocbq *rspiocb) 2982 { 2983 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2984 struct lpfc_vport *vport = ndlp->vport; 2985 IOCB_t *irsp; 2986 unsigned long flags; 2987 uint32_t skip_recovery = 0; 2988 int wake_up_waiter = 0; 2989 u32 ulp_status; 2990 u32 ulp_word4; 2991 u32 tmo, iotag; 2992 2993 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2994 cmdiocb->rsp_iocb = rspiocb; 2995 2996 ulp_status = get_job_ulpstatus(phba, rspiocb); 2997 ulp_word4 = get_job_word4(phba, rspiocb); 2998 2999 if (phba->sli_rev == LPFC_SLI_REV4) { 3000 tmo = get_wqe_tmo(cmdiocb); 3001 iotag = get_wqe_reqtag(cmdiocb); 3002 } else { 3003 irsp = &rspiocb->iocb; 3004 tmo = irsp->ulpTimeout; 3005 iotag = irsp->ulpIoTag; 3006 } 3007 3008 spin_lock_irq(&ndlp->lock); 3009 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3010 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 3011 wake_up_waiter = 1; 3012 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 3013 } 3014 spin_unlock_irq(&ndlp->lock); 3015 3016 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3017 "LOGO cmpl: status:x%x/x%x did:x%x", 3018 ulp_status, ulp_word4, 3019 ndlp->nlp_DID); 3020 3021 /* LOGO completes to NPort <nlp_DID> */ 3022 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3023 "0105 LOGO completes to NPort x%x " 3024 "IoTag x%x refcnt %d nflags x%x xflags x%x " 3025 "Data: x%x x%x x%x x%x\n", 3026 ndlp->nlp_DID, iotag, 3027 kref_read(&ndlp->kref), ndlp->nlp_flag, 3028 ndlp->fc4_xpt_flags, ulp_status, ulp_word4, 3029 tmo, vport->num_disc_nodes); 3030 3031 if (lpfc_els_chk_latt(vport)) { 3032 skip_recovery = 1; 3033 goto out; 3034 } 3035 3036 /* The LOGO will not be retried on failure. A LOGO was 3037 * issued to the remote rport and a ACC or RJT or no Answer are 3038 * all acceptable. Note the failure and move forward with 3039 * discovery. The PLOGI will retry. 3040 */ 3041 if (ulp_status) { 3042 /* LOGO failed */ 3043 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3044 "2756 LOGO failure, No Retry DID:%06X " 3045 "Status:x%x/x%x\n", 3046 ndlp->nlp_DID, ulp_status, 3047 ulp_word4); 3048 3049 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 3050 skip_recovery = 1; 3051 } 3052 3053 /* Call state machine. This will unregister the rpi if needed. */ 3054 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3055 3056 if (skip_recovery) 3057 goto out; 3058 3059 /* The driver sets this flag for an NPIV instance that doesn't want to 3060 * log into the remote port. 3061 */ 3062 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3063 spin_lock_irq(&ndlp->lock); 3064 if (phba->sli_rev == LPFC_SLI_REV4) 3065 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3066 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3067 spin_unlock_irq(&ndlp->lock); 3068 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3069 NLP_EVT_DEVICE_RM); 3070 goto out_rsrc_free; 3071 } 3072 3073 out: 3074 /* At this point, the LOGO processing is complete. NOTE: For a 3075 * pt2pt topology, we are assuming the NPortID will only change 3076 * on link up processing. For a LOGO / PLOGI initiated by the 3077 * Initiator, we are assuming the NPortID is not going to change. 3078 */ 3079 3080 if (wake_up_waiter && ndlp->logo_waitq) 3081 wake_up(ndlp->logo_waitq); 3082 /* 3083 * If the node is a target, the handling attempts to recover the port. 3084 * For any other port type, the rpi is unregistered as an implicit 3085 * LOGO. 
3086 */
3087 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3088 skip_recovery == 0) {
3089 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3090 spin_lock_irqsave(&ndlp->lock, flags);
3091 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3092 spin_unlock_irqrestore(&ndlp->lock, flags);
3093
3094 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3095 "3187 LOGO completes to NPort x%x: Start "
3096 "Recovery Data: x%x x%x x%x x%x\n",
3097 ndlp->nlp_DID, ulp_status,
3098 ulp_word4, tmo,
3099 vport->num_disc_nodes);
3100
3101 lpfc_els_free_iocb(phba, cmdiocb);
3102 lpfc_nlp_put(ndlp);
3103
3104 lpfc_disc_start(vport);
3105 return;
3106 }
3107
3108 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3109 * driver sends a LOGO to the rport to clean up. For fabric and
3110 * initiator ports, clean up the node as long as the node is not
3111 * registered with the transport.
3112 */
3113 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3114 spin_lock_irq(&ndlp->lock);
3115 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3116 spin_unlock_irq(&ndlp->lock);
3117 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3118 NLP_EVT_DEVICE_RM);
3119 }
3120 out_rsrc_free:
3121 /* Driver is done with the I/O. */
3122 lpfc_els_free_iocb(phba, cmdiocb);
3123 lpfc_nlp_put(ndlp);
3124 }
3125
3126 /**
3127 * lpfc_issue_els_logo - Issue a logo to a node on a vport
3128 * @vport: pointer to a virtual N_Port data structure.
3129 * @ndlp: pointer to a node-list data structure.
3130 * @retry: number of retries to the command IOCB.
3131 *
3132 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3133 * to a remote node, referred to by an @ndlp on a @vport. It constructs the
3134 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3135 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3136 *
3137 * Note that the ndlp reference count will be incremented by 1 for holding the
3138 * ndlp and the reference to ndlp will be stored into the ndlp field of
3139 * the IOCB for the completion callback function to the LOGO ELS command.
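 *
 * Illustrative sketch (hypothetical caller, not taken from the driver): as
 * noted below, callers unregister the RPI before sending the LOGO, so a
 * typical sequence is simply:
 *
 *	lpfc_unreg_rpi(vport, ndlp);
 *	rc = lpfc_issue_els_logo(vport, ndlp, 0);
 *
 * where rc follows the return codes documented below.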
3140 * 3141 * Callers of this routine are expected to unregister the RPI first 3142 * 3143 * Return code 3144 * 0 - successfully issued logo 3145 * 1 - failed to issue logo 3146 **/ 3147 int 3148 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3149 uint8_t retry) 3150 { 3151 struct lpfc_hba *phba = vport->phba; 3152 struct lpfc_iocbq *elsiocb; 3153 uint8_t *pcmd; 3154 uint16_t cmdsize; 3155 int rc; 3156 3157 spin_lock_irq(&ndlp->lock); 3158 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3159 spin_unlock_irq(&ndlp->lock); 3160 return 0; 3161 } 3162 spin_unlock_irq(&ndlp->lock); 3163 3164 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3165 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3166 ndlp->nlp_DID, ELS_CMD_LOGO); 3167 if (!elsiocb) 3168 return 1; 3169 3170 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3171 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3172 pcmd += sizeof(uint32_t); 3173 3174 /* Fill in LOGO payload */ 3175 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3176 pcmd += sizeof(uint32_t); 3177 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3178 3179 phba->fc_stat.elsXmitLOGO++; 3180 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3181 spin_lock_irq(&ndlp->lock); 3182 ndlp->nlp_flag |= NLP_LOGO_SND; 3183 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3184 spin_unlock_irq(&ndlp->lock); 3185 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3186 if (!elsiocb->ndlp) { 3187 lpfc_els_free_iocb(phba, elsiocb); 3188 goto err; 3189 } 3190 3191 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3192 "Issue LOGO: did:x%x refcnt %d", 3193 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3194 3195 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3196 if (rc == IOCB_ERROR) { 3197 lpfc_els_free_iocb(phba, elsiocb); 3198 lpfc_nlp_put(ndlp); 3199 goto err; 3200 } 3201 3202 spin_lock_irq(&ndlp->lock); 3203 ndlp->nlp_prev_state = ndlp->nlp_state; 3204 spin_unlock_irq(&ndlp->lock); 3205 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3206 return 0; 3207 3208 err: 3209 spin_lock_irq(&ndlp->lock); 3210 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3211 spin_unlock_irq(&ndlp->lock); 3212 return 1; 3213 } 3214 3215 /** 3216 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3217 * @phba: pointer to lpfc hba data structure. 3218 * @cmdiocb: pointer to lpfc command iocb data structure. 3219 * @rspiocb: pointer to lpfc response iocb data structure. 3220 * 3221 * This routine is a generic completion callback function for ELS commands. 3222 * Specifically, it is the callback function which does not need to perform 3223 * any command specific operations. It is currently used by the ELS command 3224 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3225 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3226 * Other than certain debug loggings, this callback function simply invokes the 3227 * lpfc_els_chk_latt() routine to check whether link went down during the 3228 * discovery process. 
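 *
 * Illustrative sketch (mirroring the issuing routines in this file, not a
 * new requirement): a command that wants this generic completion points
 * cmd_cmpl at it and takes the node reference that the completion drops:
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	if (!elsiocb->ndlp) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 *	    IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		lpfc_nlp_put(ndlp);
 *		return 1;
 *	}
 *	return 0;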
3229 **/ 3230 static void 3231 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3232 struct lpfc_iocbq *rspiocb) 3233 { 3234 struct lpfc_vport *vport = cmdiocb->vport; 3235 struct lpfc_nodelist *free_ndlp; 3236 IOCB_t *irsp; 3237 u32 ulp_status, ulp_word4, tmo, did, iotag; 3238 3239 ulp_status = get_job_ulpstatus(phba, rspiocb); 3240 ulp_word4 = get_job_word4(phba, rspiocb); 3241 did = get_job_els_rsp64_did(phba, cmdiocb); 3242 3243 if (phba->sli_rev == LPFC_SLI_REV4) { 3244 tmo = get_wqe_tmo(cmdiocb); 3245 iotag = get_wqe_reqtag(cmdiocb); 3246 } else { 3247 irsp = &rspiocb->iocb; 3248 tmo = irsp->ulpTimeout; 3249 iotag = irsp->ulpIoTag; 3250 } 3251 3252 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3253 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3254 ulp_status, ulp_word4, did); 3255 3256 /* ELS cmd tag <ulpIoTag> completes */ 3257 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3258 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3259 iotag, ulp_status, ulp_word4, tmo); 3260 3261 /* Check to see if link went down during discovery */ 3262 lpfc_els_chk_latt(vport); 3263 3264 free_ndlp = cmdiocb->ndlp; 3265 3266 lpfc_els_free_iocb(phba, cmdiocb); 3267 lpfc_nlp_put(free_ndlp); 3268 } 3269 3270 /** 3271 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3272 * @vport: pointer to lpfc_vport data structure. 3273 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3274 * 3275 * This routine registers the rpi assigned to the fabric controller 3276 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3277 * state triggering a registration with the SCSI transport. 3278 * 3279 * This routine is single out because the fabric controller node 3280 * does not receive a PLOGI. This routine is consumed by the 3281 * SCR and RDF ELS commands. Callers are expected to qualify 3282 * with SLI4 first. 3283 **/ 3284 static int 3285 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3286 { 3287 int rc = 0; 3288 struct lpfc_hba *phba = vport->phba; 3289 struct lpfc_nodelist *ns_ndlp; 3290 LPFC_MBOXQ_t *mbox; 3291 3292 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3293 return rc; 3294 3295 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3296 if (!ns_ndlp) 3297 return -ENODEV; 3298 3299 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3300 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3301 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3302 ns_ndlp->nlp_state); 3303 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3304 return -ENODEV; 3305 3306 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3307 if (!mbox) { 3308 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3309 "0936 %s: no memory for reg_login " 3310 "Data: x%x x%x x%x x%x\n", __func__, 3311 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3312 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3313 return -ENOMEM; 3314 } 3315 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3316 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3317 if (rc) { 3318 rc = -EACCES; 3319 goto out; 3320 } 3321 3322 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3323 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3324 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3325 if (!mbox->ctx_ndlp) { 3326 rc = -ENOMEM; 3327 goto out; 3328 } 3329 3330 mbox->vport = vport; 3331 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3332 if (rc == MBX_NOT_FINISHED) { 3333 rc = -ENODEV; 3334 lpfc_nlp_put(fc_ndlp); 3335 goto out; 3336 } 3337 /* Success path. Exit. 
*/ 3338 lpfc_nlp_set_state(vport, fc_ndlp, 3339 NLP_STE_REG_LOGIN_ISSUE); 3340 return 0; 3341 3342 out: 3343 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3344 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3345 "0938 %s: failed to format reg_login " 3346 "Data: x%x x%x x%x x%x\n", __func__, 3347 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3348 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3349 return rc; 3350 } 3351 3352 /** 3353 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3354 * @phba: pointer to lpfc hba data structure. 3355 * @cmdiocb: pointer to lpfc command iocb data structure. 3356 * @rspiocb: pointer to lpfc response iocb data structure. 3357 * 3358 * This routine is a generic completion callback function for Discovery ELS cmd. 3359 * Currently used by the ELS command issuing routines for the ELS State Change 3360 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3361 * These commands will be retried once only for ELS timeout errors. 3362 **/ 3363 static void 3364 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3365 struct lpfc_iocbq *rspiocb) 3366 { 3367 struct lpfc_vport *vport = cmdiocb->vport; 3368 IOCB_t *irsp; 3369 struct lpfc_els_rdf_rsp *prdf; 3370 struct lpfc_dmabuf *pcmd, *prsp; 3371 u32 *pdata; 3372 u32 cmd; 3373 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3374 u32 ulp_status, ulp_word4, tmo, did, iotag; 3375 3376 ulp_status = get_job_ulpstatus(phba, rspiocb); 3377 ulp_word4 = get_job_word4(phba, rspiocb); 3378 did = get_job_els_rsp64_did(phba, cmdiocb); 3379 3380 if (phba->sli_rev == LPFC_SLI_REV4) { 3381 tmo = get_wqe_tmo(cmdiocb); 3382 iotag = get_wqe_reqtag(cmdiocb); 3383 } else { 3384 irsp = &rspiocb->iocb; 3385 tmo = irsp->ulpTimeout; 3386 iotag = irsp->ulpIoTag; 3387 } 3388 3389 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3390 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3391 ulp_status, ulp_word4, did); 3392 3393 /* ELS cmd tag <ulpIoTag> completes */ 3394 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3395 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3396 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3397 3398 pcmd = cmdiocb->cmd_dmabuf; 3399 if (!pcmd) 3400 goto out; 3401 3402 pdata = (u32 *)pcmd->virt; 3403 if (!pdata) 3404 goto out; 3405 cmd = *pdata; 3406 3407 /* Only 1 retry for ELS Timeout only */ 3408 if (ulp_status == IOSTAT_LOCAL_REJECT && 3409 ((ulp_word4 & IOERR_PARAM_MASK) == 3410 IOERR_SEQUENCE_TIMEOUT)) { 3411 cmdiocb->retry++; 3412 if (cmdiocb->retry <= 1) { 3413 switch (cmd) { 3414 case ELS_CMD_SCR: 3415 lpfc_issue_els_scr(vport, cmdiocb->retry); 3416 break; 3417 case ELS_CMD_EDC: 3418 lpfc_issue_els_edc(vport, cmdiocb->retry); 3419 break; 3420 case ELS_CMD_RDF: 3421 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3422 break; 3423 } 3424 goto out; 3425 } 3426 phba->fc_stat.elsRetryExceeded++; 3427 } 3428 if (cmd == ELS_CMD_EDC) { 3429 /* must be called before checking uplStatus and returning */ 3430 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3431 return; 3432 } 3433 if (ulp_status) { 3434 /* ELS discovery cmd completes with error */ 3435 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3436 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3437 ulp_status, ulp_word4); 3438 goto out; 3439 } 3440 3441 /* The RDF response doesn't have any impact on the running driver 3442 * but the notification descriptors are dumped here for support. 
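 * Each descriptor tag granted by the fabric is logged together with the
 * locally registered congestion signal and FPIN capabilities
 * (cgn_reg_signal/cgn_reg_fpin), so support can compare what was requested
 * with what the fabric granted.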
3443 */ 3444 if (cmd == ELS_CMD_RDF) { 3445 int i; 3446 3447 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3448 if (!prsp) 3449 goto out; 3450 3451 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3452 if (!prdf) 3453 goto out; 3454 if (!lpfc_is_els_acc_rsp(prsp)) 3455 goto out; 3456 3457 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3458 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3459 lpfc_printf_vlog(vport, KERN_INFO, 3460 LOG_ELS | LOG_CGN_MGMT, 3461 "4677 Fabric RDF Notification Grant " 3462 "Data: 0x%08x Reg: %x %x\n", 3463 be32_to_cpu( 3464 prdf->reg_d1.desc_tags[i]), 3465 phba->cgn_reg_signal, 3466 phba->cgn_reg_fpin); 3467 } 3468 3469 out: 3470 /* Check to see if link went down during discovery */ 3471 lpfc_els_chk_latt(vport); 3472 lpfc_els_free_iocb(phba, cmdiocb); 3473 lpfc_nlp_put(ndlp); 3474 return; 3475 } 3476 3477 /** 3478 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3479 * @vport: pointer to a host virtual N_Port data structure. 3480 * @retry: retry counter for the command IOCB. 3481 * 3482 * This routine issues a State Change Request (SCR) to a fabric node 3483 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3484 * first search the @vport node list to find the matching ndlp. If no such 3485 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3486 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3487 * routine is invoked to send the SCR IOCB. 3488 * 3489 * Note that the ndlp reference count will be incremented by 1 for holding the 3490 * ndlp and the reference to ndlp will be stored into the ndlp field of 3491 * the IOCB for the completion callback function to the SCR ELS command. 3492 * 3493 * Return code 3494 * 0 - Successfully issued scr command 3495 * 1 - Failed to issue scr command 3496 **/ 3497 int 3498 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3499 { 3500 int rc = 0; 3501 struct lpfc_hba *phba = vport->phba; 3502 struct lpfc_iocbq *elsiocb; 3503 uint8_t *pcmd; 3504 uint16_t cmdsize; 3505 struct lpfc_nodelist *ndlp; 3506 3507 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3508 3509 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3510 if (!ndlp) { 3511 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3512 if (!ndlp) 3513 return 1; 3514 lpfc_enqueue_node(vport, ndlp); 3515 } 3516 3517 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3518 ndlp->nlp_DID, ELS_CMD_SCR); 3519 if (!elsiocb) 3520 return 1; 3521 3522 if (phba->sli_rev == LPFC_SLI_REV4) { 3523 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3524 if (rc) { 3525 lpfc_els_free_iocb(phba, elsiocb); 3526 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3527 "0937 %s: Failed to reg fc node, rc %d\n", 3528 __func__, rc); 3529 return 1; 3530 } 3531 } 3532 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3533 3534 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3535 pcmd += sizeof(uint32_t); 3536 3537 /* For SCR, remainder of payload is SCR parameter page */ 3538 memset(pcmd, 0, sizeof(SCR)); 3539 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3540 3541 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3542 "Issue SCR: did:x%x", 3543 ndlp->nlp_DID, 0, 0); 3544 3545 phba->fc_stat.elsXmitSCR++; 3546 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3547 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3548 if (!elsiocb->ndlp) { 3549 lpfc_els_free_iocb(phba, elsiocb); 3550 return 1; 3551 } 3552 3553 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3554 "Issue SCR: did:x%x refcnt %d", 3555 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3556 3557 rc = 
lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3558 if (rc == IOCB_ERROR) { 3559 lpfc_els_free_iocb(phba, elsiocb); 3560 lpfc_nlp_put(ndlp); 3561 return 1; 3562 } 3563 3564 return 0; 3565 } 3566 3567 /** 3568 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3569 * or the other nport (pt2pt). 3570 * @vport: pointer to a host virtual N_Port data structure. 3571 * @retry: number of retries to the command IOCB. 3572 * 3573 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3574 * when connected to a fabric, or to the remote port when connected 3575 * in point-to-point mode. When sent to the Fabric Controller, it will 3576 * replay the RSCN to registered recipients. 3577 * 3578 * Note that the ndlp reference count will be incremented by 1 for holding the 3579 * ndlp and the reference to ndlp will be stored into the ndlp field of 3580 * the IOCB for the completion callback function to the RSCN ELS command. 3581 * 3582 * Return code 3583 * 0 - Successfully issued RSCN command 3584 * 1 - Failed to issue RSCN command 3585 **/ 3586 int 3587 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3588 { 3589 int rc = 0; 3590 struct lpfc_hba *phba = vport->phba; 3591 struct lpfc_iocbq *elsiocb; 3592 struct lpfc_nodelist *ndlp; 3593 struct { 3594 struct fc_els_rscn rscn; 3595 struct fc_els_rscn_page portid; 3596 } *event; 3597 uint32_t nportid; 3598 uint16_t cmdsize = sizeof(*event); 3599 3600 /* Not supported for private loop */ 3601 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3602 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag)) 3603 return 1; 3604 3605 if (test_bit(FC_PT2PT, &vport->fc_flag)) { 3606 /* find any mapped nport - that would be the other nport */ 3607 ndlp = lpfc_findnode_mapped(vport); 3608 if (!ndlp) 3609 return 1; 3610 } else { 3611 nportid = FC_FID_FCTRL; 3612 /* find the fabric controller node */ 3613 ndlp = lpfc_findnode_did(vport, nportid); 3614 if (!ndlp) { 3615 /* if one didn't exist, make one */ 3616 ndlp = lpfc_nlp_init(vport, nportid); 3617 if (!ndlp) 3618 return 1; 3619 lpfc_enqueue_node(vport, ndlp); 3620 } 3621 } 3622 3623 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3624 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3625 3626 if (!elsiocb) 3627 return 1; 3628 3629 event = elsiocb->cmd_dmabuf->virt; 3630 3631 event->rscn.rscn_cmd = ELS_RSCN; 3632 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3633 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3634 3635 nportid = vport->fc_myDID; 3636 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3637 event->portid.rscn_page_flags = 0; 3638 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3639 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3640 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3641 3642 phba->fc_stat.elsXmitRSCN++; 3643 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3644 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3645 if (!elsiocb->ndlp) { 3646 lpfc_els_free_iocb(phba, elsiocb); 3647 return 1; 3648 } 3649 3650 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3651 "Issue RSCN: did:x%x", 3652 ndlp->nlp_DID, 0, 0); 3653 3654 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3655 if (rc == IOCB_ERROR) { 3656 lpfc_els_free_iocb(phba, elsiocb); 3657 lpfc_nlp_put(ndlp); 3658 return 1; 3659 } 3660 3661 return 0; 3662 } 3663 3664 /** 3665 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3666 * @vport: pointer to a host virtual N_Port data structure. 3667 * @nportid: N_Port identifier to the remote node. 
3668 * @retry: number of retries to the command IOCB. 3669 * 3670 * This routine issues a Fibre Channel Address Resolution Response 3671 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3672 * is passed into the function. It first search the @vport node list to find 3673 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3674 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3675 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3676 * 3677 * Note that the ndlp reference count will be incremented by 1 for holding the 3678 * ndlp and the reference to ndlp will be stored into the ndlp field of 3679 * the IOCB for the completion callback function to the FARPR ELS command. 3680 * 3681 * Return code 3682 * 0 - Successfully issued farpr command 3683 * 1 - Failed to issue farpr command 3684 **/ 3685 static int 3686 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3687 { 3688 int rc = 0; 3689 struct lpfc_hba *phba = vport->phba; 3690 struct lpfc_iocbq *elsiocb; 3691 FARP *fp; 3692 uint8_t *pcmd; 3693 uint32_t *lp; 3694 uint16_t cmdsize; 3695 struct lpfc_nodelist *ondlp; 3696 struct lpfc_nodelist *ndlp; 3697 3698 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3699 3700 ndlp = lpfc_findnode_did(vport, nportid); 3701 if (!ndlp) { 3702 ndlp = lpfc_nlp_init(vport, nportid); 3703 if (!ndlp) 3704 return 1; 3705 lpfc_enqueue_node(vport, ndlp); 3706 } 3707 3708 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3709 ndlp->nlp_DID, ELS_CMD_FARPR); 3710 if (!elsiocb) 3711 return 1; 3712 3713 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3714 3715 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3716 pcmd += sizeof(uint32_t); 3717 3718 /* Fill in FARPR payload */ 3719 fp = (FARP *) (pcmd); 3720 memset(fp, 0, sizeof(FARP)); 3721 lp = (uint32_t *) pcmd; 3722 *lp++ = be32_to_cpu(nportid); 3723 *lp++ = be32_to_cpu(vport->fc_myDID); 3724 fp->Rflags = 0; 3725 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3726 3727 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3728 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3729 ondlp = lpfc_findnode_did(vport, nportid); 3730 if (ondlp) { 3731 memcpy(&fp->OportName, &ondlp->nlp_portname, 3732 sizeof(struct lpfc_name)); 3733 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3734 sizeof(struct lpfc_name)); 3735 } 3736 3737 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3738 "Issue FARPR: did:x%x", 3739 ndlp->nlp_DID, 0, 0); 3740 3741 phba->fc_stat.elsXmitFARPR++; 3742 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3743 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3744 if (!elsiocb->ndlp) { 3745 lpfc_els_free_iocb(phba, elsiocb); 3746 return 1; 3747 } 3748 3749 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3750 if (rc == IOCB_ERROR) { 3751 /* The additional lpfc_nlp_put will cause the following 3752 * lpfc_els_free_iocb routine to trigger the release of 3753 * the node. 3754 */ 3755 lpfc_els_free_iocb(phba, elsiocb); 3756 lpfc_nlp_put(ndlp); 3757 return 1; 3758 } 3759 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3760 * trigger the release of the node. 3761 */ 3762 /* Don't release reference count as RDF is likely outstanding */ 3763 return 0; 3764 } 3765 3766 /** 3767 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3768 * @vport: pointer to a host virtual N_Port data structure. 3769 * @retry: retry counter for the command IOCB. 
3770 * 3771 * This routine issues an ELS RDF to the Fabric Controller to register 3772 * for diagnostic functions. 3773 * 3774 * Note that the ndlp reference count will be incremented by 1 for holding the 3775 * ndlp and the reference to ndlp will be stored into the ndlp field of 3776 * the IOCB for the completion callback function to the RDF ELS command. 3777 * 3778 * Return code 3779 * 0 - Successfully issued rdf command 3780 * 1 - Failed to issue rdf command 3781 **/ 3782 int 3783 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3784 { 3785 struct lpfc_hba *phba = vport->phba; 3786 struct lpfc_iocbq *elsiocb; 3787 struct lpfc_els_rdf_req *prdf; 3788 struct lpfc_nodelist *ndlp; 3789 uint16_t cmdsize; 3790 int rc; 3791 3792 cmdsize = sizeof(*prdf); 3793 3794 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3795 if (!ndlp) { 3796 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3797 if (!ndlp) 3798 return -ENODEV; 3799 lpfc_enqueue_node(vport, ndlp); 3800 } 3801 3802 /* RDF ELS is not required on an NPIV VN_Port. */ 3803 if (vport->port_type == LPFC_NPIV_PORT) 3804 return -EACCES; 3805 3806 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3807 ndlp->nlp_DID, ELS_CMD_RDF); 3808 if (!elsiocb) 3809 return -ENOMEM; 3810 3811 /* Configure the payload for the supported FPIN events. */ 3812 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3813 memset(prdf, 0, cmdsize); 3814 prdf->rdf.fpin_cmd = ELS_RDF; 3815 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3816 sizeof(struct fc_els_rdf)); 3817 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3818 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3819 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3820 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3821 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3822 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3823 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3824 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3825 3826 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3827 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3828 ndlp->nlp_DID, phba->cgn_reg_signal, 3829 phba->cgn_reg_fpin); 3830 3831 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3832 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3833 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3834 if (!elsiocb->ndlp) { 3835 lpfc_els_free_iocb(phba, elsiocb); 3836 return -EIO; 3837 } 3838 3839 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3840 "Issue RDF: did:x%x refcnt %d", 3841 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3842 3843 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3844 if (rc == IOCB_ERROR) { 3845 lpfc_els_free_iocb(phba, elsiocb); 3846 lpfc_nlp_put(ndlp); 3847 return -EIO; 3848 } 3849 return 0; 3850 } 3851 3852 /** 3853 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3854 * @vport: pointer to a host virtual N_Port data structure. 3855 * @cmdiocb: pointer to lpfc command iocb data structure. 3856 * @ndlp: pointer to a node-list data structure. 3857 * 3858 * A received RDF implies a possible change to fabric supported diagnostic 3859 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3860 * RDF request to reregister for supported diagnostic functions. 
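 *
 * The handling is a simple two-step sequence (sketch of the body below,
 * error logging omitted):
 *
 *	lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL);
 *	lpfc_issue_els_rdf(vport, 0);
 *
 * with a failure in either step reported to the caller as -EIO.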
3861 *
3862 * Return code
3863 * 0 - Success
3864 * -EIO - Failed to process received RDF
3865 **/
3866 static int
3867 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3868 struct lpfc_nodelist *ndlp)
3869 {
3870 /* Send LS_ACC */
3871 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3872 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3873 "1623 Failed to RDF_ACC from x%x for x%x\n",
3874 ndlp->nlp_DID, vport->fc_myDID);
3875 return -EIO;
3876 }
3877
3878 /* Issue new RDF for reregistering */
3879 if (lpfc_issue_els_rdf(vport, 0)) {
3880 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3881 "2623 Failed to re register RDF for x%x\n",
3882 vport->fc_myDID);
3883 return -EIO;
3884 }
3885
3886 return 0;
3887 }
3888
3889 /**
3890 * lpfc_least_capable_settings - helper function for EDC rsp processing
3891 * @phba: pointer to lpfc hba data structure.
3892 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3893 *
3894 * This helper routine determines the least capable setting for
3895 * congestion signals and signal frequency, including scale, from the
3896 * congestion detection descriptor in the EDC rsp. The routine
3897 * sets @phba values in preparation for a set_features mailbox.
3898 **/
3899 static void
3900 lpfc_least_capable_settings(struct lpfc_hba *phba,
3901 struct fc_diag_cg_sig_desc *pcgd)
3902 {
3903 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3904 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3905
3906 /* Get rsp signal and frequency capabilities. */
3907 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3908 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3909 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3910
3911 /* If the FPort does not support signals, set FPIN only */
3912 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3913 goto out_no_support;
3914
3915 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3916 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3917 * to milliseconds.
3918 */
3919 switch (rsp_sig_freq_scale) {
3920 case EDC_CG_SIGFREQ_SEC:
3921 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3922 break;
3923 case EDC_CG_SIGFREQ_MSEC:
3924 rsp_sig_freq_cyc = 1;
3925 break;
3926 default:
3927 goto out_no_support;
3928 }
3929
3930 /* Convenient shorthand. */
3931 drv_sig_cap = phba->cgn_reg_signal;
3932
3933 /* Choose the least capable frequency. */
3934 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3935 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3936
3937 /* There should be some common signal support. Settle on the least
3938 * capable signal and adjust FPIN values. Initialize defaults to ease
3939 * the decision.
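 *
 * For example, combining the F_Port response with the local capability:
 *
 *	FPort WARN_ONLY,  driver WARN_ONLY  -> signal WARN_ONLY,  FPIN alarm only
 *	FPort WARN_ONLY,  driver WARN_ALARM -> signal WARN_ONLY,  FPIN alarm only
 *	FPort WARN_ALARM, driver WARN_ONLY  -> signal WARN_ONLY,  FPIN alarm only
 *	FPort WARN_ALARM, driver WARN_ALARM -> signal WARN_ALARM, no FPIN needed
 *	no common signal support            -> signal NOTSUPPORTED, FPIN warn+alarm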
3940 */ 3941 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3942 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3943 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3944 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3945 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3946 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3947 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3948 } 3949 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3950 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3951 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3952 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3953 } 3954 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3955 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3956 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3957 } 3958 } 3959 3960 /* We are NOT recording signal frequency in congestion info buffer */ 3961 return; 3962 3963 out_no_support: 3964 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3965 phba->cgn_sig_freq = 0; 3966 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3967 } 3968 3969 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3970 FC_LS_TLV_DTAG_INIT); 3971 3972 /** 3973 * lpfc_cmpl_els_edc - Completion callback function for EDC 3974 * @phba: pointer to lpfc hba data structure. 3975 * @cmdiocb: pointer to lpfc command iocb data structure. 3976 * @rspiocb: pointer to lpfc response iocb data structure. 3977 * 3978 * This routine is the completion callback function for issuing the Exchange 3979 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3980 * notify the FPort of its Congestion and Link Fault capabilities. This 3981 * routine parses the FPort's response and decides on the least common 3982 * values applicable to both FPort and NPort for Warnings and Alarms that 3983 * are communicated via hardware signals. 3984 **/ 3985 static void 3986 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3987 struct lpfc_iocbq *rspiocb) 3988 { 3989 IOCB_t *irsp_iocb; 3990 struct fc_els_edc_resp *edc_rsp; 3991 struct fc_tlv_desc *tlv; 3992 struct fc_diag_cg_sig_desc *pcgd; 3993 struct fc_diag_lnkflt_desc *plnkflt; 3994 struct lpfc_dmabuf *pcmd, *prsp; 3995 const char *dtag_nm; 3996 u32 *pdata, dtag; 3997 int desc_cnt = 0, bytes_remain; 3998 bool rcv_cap_desc = false; 3999 struct lpfc_nodelist *ndlp; 4000 u32 ulp_status, ulp_word4, tmo, did, iotag; 4001 4002 ndlp = cmdiocb->ndlp; 4003 4004 ulp_status = get_job_ulpstatus(phba, rspiocb); 4005 ulp_word4 = get_job_word4(phba, rspiocb); 4006 did = get_job_els_rsp64_did(phba, rspiocb); 4007 4008 if (phba->sli_rev == LPFC_SLI_REV4) { 4009 tmo = get_wqe_tmo(rspiocb); 4010 iotag = get_wqe_reqtag(rspiocb); 4011 } else { 4012 irsp_iocb = &rspiocb->iocb; 4013 tmo = irsp_iocb->ulpTimeout; 4014 iotag = irsp_iocb->ulpIoTag; 4015 } 4016 4017 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4018 "EDC cmpl: status:x%x/x%x did:x%x", 4019 ulp_status, ulp_word4, did); 4020 4021 /* ELS cmd tag <ulpIoTag> completes */ 4022 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4023 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 4024 iotag, ulp_status, ulp_word4, tmo); 4025 4026 pcmd = cmdiocb->cmd_dmabuf; 4027 if (!pcmd) 4028 goto out; 4029 4030 pdata = (u32 *)pcmd->virt; 4031 if (!pdata) 4032 goto out; 4033 4034 /* Need to clear signal values, send features MB and RDF with FPIN. 
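 * The fallback lives under the out: label at the end of this routine: with
 * no capability descriptor accepted, rcv_cap_desc stays false and the
 * driver reverts to FPIN-only settings before pushing them to the firmware,
 * roughly:
 *
 *	phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
 *	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
 *	phba->cgn_sig_freq = 0;
 *	lpfc_config_cgn_signal(phba);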
*/ 4035 if (ulp_status) 4036 goto out; 4037 4038 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 4039 if (!prsp) 4040 goto out; 4041 4042 edc_rsp = prsp->virt; 4043 if (!edc_rsp) 4044 goto out; 4045 4046 /* ELS cmd tag <ulpIoTag> completes */ 4047 lpfc_printf_log(phba, KERN_INFO, 4048 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4049 "4676 Fabric EDC Rsp: " 4050 "0x%02x, 0x%08x\n", 4051 edc_rsp->acc_hdr.la_cmd, 4052 be32_to_cpu(edc_rsp->desc_list_len)); 4053 4054 if (!lpfc_is_els_acc_rsp(prsp)) 4055 goto out; 4056 4057 /* 4058 * Payload length in bytes is the response descriptor list 4059 * length minus the 12 bytes of Link Service Request 4060 * Information descriptor in the reply. 4061 */ 4062 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4063 sizeof(struct fc_els_lsri_desc); 4064 if (bytes_remain <= 0) 4065 goto out; 4066 4067 tlv = edc_rsp->desc; 4068 4069 /* 4070 * cycle through EDC diagnostic descriptors to find the 4071 * congestion signaling capability descriptor 4072 */ 4073 while (bytes_remain) { 4074 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4075 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4076 "6461 Truncated TLV hdr on " 4077 "Diagnostic descriptor[%d]\n", 4078 desc_cnt); 4079 goto out; 4080 } 4081 4082 dtag = be32_to_cpu(tlv->desc_tag); 4083 switch (dtag) { 4084 case ELS_DTAG_LNK_FAULT_CAP: 4085 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4086 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4087 sizeof(struct fc_diag_lnkflt_desc)) { 4088 lpfc_printf_log(phba, KERN_WARNING, 4089 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4090 "6462 Truncated Link Fault Diagnostic " 4091 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4092 desc_cnt, bytes_remain, 4093 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4094 sizeof(struct fc_diag_lnkflt_desc)); 4095 goto out; 4096 } 4097 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4098 lpfc_printf_log(phba, KERN_INFO, 4099 LOG_ELS | LOG_LDS_EVENT, 4100 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4101 "0x%08x 0x%08x 0x%08x\n", 4102 be32_to_cpu(plnkflt->desc_tag), 4103 be32_to_cpu(plnkflt->desc_len), 4104 be32_to_cpu( 4105 plnkflt->degrade_activate_threshold), 4106 be32_to_cpu( 4107 plnkflt->degrade_deactivate_threshold), 4108 be32_to_cpu(plnkflt->fec_degrade_interval)); 4109 break; 4110 case ELS_DTAG_CG_SIGNAL_CAP: 4111 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4112 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4113 sizeof(struct fc_diag_cg_sig_desc)) { 4114 lpfc_printf_log( 4115 phba, KERN_WARNING, LOG_CGN_MGMT, 4116 "6463 Truncated Cgn Signal Diagnostic " 4117 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4118 desc_cnt, bytes_remain, 4119 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4120 sizeof(struct fc_diag_cg_sig_desc)); 4121 goto out; 4122 } 4123 4124 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4125 lpfc_printf_log( 4126 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4127 "4616 CGN Desc Data: 0x%08x 0x%08x " 4128 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4129 be32_to_cpu(pcgd->desc_tag), 4130 be32_to_cpu(pcgd->desc_len), 4131 be32_to_cpu(pcgd->xmt_signal_capability), 4132 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4133 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4134 be32_to_cpu(pcgd->rcv_signal_capability), 4135 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4136 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4137 4138 /* Compare driver and Fport capabilities and choose 4139 * least common. 
4140 */ 4141 lpfc_least_capable_settings(phba, pcgd); 4142 rcv_cap_desc = true; 4143 break; 4144 default: 4145 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4146 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4147 "4919 unknown Diagnostic " 4148 "Descriptor[%d]: tag x%x (%s)\n", 4149 desc_cnt, dtag, dtag_nm); 4150 } 4151 4152 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4153 tlv = fc_tlv_next_desc(tlv); 4154 desc_cnt++; 4155 } 4156 4157 out: 4158 if (!rcv_cap_desc) { 4159 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4160 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4161 phba->cgn_sig_freq = 0; 4162 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4163 "4202 EDC rsp error - sending RDF " 4164 "for FPIN only.\n"); 4165 } 4166 4167 lpfc_config_cgn_signal(phba); 4168 4169 /* Check to see if link went down during discovery */ 4170 lpfc_els_chk_latt(phba->pport); 4171 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4172 "EDC Cmpl: did:x%x refcnt %d", 4173 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4174 lpfc_els_free_iocb(phba, cmdiocb); 4175 lpfc_nlp_put(ndlp); 4176 } 4177 4178 static void 4179 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4180 { 4181 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; 4182 4183 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); 4184 lft->desc_len = cpu_to_be32( 4185 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); 4186 4187 lft->degrade_activate_threshold = 4188 cpu_to_be32(phba->degrade_activate_threshold); 4189 lft->degrade_deactivate_threshold = 4190 cpu_to_be32(phba->degrade_deactivate_threshold); 4191 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); 4192 } 4193 4194 static void 4195 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4196 { 4197 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; 4198 4199 /* We are assuming cgd was zero'ed before calling this routine */ 4200 4201 /* Configure the congestion detection capability */ 4202 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4203 4204 /* Descriptor len doesn't include the tag or len fields. */ 4205 cgd->desc_len = cpu_to_be32( 4206 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4207 4208 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4209 * xmt_signal_frequency.count already set to 0. 4210 * xmt_signal_frequency.units already set to 0. 4211 */ 4212 4213 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4214 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4215 * rcv_signal_frequency.count already set to 0. 4216 * rcv_signal_frequency.units already set to 0. 4217 */ 4218 phba->cgn_sig_freq = 0; 4219 return; 4220 } 4221 switch (phba->cgn_reg_signal) { 4222 case EDC_CG_SIG_WARN_ONLY: 4223 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4224 break; 4225 case EDC_CG_SIG_WARN_ALARM: 4226 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4227 break; 4228 default: 4229 /* rcv_signal_capability left 0 thus no support */ 4230 break; 4231 } 4232 4233 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4234 * the completion we settle on the higher frequency. 
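 * For example, if lpfc_fabric_cgn_frequency is 100 (milliseconds) and the
 * F_Port responds with a transmit interval of 1 second, the EDC completion
 * (lpfc_least_capable_settings) converts the response to 1000 milliseconds
 * and, being the larger of the two values, it becomes the negotiated
 * cgn_sig_freq.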
4235 */ 4236 cgd->rcv_signal_frequency.count = 4237 cpu_to_be16(lpfc_fabric_cgn_frequency); 4238 cgd->rcv_signal_frequency.units = 4239 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4240 } 4241 4242 static bool 4243 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4244 { 4245 if (!(phba->lmt & LMT_64Gb)) 4246 return false; 4247 if (phba->sli_rev != LPFC_SLI_REV4) 4248 return false; 4249 4250 if (phba->sli4_hba.conf_trunk) { 4251 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4252 return true; 4253 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4254 return true; 4255 } 4256 return false; 4257 } 4258 4259 /** 4260 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4261 * @vport: pointer to a host virtual N_Port data structure. 4262 * @retry: retry counter for the command iocb. 4263 * 4264 * This routine issues an ELS EDC to the F-Port Controller to communicate 4265 * this N_Port's support of hardware signals in its Congestion 4266 * Capabilities Descriptor. 4267 * 4268 * Note: This routine does not check if one or more signals are 4269 * set in the cgn_reg_signal parameter. The caller makes the 4270 * decision to enforce cgn_reg_signal as nonzero or zero depending 4271 * on the conditions. During Fabric requests, the driver 4272 * requires cgn_reg_signals to be nonzero. But a dynamic request 4273 * to set the congestion mode to OFF from Monitor or Manage 4274 * would correctly issue an EDC with no signals enabled to 4275 * turn off switch functionality and then update the FW. 4276 * 4277 * Return code 4278 * 0 - Successfully issued edc command 4279 * 1 - Failed to issue edc command 4280 **/ 4281 int 4282 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4283 { 4284 struct lpfc_hba *phba = vport->phba; 4285 struct lpfc_iocbq *elsiocb; 4286 struct fc_els_edc *edc_req; 4287 struct fc_tlv_desc *tlv; 4288 u16 cmdsize; 4289 struct lpfc_nodelist *ndlp; 4290 u8 *pcmd = NULL; 4291 u32 cgn_desc_size, lft_desc_size; 4292 int rc; 4293 4294 if (vport->port_type == LPFC_NPIV_PORT) 4295 return -EACCES; 4296 4297 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4298 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4299 return -ENODEV; 4300 4301 cgn_desc_size = (phba->cgn_init_reg_signal) ? 4302 sizeof(struct fc_diag_cg_sig_desc) : 0; 4303 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 4304 sizeof(struct fc_diag_lnkflt_desc) : 0; 4305 cmdsize = cgn_desc_size + lft_desc_size; 4306 4307 /* Skip EDC if no applicable descriptors */ 4308 if (!cmdsize) 4309 goto try_rdf; 4310 4311 cmdsize += sizeof(struct fc_els_edc); 4312 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4313 ndlp->nlp_DID, ELS_CMD_EDC); 4314 if (!elsiocb) 4315 goto try_rdf; 4316 4317 /* Configure the payload for the supported Diagnostics capabilities. 
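 * The transmit buffer built below is laid out as:
 *
 *	struct fc_els_edc		ELS_EDC opcode plus desc_len
 *	struct fc_diag_cg_sig_desc	only when cgn_desc_size is nonzero
 *	struct fc_diag_lnkflt_desc	only when lft_desc_size is nonzero
 *
 * Note that desc_len counts the descriptors only, which is why cmdsize adds
 * sizeof(struct fc_els_edc) separately above.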
 */
4318 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
4319 memset(pcmd, 0, cmdsize);
4320 edc_req = (struct fc_els_edc *)pcmd;
4321 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
4322 edc_req->edc_cmd = ELS_EDC;
4323 tlv = edc_req->desc;
4324
4325 if (cgn_desc_size) {
4326 lpfc_format_edc_cgn_desc(phba, tlv);
4327 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
4328 tlv = fc_tlv_next_desc(tlv);
4329 }
4330
4331 if (lft_desc_size)
4332 lpfc_format_edc_lft_desc(phba, tlv);
4333
4334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4335 "4623 Xmit EDC to remote "
4336 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
4337 ndlp->nlp_DID, phba->cgn_reg_signal,
4338 phba->cgn_reg_fpin);
4339
4340 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
4341 elsiocb->ndlp = lpfc_nlp_get(ndlp);
4342 if (!elsiocb->ndlp) {
4343 lpfc_els_free_iocb(phba, elsiocb);
4344 return -EIO;
4345 }
4346
4347 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4348 "Issue EDC: did:x%x refcnt %d",
4349 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4350 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4351 if (rc == IOCB_ERROR) {
4352 /* The additional lpfc_nlp_put will cause the following
4353 * lpfc_els_free_iocb routine to trigger the release of
4354 * the node.
4355 */
4356 lpfc_els_free_iocb(phba, elsiocb);
4357 lpfc_nlp_put(ndlp);
4358 goto try_rdf;
4359 }
4360 return 0;
4361 try_rdf:
4362 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
4363 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4364 rc = lpfc_issue_els_rdf(vport, 0);
4365 return rc;
4366 }
4367
4368 /**
4369 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
4370 * @vport: pointer to a host virtual N_Port data structure.
4371 * @nlp: pointer to a node-list data structure.
4372 *
4373 * This routine cancels the timer with a delayed IOCB-command retry for
4374 * a @vport's @nlp. It stops the timer for the delayed function retry and
4375 * removes the ELS retry event if one is present. In addition, if the
4376 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
4377 * commands are sent for the @vport's nodes that require issuing discovery
4378 * ADISC.
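 *
 * The state undone here is armed by lpfc_els_retry() when it schedules a
 * delayed retry, roughly:
 *
 *	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay));
 *	ndlp->nlp_flag |= NLP_DELAY_TMO;
 *	ndlp->nlp_last_elscmd = cmd;
 *
 * so cancellation must clear NLP_DELAY_TMO, delete the timer, forget
 * nlp_last_elscmd and drop any queued els_retry_evt work, which is what the
 * body below does.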
4379 **/ 4380 void 4381 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4382 { 4383 struct lpfc_work_evt *evtp; 4384 4385 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4386 return; 4387 spin_lock_irq(&nlp->lock); 4388 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4389 spin_unlock_irq(&nlp->lock); 4390 del_timer_sync(&nlp->nlp_delayfunc); 4391 nlp->nlp_last_elscmd = 0; 4392 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4393 list_del_init(&nlp->els_retry_evt.evt_listp); 4394 /* Decrement nlp reference count held for the delayed retry */ 4395 evtp = &nlp->els_retry_evt; 4396 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4397 } 4398 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4399 spin_lock_irq(&nlp->lock); 4400 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4401 spin_unlock_irq(&nlp->lock); 4402 if (vport->num_disc_nodes) { 4403 if (vport->port_state < LPFC_VPORT_READY) { 4404 /* Check if there are more ADISCs to be sent */ 4405 lpfc_more_adisc(vport); 4406 } else { 4407 /* Check if there are more PLOGIs to be sent */ 4408 lpfc_more_plogi(vport); 4409 if (vport->num_disc_nodes == 0) { 4410 clear_bit(FC_NDISC_ACTIVE, 4411 &vport->fc_flag); 4412 lpfc_can_disctmo(vport); 4413 lpfc_end_rscn(vport); 4414 } 4415 } 4416 } 4417 } 4418 return; 4419 } 4420 4421 /** 4422 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4423 * @t: pointer to the timer function associated data (ndlp). 4424 * 4425 * This routine is invoked by the ndlp delayed-function timer to check 4426 * whether there is any pending ELS retry event(s) with the node. If not, it 4427 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4428 * adds the delayed events to the HBA work list and invokes the 4429 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4430 * event. Note that lpfc_nlp_get() is called before posting the event to 4431 * the work list to hold reference count of ndlp so that it guarantees the 4432 * reference to ndlp will still be available when the worker thread gets 4433 * to the event associated with the ndlp. 4434 **/ 4435 void 4436 lpfc_els_retry_delay(struct timer_list *t) 4437 { 4438 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4439 struct lpfc_vport *vport = ndlp->vport; 4440 struct lpfc_hba *phba = vport->phba; 4441 unsigned long flags; 4442 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4443 4444 /* Hold a node reference for outstanding queued work */ 4445 if (!lpfc_nlp_get(ndlp)) 4446 return; 4447 4448 spin_lock_irqsave(&phba->hbalock, flags); 4449 if (!list_empty(&evtp->evt_listp)) { 4450 spin_unlock_irqrestore(&phba->hbalock, flags); 4451 lpfc_nlp_put(ndlp); 4452 return; 4453 } 4454 4455 evtp->evt_arg1 = ndlp; 4456 evtp->evt = LPFC_EVT_ELS_RETRY; 4457 list_add_tail(&evtp->evt_listp, &phba->work_list); 4458 spin_unlock_irqrestore(&phba->hbalock, flags); 4459 4460 lpfc_worker_wake_up(phba); 4461 } 4462 4463 /** 4464 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4465 * @ndlp: pointer to a node-list data structure. 4466 * 4467 * This routine is the worker-thread handler for processing the @ndlp delayed 4468 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves 4469 * the last ELS command from the associated ndlp and invokes the proper ELS 4470 * function according to the delayed ELS command to retry the command. 
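 *
 * The producing side is lpfc_els_retry_delay() above, which queues the work
 * under hbalock while holding a node reference, roughly:
 *
 *	evtp->evt_arg1 = ndlp;
 *	evtp->evt = LPFC_EVT_ELS_RETRY;
 *	list_add_tail(&evtp->evt_listp, &phba->work_list);
 *	lpfc_worker_wake_up(phba);
 *
 * The worker thread then dequeues the event and calls this handler with
 * that ndlp.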
4471 **/ 4472 void 4473 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4474 { 4475 struct lpfc_vport *vport = ndlp->vport; 4476 uint32_t cmd, retry; 4477 4478 spin_lock_irq(&ndlp->lock); 4479 cmd = ndlp->nlp_last_elscmd; 4480 ndlp->nlp_last_elscmd = 0; 4481 4482 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4483 spin_unlock_irq(&ndlp->lock); 4484 return; 4485 } 4486 4487 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4488 spin_unlock_irq(&ndlp->lock); 4489 /* 4490 * If a discovery event readded nlp_delayfunc after timer 4491 * firing and before processing the timer, cancel the 4492 * nlp_delayfunc. 4493 */ 4494 del_timer_sync(&ndlp->nlp_delayfunc); 4495 retry = ndlp->nlp_retry; 4496 ndlp->nlp_retry = 0; 4497 4498 switch (cmd) { 4499 case ELS_CMD_FLOGI: 4500 lpfc_issue_els_flogi(vport, ndlp, retry); 4501 break; 4502 case ELS_CMD_PLOGI: 4503 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4504 ndlp->nlp_prev_state = ndlp->nlp_state; 4505 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4506 } 4507 break; 4508 case ELS_CMD_ADISC: 4509 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4510 ndlp->nlp_prev_state = ndlp->nlp_state; 4511 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4512 } 4513 break; 4514 case ELS_CMD_PRLI: 4515 case ELS_CMD_NVMEPRLI: 4516 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4517 ndlp->nlp_prev_state = ndlp->nlp_state; 4518 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4519 } 4520 break; 4521 case ELS_CMD_LOGO: 4522 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4523 ndlp->nlp_prev_state = ndlp->nlp_state; 4524 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4525 } 4526 break; 4527 case ELS_CMD_FDISC: 4528 if (!test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag)) 4529 lpfc_issue_els_fdisc(vport, ndlp, retry); 4530 break; 4531 } 4532 return; 4533 } 4534 4535 /** 4536 * lpfc_link_reset - Issue link reset 4537 * @vport: pointer to a virtual N_Port data structure. 4538 * 4539 * This routine performs link reset by sending INIT_LINK mailbox command. 4540 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4541 * INIT_LINK mailbox command. 
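 *
 * A caller sketch taken from the retry logic in lpfc_els_retry(): a
 * NameServer PLOGI that still times out on its final allowed retry
 * escalates to a link reset, and falls back to one more delayed PLOGI if
 * the reset cannot be initiated:
 *
 *	if (link_reset) {
 *		rc = lpfc_link_reset(vport);
 *		if (rc) {
 *			retry = 1;
 *			delay = 100;
 *			goto out_retry;
 *		}
 *		return 1;
 *	}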
4542 * 4543 * Return code 4544 * 0 - Link reset initiated successfully 4545 * 1 - Failed to initiate link reset 4546 **/ 4547 int 4548 lpfc_link_reset(struct lpfc_vport *vport) 4549 { 4550 struct lpfc_hba *phba = vport->phba; 4551 LPFC_MBOXQ_t *mbox; 4552 uint32_t control; 4553 int rc; 4554 4555 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4556 "2851 Attempt link reset\n"); 4557 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4558 if (!mbox) { 4559 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4560 "2852 Failed to allocate mbox memory"); 4561 return 1; 4562 } 4563 4564 /* Enable Link attention interrupts */ 4565 if (phba->sli_rev <= LPFC_SLI_REV3) { 4566 spin_lock_irq(&phba->hbalock); 4567 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4568 control = readl(phba->HCregaddr); 4569 control |= HC_LAINT_ENA; 4570 writel(control, phba->HCregaddr); 4571 readl(phba->HCregaddr); /* flush */ 4572 spin_unlock_irq(&phba->hbalock); 4573 } 4574 4575 lpfc_init_link(phba, mbox, phba->cfg_topology, 4576 phba->cfg_link_speed); 4577 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4578 mbox->vport = vport; 4579 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4580 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 4581 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4582 "2853 Failed to issue INIT_LINK " 4583 "mbox command, rc:x%x\n", rc); 4584 mempool_free(mbox, phba->mbox_mem_pool); 4585 return 1; 4586 } 4587 4588 return 0; 4589 } 4590 4591 /** 4592 * lpfc_els_retry - Make retry decision on an els command iocb 4593 * @phba: pointer to lpfc hba data structure. 4594 * @cmdiocb: pointer to lpfc command iocb data structure. 4595 * @rspiocb: pointer to lpfc response iocb data structure. 4596 * 4597 * This routine makes a retry decision on an ELS command IOCB, which has 4598 * failed. The following ELS IOCBs use this function for retrying the command 4599 * when previously issued command responsed with error status: FLOGI, PLOGI, 4600 * PRLI, ADISC and FDISC. Based on the ELS command type and the 4601 * returned error status, it makes the decision whether a retry shall be 4602 * issued for the command, and whether a retry shall be made immediately or 4603 * delayed. In the former case, the corresponding ELS command issuing-function 4604 * is called to retry the command. In the later case, the ELS command shall 4605 * be posted to the ndlp delayed event and delayed function timer set to the 4606 * ndlp for the delayed command issusing. 4607 * 4608 * Return code 4609 * 0 - No retry of els command is made 4610 * 1 - Immediate or delayed retry of els command is made 4611 **/ 4612 static int 4613 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4614 struct lpfc_iocbq *rspiocb) 4615 { 4616 struct lpfc_vport *vport = cmdiocb->vport; 4617 union lpfc_wqe128 *irsp = &rspiocb->wqe; 4618 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 4619 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 4620 uint32_t *elscmd; 4621 struct ls_rjt stat; 4622 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 4623 int logerr = 0; 4624 uint32_t cmd = 0; 4625 uint32_t did; 4626 int link_reset = 0, rc; 4627 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 4628 u32 ulp_word4 = get_job_word4(phba, rspiocb); 4629 4630 4631 /* Note: cmd_dmabuf may be 0 for internal driver abort 4632 * of delays ELS command. 
4633 */ 4634 4635 if (pcmd && pcmd->virt) { 4636 elscmd = (uint32_t *) (pcmd->virt); 4637 cmd = *elscmd++; 4638 } 4639 4640 if (ndlp) 4641 did = ndlp->nlp_DID; 4642 else { 4643 /* We should only hit this case for retrying PLOGI */ 4644 did = get_job_els_rsp64_did(phba, rspiocb); 4645 ndlp = lpfc_findnode_did(vport, did); 4646 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4647 return 0; 4648 } 4649 4650 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4651 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4652 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4653 4654 switch (ulp_status) { 4655 case IOSTAT_FCP_RSP_ERROR: 4656 break; 4657 case IOSTAT_REMOTE_STOP: 4658 if (phba->sli_rev == LPFC_SLI_REV4) { 4659 /* This IO was aborted by the target, we don't 4660 * know the rxid and because we did not send the 4661 * ABTS we cannot generate and RRQ. 4662 */ 4663 lpfc_set_rrq_active(phba, ndlp, 4664 cmdiocb->sli4_lxritag, 0, 0); 4665 } 4666 break; 4667 case IOSTAT_LOCAL_REJECT: 4668 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4669 case IOERR_LOOP_OPEN_FAILURE: 4670 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4671 delay = 1000; 4672 retry = 1; 4673 break; 4674 4675 case IOERR_ILLEGAL_COMMAND: 4676 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4677 "0124 Retry illegal cmd x%x " 4678 "retry:x%x delay:x%x\n", 4679 cmd, cmdiocb->retry, delay); 4680 retry = 1; 4681 /* All command's retry policy */ 4682 maxretry = 8; 4683 if (cmdiocb->retry > 2) 4684 delay = 1000; 4685 break; 4686 4687 case IOERR_NO_RESOURCES: 4688 logerr = 1; /* HBA out of resources */ 4689 retry = 1; 4690 if (cmdiocb->retry > 100) 4691 delay = 100; 4692 maxretry = 250; 4693 break; 4694 4695 case IOERR_ILLEGAL_FRAME: 4696 delay = 100; 4697 retry = 1; 4698 break; 4699 4700 case IOERR_INVALID_RPI: 4701 if (cmd == ELS_CMD_PLOGI && 4702 did == NameServer_DID) { 4703 /* Continue forever if plogi to */ 4704 /* the nameserver fails */ 4705 maxretry = 0; 4706 delay = 100; 4707 } else if (cmd == ELS_CMD_PRLI && 4708 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 4709 /* State-command disagreement. The PRLI was 4710 * failed with an invalid rpi meaning there 4711 * some unexpected state change. Don't retry. 4712 */ 4713 maxretry = 0; 4714 retry = 0; 4715 break; 4716 } 4717 retry = 1; 4718 break; 4719 4720 case IOERR_SEQUENCE_TIMEOUT: 4721 if (cmd == ELS_CMD_PLOGI && 4722 did == NameServer_DID && 4723 (cmdiocb->retry + 1) == maxretry) { 4724 /* Reset the Link */ 4725 link_reset = 1; 4726 break; 4727 } 4728 retry = 1; 4729 delay = 100; 4730 break; 4731 case IOERR_SLI_ABORTED: 4732 /* Retry ELS PLOGI command? 4733 * Possibly the rport just wasn't ready. 
4734 */ 4735 if (cmd == ELS_CMD_PLOGI) { 4736 /* No retry if state change */ 4737 if (ndlp && 4738 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4739 goto out_retry; 4740 retry = 1; 4741 maxretry = 2; 4742 } 4743 break; 4744 } 4745 break; 4746 4747 case IOSTAT_NPORT_RJT: 4748 case IOSTAT_FABRIC_RJT: 4749 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4750 retry = 1; 4751 break; 4752 } 4753 break; 4754 4755 case IOSTAT_NPORT_BSY: 4756 case IOSTAT_FABRIC_BSY: 4757 logerr = 1; /* Fabric / Remote NPort out of resources */ 4758 retry = 1; 4759 break; 4760 4761 case IOSTAT_LS_RJT: 4762 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4763 /* Added for Vendor specifc support 4764 * Just keep retrying for these Rsn / Exp codes 4765 */ 4766 if (test_bit(FC_PT2PT, &vport->fc_flag) && 4767 cmd == ELS_CMD_NVMEPRLI) { 4768 switch (stat.un.b.lsRjtRsnCode) { 4769 case LSRJT_UNABLE_TPC: 4770 case LSRJT_INVALID_CMD: 4771 case LSRJT_LOGICAL_ERR: 4772 case LSRJT_CMD_UNSUPPORTED: 4773 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4774 "0168 NVME PRLI LS_RJT " 4775 "reason %x port doesn't " 4776 "support NVME, disabling NVME\n", 4777 stat.un.b.lsRjtRsnCode); 4778 retry = 0; 4779 set_bit(FC_PT2PT_NO_NVME, &vport->fc_flag); 4780 goto out_retry; 4781 } 4782 } 4783 switch (stat.un.b.lsRjtRsnCode) { 4784 case LSRJT_UNABLE_TPC: 4785 /* Special case for PRLI LS_RJTs. Recall that lpfc 4786 * uses a single routine to issue both PRLI FC4 types. 4787 * If the PRLI is rejected because that FC4 type 4788 * isn't really supported, don't retry and cause 4789 * multiple transport registrations. Otherwise, parse 4790 * the reason code/reason code explanation and take the 4791 * appropriate action. 4792 */ 4793 lpfc_printf_vlog(vport, KERN_INFO, 4794 LOG_DISCOVERY | LOG_ELS | LOG_NODE, 4795 "0153 ELS cmd x%x LS_RJT by x%x. " 4796 "RsnCode x%x RsnCodeExp x%x\n", 4797 cmd, did, stat.un.b.lsRjtRsnCode, 4798 stat.un.b.lsRjtRsnCodeExp); 4799 4800 switch (stat.un.b.lsRjtRsnCodeExp) { 4801 case LSEXP_CANT_GIVE_DATA: 4802 case LSEXP_CMD_IN_PROGRESS: 4803 if (cmd == ELS_CMD_PLOGI) { 4804 delay = 1000; 4805 maxretry = 48; 4806 } 4807 retry = 1; 4808 break; 4809 case LSEXP_REQ_UNSUPPORTED: 4810 case LSEXP_NO_RSRC_ASSIGN: 4811 /* These explanation codes get no retry. */ 4812 if (cmd == ELS_CMD_PRLI || 4813 cmd == ELS_CMD_NVMEPRLI) 4814 break; 4815 fallthrough; 4816 default: 4817 /* Limit the delay and retry action to a limited 4818 * cmd set. There are other ELS commands where 4819 * a retry is not expected. 4820 */ 4821 if (cmd == ELS_CMD_PLOGI || 4822 cmd == ELS_CMD_PRLI || 4823 cmd == ELS_CMD_NVMEPRLI) { 4824 delay = 1000; 4825 maxretry = lpfc_max_els_tries + 1; 4826 retry = 1; 4827 } 4828 break; 4829 } 4830 4831 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4832 (cmd == ELS_CMD_FDISC) && 4833 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4834 lpfc_printf_vlog(vport, KERN_ERR, 4835 LOG_TRACE_EVENT, 4836 "0125 FDISC Failed (x%x). 
" 4837 "Fabric out of resources\n", 4838 stat.un.lsRjtError); 4839 lpfc_vport_set_state(vport, 4840 FC_VPORT_NO_FABRIC_RSCS); 4841 } 4842 break; 4843 4844 case LSRJT_LOGICAL_BSY: 4845 if ((cmd == ELS_CMD_PLOGI) || 4846 (cmd == ELS_CMD_PRLI) || 4847 (cmd == ELS_CMD_NVMEPRLI)) { 4848 delay = 1000; 4849 maxretry = 48; 4850 } else if (cmd == ELS_CMD_FDISC) { 4851 /* FDISC retry policy */ 4852 maxretry = 48; 4853 if (cmdiocb->retry >= 32) 4854 delay = 1000; 4855 } 4856 retry = 1; 4857 break; 4858 4859 case LSRJT_LOGICAL_ERR: 4860 /* There are some cases where switches return this 4861 * error when they are not ready and should be returning 4862 * Logical Busy. We should delay every time. 4863 */ 4864 if (cmd == ELS_CMD_FDISC && 4865 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4866 maxretry = 3; 4867 delay = 1000; 4868 retry = 1; 4869 } else if (cmd == ELS_CMD_FLOGI && 4870 stat.un.b.lsRjtRsnCodeExp == 4871 LSEXP_NOTHING_MORE) { 4872 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4873 retry = 1; 4874 lpfc_printf_vlog(vport, KERN_ERR, 4875 LOG_TRACE_EVENT, 4876 "0820 FLOGI Failed (x%x). " 4877 "BBCredit Not Supported\n", 4878 stat.un.lsRjtError); 4879 } 4880 break; 4881 4882 case LSRJT_PROTOCOL_ERR: 4883 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4884 (cmd == ELS_CMD_FDISC) && 4885 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4886 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4887 ) { 4888 lpfc_printf_vlog(vport, KERN_ERR, 4889 LOG_TRACE_EVENT, 4890 "0122 FDISC Failed (x%x). " 4891 "Fabric Detected Bad WWN\n", 4892 stat.un.lsRjtError); 4893 lpfc_vport_set_state(vport, 4894 FC_VPORT_FABRIC_REJ_WWN); 4895 } 4896 break; 4897 case LSRJT_VENDOR_UNIQUE: 4898 if ((stat.un.b.vendorUnique == 0x45) && 4899 (cmd == ELS_CMD_FLOGI)) { 4900 goto out_retry; 4901 } 4902 break; 4903 case LSRJT_CMD_UNSUPPORTED: 4904 /* lpfc nvmet returns this type of LS_RJT when it 4905 * receives an FCP PRLI because lpfc nvmet only 4906 * support NVME. ELS request is terminated for FCP4 4907 * on this rport. 4908 */ 4909 if (stat.un.b.lsRjtRsnCodeExp == 4910 LSEXP_REQ_UNSUPPORTED) { 4911 if (cmd == ELS_CMD_PRLI) 4912 goto out_retry; 4913 } 4914 break; 4915 } 4916 break; 4917 4918 case IOSTAT_INTERMED_RSP: 4919 case IOSTAT_BA_RJT: 4920 break; 4921 4922 default: 4923 break; 4924 } 4925 4926 if (link_reset) { 4927 rc = lpfc_link_reset(vport); 4928 if (rc) { 4929 /* Do not give up. Retry PLOGI one more time and attempt 4930 * link reset if PLOGI fails again. 
4931 */ 4932 retry = 1; 4933 delay = 100; 4934 goto out_retry; 4935 } 4936 return 1; 4937 } 4938 4939 if (did == FDMI_DID) 4940 retry = 1; 4941 4942 if ((cmd == ELS_CMD_FLOGI) && 4943 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4944 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4945 /* FLOGI retry policy */ 4946 retry = 1; 4947 /* retry FLOGI forever */ 4948 if (phba->link_flag != LS_LOOPBACK_MODE) 4949 maxretry = 0; 4950 else 4951 maxretry = 2; 4952 4953 if (cmdiocb->retry >= 100) 4954 delay = 5000; 4955 else if (cmdiocb->retry >= 32) 4956 delay = 1000; 4957 } else if ((cmd == ELS_CMD_FDISC) && 4958 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4959 /* retry FDISCs every second up to devloss */ 4960 retry = 1; 4961 maxretry = vport->cfg_devloss_tmo; 4962 delay = 1000; 4963 } 4964 4965 cmdiocb->retry++; 4966 if (maxretry && (cmdiocb->retry >= maxretry)) { 4967 phba->fc_stat.elsRetryExceeded++; 4968 retry = 0; 4969 } 4970 4971 if (test_bit(FC_UNLOADING, &vport->load_flag)) 4972 retry = 0; 4973 4974 out_retry: 4975 if (retry) { 4976 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4977 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4978 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4979 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4980 "2849 Stop retry ELS command " 4981 "x%x to remote NPORT x%x, " 4982 "Data: x%x x%x\n", cmd, did, 4983 cmdiocb->retry, delay); 4984 return 0; 4985 } 4986 } 4987 4988 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4989 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4990 "0107 Retry ELS command x%x to remote " 4991 "NPORT x%x Data: x%x x%x\n", 4992 cmd, did, cmdiocb->retry, delay); 4993 4994 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4995 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4996 ((ulp_word4 & IOERR_PARAM_MASK) != 4997 IOERR_NO_RESOURCES))) { 4998 /* Don't reset timer for no resources */ 4999 5000 /* If discovery / RSCN timer is running, reset it */ 5001 if (timer_pending(&vport->fc_disctmo) || 5002 test_bit(FC_RSCN_MODE, &vport->fc_flag)) 5003 lpfc_set_disctmo(vport); 5004 } 5005 5006 phba->fc_stat.elsXmitRetry++; 5007 if (ndlp && delay) { 5008 phba->fc_stat.elsDelayRetry++; 5009 ndlp->nlp_retry = cmdiocb->retry; 5010 5011 /* delay is specified in milliseconds */ 5012 mod_timer(&ndlp->nlp_delayfunc, 5013 jiffies + msecs_to_jiffies(delay)); 5014 spin_lock_irq(&ndlp->lock); 5015 ndlp->nlp_flag |= NLP_DELAY_TMO; 5016 spin_unlock_irq(&ndlp->lock); 5017 5018 ndlp->nlp_prev_state = ndlp->nlp_state; 5019 if ((cmd == ELS_CMD_PRLI) || 5020 (cmd == ELS_CMD_NVMEPRLI)) 5021 lpfc_nlp_set_state(vport, ndlp, 5022 NLP_STE_PRLI_ISSUE); 5023 else if (cmd != ELS_CMD_ADISC) 5024 lpfc_nlp_set_state(vport, ndlp, 5025 NLP_STE_NPR_NODE); 5026 ndlp->nlp_last_elscmd = cmd; 5027 5028 return 1; 5029 } 5030 switch (cmd) { 5031 case ELS_CMD_FLOGI: 5032 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 5033 return 1; 5034 case ELS_CMD_FDISC: 5035 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 5036 return 1; 5037 case ELS_CMD_PLOGI: 5038 if (ndlp) { 5039 ndlp->nlp_prev_state = ndlp->nlp_state; 5040 lpfc_nlp_set_state(vport, ndlp, 5041 NLP_STE_PLOGI_ISSUE); 5042 } 5043 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 5044 return 1; 5045 case ELS_CMD_ADISC: 5046 ndlp->nlp_prev_state = ndlp->nlp_state; 5047 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 5048 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 5049 return 1; 5050 case ELS_CMD_PRLI: 5051 case ELS_CMD_NVMEPRLI: 5052 ndlp->nlp_prev_state = ndlp->nlp_state; 5053 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 5054 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 5055 return 1; 5056 case ELS_CMD_LOGO: 5057 ndlp->nlp_prev_state = ndlp->nlp_state; 5058 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 5059 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 5060 return 1; 5061 } 5062 } 5063 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 5064 if (logerr) { 5065 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5066 "0137 No retry ELS command x%x to remote " 5067 "NPORT x%x: Out of Resources: Error:x%x/%x " 5068 "IoTag x%x\n", 5069 cmd, did, ulp_status, ulp_word4, 5070 cmdiocb->iotag); 5071 } 5072 else { 5073 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5074 "0108 No retry ELS command x%x to remote " 5075 "NPORT x%x Retried:%d Error:x%x/%x " 5076 "IoTag x%x nflags x%x\n", 5077 cmd, did, cmdiocb->retry, ulp_status, 5078 ulp_word4, cmdiocb->iotag, 5079 (ndlp ? ndlp->nlp_flag : 0)); 5080 } 5081 return 0; 5082 } 5083 5084 /** 5085 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5086 * @phba: pointer to lpfc hba data structure. 5087 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5088 * 5089 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5090 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5091 * checks to see whether there is a lpfc DMA buffer associated with the 5092 * response of the command IOCB. If so, it will be released before releasing 5093 * the lpfc DMA buffer associated with the IOCB itself. 5094 * 5095 * Return code 5096 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5097 **/ 5098 static int 5099 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5100 { 5101 struct lpfc_dmabuf *buf_ptr; 5102 5103 /* Free the response before processing the command. */ 5104 if (!list_empty(&buf_ptr1->list)) { 5105 list_remove_head(&buf_ptr1->list, buf_ptr, 5106 struct lpfc_dmabuf, 5107 list); 5108 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5109 kfree(buf_ptr); 5110 } 5111 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5112 kfree(buf_ptr1); 5113 return 0; 5114 } 5115 5116 /** 5117 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5118 * @phba: pointer to lpfc hba data structure. 5119 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5120 * 5121 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5122 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5123 * pool. 5124 * 5125 * Return code 5126 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5127 **/ 5128 static int 5129 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5130 { 5131 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5132 kfree(buf_ptr); 5133 return 0; 5134 } 5135 5136 /** 5137 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5138 * @phba: pointer to lpfc hba data structure. 5139 * @elsiocb: pointer to lpfc els command iocb data structure. 5140 * 5141 * This routine frees a command IOCB and its associated resources. The 5142 * command IOCB data structure contains the reference to various associated 5143 * resources, these fields must be set to NULL if the associated reference 5144 * not present: 5145 * cmd_dmabuf - reference to cmd. 
5146 * cmd_dmabuf->next - reference to rsp 5147 * rsp_dmabuf - unused 5148 * bpl_dmabuf - reference to bpl 5149 * 5150 * It first properly decrements the reference count held on ndlp for the 5151 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not 5152 * set, it invokes the lpfc_els_free_data() routine to release the Direct 5153 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 5154 * adds the DMA buffer the @phba data structure for the delayed release. 5155 * If reference to the Buffer Pointer List (BPL) is present, the 5156 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 5157 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 5158 * invoked to release the IOCB data structure back to @phba IOCBQ list. 5159 * 5160 * Return code 5161 * 0 - Success (currently, always return 0) 5162 **/ 5163 int 5164 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 5165 { 5166 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 5167 5168 /* The I/O iocb is complete. Clear the node and first dmbuf */ 5169 elsiocb->ndlp = NULL; 5170 5171 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */ 5172 if (elsiocb->cmd_dmabuf) { 5173 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) { 5174 /* Firmware could still be in progress of DMAing 5175 * payload, so don't free data buffer till after 5176 * a hbeat. 5177 */ 5178 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE; 5179 buf_ptr = elsiocb->cmd_dmabuf; 5180 elsiocb->cmd_dmabuf = NULL; 5181 if (buf_ptr) { 5182 buf_ptr1 = NULL; 5183 spin_lock_irq(&phba->hbalock); 5184 if (!list_empty(&buf_ptr->list)) { 5185 list_remove_head(&buf_ptr->list, 5186 buf_ptr1, struct lpfc_dmabuf, 5187 list); 5188 INIT_LIST_HEAD(&buf_ptr1->list); 5189 list_add_tail(&buf_ptr1->list, 5190 &phba->elsbuf); 5191 phba->elsbuf_cnt++; 5192 } 5193 INIT_LIST_HEAD(&buf_ptr->list); 5194 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5195 phba->elsbuf_cnt++; 5196 spin_unlock_irq(&phba->hbalock); 5197 } 5198 } else { 5199 buf_ptr1 = elsiocb->cmd_dmabuf; 5200 lpfc_els_free_data(phba, buf_ptr1); 5201 elsiocb->cmd_dmabuf = NULL; 5202 } 5203 } 5204 5205 if (elsiocb->bpl_dmabuf) { 5206 buf_ptr = elsiocb->bpl_dmabuf; 5207 lpfc_els_free_bpl(phba, buf_ptr); 5208 elsiocb->bpl_dmabuf = NULL; 5209 } 5210 lpfc_sli_release_iocbq(phba, elsiocb); 5211 return 0; 5212 } 5213 5214 /** 5215 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5216 * @phba: pointer to lpfc hba data structure. 5217 * @cmdiocb: pointer to lpfc command iocb data structure. 5218 * @rspiocb: pointer to lpfc response iocb data structure. 5219 * 5220 * This routine is the completion callback function to the Logout (LOGO) 5221 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5222 * the completion of the LOGO process. If the node has transitioned to NPR, 5223 * this routine unregisters the RPI if it is still registered. The 5224 * lpfc_els_free_iocb() is invoked to release the IOCB data structure. 
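 *
 * A worked example of the well known address check below, assuming a
 * Fabric Domain Controller at DID 0xFFFC01 and the Fabric Controller at
 * DID 0xFFFFFD:
 *
 *	(0xFFFC01 & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK   keep the RPI
 *	(0xFFFFFD & WELL_KNOWN_DID_MASK) == WELL_KNOWN_DID_MASK   NPR/unreg path
 *
 * The first case keeps its RPI so that RDP requests sent by some fabrics
 * after the logout can still be answered.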
5225 **/ 5226 static void 5227 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5228 struct lpfc_iocbq *rspiocb) 5229 { 5230 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5231 struct lpfc_vport *vport = cmdiocb->vport; 5232 u32 ulp_status, ulp_word4; 5233 5234 ulp_status = get_job_ulpstatus(phba, rspiocb); 5235 ulp_word4 = get_job_word4(phba, rspiocb); 5236 5237 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5238 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5239 ulp_status, ulp_word4, ndlp->nlp_DID); 5240 /* ACC to LOGO completes to NPort <nlp_DID> */ 5241 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5242 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5243 "Data: x%x x%x x%x\n", 5244 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5245 ndlp->nlp_state, ndlp->nlp_rpi); 5246 5247 /* This clause allows the LOGO ACC to complete and free resources 5248 * for the Fabric Domain Controller. It does deliberately skip 5249 * the unreg_rpi and release rpi because some fabrics send RDP 5250 * requests after logging out from the initiator. 5251 */ 5252 if (ndlp->nlp_type & NLP_FABRIC && 5253 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5254 goto out; 5255 5256 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5257 /* If PLOGI is being retried, PLOGI completion will cleanup the 5258 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5259 * progress on nodes discovered from last RSCN. 5260 */ 5261 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5262 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5263 goto out; 5264 5265 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) 5266 lpfc_unreg_rpi(vport, ndlp); 5267 5268 } 5269 out: 5270 /* 5271 * The driver received a LOGO from the rport and has ACK'd it. 5272 * At this point, the driver is done so release the IOCB 5273 */ 5274 lpfc_els_free_iocb(phba, cmdiocb); 5275 lpfc_nlp_put(ndlp); 5276 } 5277 5278 /** 5279 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5280 * @phba: pointer to lpfc hba data structure. 5281 * @pmb: pointer to the driver internal queue element for mailbox command. 5282 * 5283 * This routine is the completion callback function for unregister default 5284 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5285 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5286 * decrements the ndlp reference count held for this completion callback 5287 * function. After that, it invokes the lpfc_drop_node to check 5288 * whether it is appropriate to release the node. 5289 **/ 5290 void 5291 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5292 { 5293 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 5294 u32 mbx_flag = pmb->mbox_flag; 5295 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5296 5297 if (ndlp) { 5298 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5299 "0006 rpi x%x DID:%x flg:%x %d x%px " 5300 "mbx_cmd x%x mbx_flag x%x x%px\n", 5301 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5302 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5303 mbx_flag, pmb); 5304 5305 /* This ends the default/temporary RPI cleanup logic for this 5306 * ndlp and the node and rpi needs to be released. Free the rpi 5307 * first on an UNREG_LOGIN and then release the final 5308 * references. 
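 * The lpfc_nlp_put here drops the reference held for this mailbox
 * completion, and lpfc_drop_node then releases the node if no other
 * references remain.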
5309 */ 5310 spin_lock_irq(&ndlp->lock); 5311 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5312 if (mbx_cmd == MBX_UNREG_LOGIN) 5313 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5314 spin_unlock_irq(&ndlp->lock); 5315 lpfc_nlp_put(ndlp); 5316 lpfc_drop_node(ndlp->vport, ndlp); 5317 } 5318 5319 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 5320 } 5321 5322 /** 5323 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5324 * @phba: pointer to lpfc hba data structure. 5325 * @cmdiocb: pointer to lpfc command iocb data structure. 5326 * @rspiocb: pointer to lpfc response iocb data structure. 5327 * 5328 * This routine is the completion callback function for ELS Response IOCB 5329 * command. In normal case, this callback function just properly sets the 5330 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 5331 * field in the command IOCB is not NULL, the referred mailbox command will 5332 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 5333 * the IOCB. 5334 **/ 5335 static void 5336 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5337 struct lpfc_iocbq *rspiocb) 5338 { 5339 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5340 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5341 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 5342 IOCB_t *irsp; 5343 LPFC_MBOXQ_t *mbox = NULL; 5344 u32 ulp_status, ulp_word4, tmo, did, iotag; 5345 5346 if (!vport) { 5347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5348 "3177 ELS response failed\n"); 5349 goto out; 5350 } 5351 if (cmdiocb->context_un.mbox) 5352 mbox = cmdiocb->context_un.mbox; 5353 5354 ulp_status = get_job_ulpstatus(phba, rspiocb); 5355 ulp_word4 = get_job_word4(phba, rspiocb); 5356 did = get_job_els_rsp64_did(phba, cmdiocb); 5357 5358 if (phba->sli_rev == LPFC_SLI_REV4) { 5359 tmo = get_wqe_tmo(cmdiocb); 5360 iotag = get_wqe_reqtag(cmdiocb); 5361 } else { 5362 irsp = &rspiocb->iocb; 5363 tmo = irsp->ulpTimeout; 5364 iotag = irsp->ulpIoTag; 5365 } 5366 5367 /* Check to see if link went down during discovery */ 5368 if (!ndlp || lpfc_els_chk_latt(vport)) { 5369 if (mbox) 5370 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5371 goto out; 5372 } 5373 5374 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5375 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5376 ulp_status, ulp_word4, did); 5377 /* ELS response tag <ulpIoTag> completes */ 5378 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5379 "0110 ELS response tag x%x completes " 5380 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5381 iotag, ulp_status, ulp_word4, tmo, 5382 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5383 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5384 if (mbox) { 5385 if (ulp_status == 0 5386 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5387 if (!lpfc_unreg_rpi(vport, ndlp) && 5388 !test_bit(FC_PT2PT, &vport->fc_flag)) { 5389 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5390 ndlp->nlp_state == 5391 NLP_STE_REG_LOGIN_ISSUE) { 5392 lpfc_printf_vlog(vport, KERN_INFO, 5393 LOG_DISCOVERY, 5394 "0314 PLOGI recov " 5395 "DID x%x " 5396 "Data: x%x x%x x%x\n", 5397 ndlp->nlp_DID, 5398 ndlp->nlp_state, 5399 ndlp->nlp_rpi, 5400 ndlp->nlp_flag); 5401 goto out_free_mbox; 5402 } 5403 } 5404 5405 /* Increment reference count to ndlp to hold the 5406 * reference to ndlp for the callback function. 
5407 */ 5408 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5409 if (!mbox->ctx_ndlp) 5410 goto out_free_mbox; 5411 5412 mbox->vport = vport; 5413 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5414 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5415 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5416 } 5417 else { 5418 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5419 ndlp->nlp_prev_state = ndlp->nlp_state; 5420 lpfc_nlp_set_state(vport, ndlp, 5421 NLP_STE_REG_LOGIN_ISSUE); 5422 } 5423 5424 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5425 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5426 != MBX_NOT_FINISHED) 5427 goto out; 5428 5429 /* Decrement the ndlp reference count we 5430 * set for this failed mailbox command. 5431 */ 5432 lpfc_nlp_put(ndlp); 5433 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5434 5435 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5436 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5437 "0138 ELS rsp: Cannot issue reg_login for x%x " 5438 "Data: x%x x%x x%x\n", 5439 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5440 ndlp->nlp_rpi); 5441 } 5442 out_free_mbox: 5443 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5444 } 5445 out: 5446 if (ndlp && shost) { 5447 spin_lock_irq(&ndlp->lock); 5448 if (mbox) 5449 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5450 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5451 spin_unlock_irq(&ndlp->lock); 5452 } 5453 5454 /* An SLI4 NPIV instance wants to drop the node at this point under 5455 * these conditions and release the RPI. 5456 */ 5457 if (phba->sli_rev == LPFC_SLI_REV4 && 5458 vport && vport->port_type == LPFC_NPIV_PORT && 5459 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5460 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 5461 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5462 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5463 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5464 spin_lock_irq(&ndlp->lock); 5465 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5466 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5467 spin_unlock_irq(&ndlp->lock); 5468 } 5469 lpfc_drop_node(vport, ndlp); 5470 } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5471 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && 5472 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 5473 /* Drop ndlp if there is no planned or outstanding 5474 * issued PRLI. 5475 * 5476 * In cases when the ndlp is acting as both an initiator 5477 * and target function, let our issued PRLI determine 5478 * the final ndlp kref drop. 5479 */ 5480 lpfc_drop_node(vport, ndlp); 5481 } 5482 } 5483 5484 /* Release the originating I/O reference. */ 5485 lpfc_els_free_iocb(phba, cmdiocb); 5486 lpfc_nlp_put(ndlp); 5487 return; 5488 } 5489 5490 /** 5491 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5492 * @vport: pointer to a host virtual N_Port data structure. 5493 * @flag: the els command code to be accepted. 5494 * @oldiocb: pointer to the original lpfc command iocb data structure. 5495 * @ndlp: pointer to a node-list data structure. 5496 * @mbox: pointer to the driver internal queue element for mailbox command. 5497 * 5498 * This routine prepares and issues an Accept (ACC) response IOCB 5499 * command. It uses the @flag to properly set up the IOCB field for the 5500 * specific ACC response command to be issued and invokes the 5501 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5502 * @mbox pointer is passed in, it will be put into the context_un.mbox 5503 * field of the IOCB for the completion callback function to issue the 5504 * mailbox command to the HBA later when callback is invoked. 
5505 * 5506 * Note that the ndlp reference count will be incremented by 1 for holding the 5507 * ndlp and the reference to ndlp will be stored into the ndlp field of 5508 * the IOCB for the completion callback function to the corresponding 5509 * response ELS IOCB command. 5510 * 5511 * Return code 5512 * 0 - Successfully issued acc response 5513 * 1 - Failed to issue acc response 5514 **/ 5515 int 5516 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5517 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5518 LPFC_MBOXQ_t *mbox) 5519 { 5520 struct lpfc_hba *phba = vport->phba; 5521 IOCB_t *icmd; 5522 IOCB_t *oldcmd; 5523 union lpfc_wqe128 *wqe; 5524 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5525 struct lpfc_iocbq *elsiocb; 5526 uint8_t *pcmd; 5527 struct serv_parm *sp; 5528 uint16_t cmdsize; 5529 int rc; 5530 ELS_PKT *els_pkt_ptr; 5531 struct fc_els_rdf_resp *rdf_resp; 5532 5533 switch (flag) { 5534 case ELS_CMD_ACC: 5535 cmdsize = sizeof(uint32_t); 5536 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5537 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5538 if (!elsiocb) { 5539 spin_lock_irq(&ndlp->lock); 5540 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5541 spin_unlock_irq(&ndlp->lock); 5542 return 1; 5543 } 5544 5545 if (phba->sli_rev == LPFC_SLI_REV4) { 5546 wqe = &elsiocb->wqe; 5547 /* XRI / rx_id */ 5548 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5549 bf_get(wqe_ctxt_tag, 5550 &oldwqe->xmit_els_rsp.wqe_com)); 5551 5552 /* oxid */ 5553 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5554 bf_get(wqe_rcvoxid, 5555 &oldwqe->xmit_els_rsp.wqe_com)); 5556 } else { 5557 icmd = &elsiocb->iocb; 5558 oldcmd = &oldiocb->iocb; 5559 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5560 icmd->unsli3.rcvsli3.ox_id = 5561 oldcmd->unsli3.rcvsli3.ox_id; 5562 } 5563 5564 pcmd = elsiocb->cmd_dmabuf->virt; 5565 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5566 pcmd += sizeof(uint32_t); 5567 5568 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5569 "Issue ACC: did:x%x flg:x%x", 5570 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5571 break; 5572 case ELS_CMD_FLOGI: 5573 case ELS_CMD_PLOGI: 5574 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5575 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5576 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5577 if (!elsiocb) 5578 return 1; 5579 5580 if (phba->sli_rev == LPFC_SLI_REV4) { 5581 wqe = &elsiocb->wqe; 5582 /* XRI / rx_id */ 5583 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5584 bf_get(wqe_ctxt_tag, 5585 &oldwqe->xmit_els_rsp.wqe_com)); 5586 5587 /* oxid */ 5588 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5589 bf_get(wqe_rcvoxid, 5590 &oldwqe->xmit_els_rsp.wqe_com)); 5591 } else { 5592 icmd = &elsiocb->iocb; 5593 oldcmd = &oldiocb->iocb; 5594 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5595 icmd->unsli3.rcvsli3.ox_id = 5596 oldcmd->unsli3.rcvsli3.ox_id; 5597 } 5598 5599 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5600 5601 if (mbox) 5602 elsiocb->context_un.mbox = mbox; 5603 5604 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5605 pcmd += sizeof(uint32_t); 5606 sp = (struct serv_parm *)pcmd; 5607 5608 if (flag == ELS_CMD_FLOGI) { 5609 /* Copy the received service parameters back */ 5610 memcpy(sp, &phba->fc_fabparam, 5611 sizeof(struct serv_parm)); 5612 5613 /* Clear the F_Port bit */ 5614 sp->cmn.fPort = 0; 5615 5616 /* Mark all class service parameters as invalid */ 5617 sp->cls1.classValid = 0; 5618 sp->cls2.classValid = 0; 5619 sp->cls3.classValid = 0; 5620 sp->cls4.classValid = 0; 5621 5622 /* Copy our worldwide names */ 
5623 memcpy(&sp->portName, &vport->fc_sparam.portName, 5624 sizeof(struct lpfc_name)); 5625 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5626 sizeof(struct lpfc_name)); 5627 } else { 5628 memcpy(pcmd, &vport->fc_sparam, 5629 sizeof(struct serv_parm)); 5630 5631 sp->cmn.valid_vendor_ver_level = 0; 5632 memset(sp->un.vendorVersion, 0, 5633 sizeof(sp->un.vendorVersion)); 5634 sp->cmn.bbRcvSizeMsb &= 0xF; 5635 5636 /* If our firmware supports this feature, convey that 5637 * info to the target using the vendor specific field. 5638 */ 5639 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5640 sp->cmn.valid_vendor_ver_level = 1; 5641 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5642 sp->un.vv.flags = 5643 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5644 } 5645 } 5646 5647 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5648 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5649 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5650 break; 5651 case ELS_CMD_PRLO: 5652 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5653 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5654 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5655 if (!elsiocb) 5656 return 1; 5657 5658 if (phba->sli_rev == LPFC_SLI_REV4) { 5659 wqe = &elsiocb->wqe; 5660 /* XRI / rx_id */ 5661 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5662 bf_get(wqe_ctxt_tag, 5663 &oldwqe->xmit_els_rsp.wqe_com)); 5664 5665 /* oxid */ 5666 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5667 bf_get(wqe_rcvoxid, 5668 &oldwqe->xmit_els_rsp.wqe_com)); 5669 } else { 5670 icmd = &elsiocb->iocb; 5671 oldcmd = &oldiocb->iocb; 5672 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5673 icmd->unsli3.rcvsli3.ox_id = 5674 oldcmd->unsli3.rcvsli3.ox_id; 5675 } 5676 5677 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5678 5679 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5680 sizeof(uint32_t) + sizeof(PRLO)); 5681 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5682 els_pkt_ptr = (ELS_PKT *) pcmd; 5683 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5684 5685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5686 "Issue ACC PRLO: did:x%x flg:x%x", 5687 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5688 break; 5689 case ELS_CMD_RDF: 5690 cmdsize = sizeof(*rdf_resp); 5691 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5692 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5693 if (!elsiocb) 5694 return 1; 5695 5696 if (phba->sli_rev == LPFC_SLI_REV4) { 5697 wqe = &elsiocb->wqe; 5698 /* XRI / rx_id */ 5699 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5700 bf_get(wqe_ctxt_tag, 5701 &oldwqe->xmit_els_rsp.wqe_com)); 5702 5703 /* oxid */ 5704 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5705 bf_get(wqe_rcvoxid, 5706 &oldwqe->xmit_els_rsp.wqe_com)); 5707 } else { 5708 icmd = &elsiocb->iocb; 5709 oldcmd = &oldiocb->iocb; 5710 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5711 icmd->unsli3.rcvsli3.ox_id = 5712 oldcmd->unsli3.rcvsli3.ox_id; 5713 } 5714 5715 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5716 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5717 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5718 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5719 5720 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5721 rdf_resp->desc_list_len = cpu_to_be32(12); 5722 5723 /* FC-LS-5 specifies LS REQ Information descriptor */ 5724 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5725 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5726 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5727 break; 5728 default: 5729 return 1; 5730 } 5731 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5732 spin_lock_irq(&ndlp->lock); 5733 if 
(!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
		      ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		spin_unlock_irq(&ndlp->lock);
		elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
	} else {
		elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
	}

	phba->fc_stat.elsXmitACC++;
	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	/* Xmit ELS ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
			 "RPI: x%x, fc_flag x%lx refcnt %d\n",
			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
	return 0;
}

/**
 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
 * @vport: pointer to a virtual N_Port data structure.
 * @rejectError: reject response to issue
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * This routine prepares and issues a Reject (RJT) response IOCB
 * command. If a @mbox pointer is passed in, it will be put into the
 * context_un.mbox field of the IOCB for the completion callback function
 * to issue to the HBA later.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the ndlp field of
 * the IOCB for the completion callback function to the reject response
 * ELS IOCB command.
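 *
 * The response payload is two words: the LS_RJT command code followed
 * by the @rejectError word, which carries the reason code and reason
 * code explanation returned to the remote port.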
5784 * 5785 * Return code 5786 * 0 - Successfully issued reject response 5787 * 1 - Failed to issue reject response 5788 **/ 5789 int 5790 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5791 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5792 LPFC_MBOXQ_t *mbox) 5793 { 5794 int rc; 5795 struct lpfc_hba *phba = vport->phba; 5796 IOCB_t *icmd; 5797 IOCB_t *oldcmd; 5798 union lpfc_wqe128 *wqe; 5799 struct lpfc_iocbq *elsiocb; 5800 uint8_t *pcmd; 5801 uint16_t cmdsize; 5802 5803 cmdsize = 2 * sizeof(uint32_t); 5804 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5805 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5806 if (!elsiocb) 5807 return 1; 5808 5809 if (phba->sli_rev == LPFC_SLI_REV4) { 5810 wqe = &elsiocb->wqe; 5811 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5812 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5813 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5814 get_job_rcvoxid(phba, oldiocb)); 5815 } else { 5816 icmd = &elsiocb->iocb; 5817 oldcmd = &oldiocb->iocb; 5818 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5819 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5820 } 5821 5822 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5823 5824 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5825 pcmd += sizeof(uint32_t); 5826 *((uint32_t *) (pcmd)) = rejectError; 5827 5828 if (mbox) 5829 elsiocb->context_un.mbox = mbox; 5830 5831 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5832 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5833 "0129 Xmit ELS RJT x%x response tag x%x " 5834 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5835 "rpi x%x\n", 5836 rejectError, elsiocb->iotag, 5837 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5838 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5839 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5840 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5841 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5842 5843 phba->fc_stat.elsXmitLSRJT++; 5844 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5845 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5846 if (!elsiocb->ndlp) { 5847 lpfc_els_free_iocb(phba, elsiocb); 5848 return 1; 5849 } 5850 5851 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5852 * node's assigned RPI gets released provided this node is not already 5853 * registered with the transport. 5854 */ 5855 if (phba->sli_rev == LPFC_SLI_REV4 && 5856 vport->port_type == LPFC_NPIV_PORT && 5857 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5858 spin_lock_irq(&ndlp->lock); 5859 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5860 spin_unlock_irq(&ndlp->lock); 5861 } 5862 5863 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5864 if (rc == IOCB_ERROR) { 5865 lpfc_els_free_iocb(phba, elsiocb); 5866 lpfc_nlp_put(ndlp); 5867 return 1; 5868 } 5869 5870 return 0; 5871 } 5872 5873 /** 5874 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5875 * @vport: pointer to a host virtual N_Port data structure. 5876 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5877 * @ndlp: NPort to where rsp is directed 5878 * 5879 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5880 * this N_Port's support of hardware signals in its Congestion 5881 * Capabilities Descriptor. 
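 * When the link is LDS capable, a Link Fault Capability descriptor is
 * appended to the response as well.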
5882 * 5883 * Return code 5884 * 0 - Successfully issued edc rsp command 5885 * 1 - Failed to issue edc rsp command 5886 **/ 5887 static int 5888 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5889 struct lpfc_nodelist *ndlp) 5890 { 5891 struct lpfc_hba *phba = vport->phba; 5892 struct fc_els_edc_resp *edc_rsp; 5893 struct fc_tlv_desc *tlv; 5894 struct lpfc_iocbq *elsiocb; 5895 IOCB_t *icmd, *cmd; 5896 union lpfc_wqe128 *wqe; 5897 u32 cgn_desc_size, lft_desc_size; 5898 u16 cmdsize; 5899 uint8_t *pcmd; 5900 int rc; 5901 5902 cmdsize = sizeof(struct fc_els_edc_resp); 5903 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 5904 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 5905 sizeof(struct fc_diag_lnkflt_desc) : 0; 5906 cmdsize += cgn_desc_size + lft_desc_size; 5907 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5908 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5909 if (!elsiocb) 5910 return 1; 5911 5912 if (phba->sli_rev == LPFC_SLI_REV4) { 5913 wqe = &elsiocb->wqe; 5914 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5915 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5916 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5917 get_job_rcvoxid(phba, cmdiocb)); 5918 } else { 5919 icmd = &elsiocb->iocb; 5920 cmd = &cmdiocb->iocb; 5921 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5922 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5923 } 5924 5925 pcmd = elsiocb->cmd_dmabuf->virt; 5926 memset(pcmd, 0, cmdsize); 5927 5928 edc_rsp = (struct fc_els_edc_resp *)pcmd; 5929 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; 5930 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + 5931 cgn_desc_size + lft_desc_size); 5932 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5933 edc_rsp->lsri.desc_len = cpu_to_be32( 5934 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5935 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; 5936 tlv = edc_rsp->desc; 5937 lpfc_format_edc_cgn_desc(phba, tlv); 5938 tlv = fc_tlv_next_desc(tlv); 5939 if (lft_desc_size) 5940 lpfc_format_edc_lft_desc(phba, tlv); 5941 5942 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5943 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5944 ndlp->nlp_DID, ndlp->nlp_flag, 5945 kref_read(&ndlp->kref)); 5946 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5947 5948 phba->fc_stat.elsXmitACC++; 5949 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5950 if (!elsiocb->ndlp) { 5951 lpfc_els_free_iocb(phba, elsiocb); 5952 return 1; 5953 } 5954 5955 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5956 if (rc == IOCB_ERROR) { 5957 lpfc_els_free_iocb(phba, elsiocb); 5958 lpfc_nlp_put(ndlp); 5959 return 1; 5960 } 5961 5962 /* Xmit ELS ACC response tag <ulpIoTag> */ 5963 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5964 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5965 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5966 "RPI: x%x, fc_flag x%lx\n", 5967 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5968 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5969 ndlp->nlp_rpi, vport->fc_flag); 5970 5971 return 0; 5972 } 5973 5974 /** 5975 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5976 * @vport: pointer to a virtual N_Port data structure. 5977 * @oldiocb: pointer to the original lpfc command iocb data structure. 5978 * @ndlp: pointer to a node-list data structure. 5979 * 5980 * This routine prepares and issues an Accept (ACC) response to Address 5981 * Discover (ADISC) ELS command. 
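The ACC payload returns this port's hard AL_PA, WWPN, WWNN, and N_Port ID to the requester.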
It simply prepares the payload of the IOCB 5982 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5983 * 5984 * Note that the ndlp reference count will be incremented by 1 for holding the 5985 * ndlp and the reference to ndlp will be stored into the ndlp field of 5986 * the IOCB for the completion callback function to the ADISC Accept response 5987 * ELS IOCB command. 5988 * 5989 * Return code 5990 * 0 - Successfully issued acc adisc response 5991 * 1 - Failed to issue adisc acc response 5992 **/ 5993 int 5994 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5995 struct lpfc_nodelist *ndlp) 5996 { 5997 struct lpfc_hba *phba = vport->phba; 5998 ADISC *ap; 5999 IOCB_t *icmd, *oldcmd; 6000 union lpfc_wqe128 *wqe; 6001 struct lpfc_iocbq *elsiocb; 6002 uint8_t *pcmd; 6003 uint16_t cmdsize; 6004 int rc; 6005 u32 ulp_context; 6006 6007 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 6008 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6009 ndlp->nlp_DID, ELS_CMD_ACC); 6010 if (!elsiocb) 6011 return 1; 6012 6013 if (phba->sli_rev == LPFC_SLI_REV4) { 6014 wqe = &elsiocb->wqe; 6015 /* XRI / rx_id */ 6016 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6017 get_job_ulpcontext(phba, oldiocb)); 6018 ulp_context = get_job_ulpcontext(phba, elsiocb); 6019 /* oxid */ 6020 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6021 get_job_rcvoxid(phba, oldiocb)); 6022 } else { 6023 icmd = &elsiocb->iocb; 6024 oldcmd = &oldiocb->iocb; 6025 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6026 ulp_context = elsiocb->iocb.ulpContext; 6027 icmd->unsli3.rcvsli3.ox_id = 6028 oldcmd->unsli3.rcvsli3.ox_id; 6029 } 6030 6031 /* Xmit ADISC ACC response tag <ulpIoTag> */ 6032 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6033 "0130 Xmit ADISC ACC response iotag x%x xri: " 6034 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 6035 elsiocb->iotag, ulp_context, 6036 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6037 ndlp->nlp_rpi); 6038 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6039 6040 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6041 pcmd += sizeof(uint32_t); 6042 6043 ap = (ADISC *) (pcmd); 6044 ap->hardAL_PA = phba->fc_pref_ALPA; 6045 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6046 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6047 ap->DID = be32_to_cpu(vport->fc_myDID); 6048 6049 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6050 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 6051 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6052 6053 phba->fc_stat.elsXmitACC++; 6054 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6055 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6056 if (!elsiocb->ndlp) { 6057 lpfc_els_free_iocb(phba, elsiocb); 6058 return 1; 6059 } 6060 6061 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6062 if (rc == IOCB_ERROR) { 6063 lpfc_els_free_iocb(phba, elsiocb); 6064 lpfc_nlp_put(ndlp); 6065 return 1; 6066 } 6067 6068 return 0; 6069 } 6070 6071 /** 6072 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6073 * @vport: pointer to a virtual N_Port data structure. 6074 * @oldiocb: pointer to the original lpfc command iocb data structure. 6075 * @ndlp: pointer to a node-list data structure. 6076 * 6077 * This routine prepares and issues an Accept (ACC) response to Process 6078 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6079 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 
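 *
 * The incoming PRLI payload is examined to determine whether the request
 * is for the FCP or the NVME FC4 type, and the matching FCP or NVME PRLI
 * parameter page is built into the accept.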
6080 * 6081 * Note that the ndlp reference count will be incremented by 1 for holding the 6082 * ndlp and the reference to ndlp will be stored into the ndlp field of 6083 * the IOCB for the completion callback function to the PRLI Accept response 6084 * ELS IOCB command. 6085 * 6086 * Return code 6087 * 0 - Successfully issued acc prli response 6088 * 1 - Failed to issue acc prli response 6089 **/ 6090 int 6091 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6092 struct lpfc_nodelist *ndlp) 6093 { 6094 struct lpfc_hba *phba = vport->phba; 6095 PRLI *npr; 6096 struct lpfc_nvme_prli *npr_nvme; 6097 lpfc_vpd_t *vpd; 6098 IOCB_t *icmd; 6099 IOCB_t *oldcmd; 6100 union lpfc_wqe128 *wqe; 6101 struct lpfc_iocbq *elsiocb; 6102 uint8_t *pcmd; 6103 uint16_t cmdsize; 6104 uint32_t prli_fc4_req, *req_payload; 6105 struct lpfc_dmabuf *req_buf; 6106 int rc; 6107 u32 elsrspcmd, ulp_context; 6108 6109 /* Need the incoming PRLI payload to determine if the ACC is for an 6110 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6111 */ 6112 req_buf = oldiocb->cmd_dmabuf; 6113 req_payload = (((uint32_t *)req_buf->virt) + 1); 6114 6115 /* PRLI type payload is at byte 3 for FCP or NVME. */ 6116 prli_fc4_req = be32_to_cpu(*req_payload); 6117 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6118 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6119 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6120 prli_fc4_req, *((uint32_t *)req_payload)); 6121 6122 if (prli_fc4_req == PRLI_FCP_TYPE) { 6123 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6124 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6125 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6126 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6127 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6128 } else { 6129 return 1; 6130 } 6131 6132 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6133 ndlp->nlp_DID, elsrspcmd); 6134 if (!elsiocb) 6135 return 1; 6136 6137 if (phba->sli_rev == LPFC_SLI_REV4) { 6138 wqe = &elsiocb->wqe; 6139 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6140 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6141 ulp_context = get_job_ulpcontext(phba, elsiocb); 6142 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6143 get_job_rcvoxid(phba, oldiocb)); 6144 } else { 6145 icmd = &elsiocb->iocb; 6146 oldcmd = &oldiocb->iocb; 6147 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6148 ulp_context = elsiocb->iocb.ulpContext; 6149 icmd->unsli3.rcvsli3.ox_id = 6150 oldcmd->unsli3.rcvsli3.ox_id; 6151 } 6152 6153 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6154 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6155 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6156 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6157 elsiocb->iotag, ulp_context, 6158 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6159 ndlp->nlp_rpi); 6160 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6161 memset(pcmd, 0, cmdsize); 6162 6163 *((uint32_t *)(pcmd)) = elsrspcmd; 6164 pcmd += sizeof(uint32_t); 6165 6166 /* For PRLI, remainder of payload is PRLI parameter page */ 6167 vpd = &phba->vpd; 6168 6169 if (prli_fc4_req == PRLI_FCP_TYPE) { 6170 /* 6171 * If the remote port is a target and our firmware version 6172 * is 3.20 or later, set the following bits for FC-TAPE 6173 * support. 
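 * These are ConfmComplAllowed, Retry, and TaskRetryIdReq.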
6174 */ 6175 npr = (PRLI *) pcmd; 6176 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6177 (vpd->rev.feaLevelHigh >= 0x02)) { 6178 npr->ConfmComplAllowed = 1; 6179 npr->Retry = 1; 6180 npr->TaskRetryIdReq = 1; 6181 } 6182 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6183 6184 /* Set image pair for complementary pairs only. */ 6185 if (ndlp->nlp_type & NLP_FCP_TARGET) 6186 npr->estabImagePair = 1; 6187 else 6188 npr->estabImagePair = 0; 6189 npr->readXferRdyDis = 1; 6190 npr->ConfmComplAllowed = 1; 6191 npr->prliType = PRLI_FCP_TYPE; 6192 npr->initiatorFunc = 1; 6193 6194 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6195 lpfc_printf_vlog(vport, KERN_INFO, 6196 LOG_ELS | LOG_NODE | LOG_DISCOVERY, 6197 "6014 FCP issue PRLI ACC imgpair %d " 6198 "retry %d task %d\n", 6199 npr->estabImagePair, 6200 npr->Retry, npr->TaskRetryIdReq); 6201 6202 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6203 /* Respond with an NVME PRLI Type */ 6204 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6205 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6206 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6207 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6208 if (phba->nvmet_support) { 6209 bf_set(prli_tgt, npr_nvme, 1); 6210 bf_set(prli_disc, npr_nvme, 1); 6211 if (phba->cfg_nvme_enable_fb) { 6212 bf_set(prli_fba, npr_nvme, 1); 6213 6214 /* TBD. Target mode needs to post buffers 6215 * that support the configured first burst 6216 * byte size. 6217 */ 6218 bf_set(prli_fb_sz, npr_nvme, 6219 phba->cfg_nvmet_fb_size); 6220 } 6221 } else { 6222 bf_set(prli_init, npr_nvme, 1); 6223 } 6224 6225 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6226 "6015 NVME issue PRLI ACC word1 x%08x " 6227 "word4 x%08x word5 x%08x flag x%x, " 6228 "fcp_info x%x nlp_type x%x\n", 6229 npr_nvme->word1, npr_nvme->word4, 6230 npr_nvme->word5, ndlp->nlp_flag, 6231 ndlp->nlp_fcp_info, ndlp->nlp_type); 6232 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6233 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6234 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6235 } else 6236 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6237 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6238 prli_fc4_req, ndlp->nlp_fc4_type, 6239 ndlp->nlp_DID); 6240 6241 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6242 "Issue ACC PRLI: did:x%x flg:x%x", 6243 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6244 6245 phba->fc_stat.elsXmitACC++; 6246 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6247 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6248 if (!elsiocb->ndlp) { 6249 lpfc_els_free_iocb(phba, elsiocb); 6250 return 1; 6251 } 6252 6253 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6254 if (rc == IOCB_ERROR) { 6255 lpfc_els_free_iocb(phba, elsiocb); 6256 lpfc_nlp_put(ndlp); 6257 return 1; 6258 } 6259 6260 return 0; 6261 } 6262 6263 /** 6264 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6265 * @vport: pointer to a virtual N_Port data structure. 6266 * @format: rnid command format. 6267 * @oldiocb: pointer to the original lpfc command iocb data structure. 6268 * @ndlp: pointer to a node-list data structure. 6269 * 6270 * This routine issues a Request Node Identification Data (RNID) Accept 6271 * (ACC) response. It constructs the RNID ACC response command according to 6272 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6273 * issue the response. 
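 *
 * Format 0 returns only the common identification data (port and node
 * names); RNID_TOPOLOGY_DISC additionally returns topology discovery
 * data; any other format returns zero-length data.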
6274 * 6275 * Note that the ndlp reference count will be incremented by 1 for holding the 6276 * ndlp and the reference to ndlp will be stored into the ndlp field of 6277 * the IOCB for the completion callback function. 6278 * 6279 * Return code 6280 * 0 - Successfully issued acc rnid response 6281 * 1 - Failed to issue acc rnid response 6282 **/ 6283 static int 6284 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6285 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6286 { 6287 struct lpfc_hba *phba = vport->phba; 6288 RNID *rn; 6289 IOCB_t *icmd, *oldcmd; 6290 union lpfc_wqe128 *wqe; 6291 struct lpfc_iocbq *elsiocb; 6292 uint8_t *pcmd; 6293 uint16_t cmdsize; 6294 int rc; 6295 u32 ulp_context; 6296 6297 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6298 + (2 * sizeof(struct lpfc_name)); 6299 if (format) 6300 cmdsize += sizeof(RNID_TOP_DISC); 6301 6302 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6303 ndlp->nlp_DID, ELS_CMD_ACC); 6304 if (!elsiocb) 6305 return 1; 6306 6307 if (phba->sli_rev == LPFC_SLI_REV4) { 6308 wqe = &elsiocb->wqe; 6309 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6310 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6311 ulp_context = get_job_ulpcontext(phba, elsiocb); 6312 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6313 get_job_rcvoxid(phba, oldiocb)); 6314 } else { 6315 icmd = &elsiocb->iocb; 6316 oldcmd = &oldiocb->iocb; 6317 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6318 ulp_context = elsiocb->iocb.ulpContext; 6319 icmd->unsli3.rcvsli3.ox_id = 6320 oldcmd->unsli3.rcvsli3.ox_id; 6321 } 6322 6323 /* Xmit RNID ACC response tag <ulpIoTag> */ 6324 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6325 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6326 elsiocb->iotag, ulp_context); 6327 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6328 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6329 pcmd += sizeof(uint32_t); 6330 6331 memset(pcmd, 0, sizeof(RNID)); 6332 rn = (RNID *) (pcmd); 6333 rn->Format = format; 6334 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6335 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6336 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6337 switch (format) { 6338 case 0: 6339 rn->SpecificLen = 0; 6340 break; 6341 case RNID_TOPOLOGY_DISC: 6342 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6343 memcpy(&rn->un.topologyDisc.portName, 6344 &vport->fc_portname, sizeof(struct lpfc_name)); 6345 rn->un.topologyDisc.unitType = RNID_HBA; 6346 rn->un.topologyDisc.physPort = 0; 6347 rn->un.topologyDisc.attachedNodes = 0; 6348 break; 6349 default: 6350 rn->CommonLen = 0; 6351 rn->SpecificLen = 0; 6352 break; 6353 } 6354 6355 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6356 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6357 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6358 6359 phba->fc_stat.elsXmitACC++; 6360 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6361 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6362 if (!elsiocb->ndlp) { 6363 lpfc_els_free_iocb(phba, elsiocb); 6364 return 1; 6365 } 6366 6367 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6368 if (rc == IOCB_ERROR) { 6369 lpfc_els_free_iocb(phba, elsiocb); 6370 lpfc_nlp_put(ndlp); 6371 return 1; 6372 } 6373 6374 return 0; 6375 } 6376 6377 /** 6378 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6379 * @vport: pointer to a virtual N_Port data structure. 6380 * @iocb: pointer to the lpfc command iocb data structure. 6381 * @ndlp: pointer to a node-list data structure. 
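 *
 * This routine parses the RRQ payload of the received ELS command,
 * determines whether this port was the originator or the responder of
 * the exchange, and clears the matching active RRQ so that the XRI can
 * be reused.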
6382 * 6383 * Return 6384 **/ 6385 static void 6386 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6387 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6388 { 6389 struct lpfc_hba *phba = vport->phba; 6390 uint8_t *pcmd; 6391 struct RRQ *rrq; 6392 uint16_t rxid; 6393 uint16_t xri; 6394 struct lpfc_node_rrq *prrq; 6395 6396 6397 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6398 pcmd += sizeof(uint32_t); 6399 rrq = (struct RRQ *)pcmd; 6400 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6401 rxid = bf_get(rrq_rxid, rrq); 6402 6403 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6404 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6405 " x%x x%x\n", 6406 be32_to_cpu(bf_get(rrq_did, rrq)), 6407 bf_get(rrq_oxid, rrq), 6408 rxid, 6409 get_wqe_reqtag(iocb), 6410 get_job_ulpcontext(phba, iocb)); 6411 6412 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6413 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6414 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6415 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6416 xri = bf_get(rrq_oxid, rrq); 6417 else 6418 xri = rxid; 6419 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6420 if (prrq) 6421 lpfc_clr_rrq_active(phba, xri, prrq); 6422 return; 6423 } 6424 6425 /** 6426 * lpfc_els_rsp_echo_acc - Issue echo acc response 6427 * @vport: pointer to a virtual N_Port data structure. 6428 * @data: pointer to echo data to return in the accept. 6429 * @oldiocb: pointer to the original lpfc command iocb data structure. 6430 * @ndlp: pointer to a node-list data structure. 6431 * 6432 * Return code 6433 * 0 - Successfully issued acc echo response 6434 * 1 - Failed to issue acc echo response 6435 **/ 6436 static int 6437 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6438 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6439 { 6440 struct lpfc_hba *phba = vport->phba; 6441 IOCB_t *icmd, *oldcmd; 6442 union lpfc_wqe128 *wqe; 6443 struct lpfc_iocbq *elsiocb; 6444 uint8_t *pcmd; 6445 uint16_t cmdsize; 6446 int rc; 6447 u32 ulp_context; 6448 6449 if (phba->sli_rev == LPFC_SLI_REV4) 6450 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6451 else 6452 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6453 6454 /* The accumulated length can exceed the BPL_SIZE. 
For 6455 * now, use this as the limit 6456 */ 6457 if (cmdsize > LPFC_BPL_SIZE) 6458 cmdsize = LPFC_BPL_SIZE; 6459 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6460 ndlp->nlp_DID, ELS_CMD_ACC); 6461 if (!elsiocb) 6462 return 1; 6463 6464 if (phba->sli_rev == LPFC_SLI_REV4) { 6465 wqe = &elsiocb->wqe; 6466 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6467 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6468 ulp_context = get_job_ulpcontext(phba, elsiocb); 6469 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6470 get_job_rcvoxid(phba, oldiocb)); 6471 } else { 6472 icmd = &elsiocb->iocb; 6473 oldcmd = &oldiocb->iocb; 6474 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6475 ulp_context = elsiocb->iocb.ulpContext; 6476 icmd->unsli3.rcvsli3.ox_id = 6477 oldcmd->unsli3.rcvsli3.ox_id; 6478 } 6479 6480 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6481 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6482 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6483 elsiocb->iotag, ulp_context); 6484 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6485 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6486 pcmd += sizeof(uint32_t); 6487 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6488 6489 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6490 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6491 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6492 6493 phba->fc_stat.elsXmitACC++; 6494 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6495 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6496 if (!elsiocb->ndlp) { 6497 lpfc_els_free_iocb(phba, elsiocb); 6498 return 1; 6499 } 6500 6501 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6502 if (rc == IOCB_ERROR) { 6503 lpfc_els_free_iocb(phba, elsiocb); 6504 lpfc_nlp_put(ndlp); 6505 return 1; 6506 } 6507 6508 return 0; 6509 } 6510 6511 /** 6512 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6513 * @vport: pointer to a host virtual N_Port data structure. 6514 * 6515 * This routine issues Address Discover (ADISC) ELS commands to those 6516 * N_Ports which are in node port recovery state and ADISC has not been issued 6517 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6518 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6519 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6520 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6521 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6522 * IOCBs quit for later pick up. On the other hand, after walking through 6523 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6524 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6525 * no more ADISC need to be sent. 6526 * 6527 * Return code 6528 * The number of N_Ports with adisc issued. 6529 **/ 6530 int 6531 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6532 { 6533 struct lpfc_nodelist *ndlp, *next_ndlp; 6534 int sentadisc = 0; 6535 6536 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6537 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6538 6539 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6540 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6541 continue; 6542 6543 spin_lock_irq(&ndlp->lock); 6544 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6545 spin_unlock_irq(&ndlp->lock); 6546 6547 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6548 /* This node was marked for ADISC but was not picked 6549 * for discovery. 
This is possible if the node was
			 * missing in the gidft response.
			 *
			 * At the time of marking the node for ADISC, we
			 * skipped the unreg from the backend.
			 */
			lpfc_nlp_unreg_node(vport, ndlp);
			lpfc_unreg_rpi(vport, ndlp);
			continue;
		}

		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
		lpfc_issue_els_adisc(vport, ndlp, 0);
		sentadisc++;
		vport->num_disc_nodes++;
		if (vport->num_disc_nodes >=
		    vport->cfg_discovery_threads) {
			set_bit(FC_NLP_MORE, &vport->fc_flag);
			break;
		}

	}
	if (sentadisc == 0)
		clear_bit(FC_NLP_MORE, &vport->fc_flag);
	return sentadisc;
}

/**
 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
 * which are in node port recovery state on a @vport. Each time an ELS
 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
 * per @vport discover count (num_disc_nodes) is incremented. If the
 * num_disc_nodes reaches a pre-configured threshold (cfg_discovery_threads),
 * the FC_NLP_MORE bit is set in the @vport fc_flag and the process of
 * issuing the remaining PLOGI IOCBs stops for a later pick-up. On the other
 * hand, after walking through all the ndlps on the @vport without issuing
 * any PLOGI IOCB, the FC_NLP_MORE bit is cleared from the @vport fc_flag,
 * indicating there are no more PLOGIs to be sent.
 *
 * Return code
 * The number of N_Ports with plogi issued.
 **/
int
lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int sentplogi = 0;

	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
		    (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			sentplogi++;
			vport->num_disc_nodes++;
			if (vport->num_disc_nodes >=
			    vport->cfg_discovery_threads) {
				set_bit(FC_NLP_MORE, &vport->fc_flag);
				break;
			}
		}
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "6452 Discover PLOGI %d flag x%lx\n",
			 sentplogi, vport->fc_flag);

	if (sentplogi)
		lpfc_set_disctmo(vport);
	else
		clear_bit(FC_NLP_MORE, &vport->fc_flag);
	return sentplogi;
}

static uint32_t
lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
			  uint32_t word0)
{

	desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
	desc->payload.els_req = word0;
	desc->length = cpu_to_be32(sizeof(desc->payload));

	return sizeof(struct fc_rdp_link_service_desc);
}

static uint32_t
lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
		      uint8_t *page_a0, uint8_t *page_a2)
{
	uint16_t wavelength;
	uint16_t temperature;
	uint16_t rx_power;
	uint16_t tx_bias;
	uint16_t tx_power;
	uint16_t vcc;
	uint16_t flag = 0;
	struct
sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6656 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6657 6658 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6659 6660 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6661 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6662 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6663 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6664 6665 if ((trasn_code_byte4->fc_sw_laser) || 6666 (trasn_code_byte5->fc_sw_laser_sl) || 6667 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6668 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6669 } else if (trasn_code_byte4->fc_lw_laser) { 6670 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6671 page_a0[SSF_WAVELENGTH_B0]; 6672 if (wavelength == SFP_WAVELENGTH_LC1310) 6673 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6674 if (wavelength == SFP_WAVELENGTH_LL1550) 6675 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6676 } 6677 /* check if its SFP+ */ 6678 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6679 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6680 << SFP_FLAG_CT_SHIFT; 6681 6682 /* check if its OPTICAL */ 6683 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 6684 SFP_FLAG_IS_OPTICAL_PORT : 0) 6685 << SFP_FLAG_IS_OPTICAL_SHIFT; 6686 6687 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6688 page_a2[SFF_TEMPERATURE_B0]); 6689 vcc = (page_a2[SFF_VCC_B1] << 8 | 6690 page_a2[SFF_VCC_B0]); 6691 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6692 page_a2[SFF_TXPOWER_B0]); 6693 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6694 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6695 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6696 page_a2[SFF_RXPOWER_B0]); 6697 desc->sfp_info.temperature = cpu_to_be16(temperature); 6698 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6699 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6700 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6701 desc->sfp_info.vcc = cpu_to_be16(vcc); 6702 6703 desc->sfp_info.flags = cpu_to_be16(flag); 6704 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6705 6706 return sizeof(struct fc_rdp_sfp_desc); 6707 } 6708 6709 static uint32_t 6710 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6711 READ_LNK_VAR *stat) 6712 { 6713 uint32_t type; 6714 6715 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6716 6717 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6718 6719 desc->info.port_type = cpu_to_be32(type); 6720 6721 desc->info.link_status.link_failure_cnt = 6722 cpu_to_be32(stat->linkFailureCnt); 6723 desc->info.link_status.loss_of_synch_cnt = 6724 cpu_to_be32(stat->lossSyncCnt); 6725 desc->info.link_status.loss_of_signal_cnt = 6726 cpu_to_be32(stat->lossSignalCnt); 6727 desc->info.link_status.primitive_seq_proto_err = 6728 cpu_to_be32(stat->primSeqErrCnt); 6729 desc->info.link_status.invalid_trans_word = 6730 cpu_to_be32(stat->invalidXmitWord); 6731 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6732 6733 desc->length = cpu_to_be32(sizeof(desc->info)); 6734 6735 return sizeof(struct fc_rdp_link_error_status_desc); 6736 } 6737 6738 static uint32_t 6739 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6740 struct lpfc_vport *vport) 6741 { 6742 uint32_t bbCredit; 6743 6744 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6745 6746 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6747 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6748 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6749 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6750 bbCredit = 
vport->phba->fc_fabparam.cmn.bbCreditLsb | 6751 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6752 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6753 } else { 6754 desc->bbc_info.attached_port_bbc = 0; 6755 } 6756 6757 desc->bbc_info.rtt = 0; 6758 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6759 6760 return sizeof(struct fc_rdp_bbc_desc); 6761 } 6762 6763 static uint32_t 6764 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6765 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6766 { 6767 uint32_t flags = 0; 6768 6769 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6770 6771 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6772 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6773 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6774 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6775 6776 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6777 flags |= RDP_OET_HIGH_ALARM; 6778 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6779 flags |= RDP_OET_LOW_ALARM; 6780 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6781 flags |= RDP_OET_HIGH_WARNING; 6782 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6783 flags |= RDP_OET_LOW_WARNING; 6784 6785 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6786 desc->oed_info.function_flags = cpu_to_be32(flags); 6787 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6788 return sizeof(struct fc_rdp_oed_sfp_desc); 6789 } 6790 6791 static uint32_t 6792 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6793 struct fc_rdp_oed_sfp_desc *desc, 6794 uint8_t *page_a2) 6795 { 6796 uint32_t flags = 0; 6797 6798 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6799 6800 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6801 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6802 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6803 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6804 6805 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6806 flags |= RDP_OET_HIGH_ALARM; 6807 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6808 flags |= RDP_OET_LOW_ALARM; 6809 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6810 flags |= RDP_OET_HIGH_WARNING; 6811 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6812 flags |= RDP_OET_LOW_WARNING; 6813 6814 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6815 desc->oed_info.function_flags = cpu_to_be32(flags); 6816 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6817 return sizeof(struct fc_rdp_oed_sfp_desc); 6818 } 6819 6820 static uint32_t 6821 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6822 struct fc_rdp_oed_sfp_desc *desc, 6823 uint8_t *page_a2) 6824 { 6825 uint32_t flags = 0; 6826 6827 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6828 6829 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6830 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6831 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6832 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6833 6834 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6835 flags |= RDP_OET_HIGH_ALARM; 6836 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6837 flags |= RDP_OET_LOW_ALARM; 6838 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6839 flags |= RDP_OET_HIGH_WARNING; 6840 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6841 flags |= RDP_OET_LOW_WARNING; 6842 6843 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6844 
desc->oed_info.function_flags = cpu_to_be32(flags); 6845 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6846 return sizeof(struct fc_rdp_oed_sfp_desc); 6847 } 6848 6849 static uint32_t 6850 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6851 struct fc_rdp_oed_sfp_desc *desc, 6852 uint8_t *page_a2) 6853 { 6854 uint32_t flags = 0; 6855 6856 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6857 6858 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6859 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6860 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6861 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6862 6863 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6864 flags |= RDP_OET_HIGH_ALARM; 6865 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6866 flags |= RDP_OET_LOW_ALARM; 6867 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6868 flags |= RDP_OET_HIGH_WARNING; 6869 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6870 flags |= RDP_OET_LOW_WARNING; 6871 6872 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6873 desc->oed_info.function_flags = cpu_to_be32(flags); 6874 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6875 return sizeof(struct fc_rdp_oed_sfp_desc); 6876 } 6877 6878 6879 static uint32_t 6880 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6881 struct fc_rdp_oed_sfp_desc *desc, 6882 uint8_t *page_a2) 6883 { 6884 uint32_t flags = 0; 6885 6886 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6887 6888 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6889 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6890 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6891 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6892 6893 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6894 flags |= RDP_OET_HIGH_ALARM; 6895 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6896 flags |= RDP_OET_LOW_ALARM; 6897 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6898 flags |= RDP_OET_HIGH_WARNING; 6899 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6900 flags |= RDP_OET_LOW_WARNING; 6901 6902 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6903 desc->oed_info.function_flags = cpu_to_be32(flags); 6904 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6905 return sizeof(struct fc_rdp_oed_sfp_desc); 6906 } 6907 6908 static uint32_t 6909 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6910 uint8_t *page_a0, struct lpfc_vport *vport) 6911 { 6912 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6913 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6914 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6915 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6916 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6917 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6918 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6919 return sizeof(struct fc_rdp_opd_sfp_desc); 6920 } 6921 6922 static uint32_t 6923 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6924 { 6925 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6926 return 0; 6927 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6928 6929 desc->info.CorrectedBlocks = 6930 cpu_to_be32(stat->fecCorrBlkCount); 6931 desc->info.UncorrectableBlocks = 6932 cpu_to_be32(stat->fecUncorrBlkCount); 6933 6934 desc->length = cpu_to_be32(sizeof(desc->info)); 6935 6936 return sizeof(struct fc_fec_rdp_desc); 
6937 } 6938 6939 static uint32_t 6940 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6941 { 6942 uint16_t rdp_cap = 0; 6943 uint16_t rdp_speed; 6944 6945 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6946 6947 switch (phba->fc_linkspeed) { 6948 case LPFC_LINK_SPEED_1GHZ: 6949 rdp_speed = RDP_PS_1GB; 6950 break; 6951 case LPFC_LINK_SPEED_2GHZ: 6952 rdp_speed = RDP_PS_2GB; 6953 break; 6954 case LPFC_LINK_SPEED_4GHZ: 6955 rdp_speed = RDP_PS_4GB; 6956 break; 6957 case LPFC_LINK_SPEED_8GHZ: 6958 rdp_speed = RDP_PS_8GB; 6959 break; 6960 case LPFC_LINK_SPEED_10GHZ: 6961 rdp_speed = RDP_PS_10GB; 6962 break; 6963 case LPFC_LINK_SPEED_16GHZ: 6964 rdp_speed = RDP_PS_16GB; 6965 break; 6966 case LPFC_LINK_SPEED_32GHZ: 6967 rdp_speed = RDP_PS_32GB; 6968 break; 6969 case LPFC_LINK_SPEED_64GHZ: 6970 rdp_speed = RDP_PS_64GB; 6971 break; 6972 case LPFC_LINK_SPEED_128GHZ: 6973 rdp_speed = RDP_PS_128GB; 6974 break; 6975 case LPFC_LINK_SPEED_256GHZ: 6976 rdp_speed = RDP_PS_256GB; 6977 break; 6978 default: 6979 rdp_speed = RDP_PS_UNKNOWN; 6980 break; 6981 } 6982 6983 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6984 6985 if (phba->lmt & LMT_256Gb) 6986 rdp_cap |= RDP_PS_256GB; 6987 if (phba->lmt & LMT_128Gb) 6988 rdp_cap |= RDP_PS_128GB; 6989 if (phba->lmt & LMT_64Gb) 6990 rdp_cap |= RDP_PS_64GB; 6991 if (phba->lmt & LMT_32Gb) 6992 rdp_cap |= RDP_PS_32GB; 6993 if (phba->lmt & LMT_16Gb) 6994 rdp_cap |= RDP_PS_16GB; 6995 if (phba->lmt & LMT_10Gb) 6996 rdp_cap |= RDP_PS_10GB; 6997 if (phba->lmt & LMT_8Gb) 6998 rdp_cap |= RDP_PS_8GB; 6999 if (phba->lmt & LMT_4Gb) 7000 rdp_cap |= RDP_PS_4GB; 7001 if (phba->lmt & LMT_2Gb) 7002 rdp_cap |= RDP_PS_2GB; 7003 if (phba->lmt & LMT_1Gb) 7004 rdp_cap |= RDP_PS_1GB; 7005 7006 if (rdp_cap == 0) 7007 rdp_cap = RDP_CAP_UNKNOWN; 7008 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 7009 rdp_cap |= RDP_CAP_USER_CONFIGURED; 7010 7011 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 7012 desc->length = cpu_to_be32(sizeof(desc->info)); 7013 return sizeof(struct fc_rdp_port_speed_desc); 7014 } 7015 7016 static uint32_t 7017 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 7018 struct lpfc_vport *vport) 7019 { 7020 7021 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7022 7023 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 7024 sizeof(desc->port_names.wwnn)); 7025 7026 memcpy(desc->port_names.wwpn, &vport->fc_portname, 7027 sizeof(desc->port_names.wwpn)); 7028 7029 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7030 return sizeof(struct fc_rdp_port_name_desc); 7031 } 7032 7033 static uint32_t 7034 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 7035 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7036 { 7037 7038 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7039 if (test_bit(FC_FABRIC, &vport->fc_flag)) { 7040 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 7041 sizeof(desc->port_names.wwnn)); 7042 7043 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 7044 sizeof(desc->port_names.wwpn)); 7045 } else { /* Point to Point */ 7046 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 7047 sizeof(desc->port_names.wwnn)); 7048 7049 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 7050 sizeof(desc->port_names.wwpn)); 7051 } 7052 7053 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7054 return sizeof(struct fc_rdp_port_name_desc); 7055 } 7056 7057 static void 7058 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 7059 int status) 
7060 { 7061 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 7062 struct lpfc_vport *vport = ndlp->vport; 7063 struct lpfc_iocbq *elsiocb; 7064 struct ulp_bde64 *bpl; 7065 IOCB_t *icmd; 7066 union lpfc_wqe128 *wqe; 7067 uint8_t *pcmd; 7068 struct ls_rjt *stat; 7069 struct fc_rdp_res_frame *rdp_res; 7070 uint32_t cmdsize, len; 7071 uint16_t *flag_ptr; 7072 int rc; 7073 u32 ulp_context; 7074 7075 if (status != SUCCESS) 7076 goto error; 7077 7078 /* This will change once we know the true size of the RDP payload */ 7079 cmdsize = sizeof(struct fc_rdp_res_frame); 7080 7081 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7082 lpfc_max_els_tries, rdp_context->ndlp, 7083 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7084 if (!elsiocb) 7085 goto free_rdp_context; 7086 7087 ulp_context = get_job_ulpcontext(phba, elsiocb); 7088 if (phba->sli_rev == LPFC_SLI_REV4) { 7089 wqe = &elsiocb->wqe; 7090 /* ox-id of the frame */ 7091 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7092 rdp_context->ox_id); 7093 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7094 rdp_context->rx_id); 7095 } else { 7096 icmd = &elsiocb->iocb; 7097 icmd->ulpContext = rdp_context->rx_id; 7098 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7099 } 7100 7101 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7102 "2171 Xmit RDP response tag x%x xri x%x, " 7103 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7104 elsiocb->iotag, ulp_context, 7105 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7106 ndlp->nlp_rpi); 7107 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7108 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7109 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7110 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7111 7112 /* Update Alarm and Warning */ 7113 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7114 phba->sfp_alarm |= *flag_ptr; 7115 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7116 phba->sfp_warning |= *flag_ptr; 7117 7118 /* For RDP payload */ 7119 len = 8; 7120 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7121 (len + pcmd), ELS_CMD_RDP); 7122 7123 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7124 rdp_context->page_a0, rdp_context->page_a2); 7125 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7126 phba); 7127 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7128 (len + pcmd), &rdp_context->link_stat); 7129 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7130 (len + pcmd), vport); 7131 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7132 (len + pcmd), vport, ndlp); 7133 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7134 &rdp_context->link_stat); 7135 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7136 &rdp_context->link_stat, vport); 7137 len += lpfc_rdp_res_oed_temp_desc(phba, 7138 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7139 rdp_context->page_a2); 7140 len += lpfc_rdp_res_oed_voltage_desc(phba, 7141 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7142 rdp_context->page_a2); 7143 len += lpfc_rdp_res_oed_txbias_desc(phba, 7144 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7145 rdp_context->page_a2); 7146 len += lpfc_rdp_res_oed_txpower_desc(phba, 7147 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7148 rdp_context->page_a2); 7149 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7150 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7151 rdp_context->page_a2); 7152 len += lpfc_rdp_res_opd_desc((struct 
fc_rdp_opd_sfp_desc *)(len + pcmd), 7153 rdp_context->page_a0, vport); 7154 7155 rdp_res->length = cpu_to_be32(len - 8); 7156 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7157 7158 /* Now that we know the true size of the payload, update the BPL */ 7159 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7160 bpl->tus.f.bdeSize = len; 7161 bpl->tus.f.bdeFlags = 0; 7162 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7163 7164 phba->fc_stat.elsXmitACC++; 7165 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7166 if (!elsiocb->ndlp) { 7167 lpfc_els_free_iocb(phba, elsiocb); 7168 goto free_rdp_context; 7169 } 7170 7171 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7172 if (rc == IOCB_ERROR) { 7173 lpfc_els_free_iocb(phba, elsiocb); 7174 lpfc_nlp_put(ndlp); 7175 } 7176 7177 goto free_rdp_context; 7178 7179 error: 7180 cmdsize = 2 * sizeof(uint32_t); 7181 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7182 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7183 if (!elsiocb) 7184 goto free_rdp_context; 7185 7186 if (phba->sli_rev == LPFC_SLI_REV4) { 7187 wqe = &elsiocb->wqe; 7188 /* ox-id of the frame */ 7189 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7190 rdp_context->ox_id); 7191 bf_set(wqe_ctxt_tag, 7192 &wqe->xmit_els_rsp.wqe_com, 7193 rdp_context->rx_id); 7194 } else { 7195 icmd = &elsiocb->iocb; 7196 icmd->ulpContext = rdp_context->rx_id; 7197 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7198 } 7199 7200 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7201 7202 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7203 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7204 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7205 7206 phba->fc_stat.elsXmitLSRJT++; 7207 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7208 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7209 if (!elsiocb->ndlp) { 7210 lpfc_els_free_iocb(phba, elsiocb); 7211 goto free_rdp_context; 7212 } 7213 7214 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7215 if (rc == IOCB_ERROR) { 7216 lpfc_els_free_iocb(phba, elsiocb); 7217 lpfc_nlp_put(ndlp); 7218 } 7219 7220 free_rdp_context: 7221 /* This reference put is for the original unsolicited RDP. If the 7222 * prep failed, there is no reference to remove. 
7223 */ 7224 lpfc_nlp_put(ndlp); 7225 kfree(rdp_context); 7226 } 7227 7228 static int 7229 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7230 { 7231 LPFC_MBOXQ_t *mbox = NULL; 7232 int rc; 7233 7234 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7235 if (!mbox) { 7236 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7237 "7105 failed to allocate mailbox memory"); 7238 return 1; 7239 } 7240 7241 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7242 goto rdp_fail; 7243 mbox->vport = rdp_context->ndlp->vport; 7244 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7245 mbox->ctx_u.rdp = rdp_context; 7246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7247 if (rc == MBX_NOT_FINISHED) { 7248 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7249 return 1; 7250 } 7251 7252 return 0; 7253 7254 rdp_fail: 7255 mempool_free(mbox, phba->mbox_mem_pool); 7256 return 1; 7257 } 7258 7259 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, 7260 struct lpfc_rdp_context *rdp_context) 7261 { 7262 LPFC_MBOXQ_t *mbox = NULL; 7263 int rc; 7264 struct lpfc_dmabuf *mp; 7265 struct lpfc_dmabuf *mpsave; 7266 void *virt; 7267 MAILBOX_t *mb; 7268 7269 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7270 if (!mbox) { 7271 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7272 "7205 failed to allocate mailbox memory"); 7273 return 1; 7274 } 7275 7276 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7277 goto sfp_fail; 7278 mp = mbox->ctx_buf; 7279 mpsave = mp; 7280 virt = mp->virt; 7281 if (phba->sli_rev < LPFC_SLI_REV4) { 7282 mb = &mbox->u.mb; 7283 mb->un.varDmp.cv = 1; 7284 mb->un.varDmp.co = 1; 7285 mb->un.varWords[2] = 0; 7286 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; 7287 mb->un.varWords[4] = 0; 7288 mb->un.varWords[5] = 0; 7289 mb->un.varWords[6] = 0; 7290 mb->un.varWords[7] = 0; 7291 mb->un.varWords[8] = 0; 7292 mb->un.varWords[9] = 0; 7293 mb->un.varWords[10] = 0; 7294 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7295 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7296 mbox->mbox_offset_word = 5; 7297 mbox->ext_buf = virt; 7298 } else { 7299 bf_set(lpfc_mbx_memory_dump_type3_length, 7300 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); 7301 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7302 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7303 } 7304 mbox->vport = phba->pport; 7305 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO); 7306 if (rc == MBX_NOT_FINISHED) { 7307 rc = 1; 7308 goto error; 7309 } 7310 if (rc == MBX_TIMEOUT) 7311 goto error; 7312 if (phba->sli_rev == LPFC_SLI_REV4) 7313 mp = mbox->ctx_buf; 7314 else 7315 mp = mpsave; 7316 7317 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7318 rc = 1; 7319 goto error; 7320 } 7321 7322 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, 7323 DMP_SFF_PAGE_A0_SIZE); 7324 7325 memset(mbox, 0, sizeof(*mbox)); 7326 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); 7327 INIT_LIST_HEAD(&mp->list); 7328 7329 /* save address for completion */ 7330 mbox->ctx_buf = mp; 7331 mbox->vport = phba->pport; 7332 7333 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 7334 bf_set(lpfc_mbx_memory_dump_type3_type, 7335 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); 7336 bf_set(lpfc_mbx_memory_dump_type3_link, 7337 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); 7338 bf_set(lpfc_mbx_memory_dump_type3_page_no, 7339 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); 7340 if (phba->sli_rev < LPFC_SLI_REV4) { 7341 mb = &mbox->u.mb; 7342 mb->un.varDmp.cv = 1; 7343 
		mb->un.varDmp.co = 1;
		mb->un.varWords[2] = 0;
		mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4;
		mb->un.varWords[4] = 0;
		mb->un.varWords[5] = 0;
		mb->un.varWords[6] = 0;
		mb->un.varWords[7] = 0;
		mb->un.varWords[8] = 0;
		mb->un.varWords[9] = 0;
		mb->un.varWords[10] = 0;
		mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
		mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
		mbox->mbox_offset_word = 5;
		mbox->ext_buf = virt;
	} else {
		bf_set(lpfc_mbx_memory_dump_type3_length,
		       &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
		mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
		mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
	}

	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);

	if (rc == MBX_TIMEOUT)
		goto error;
	if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
		rc = 1;
		goto error;
	}
	rc = 0;

	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
			     DMP_SFF_PAGE_A2_SIZE);

error:
	if (mbox->mbox_flag & LPFC_MBX_WAKE) {
		mbox->ctx_buf = mpsave;
		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
	}

	return rc;

sfp_fail:
	mempool_free(mbox, phba->mbox_mem_pool);
	return 1;
}

/*
 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
 * IOCB. First, the payload of the unsolicited RDP is checked.
 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
 * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
 * lpfc_els_rdp_cmpl() to gather all the data and send the RDP response.
 *
 * Return code
 *  0 - Sent the acc response
 *  1 - Sent the reject response.
7406 */ 7407 static int 7408 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7409 struct lpfc_nodelist *ndlp) 7410 { 7411 struct lpfc_hba *phba = vport->phba; 7412 struct lpfc_dmabuf *pcmd; 7413 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7414 struct fc_rdp_req_frame *rdp_req; 7415 struct lpfc_rdp_context *rdp_context; 7416 union lpfc_wqe128 *cmd = NULL; 7417 struct ls_rjt stat; 7418 7419 if (phba->sli_rev < LPFC_SLI_REV4 || 7420 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7421 LPFC_SLI_INTF_IF_TYPE_2) { 7422 rjt_err = LSRJT_UNABLE_TPC; 7423 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7424 goto error; 7425 } 7426 7427 if (phba->sli_rev < LPFC_SLI_REV4 || 7428 test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 7429 rjt_err = LSRJT_UNABLE_TPC; 7430 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7431 goto error; 7432 } 7433 7434 pcmd = cmdiocb->cmd_dmabuf; 7435 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7436 7437 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7438 "2422 ELS RDP Request " 7439 "dec len %d tag x%x port_id %d len %d\n", 7440 be32_to_cpu(rdp_req->rdp_des_length), 7441 be32_to_cpu(rdp_req->nport_id_desc.tag), 7442 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7443 be32_to_cpu(rdp_req->nport_id_desc.length)); 7444 7445 if (sizeof(struct fc_rdp_nport_desc) != 7446 be32_to_cpu(rdp_req->rdp_des_length)) 7447 goto rjt_logerr; 7448 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7449 goto rjt_logerr; 7450 if (RDP_NPORT_ID_SIZE != 7451 be32_to_cpu(rdp_req->nport_id_desc.length)) 7452 goto rjt_logerr; 7453 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7454 if (!rdp_context) { 7455 rjt_err = LSRJT_UNABLE_TPC; 7456 goto error; 7457 } 7458 7459 cmd = &cmdiocb->wqe; 7460 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7461 if (!rdp_context->ndlp) { 7462 kfree(rdp_context); 7463 rjt_err = LSRJT_UNABLE_TPC; 7464 goto error; 7465 } 7466 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7467 &cmd->xmit_els_rsp.wqe_com); 7468 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7469 &cmd->xmit_els_rsp.wqe_com); 7470 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7471 if (lpfc_get_rdp_info(phba, rdp_context)) { 7472 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7473 "2423 Unable to send mailbox"); 7474 kfree(rdp_context); 7475 rjt_err = LSRJT_UNABLE_TPC; 7476 lpfc_nlp_put(ndlp); 7477 goto error; 7478 } 7479 7480 return 0; 7481 7482 rjt_logerr: 7483 rjt_err = LSRJT_LOGICAL_ERR; 7484 7485 error: 7486 memset(&stat, 0, sizeof(stat)); 7487 stat.un.b.lsRjtRsnCode = rjt_err; 7488 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7489 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7490 return 1; 7491 } 7492 7493 7494 static void 7495 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7496 { 7497 MAILBOX_t *mb; 7498 IOCB_t *icmd; 7499 union lpfc_wqe128 *wqe; 7500 uint8_t *pcmd; 7501 struct lpfc_iocbq *elsiocb; 7502 struct lpfc_nodelist *ndlp; 7503 struct ls_rjt *stat; 7504 union lpfc_sli4_cfg_shdr *shdr; 7505 struct lpfc_lcb_context *lcb_context; 7506 struct fc_lcb_res_frame *lcb_res; 7507 uint32_t cmdsize, shdr_status, shdr_add_status; 7508 int rc; 7509 7510 mb = &pmb->u.mb; 7511 lcb_context = pmb->ctx_u.lcb; 7512 ndlp = lcb_context->ndlp; 7513 memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u)); 7514 pmb->ctx_buf = NULL; 7515 7516 shdr = (union lpfc_sli4_cfg_shdr *) 7517 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7518 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7519 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7520 7521 
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7522 "0194 SET_BEACON_CONFIG mailbox " 7523 "completed with status x%x add_status x%x," 7524 " mbx status x%x\n", 7525 shdr_status, shdr_add_status, mb->mbxStatus); 7526 7527 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7528 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7529 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7530 mempool_free(pmb, phba->mbox_mem_pool); 7531 goto error; 7532 } 7533 7534 mempool_free(pmb, phba->mbox_mem_pool); 7535 cmdsize = sizeof(struct fc_lcb_res_frame); 7536 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7537 lpfc_max_els_tries, ndlp, 7538 ndlp->nlp_DID, ELS_CMD_ACC); 7539 7540 /* Decrement the ndlp reference count from previous mbox command */ 7541 lpfc_nlp_put(ndlp); 7542 7543 if (!elsiocb) 7544 goto free_lcb_context; 7545 7546 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7547 7548 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7549 7550 if (phba->sli_rev == LPFC_SLI_REV4) { 7551 wqe = &elsiocb->wqe; 7552 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7553 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7554 lcb_context->ox_id); 7555 } else { 7556 icmd = &elsiocb->iocb; 7557 icmd->ulpContext = lcb_context->rx_id; 7558 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7559 } 7560 7561 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7562 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7563 lcb_res->lcb_sub_command = lcb_context->sub_command; 7564 lcb_res->lcb_type = lcb_context->type; 7565 lcb_res->capability = lcb_context->capability; 7566 lcb_res->lcb_frequency = lcb_context->frequency; 7567 lcb_res->lcb_duration = lcb_context->duration; 7568 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7569 phba->fc_stat.elsXmitACC++; 7570 7571 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7572 if (!elsiocb->ndlp) { 7573 lpfc_els_free_iocb(phba, elsiocb); 7574 goto out; 7575 } 7576 7577 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7578 if (rc == IOCB_ERROR) { 7579 lpfc_els_free_iocb(phba, elsiocb); 7580 lpfc_nlp_put(ndlp); 7581 } 7582 out: 7583 kfree(lcb_context); 7584 return; 7585 7586 error: 7587 cmdsize = sizeof(struct fc_lcb_res_frame); 7588 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7589 lpfc_max_els_tries, ndlp, 7590 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7591 lpfc_nlp_put(ndlp); 7592 if (!elsiocb) 7593 goto free_lcb_context; 7594 7595 if (phba->sli_rev == LPFC_SLI_REV4) { 7596 wqe = &elsiocb->wqe; 7597 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7598 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7599 lcb_context->ox_id); 7600 } else { 7601 icmd = &elsiocb->iocb; 7602 icmd->ulpContext = lcb_context->rx_id; 7603 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7604 } 7605 7606 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7607 7608 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7609 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7610 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7611 7612 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7613 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7614 7615 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7616 phba->fc_stat.elsXmitLSRJT++; 7617 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7618 if (!elsiocb->ndlp) { 7619 lpfc_els_free_iocb(phba, elsiocb); 7620 goto free_lcb_context; 7621 } 7622 7623 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7624 if (rc == IOCB_ERROR) { 7625 lpfc_els_free_iocb(phba, elsiocb); 7626 lpfc_nlp_put(ndlp); 7627 } 7628 free_lcb_context: 7629 kfree(lcb_context); 7630 } 
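/**
 * lpfc_sli4_set_beacon - Issue a SET_BEACON_CONFIG mailbox for an LCB request
 * @vport: pointer to a host virtual N_Port data structure.
 * @lcb_context: pointer to the saved LCB request context.
 * @beacon_state: 1 to turn the beacon on, 0 to turn it off.
 *
 * This routine builds a COMMON_SET_BEACON_CONFIG mailbox command for the
 * physical port and issues it with MBX_NOWAIT. When the adapter reports
 * bv1s, the V1 form of the command is used and the requested duration is
 * carried through; otherwise the V0 form is used and a non-zero duration
 * is refused. The LCB ELS response itself is sent later from the mailbox
 * completion handler, lpfc_els_lcb_rsp().
 *
 * Return code
 *   0 - Mailbox command issued successfully
 *   1 - Failed to allocate or issue the mailbox command
 **/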
7631 7632 static int 7633 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7634 struct lpfc_lcb_context *lcb_context, 7635 uint32_t beacon_state) 7636 { 7637 struct lpfc_hba *phba = vport->phba; 7638 union lpfc_sli4_cfg_shdr *cfg_shdr; 7639 LPFC_MBOXQ_t *mbox = NULL; 7640 uint32_t len; 7641 int rc; 7642 7643 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7644 if (!mbox) 7645 return 1; 7646 7647 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7648 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7649 sizeof(struct lpfc_sli4_cfg_mhdr); 7650 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7651 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7652 LPFC_SLI4_MBX_EMBED); 7653 mbox->ctx_u.lcb = lcb_context; 7654 mbox->vport = phba->pport; 7655 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7656 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7657 phba->sli4_hba.physical_port); 7658 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7659 beacon_state); 7660 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7661 7662 /* 7663 * Check bv1s bit before issuing the mailbox 7664 * if bv1s == 1, LCB V1 supported 7665 * else, LCB V0 supported 7666 */ 7667 7668 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7669 /* COMMON_SET_BEACON_CONFIG_V1 */ 7670 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7671 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7672 bf_set(lpfc_mbx_set_beacon_port_type, 7673 &mbox->u.mqe.un.beacon_config, 0); 7674 bf_set(lpfc_mbx_set_beacon_duration_v1, 7675 &mbox->u.mqe.un.beacon_config, 7676 be16_to_cpu(lcb_context->duration)); 7677 } else { 7678 /* COMMON_SET_BEACON_CONFIG_V0 */ 7679 if (be16_to_cpu(lcb_context->duration) != 0) { 7680 mempool_free(mbox, phba->mbox_mem_pool); 7681 return 1; 7682 } 7683 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7684 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7685 bf_set(lpfc_mbx_set_beacon_state, 7686 &mbox->u.mqe.un.beacon_config, beacon_state); 7687 bf_set(lpfc_mbx_set_beacon_port_type, 7688 &mbox->u.mqe.un.beacon_config, 1); 7689 bf_set(lpfc_mbx_set_beacon_duration, 7690 &mbox->u.mqe.un.beacon_config, 7691 be16_to_cpu(lcb_context->duration)); 7692 } 7693 7694 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7695 if (rc == MBX_NOT_FINISHED) { 7696 mempool_free(mbox, phba->mbox_mem_pool); 7697 return 1; 7698 } 7699 7700 return 0; 7701 } 7702 7703 7704 /** 7705 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7706 * @vport: pointer to a host virtual N_Port data structure. 7707 * @cmdiocb: pointer to lpfc command iocb data structure. 7708 * @ndlp: pointer to a node-list data structure. 7709 * 7710 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7711 * First, the payload of the unsolicited LCB is checked. 7712 * Then based on Subcommand beacon will either turn on or off. 7713 * 7714 * Return code 7715 * 0 - Sent the acc response 7716 * 1 - Sent the reject response. 
7717 **/ 7718 static int 7719 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7720 struct lpfc_nodelist *ndlp) 7721 { 7722 struct lpfc_hba *phba = vport->phba; 7723 struct lpfc_dmabuf *pcmd; 7724 uint8_t *lp; 7725 struct fc_lcb_request_frame *beacon; 7726 struct lpfc_lcb_context *lcb_context; 7727 u8 state, rjt_err = 0; 7728 struct ls_rjt stat; 7729 7730 pcmd = cmdiocb->cmd_dmabuf; 7731 lp = (uint8_t *)pcmd->virt; 7732 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7733 7734 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7735 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7736 "type x%x frequency %x duration x%x\n", 7737 lp[0], lp[1], lp[2], 7738 beacon->lcb_command, 7739 beacon->lcb_sub_command, 7740 beacon->lcb_type, 7741 beacon->lcb_frequency, 7742 be16_to_cpu(beacon->lcb_duration)); 7743 7744 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7745 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7746 rjt_err = LSRJT_CMD_UNSUPPORTED; 7747 goto rjt; 7748 } 7749 7750 if (phba->sli_rev < LPFC_SLI_REV4 || 7751 test_bit(HBA_FCOE_MODE, &phba->hba_flag) || 7752 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7753 LPFC_SLI_INTF_IF_TYPE_2)) { 7754 rjt_err = LSRJT_CMD_UNSUPPORTED; 7755 goto rjt; 7756 } 7757 7758 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7759 if (!lcb_context) { 7760 rjt_err = LSRJT_UNABLE_TPC; 7761 goto rjt; 7762 } 7763 7764 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7765 lcb_context->sub_command = beacon->lcb_sub_command; 7766 lcb_context->capability = 0; 7767 lcb_context->type = beacon->lcb_type; 7768 lcb_context->frequency = beacon->lcb_frequency; 7769 lcb_context->duration = beacon->lcb_duration; 7770 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7771 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7772 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7773 if (!lcb_context->ndlp) { 7774 rjt_err = LSRJT_UNABLE_TPC; 7775 goto rjt_free; 7776 } 7777 7778 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7779 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7780 "0193 failed to send mail box"); 7781 lpfc_nlp_put(ndlp); 7782 rjt_err = LSRJT_UNABLE_TPC; 7783 goto rjt_free; 7784 } 7785 return 0; 7786 7787 rjt_free: 7788 kfree(lcb_context); 7789 rjt: 7790 memset(&stat, 0, sizeof(stat)); 7791 stat.un.b.lsRjtRsnCode = rjt_err; 7792 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7793 return 1; 7794 } 7795 7796 7797 /** 7798 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7799 * @vport: pointer to a host virtual N_Port data structure. 7800 * 7801 * This routine cleans up any Registration State Change Notification 7802 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7803 * @vport together with the host_lock is used to prevent multiple thread 7804 * trying to access the RSCN array on a same @vport at the same time. 
7805 **/ 7806 void 7807 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7808 { 7809 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7810 struct lpfc_hba *phba = vport->phba; 7811 int i; 7812 7813 spin_lock_irq(shost->host_lock); 7814 if (vport->fc_rscn_flush) { 7815 /* Another thread is walking fc_rscn_id_list on this vport */ 7816 spin_unlock_irq(shost->host_lock); 7817 return; 7818 } 7819 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7820 vport->fc_rscn_flush = 1; 7821 spin_unlock_irq(shost->host_lock); 7822 7823 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7824 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7825 vport->fc_rscn_id_list[i] = NULL; 7826 } 7827 clear_bit(FC_RSCN_MODE, &vport->fc_flag); 7828 clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag); 7829 spin_lock_irq(shost->host_lock); 7830 vport->fc_rscn_id_cnt = 0; 7831 spin_unlock_irq(shost->host_lock); 7832 lpfc_can_disctmo(vport); 7833 /* Indicate we are done walking this fc_rscn_id_list */ 7834 vport->fc_rscn_flush = 0; 7835 } 7836 7837 /** 7838 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7839 * @vport: pointer to a host virtual N_Port data structure. 7840 * @did: remote destination port identifier. 7841 * 7842 * This routine checks whether there is any pending Registration State 7843 * Configuration Notification (RSCN) to a @did on @vport. 7844 * 7845 * Return code 7846 * None zero - The @did matched with a pending rscn 7847 * 0 - not able to match @did with a pending rscn 7848 **/ 7849 int 7850 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7851 { 7852 D_ID ns_did; 7853 D_ID rscn_did; 7854 uint32_t *lp; 7855 uint32_t payload_len, i; 7856 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7857 7858 ns_did.un.word = did; 7859 7860 /* Never match fabric nodes for RSCNs */ 7861 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7862 return 0; 7863 7864 /* If we are doing a FULL RSCN rediscovery, match everything */ 7865 if (test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) 7866 return did; 7867 7868 spin_lock_irq(shost->host_lock); 7869 if (vport->fc_rscn_flush) { 7870 /* Another thread is walking fc_rscn_id_list on this vport */ 7871 spin_unlock_irq(shost->host_lock); 7872 return 0; 7873 } 7874 /* Indicate we are walking fc_rscn_id_list on this vport */ 7875 vport->fc_rscn_flush = 1; 7876 spin_unlock_irq(shost->host_lock); 7877 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7878 lp = vport->fc_rscn_id_list[i]->virt; 7879 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7880 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7881 while (payload_len) { 7882 rscn_did.un.word = be32_to_cpu(*lp++); 7883 payload_len -= sizeof(uint32_t); 7884 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7885 case RSCN_ADDRESS_FORMAT_PORT: 7886 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7887 && (ns_did.un.b.area == rscn_did.un.b.area) 7888 && (ns_did.un.b.id == rscn_did.un.b.id)) 7889 goto return_did_out; 7890 break; 7891 case RSCN_ADDRESS_FORMAT_AREA: 7892 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7893 && (ns_did.un.b.area == rscn_did.un.b.area)) 7894 goto return_did_out; 7895 break; 7896 case RSCN_ADDRESS_FORMAT_DOMAIN: 7897 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7898 goto return_did_out; 7899 break; 7900 case RSCN_ADDRESS_FORMAT_FABRIC: 7901 goto return_did_out; 7902 } 7903 } 7904 } 7905 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7906 vport->fc_rscn_flush = 0; 7907 return 0; 7908 return_did_out: 7909 /* Indicate we 
are done with walking fc_rscn_id_list on this vport */ 7910 vport->fc_rscn_flush = 0; 7911 return did; 7912 } 7913 7914 /** 7915 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7916 * @vport: pointer to a host virtual N_Port data structure. 7917 * 7918 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7919 * state machine for a @vport's nodes that are with pending RSCN (Registration 7920 * State Change Notification). 7921 * 7922 * Return code 7923 * 0 - Successful (currently alway return 0) 7924 **/ 7925 static int 7926 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7927 { 7928 struct lpfc_nodelist *ndlp = NULL, *n; 7929 7930 /* Move all affected nodes by pending RSCNs to NPR state. */ 7931 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7932 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7933 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7934 continue; 7935 7936 /* NVME Target mode does not do RSCN Recovery. */ 7937 if (vport->phba->nvmet_support) 7938 continue; 7939 7940 /* If we are in the process of doing discovery on this 7941 * NPort, let it continue on its own. 7942 */ 7943 switch (ndlp->nlp_state) { 7944 case NLP_STE_PLOGI_ISSUE: 7945 case NLP_STE_ADISC_ISSUE: 7946 case NLP_STE_REG_LOGIN_ISSUE: 7947 case NLP_STE_PRLI_ISSUE: 7948 case NLP_STE_LOGO_ISSUE: 7949 continue; 7950 } 7951 7952 lpfc_disc_state_machine(vport, ndlp, NULL, 7953 NLP_EVT_DEVICE_RECOVERY); 7954 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7955 } 7956 return 0; 7957 } 7958 7959 /** 7960 * lpfc_send_rscn_event - Send an RSCN event to management application 7961 * @vport: pointer to a host virtual N_Port data structure. 7962 * @cmdiocb: pointer to lpfc command iocb data structure. 7963 * 7964 * lpfc_send_rscn_event sends an RSCN netlink event to management 7965 * applications. 7966 */ 7967 static void 7968 lpfc_send_rscn_event(struct lpfc_vport *vport, 7969 struct lpfc_iocbq *cmdiocb) 7970 { 7971 struct lpfc_dmabuf *pcmd; 7972 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7973 uint32_t *payload_ptr; 7974 uint32_t payload_len; 7975 struct lpfc_rscn_event_header *rscn_event_data; 7976 7977 pcmd = cmdiocb->cmd_dmabuf; 7978 payload_ptr = (uint32_t *) pcmd->virt; 7979 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7980 7981 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7982 payload_len, GFP_KERNEL); 7983 if (!rscn_event_data) { 7984 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7985 "0147 Failed to allocate memory for RSCN event\n"); 7986 return; 7987 } 7988 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7989 rscn_event_data->payload_length = payload_len; 7990 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7991 payload_len); 7992 7993 fc_host_post_vendor_event(shost, 7994 fc_get_event_number(), 7995 sizeof(struct lpfc_rscn_event_header) + payload_len, 7996 (char *)rscn_event_data, 7997 LPFC_NL_VENDOR_ID); 7998 7999 kfree(rscn_event_data); 8000 } 8001 8002 /** 8003 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 8004 * @vport: pointer to a host virtual N_Port data structure. 8005 * @cmdiocb: pointer to lpfc command iocb data structure. 8006 * @ndlp: pointer to a node-list data structure. 8007 * 8008 * This routine processes an unsolicited RSCN (Registration State Change 8009 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 8010 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 8011 * discover state machine is about to begin discovery, it just accepts the 8012 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 8013 * contains N_Port IDs for other vports on this HBA, it just accepts the 8014 * RSCN and ignore processing it. If the state machine is in the recovery 8015 * state, the fc_rscn_id_list of this @vport is walked and the 8016 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 8017 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 8018 * routine is invoked to handle the RSCN event. 8019 * 8020 * Return code 8021 * 0 - Just sent the acc response 8022 * 1 - Sent the acc response and waited for name server completion 8023 **/ 8024 static int 8025 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8026 struct lpfc_nodelist *ndlp) 8027 { 8028 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8029 struct lpfc_hba *phba = vport->phba; 8030 struct lpfc_dmabuf *pcmd; 8031 uint32_t *lp, *datap; 8032 uint32_t payload_len, length, nportid, *cmd; 8033 int rscn_cnt; 8034 int rscn_id = 0, hba_id = 0; 8035 int i, tmo; 8036 8037 pcmd = cmdiocb->cmd_dmabuf; 8038 lp = (uint32_t *) pcmd->virt; 8039 8040 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 8041 payload_len -= sizeof(uint32_t); /* take off word 0 */ 8042 /* RSCN received */ 8043 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8044 "0214 RSCN received Data: x%lx x%x x%x x%x\n", 8045 vport->fc_flag, payload_len, *lp, 8046 vport->fc_rscn_id_cnt); 8047 8048 /* Send an RSCN event to the management application */ 8049 lpfc_send_rscn_event(vport, cmdiocb); 8050 8051 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 8052 fc_host_post_event(shost, fc_get_event_number(), 8053 FCH_EVT_RSCN, lp[i]); 8054 8055 /* Check if RSCN is coming from a direct-connected remote NPort */ 8056 if (test_bit(FC_PT2PT, &vport->fc_flag)) { 8057 /* If so, just ACC it, no other action needed for now */ 8058 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8059 "2024 pt2pt RSCN %08x Data: x%lx x%x\n", 8060 *lp, vport->fc_flag, payload_len); 8061 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8062 8063 /* Check to see if we need to NVME rescan this target 8064 * remoteport. 8065 */ 8066 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 8067 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 8068 lpfc_nvme_rescan_port(vport, ndlp); 8069 return 0; 8070 } 8071 8072 /* If we are about to begin discovery, just ACC the RSCN. 8073 * Discovery processing will satisfy it. 8074 */ 8075 if (vport->port_state <= LPFC_NS_QRY) { 8076 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8077 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 8078 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8079 8080 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8081 return 0; 8082 } 8083 8084 /* If this RSCN just contains NPortIDs for other vports on this HBA, 8085 * just ACC and ignore it. 
8086 */ 8087 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8088 !(vport->cfg_peer_port_login)) { 8089 i = payload_len; 8090 datap = lp; 8091 while (i > 0) { 8092 nportid = *datap++; 8093 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 8094 i -= sizeof(uint32_t); 8095 rscn_id++; 8096 if (lpfc_find_vport_by_did(phba, nportid)) 8097 hba_id++; 8098 } 8099 if (rscn_id == hba_id) { 8100 /* ALL NPortIDs in RSCN are on HBA */ 8101 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8102 "0219 Ignore RSCN " 8103 "Data: x%lx x%x x%x x%x\n", 8104 vport->fc_flag, payload_len, 8105 *lp, vport->fc_rscn_id_cnt); 8106 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8107 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 8108 ndlp->nlp_DID, vport->port_state, 8109 ndlp->nlp_flag); 8110 8111 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 8112 ndlp, NULL); 8113 /* Restart disctmo if its already running */ 8114 if (test_bit(FC_DISC_TMO, &vport->fc_flag)) { 8115 tmo = ((phba->fc_ratov * 3) + 3); 8116 mod_timer(&vport->fc_disctmo, 8117 jiffies + 8118 msecs_to_jiffies(1000 * tmo)); 8119 } 8120 return 0; 8121 } 8122 } 8123 8124 spin_lock_irq(shost->host_lock); 8125 if (vport->fc_rscn_flush) { 8126 /* Another thread is walking fc_rscn_id_list on this vport */ 8127 spin_unlock_irq(shost->host_lock); 8128 set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag); 8129 /* Send back ACC */ 8130 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8131 return 0; 8132 } 8133 /* Indicate we are walking fc_rscn_id_list on this vport */ 8134 vport->fc_rscn_flush = 1; 8135 spin_unlock_irq(shost->host_lock); 8136 /* Get the array count after successfully have the token */ 8137 rscn_cnt = vport->fc_rscn_id_cnt; 8138 /* If we are already processing an RSCN, save the received 8139 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 8140 */ 8141 if (test_bit(FC_RSCN_MODE, &vport->fc_flag) || 8142 test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { 8143 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8144 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 8145 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8146 8147 set_bit(FC_RSCN_DEFERRED, &vport->fc_flag); 8148 8149 /* Restart disctmo if its already running */ 8150 if (test_bit(FC_DISC_TMO, &vport->fc_flag)) { 8151 tmo = ((phba->fc_ratov * 3) + 3); 8152 mod_timer(&vport->fc_disctmo, 8153 jiffies + msecs_to_jiffies(1000 * tmo)); 8154 } 8155 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 8156 !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) { 8157 set_bit(FC_RSCN_MODE, &vport->fc_flag); 8158 if (rscn_cnt) { 8159 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 8160 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 8161 } 8162 if ((rscn_cnt) && 8163 (payload_len + length <= LPFC_BPL_SIZE)) { 8164 *cmd &= ELS_CMD_MASK; 8165 *cmd |= cpu_to_be32(payload_len + length); 8166 memcpy(((uint8_t *)cmd) + length, lp, 8167 payload_len); 8168 } else { 8169 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 8170 vport->fc_rscn_id_cnt++; 8171 /* If we zero, cmdiocb->cmd_dmabuf, the calling 8172 * routine will not try to free it. 
8173 */ 8174 cmdiocb->cmd_dmabuf = NULL; 8175 } 8176 /* Deferred RSCN */ 8177 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8178 "0235 Deferred RSCN " 8179 "Data: x%x x%lx x%x\n", 8180 vport->fc_rscn_id_cnt, vport->fc_flag, 8181 vport->port_state); 8182 } else { 8183 set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag); 8184 /* ReDiscovery RSCN */ 8185 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8186 "0234 ReDiscovery RSCN " 8187 "Data: x%x x%lx x%x\n", 8188 vport->fc_rscn_id_cnt, vport->fc_flag, 8189 vport->port_state); 8190 } 8191 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8192 vport->fc_rscn_flush = 0; 8193 /* Send back ACC */ 8194 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8195 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8196 lpfc_rscn_recovery_check(vport); 8197 return 0; 8198 } 8199 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8200 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 8201 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8202 8203 set_bit(FC_RSCN_MODE, &vport->fc_flag); 8204 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 8205 /* Indicate we are done walking fc_rscn_id_list on this vport */ 8206 vport->fc_rscn_flush = 0; 8207 /* 8208 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will 8209 * not try to free it. 8210 */ 8211 cmdiocb->cmd_dmabuf = NULL; 8212 lpfc_set_disctmo(vport); 8213 /* Send back ACC */ 8214 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8215 /* send RECOVERY event for ALL nodes that match RSCN payload */ 8216 lpfc_rscn_recovery_check(vport); 8217 return lpfc_els_handle_rscn(vport); 8218 } 8219 8220 /** 8221 * lpfc_els_handle_rscn - Handle rscn for a vport 8222 * @vport: pointer to a host virtual N_Port data structure. 8223 * 8224 * This routine handles the Registration State Configuration Notification 8225 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 8226 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 8227 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 8228 * NameServer shall be issued. If CT command to the NameServer fails to be 8229 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 8230 * RSCN activities with the @vport. 8231 * 8232 * Return code 8233 * 0 - Cleaned up rscn on the @vport 8234 * 1 - Wait for plogi to name server before proceed 8235 **/ 8236 int 8237 lpfc_els_handle_rscn(struct lpfc_vport *vport) 8238 { 8239 struct lpfc_nodelist *ndlp; 8240 struct lpfc_hba *phba = vport->phba; 8241 8242 /* Ignore RSCN if the port is being torn down. */ 8243 if (test_bit(FC_UNLOADING, &vport->load_flag)) { 8244 lpfc_els_flush_rscn(vport); 8245 return 0; 8246 } 8247 8248 /* Start timer for RSCN processing */ 8249 lpfc_set_disctmo(vport); 8250 8251 /* RSCN processed */ 8252 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8253 "0215 RSCN processed Data: x%lx x%x x%x x%x x%x x%x\n", 8254 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 8255 vport->port_state, vport->num_disc_nodes, 8256 vport->gidft_inp); 8257 8258 /* To process RSCN, first compare RSCN data with NameServer */ 8259 vport->fc_ns_retry = 0; 8260 vport->num_disc_nodes = 0; 8261 8262 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8263 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 8264 /* Good ndlp, issue CT Request to NameServer. Need to 8265 * know how many gidfts were issued. If none, then just 8266 * flush the RSCN. Otherwise, the outstanding requests 8267 * need to complete. 
		 */
		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
			if (lpfc_issue_gidft(vport) > 0)
				return 1;
		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
			if (lpfc_issue_gidpt(vport) > 0)
				return 1;
		} else {
			return 1;
		}
	} else {
		/* Nameserver login in question. Revalidate. */
		if (ndlp) {
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		} else {
			ndlp = lpfc_nlp_init(vport, NameServer_DID);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		}
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
		/* Wait for NameServer login cmpl before we can
		 * continue
		 */
		return 1;
	}

	lpfc_els_flush_rscn(vport);
	return 0;
}

/**
 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
 * point topology. As an unsolicited FLOGI should not be received in a loop
 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
 * lpfc_check_sparm() routine is invoked to check the parameters in the
 * unsolicited FLOGI. If parameter validation fails, the routine
 * lpfc_els_rsp_reject() shall be called with reject reason code set to
 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
 * FLOGI shall be compared with the Port WWN of the @vport to determine who
 * will initiate PLOGI. The party with the higher lexicographical value shall
 * have higher priority (as the winning port) and will initiate PLOGI and
 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
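 * If the two Port WWNs are equal, the FLOGI is taken to be the port's own
 * frame returned through an external loopback plug: on SLI-3 the link is
 * simply re-initialized, while on SLI-4 the LS_EXTERNAL_LOOPBACK flag is
 * set and the outstanding FLOGI is aborted.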
8324 * 8325 * Return code 8326 * 0 - Successfully processed the unsolicited flogi 8327 * 1 - Failed to process the unsolicited flogi 8328 **/ 8329 static int 8330 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8331 struct lpfc_nodelist *ndlp) 8332 { 8333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8334 struct lpfc_hba *phba = vport->phba; 8335 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8336 uint32_t *lp = (uint32_t *) pcmd->virt; 8337 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8338 struct serv_parm *sp; 8339 LPFC_MBOXQ_t *mbox; 8340 uint32_t cmd, did; 8341 int rc; 8342 unsigned long fc_flag = 0; 8343 uint32_t port_state = 0; 8344 8345 /* Clear external loopback plug detected flag */ 8346 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8347 8348 cmd = *lp++; 8349 sp = (struct serv_parm *) lp; 8350 8351 /* FLOGI received */ 8352 8353 lpfc_set_disctmo(vport); 8354 8355 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8356 /* We should never receive a FLOGI in loop mode, ignore it */ 8357 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8358 8359 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8360 Loop Mode */ 8361 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8362 "0113 An FLOGI ELS command x%x was " 8363 "received from DID x%x in Loop Mode\n", 8364 cmd, did); 8365 return 1; 8366 } 8367 8368 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8369 8370 /* 8371 * If our portname is greater than the remote portname, 8372 * then we initiate Nport login. 8373 */ 8374 8375 rc = memcmp(&vport->fc_portname, &sp->portName, 8376 sizeof(struct lpfc_name)); 8377 8378 if (!rc) { 8379 if (phba->sli_rev < LPFC_SLI_REV4) { 8380 mbox = mempool_alloc(phba->mbox_mem_pool, 8381 GFP_KERNEL); 8382 if (!mbox) 8383 return 1; 8384 lpfc_linkdown(phba); 8385 lpfc_init_link(phba, mbox, 8386 phba->cfg_topology, 8387 phba->cfg_link_speed); 8388 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8389 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8390 mbox->vport = vport; 8391 rc = lpfc_sli_issue_mbox(phba, mbox, 8392 MBX_NOWAIT); 8393 lpfc_set_loopback_flag(phba); 8394 if (rc == MBX_NOT_FINISHED) 8395 mempool_free(mbox, phba->mbox_mem_pool); 8396 return 1; 8397 } 8398 8399 /* External loopback plug insertion detected */ 8400 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8401 8402 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8403 "1119 External Loopback plug detected\n"); 8404 8405 /* abort the flogi coming back to ourselves 8406 * due to external loopback on the port. 8407 */ 8408 lpfc_els_abort_flogi(phba); 8409 return 0; 8410 8411 } else if (rc > 0) { /* greater than */ 8412 set_bit(FC_PT2PT_PLOGI, &vport->fc_flag); 8413 8414 /* If we have the high WWPN we can assign our own 8415 * myDID; otherwise, we have to WAIT for a PLOGI 8416 * from the remote NPort to find out what it 8417 * will be. 8418 */ 8419 vport->fc_myDID = PT2PT_LocalID; 8420 } else { 8421 vport->fc_myDID = PT2PT_RemoteID; 8422 } 8423 8424 /* 8425 * The vport state should go to LPFC_FLOGI only 8426 * AFTER we issue a FLOGI, not receive one. 8427 */ 8428 spin_lock_irq(shost->host_lock); 8429 fc_flag = vport->fc_flag; 8430 port_state = vport->port_state; 8431 /* Acking an unsol FLOGI. Count 1 for link bounce 8432 * work-around. 
8433 */ 8434 vport->rcv_flogi_cnt++; 8435 spin_unlock_irq(shost->host_lock); 8436 set_bit(FC_PT2PT, &vport->fc_flag); 8437 clear_bit(FC_FABRIC, &vport->fc_flag); 8438 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 8439 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8440 "3311 Rcv Flogi PS x%x new PS x%x " 8441 "fc_flag x%lx new fc_flag x%lx\n", 8442 port_state, vport->port_state, 8443 fc_flag, vport->fc_flag); 8444 8445 /* 8446 * We temporarily set fc_myDID to make it look like we are 8447 * a Fabric. This is done just so we end up with the right 8448 * did / sid on the FLOGI ACC rsp. 8449 */ 8450 did = vport->fc_myDID; 8451 vport->fc_myDID = Fabric_DID; 8452 8453 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8454 8455 /* Defer ACC response until AFTER we issue a FLOGI */ 8456 if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) { 8457 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8458 &wqe->xmit_els_rsp.wqe_com); 8459 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8460 &wqe->xmit_els_rsp.wqe_com); 8461 8462 vport->fc_myDID = did; 8463 8464 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8465 "3344 Deferring FLOGI ACC: rx_id: x%x," 8466 " ox_id: x%x, hba_flag x%lx\n", 8467 phba->defer_flogi_acc_rx_id, 8468 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8469 8470 phba->defer_flogi_acc_flag = true; 8471 8472 return 0; 8473 } 8474 8475 /* Send back ACC */ 8476 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8477 8478 /* Now lets put fc_myDID back to what its supposed to be */ 8479 vport->fc_myDID = did; 8480 8481 return 0; 8482 } 8483 8484 /** 8485 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8486 * @vport: pointer to a host virtual N_Port data structure. 8487 * @cmdiocb: pointer to lpfc command iocb data structure. 8488 * @ndlp: pointer to a node-list data structure. 8489 * 8490 * This routine processes Request Node Identification Data (RNID) IOCB 8491 * received as an ELS unsolicited event. Only when the RNID specified format 8492 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8493 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8494 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8495 * rejected by invoking the lpfc_els_rsp_reject() routine. 8496 * 8497 * Return code 8498 * 0 - Successfully processed rnid iocb (currently always return 0) 8499 **/ 8500 static int 8501 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8502 struct lpfc_nodelist *ndlp) 8503 { 8504 struct lpfc_dmabuf *pcmd; 8505 uint32_t *lp; 8506 RNID *rn; 8507 struct ls_rjt stat; 8508 8509 pcmd = cmdiocb->cmd_dmabuf; 8510 lp = (uint32_t *) pcmd->virt; 8511 8512 lp++; 8513 rn = (RNID *) lp; 8514 8515 /* RNID received */ 8516 8517 switch (rn->Format) { 8518 case 0: 8519 case RNID_TOPOLOGY_DISC: 8520 /* Send back ACC */ 8521 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8522 break; 8523 default: 8524 /* Reject this request because format not supported */ 8525 stat.un.b.lsRjtRsvd0 = 0; 8526 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8527 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8528 stat.un.b.vendorUnique = 0; 8529 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8530 NULL); 8531 } 8532 return 0; 8533 } 8534 8535 /** 8536 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8537 * @vport: pointer to a host virtual N_Port data structure. 8538 * @cmdiocb: pointer to lpfc command iocb data structure. 8539 * @ndlp: pointer to a node-list data structure. 
8540 * 8541 * Return code 8542 * 0 - Successfully processed echo iocb (currently always return 0) 8543 **/ 8544 static int 8545 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8546 struct lpfc_nodelist *ndlp) 8547 { 8548 uint8_t *pcmd; 8549 8550 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8551 8552 /* skip over first word of echo command to find echo data */ 8553 pcmd += sizeof(uint32_t); 8554 8555 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8556 return 0; 8557 } 8558 8559 /** 8560 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8561 * @vport: pointer to a host virtual N_Port data structure. 8562 * @cmdiocb: pointer to lpfc command iocb data structure. 8563 * @ndlp: pointer to a node-list data structure. 8564 * 8565 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8566 * received as an ELS unsolicited event. Currently, this function just invokes 8567 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8568 * 8569 * Return code 8570 * 0 - Successfully processed lirr iocb (currently always return 0) 8571 **/ 8572 static int 8573 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8574 struct lpfc_nodelist *ndlp) 8575 { 8576 struct ls_rjt stat; 8577 8578 /* For now, unconditionally reject this command */ 8579 stat.un.b.lsRjtRsvd0 = 0; 8580 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8581 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8582 stat.un.b.vendorUnique = 0; 8583 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8584 return 0; 8585 } 8586 8587 /** 8588 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8589 * @vport: pointer to a host virtual N_Port data structure. 8590 * @cmdiocb: pointer to lpfc command iocb data structure. 8591 * @ndlp: pointer to a node-list data structure. 8592 * 8593 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8594 * received as an ELS unsolicited event. A request to RRQ shall only 8595 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8596 * Nx_Port N_Port_ID of the target Exchange is the same as the 8597 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8598 * not accepted, an LS_RJT with reason code "Unable to perform 8599 * command request" and reason code explanation "Invalid Originator 8600 * S_ID" shall be returned. For now, we just unconditionally accept 8601 * RRQ from the target. 8602 **/ 8603 static void 8604 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8605 struct lpfc_nodelist *ndlp) 8606 { 8607 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8608 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8609 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8610 } 8611 8612 /** 8613 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8614 * @phba: pointer to lpfc hba data structure. 8615 * @pmb: pointer to the driver internal queue element for mailbox command. 8616 * 8617 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8618 * mailbox command. This callback function is to actually send the Accept 8619 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8620 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8621 * mailbox command, constructs the RLS response with the link statistics 8622 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8623 * response to the RLS. 
8624 * 8625 * Note that the ndlp reference count will be incremented by 1 for holding the 8626 * ndlp and the reference to ndlp will be stored into the ndlp field of 8627 * the IOCB for the completion callback function to the RLS Accept Response 8628 * ELS IOCB command. 8629 * 8630 **/ 8631 static void 8632 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8633 { 8634 int rc = 0; 8635 MAILBOX_t *mb; 8636 IOCB_t *icmd; 8637 union lpfc_wqe128 *wqe; 8638 struct RLS_RSP *rls_rsp; 8639 uint8_t *pcmd; 8640 struct lpfc_iocbq *elsiocb; 8641 struct lpfc_nodelist *ndlp; 8642 uint16_t oxid; 8643 uint16_t rxid; 8644 uint32_t cmdsize; 8645 u32 ulp_context; 8646 8647 mb = &pmb->u.mb; 8648 8649 ndlp = pmb->ctx_ndlp; 8650 rxid = (uint16_t)(pmb->ctx_u.ox_rx_id & 0xffff); 8651 oxid = (uint16_t)((pmb->ctx_u.ox_rx_id >> 16) & 0xffff); 8652 memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u)); 8653 pmb->ctx_ndlp = NULL; 8654 8655 if (mb->mbxStatus) { 8656 mempool_free(pmb, phba->mbox_mem_pool); 8657 return; 8658 } 8659 8660 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8661 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8662 lpfc_max_els_tries, ndlp, 8663 ndlp->nlp_DID, ELS_CMD_ACC); 8664 8665 /* Decrement the ndlp reference count from previous mbox command */ 8666 lpfc_nlp_put(ndlp); 8667 8668 if (!elsiocb) { 8669 mempool_free(pmb, phba->mbox_mem_pool); 8670 return; 8671 } 8672 8673 ulp_context = get_job_ulpcontext(phba, elsiocb); 8674 if (phba->sli_rev == LPFC_SLI_REV4) { 8675 wqe = &elsiocb->wqe; 8676 /* Xri / rx_id */ 8677 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8678 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8679 } else { 8680 icmd = &elsiocb->iocb; 8681 icmd->ulpContext = rxid; 8682 icmd->unsli3.rcvsli3.ox_id = oxid; 8683 } 8684 8685 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8686 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8687 pcmd += sizeof(uint32_t); /* Skip past command */ 8688 rls_rsp = (struct RLS_RSP *)pcmd; 8689 8690 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8691 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8692 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8693 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8694 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8695 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8696 mempool_free(pmb, phba->mbox_mem_pool); 8697 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8698 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8699 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8700 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8701 elsiocb->iotag, ulp_context, 8702 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8703 ndlp->nlp_rpi); 8704 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8705 phba->fc_stat.elsXmitACC++; 8706 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8707 if (!elsiocb->ndlp) { 8708 lpfc_els_free_iocb(phba, elsiocb); 8709 return; 8710 } 8711 8712 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8713 if (rc == IOCB_ERROR) { 8714 lpfc_els_free_iocb(phba, elsiocb); 8715 lpfc_nlp_put(ndlp); 8716 } 8717 return; 8718 } 8719 8720 /** 8721 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8722 * @vport: pointer to a host virtual N_Port data structure. 8723 * @cmdiocb: pointer to lpfc command iocb data structure. 8724 * @ndlp: pointer to a node-list data structure. 8725 * 8726 * This routine processes Read Link Status (RLS) IOCB received as an 8727 * ELS unsolicited event. 
It first checks the remote port state. If the
8728 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8729 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8730 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8731 * for reading the HBA link statistics. The lpfc_els_rsp_rls_acc() routine,
8732 * set as the completion callback of the MBX_READ_LNK_STAT mailbox command,
8733 * then sends out the RLS Accept (ACC) response.
8734 *
8735 * Return codes
8736 * 0 - Successfully processed rls iocb (currently always return 0)
8737 **/
8738 static int
8739 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8740 struct lpfc_nodelist *ndlp)
8741 {
8742 struct lpfc_hba *phba = vport->phba;
8743 LPFC_MBOXQ_t *mbox;
8744 struct ls_rjt stat;
8745 u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8746 u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8747
8748 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8749 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8750 /* reject the unsolicited RLS request and done with it */
8751 goto reject_out;
8752
8753 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8754 if (mbox) {
8755 lpfc_read_lnk_stat(phba, mbox);
8756 mbox->ctx_u.ox_rx_id = ox_id << 16 | ctx;
8757 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8758 if (!mbox->ctx_ndlp)
8759 goto node_err;
8760 mbox->vport = vport;
8761 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8762 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8763 != MBX_NOT_FINISHED)
8764 /* Mbox completion will send ELS Response */
8765 return 0;
8766 /* Decrement reference count used for the failed mbox
8767 * command.
8768 */
8769 lpfc_nlp_put(ndlp);
8770 node_err:
8771 mempool_free(mbox, phba->mbox_mem_pool);
8772 }
8773 reject_out:
8774 /* issue rejection response */
8775 stat.un.b.lsRjtRsvd0 = 0;
8776 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8777 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8778 stat.un.b.vendorUnique = 0;
8779 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8780 return 0;
8781 }
8782
8783 /**
8784 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8785 * @vport: pointer to a host virtual N_Port data structure.
8786 * @cmdiocb: pointer to lpfc command iocb data structure.
8787 * @ndlp: pointer to a node-list data structure.
8788 *
8789 * This routine processes Read Timeout Value (RTV) IOCB received as an
8790 * ELS unsolicited event. It first checks the remote port state. If the
8791 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8792 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8793 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
8794 * Value (RTV) unsolicited IOCB event.
8795 *
8796 * Note that the ndlp reference count will be incremented by 1 for holding the
8797 * ndlp and the reference to ndlp will be stored into the ndlp field of
8798 * the IOCB for the completion callback function to the RTV Accept Response
8799 * ELS IOCB command.
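 *
 * The RTV ACC payload is built from the local timer values: R_A_TOV is
 * reported in milliseconds (phba->fc_ratov scaled by 1000), E_D_TOV comes
 * from phba->fc_edtov, and the qtov word carries the E_D_TOV resolution
 * bit taken from phba->fc_edtovResol.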
8800 *
8801 * Return codes
8802 * 0 - Successfully processed rtv iocb (currently always return 0)
8803 **/
8804 static int
8805 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8806 struct lpfc_nodelist *ndlp)
8807 {
8808 int rc = 0;
8809 IOCB_t *icmd;
8810 union lpfc_wqe128 *wqe;
8811 struct lpfc_hba *phba = vport->phba;
8812 struct ls_rjt stat;
8813 struct RTV_RSP *rtv_rsp;
8814 uint8_t *pcmd;
8815 struct lpfc_iocbq *elsiocb;
8816 uint32_t cmdsize;
8817 u32 ulp_context;
8818
8819 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8820 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8821 /* reject the unsolicited RTV request and done with it */
8822 goto reject_out;
8823
8824 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8825 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8826 lpfc_max_els_tries, ndlp,
8827 ndlp->nlp_DID, ELS_CMD_ACC);
8828
8829 if (!elsiocb)
8830 return 1;
8831
8832 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8833 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8834 pcmd += sizeof(uint32_t); /* Skip past command */
8835
8836 ulp_context = get_job_ulpcontext(phba, elsiocb);
8837 /* use the command's xri in the response */
8838 if (phba->sli_rev == LPFC_SLI_REV4) {
8839 wqe = &elsiocb->wqe;
8840 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8841 get_job_ulpcontext(phba, cmdiocb));
8842 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8843 get_job_rcvoxid(phba, cmdiocb));
8844 } else {
8845 icmd = &elsiocb->iocb;
8846 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8847 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
8848 }
8849
8850 rtv_rsp = (struct RTV_RSP *)pcmd;
8851
8852 /* populate RTV payload */
8853 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8854 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8855 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8856 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
8857 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
8858
8859 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
8860 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8861 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8862 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8863 "Data: x%x x%x x%x\n",
8864 elsiocb->iotag, ulp_context,
8865 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8866 ndlp->nlp_rpi,
8867 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8868 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8869 phba->fc_stat.elsXmitACC++;
8870 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8871 if (!elsiocb->ndlp) {
8872 lpfc_els_free_iocb(phba, elsiocb);
8873 return 0;
8874 }
8875
8876 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8877 if (rc == IOCB_ERROR) {
8878 lpfc_els_free_iocb(phba, elsiocb);
8879 lpfc_nlp_put(ndlp);
8880 }
8881 return 0;
8882
8883 reject_out:
8884 /* issue rejection response */
8885 stat.un.b.lsRjtRsvd0 = 0;
8886 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8887 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8888 stat.un.b.vendorUnique = 0;
8889 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8890 return 0;
8891 }
8892
8893 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command
8894 * @vport: pointer to a host virtual N_Port data structure.
8895 * @ndlp: pointer to a node-list data structure.
8896 * @did: DID of the target.
8897 * @rrq: Pointer to the rrq struct.
8898 *
8899 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8900 * successful, the completion handler will clear the RRQ.
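 *
 * The RRQ payload is built from the @rrq state: the originator exchange
 * OX_ID (looked up through the xri_ids table for rrq->xritag), the RX_ID
 * from rrq->rxid, and this port's N_Port_ID (vport->fc_myDID).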
8901 * 8902 * Return codes 8903 * 0 - Successfully sent rrq els iocb. 8904 * 1 - Failed to send rrq els iocb. 8905 **/ 8906 static int 8907 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8908 uint32_t did, struct lpfc_node_rrq *rrq) 8909 { 8910 struct lpfc_hba *phba = vport->phba; 8911 struct RRQ *els_rrq; 8912 struct lpfc_iocbq *elsiocb; 8913 uint8_t *pcmd; 8914 uint16_t cmdsize; 8915 int ret; 8916 8917 if (!ndlp) 8918 return 1; 8919 8920 /* If ndlp is not NULL, we will bump the reference count on it */ 8921 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8922 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8923 ELS_CMD_RRQ); 8924 if (!elsiocb) 8925 return 1; 8926 8927 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8928 8929 /* For RRQ request, remainder of payload is Exchange IDs */ 8930 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8931 pcmd += sizeof(uint32_t); 8932 els_rrq = (struct RRQ *) pcmd; 8933 8934 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8935 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8936 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8937 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8938 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8939 8940 8941 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8942 "Issue RRQ: did:x%x", 8943 did, rrq->xritag, rrq->rxid); 8944 elsiocb->context_un.rrq = rrq; 8945 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8946 8947 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8948 if (!elsiocb->ndlp) 8949 goto io_err; 8950 8951 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8952 if (ret == IOCB_ERROR) { 8953 lpfc_nlp_put(ndlp); 8954 goto io_err; 8955 } 8956 return 0; 8957 8958 io_err: 8959 lpfc_els_free_iocb(phba, elsiocb); 8960 return 1; 8961 } 8962 8963 /** 8964 * lpfc_send_rrq - Sends ELS RRQ if needed. 8965 * @phba: pointer to lpfc hba data structure. 8966 * @rrq: pointer to the active rrq. 8967 * 8968 * This routine will call the lpfc_issue_els_rrq if the rrq is 8969 * still active for the xri. If this function returns a failure then 8970 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8971 * 8972 * Returns 0 Success. 8973 * 1 Failure. 8974 **/ 8975 int 8976 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8977 { 8978 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8979 rrq->nlp_DID); 8980 if (!ndlp) 8981 return 1; 8982 8983 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8984 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8985 rrq->nlp_DID, rrq); 8986 else 8987 return 1; 8988 } 8989 8990 /** 8991 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8992 * @vport: pointer to a host virtual N_Port data structure. 8993 * @cmdsize: size of the ELS command. 8994 * @oldiocb: pointer to the original lpfc command iocb data structure. 8995 * @ndlp: pointer to a node-list data structure. 8996 * 8997 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8998 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8999 * 9000 * Note that the ndlp reference count will be incremented by 1 for holding the 9001 * ndlp and the reference to ndlp will be stored into the ndlp field of 9002 * the IOCB for the completion callback function to the RPL Accept Response 9003 * ELS command. 
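 *
 * The ACC payload begins with the ELS_CMD_ACC command word, whose lower
 * halfword is set to the payload length, followed by a one-entry port
 * number block carrying this vport's N_Port_ID and WWPN.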
9004 * 9005 * Return code 9006 * 0 - Successfully issued ACC RPL ELS command 9007 * 1 - Failed to issue ACC RPL ELS command 9008 **/ 9009 static int 9010 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 9011 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 9012 { 9013 int rc = 0; 9014 struct lpfc_hba *phba = vport->phba; 9015 IOCB_t *icmd; 9016 union lpfc_wqe128 *wqe; 9017 RPL_RSP rpl_rsp; 9018 struct lpfc_iocbq *elsiocb; 9019 uint8_t *pcmd; 9020 u32 ulp_context; 9021 9022 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 9023 ndlp->nlp_DID, ELS_CMD_ACC); 9024 9025 if (!elsiocb) 9026 return 1; 9027 9028 ulp_context = get_job_ulpcontext(phba, elsiocb); 9029 if (phba->sli_rev == LPFC_SLI_REV4) { 9030 wqe = &elsiocb->wqe; 9031 /* Xri / rx_id */ 9032 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 9033 get_job_ulpcontext(phba, oldiocb)); 9034 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9035 get_job_rcvoxid(phba, oldiocb)); 9036 } else { 9037 icmd = &elsiocb->iocb; 9038 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 9039 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 9040 } 9041 9042 pcmd = elsiocb->cmd_dmabuf->virt; 9043 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 9044 pcmd += sizeof(uint16_t); 9045 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 9046 pcmd += sizeof(uint16_t); 9047 9048 /* Setup the RPL ACC payload */ 9049 rpl_rsp.listLen = be32_to_cpu(1); 9050 rpl_rsp.index = 0; 9051 rpl_rsp.port_num_blk.portNum = 0; 9052 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 9053 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 9054 sizeof(struct lpfc_name)); 9055 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 9056 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 9057 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9058 "0120 Xmit ELS RPL ACC response tag x%x " 9059 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 9060 "rpi x%x\n", 9061 elsiocb->iotag, ulp_context, 9062 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 9063 ndlp->nlp_rpi); 9064 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 9065 phba->fc_stat.elsXmitACC++; 9066 elsiocb->ndlp = lpfc_nlp_get(ndlp); 9067 if (!elsiocb->ndlp) { 9068 lpfc_els_free_iocb(phba, elsiocb); 9069 return 1; 9070 } 9071 9072 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9073 if (rc == IOCB_ERROR) { 9074 lpfc_els_free_iocb(phba, elsiocb); 9075 lpfc_nlp_put(ndlp); 9076 return 1; 9077 } 9078 9079 return 0; 9080 } 9081 9082 /** 9083 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 9084 * @vport: pointer to a host virtual N_Port data structure. 9085 * @cmdiocb: pointer to lpfc command iocb data structure. 9086 * @ndlp: pointer to a node-list data structure. 9087 * 9088 * This routine processes Read Port List (RPL) IOCB received as an ELS 9089 * unsolicited event. It first checks the remote port state. If the remote 9090 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 9091 * invokes the lpfc_els_rsp_reject() routine to send reject response. 9092 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 9093 * to accept the RPL. 
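 * The size of the ACC is derived from the request: if the requested index is
 * zero and maxsize is either zero or large enough to hold a full RPL_RSP,
 * the complete response is returned; otherwise the response is limited to
 * maxsize words.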
9094 * 9095 * Return code 9096 * 0 - Successfully processed rpl iocb (currently always return 0) 9097 **/ 9098 static int 9099 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9100 struct lpfc_nodelist *ndlp) 9101 { 9102 struct lpfc_dmabuf *pcmd; 9103 uint32_t *lp; 9104 uint32_t maxsize; 9105 uint16_t cmdsize; 9106 RPL *rpl; 9107 struct ls_rjt stat; 9108 9109 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 9110 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 9111 /* issue rejection response */ 9112 stat.un.b.lsRjtRsvd0 = 0; 9113 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 9114 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 9115 stat.un.b.vendorUnique = 0; 9116 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 9117 NULL); 9118 /* rejected the unsolicited RPL request and done with it */ 9119 return 0; 9120 } 9121 9122 pcmd = cmdiocb->cmd_dmabuf; 9123 lp = (uint32_t *) pcmd->virt; 9124 rpl = (RPL *) (lp + 1); 9125 maxsize = be32_to_cpu(rpl->maxsize); 9126 9127 /* We support only one port */ 9128 if ((rpl->index == 0) && 9129 ((maxsize == 0) || 9130 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 9131 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 9132 } else { 9133 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 9134 } 9135 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 9136 9137 return 0; 9138 } 9139 9140 /** 9141 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 9142 * @vport: pointer to a virtual N_Port data structure. 9143 * @cmdiocb: pointer to lpfc command iocb data structure. 9144 * @ndlp: pointer to a node-list data structure. 9145 * 9146 * This routine processes Fibre Channel Address Resolution Protocol 9147 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 9148 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 9149 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 9150 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 9151 * remote PortName is compared against the FC PortName stored in the @vport 9152 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 9153 * compared against the FC NodeName stored in the @vport data structure. 9154 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 9155 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 9156 * invoked to send out FARP Response to the remote node. Before sending the 9157 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 9158 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 9159 * routine is invoked to log into the remote port first. 
9160 * 9161 * Return code 9162 * 0 - Either the FARP Match Mode not supported or successfully processed 9163 **/ 9164 static int 9165 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9166 struct lpfc_nodelist *ndlp) 9167 { 9168 struct lpfc_dmabuf *pcmd; 9169 uint32_t *lp; 9170 FARP *fp; 9171 uint32_t cnt, did; 9172 9173 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9174 pcmd = cmdiocb->cmd_dmabuf; 9175 lp = (uint32_t *) pcmd->virt; 9176 9177 lp++; 9178 fp = (FARP *) lp; 9179 /* FARP-REQ received from DID <did> */ 9180 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9181 "0601 FARP-REQ received from DID x%x\n", did); 9182 /* We will only support match on WWPN or WWNN */ 9183 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 9184 return 0; 9185 } 9186 9187 cnt = 0; 9188 /* If this FARP command is searching for my portname */ 9189 if (fp->Mflags & FARP_MATCH_PORT) { 9190 if (memcmp(&fp->RportName, &vport->fc_portname, 9191 sizeof(struct lpfc_name)) == 0) 9192 cnt = 1; 9193 } 9194 9195 /* If this FARP command is searching for my nodename */ 9196 if (fp->Mflags & FARP_MATCH_NODE) { 9197 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9198 sizeof(struct lpfc_name)) == 0) 9199 cnt = 1; 9200 } 9201 9202 if (cnt) { 9203 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9204 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9205 /* Log back into the node before sending the FARP. */ 9206 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9207 ndlp->nlp_prev_state = ndlp->nlp_state; 9208 lpfc_nlp_set_state(vport, ndlp, 9209 NLP_STE_PLOGI_ISSUE); 9210 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9211 } 9212 9213 /* Send a FARP response to that node */ 9214 if (fp->Rflags & FARP_REQUEST_FARPR) 9215 lpfc_issue_els_farpr(vport, did, 0); 9216 } 9217 } 9218 return 0; 9219 } 9220 9221 /** 9222 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9223 * @vport: pointer to a host virtual N_Port data structure. 9224 * @cmdiocb: pointer to lpfc command iocb data structure. 9225 * @ndlp: pointer to a node-list data structure. 9226 * 9227 * This routine processes Fibre Channel Address Resolution Protocol 9228 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9229 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9230 * the FARP response request. 9231 * 9232 * Return code 9233 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9234 **/ 9235 static int 9236 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9237 struct lpfc_nodelist *ndlp) 9238 { 9239 uint32_t did; 9240 9241 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9242 9243 /* FARP-RSP received from DID <did> */ 9244 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9245 "0600 FARP-RSP received from DID x%x\n", did); 9246 /* ACCEPT the Farp resp request */ 9247 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9248 9249 return 0; 9250 } 9251 9252 /** 9253 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9254 * @vport: pointer to a host virtual N_Port data structure. 9255 * @cmdiocb: pointer to lpfc command iocb data structure. 9256 * @fan_ndlp: pointer to a node-list data structure. 9257 * 9258 * This routine processes a Fabric Address Notification (FAN) IOCB 9259 * command received as an ELS unsolicited event. The FAN ELS command will 9260 * only be processed on a physical port (i.e., the @vport represents the 9261 * physical port). 
The fabric NodeName and PortName from the FAN IOCB are
9262 * compared against those in the phba data structure. If either of them
9263 * differs, the port has switched fabrics and lpfc_issue_init_vfi() is
9264 * invoked to restart fabric discovery with a new Fabric Login (FLOGI).
9265 * Otherwise, if both are identical, the previous DID is restored and fabric
9266 * login is re-registered (lpfc_issue_fabric_reglogin() on SLI-3, lpfc_issue_reg_vfi() on SLI-4).
9267 *
9268 * Return code
9269 * 0 - Successfully processed fan iocb (currently always return 0).
9270 **/
9271 static int
9272 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9273 struct lpfc_nodelist *fan_ndlp)
9274 {
9275 struct lpfc_hba *phba = vport->phba;
9276 uint32_t *lp;
9277 FAN *fp;
9278
9279 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9280 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9281 fp = (FAN *) ++lp;
9282 /* FAN received; Fan does not have a reply sequence */
9283 if ((vport == phba->pport) &&
9284 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9285 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9286 sizeof(struct lpfc_name))) ||
9287 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9288 sizeof(struct lpfc_name)))) {
9289 /* This port has switched fabrics. FLOGI is required */
9290 lpfc_issue_init_vfi(vport);
9291 } else {
9292 /* FAN verified - skip FLOGI */
9293 vport->fc_myDID = vport->fc_prevDID;
9294 if (phba->sli_rev < LPFC_SLI_REV4)
9295 lpfc_issue_fabric_reglogin(vport);
9296 else {
9297 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9298 "3138 Need register VFI: (x%x/%x)\n",
9299 vport->fc_prevDID, vport->fc_myDID);
9300 lpfc_issue_reg_vfi(vport);
9301 }
9302 }
9303 }
9304 return 0;
9305 }
9306
9307 /**
9308 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9309 * @vport: pointer to a host virtual N_Port data structure.
9310 * @cmdiocb: pointer to lpfc command iocb data structure.
9311 * @ndlp: pointer to a node-list data structure.
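 *
 * This routine processes an unsolicited Exchange Diagnostic Capabilities
 * (EDC) request. It walks the request's diagnostic descriptor TLVs, noting
 * the Link Fault and congestion signaling capability descriptors, then sends
 * an EDC Accept (ACC) response and reconfigures congestion signaling.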
9312 * 9313 * Return code 9314 * 0 - Successfully processed echo iocb (currently always return 0) 9315 **/ 9316 static int 9317 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9318 struct lpfc_nodelist *ndlp) 9319 { 9320 struct lpfc_hba *phba = vport->phba; 9321 struct fc_els_edc *edc_req; 9322 struct fc_tlv_desc *tlv; 9323 uint8_t *payload; 9324 uint32_t *ptr, dtag; 9325 const char *dtag_nm; 9326 int desc_cnt = 0, bytes_remain; 9327 struct fc_diag_lnkflt_desc *plnkflt; 9328 9329 payload = cmdiocb->cmd_dmabuf->virt; 9330 9331 edc_req = (struct fc_els_edc *)payload; 9332 bytes_remain = be32_to_cpu(edc_req->desc_len); 9333 9334 ptr = (uint32_t *)payload; 9335 lpfc_printf_vlog(vport, KERN_INFO, 9336 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9337 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9338 bytes_remain, be32_to_cpu(*ptr), 9339 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9340 9341 /* No signal support unless there is a congestion descriptor */ 9342 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9343 phba->cgn_sig_freq = 0; 9344 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9345 9346 if (bytes_remain <= 0) 9347 goto out; 9348 9349 tlv = edc_req->desc; 9350 9351 /* 9352 * cycle through EDC diagnostic descriptors to find the 9353 * congestion signaling capability descriptor 9354 */ 9355 while (bytes_remain) { 9356 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9357 lpfc_printf_log(phba, KERN_WARNING, 9358 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9359 "6464 Truncated TLV hdr on " 9360 "Diagnostic descriptor[%d]\n", 9361 desc_cnt); 9362 goto out; 9363 } 9364 9365 dtag = be32_to_cpu(tlv->desc_tag); 9366 switch (dtag) { 9367 case ELS_DTAG_LNK_FAULT_CAP: 9368 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9369 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9370 sizeof(struct fc_diag_lnkflt_desc)) { 9371 lpfc_printf_log(phba, KERN_WARNING, 9372 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9373 "6465 Truncated Link Fault Diagnostic " 9374 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9375 desc_cnt, bytes_remain, 9376 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9377 sizeof(struct fc_diag_lnkflt_desc)); 9378 goto out; 9379 } 9380 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 9381 lpfc_printf_log(phba, KERN_INFO, 9382 LOG_ELS | LOG_LDS_EVENT, 9383 "4626 Link Fault Desc Data: x%08x len x%x " 9384 "da x%x dd x%x interval x%x\n", 9385 be32_to_cpu(plnkflt->desc_tag), 9386 be32_to_cpu(plnkflt->desc_len), 9387 be32_to_cpu( 9388 plnkflt->degrade_activate_threshold), 9389 be32_to_cpu( 9390 plnkflt->degrade_deactivate_threshold), 9391 be32_to_cpu(plnkflt->fec_degrade_interval)); 9392 break; 9393 case ELS_DTAG_CG_SIGNAL_CAP: 9394 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9395 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9396 sizeof(struct fc_diag_cg_sig_desc)) { 9397 lpfc_printf_log( 9398 phba, KERN_WARNING, LOG_CGN_MGMT, 9399 "6466 Truncated cgn signal Diagnostic " 9400 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9401 desc_cnt, bytes_remain, 9402 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9403 sizeof(struct fc_diag_cg_sig_desc)); 9404 goto out; 9405 } 9406 9407 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9408 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9409 9410 /* We start negotiation with lpfc_fabric_cgn_frequency. 9411 * When we process the EDC, we will settle on the 9412 * higher frequency. 
9413 */
9414 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9415
9416 lpfc_least_capable_settings(
9417 phba, (struct fc_diag_cg_sig_desc *)tlv);
9418 break;
9419 default:
9420 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
9421 lpfc_printf_log(phba, KERN_WARNING,
9422 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9423 "6467 unknown Diagnostic "
9424 "Descriptor[%d]: tag x%x (%s)\n",
9425 desc_cnt, dtag, dtag_nm);
9426 }
9427 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
9428 tlv = fc_tlv_next_desc(tlv);
9429 desc_cnt++;
9430 }
9431 out:
9432 /* Need to send back an ACC */
9433 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
9434
9435 lpfc_config_cgn_signal(phba);
9436 return 0;
9437 }
9438
9439 /**
9440 * lpfc_els_timeout - Handler function for the ELS timer
9441 * @t: timer context used to obtain the vport.
9442 *
9443 * This routine is invoked by the ELS timer after timeout. It posts the ELS
9444 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
9445 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
9446 * up the worker thread. The worker thread then invokes the
9447 * lpfc_els_timeout_handler() routine to handle the posted WORKER_ELS_TMO event.
9448 **/
9449 void
9450 lpfc_els_timeout(struct timer_list *t)
9451 {
9452 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
9453 struct lpfc_hba *phba = vport->phba;
9454 uint32_t tmo_posted;
9455 unsigned long iflag;
9456
9457 spin_lock_irqsave(&vport->work_port_lock, iflag);
9458 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
9459 if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
9460 vport->work_port_events |= WORKER_ELS_TMO;
9461 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
9462
9463 if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
9464 lpfc_worker_wake_up(phba);
9465 return;
9466 }
9467
9468
9469 /**
9470 * lpfc_els_timeout_handler - Process an els timeout event
9471 * @vport: pointer to a virtual N_Port data structure.
9472 *
9473 * This routine is the actual handler function that processes an ELS timeout
9474 * event. It walks the ELS ring to find and abort the timed-out IOCBs
9475 * associated with the @vport (except ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by
9476 * invoking the lpfc_sli_issue_abort_iotag() routine.
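 * Timed-out IOCBs are first collected on a local abort list while holding
 * the hbalock (and the ring_lock on SLI4); the abort requests are issued
 * after the txcmplq walk completes, re-taking the hbalock for each abort.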
9477 **/ 9478 void 9479 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9480 { 9481 struct lpfc_hba *phba = vport->phba; 9482 struct lpfc_sli_ring *pring; 9483 struct lpfc_iocbq *tmp_iocb, *piocb; 9484 IOCB_t *cmd = NULL; 9485 struct lpfc_dmabuf *pcmd; 9486 uint32_t els_command = 0; 9487 uint32_t timeout; 9488 uint32_t remote_ID = 0xffffffff; 9489 LIST_HEAD(abort_list); 9490 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9491 9492 9493 timeout = (uint32_t)(phba->fc_ratov << 1); 9494 9495 pring = lpfc_phba_elsring(phba); 9496 if (unlikely(!pring)) 9497 return; 9498 9499 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) 9500 return; 9501 9502 spin_lock_irq(&phba->hbalock); 9503 if (phba->sli_rev == LPFC_SLI_REV4) 9504 spin_lock(&pring->ring_lock); 9505 9506 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9507 ulp_command = get_job_cmnd(phba, piocb); 9508 ulp_context = get_job_ulpcontext(phba, piocb); 9509 did = get_job_els_rsp64_did(phba, piocb); 9510 9511 if (phba->sli_rev == LPFC_SLI_REV4) { 9512 iotag = get_wqe_reqtag(piocb); 9513 } else { 9514 cmd = &piocb->iocb; 9515 iotag = cmd->ulpIoTag; 9516 } 9517 9518 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9519 ulp_command == CMD_ABORT_XRI_CX || 9520 ulp_command == CMD_ABORT_XRI_CN || 9521 ulp_command == CMD_CLOSE_XRI_CN) 9522 continue; 9523 9524 if (piocb->vport != vport) 9525 continue; 9526 9527 pcmd = piocb->cmd_dmabuf; 9528 if (pcmd) 9529 els_command = *(uint32_t *) (pcmd->virt); 9530 9531 if (els_command == ELS_CMD_FARP || 9532 els_command == ELS_CMD_FARPR || 9533 els_command == ELS_CMD_FDISC) 9534 continue; 9535 9536 if (piocb->drvrTimeout > 0) { 9537 if (piocb->drvrTimeout >= timeout) 9538 piocb->drvrTimeout -= timeout; 9539 else 9540 piocb->drvrTimeout = 0; 9541 continue; 9542 } 9543 9544 remote_ID = 0xffffffff; 9545 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9546 remote_ID = did; 9547 } else { 9548 struct lpfc_nodelist *ndlp; 9549 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9550 if (ndlp) 9551 remote_ID = ndlp->nlp_DID; 9552 } 9553 list_add_tail(&piocb->dlist, &abort_list); 9554 } 9555 if (phba->sli_rev == LPFC_SLI_REV4) 9556 spin_unlock(&pring->ring_lock); 9557 spin_unlock_irq(&phba->hbalock); 9558 9559 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9560 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9561 "0127 ELS timeout Data: x%x x%x x%x " 9562 "x%x\n", els_command, 9563 remote_ID, ulp_command, iotag); 9564 9565 spin_lock_irq(&phba->hbalock); 9566 list_del_init(&piocb->dlist); 9567 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9568 spin_unlock_irq(&phba->hbalock); 9569 } 9570 9571 /* Make sure HBA is alive */ 9572 lpfc_issue_hb_tmo(phba); 9573 9574 if (!list_empty(&pring->txcmplq)) 9575 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 9576 mod_timer(&vport->els_tmofunc, 9577 jiffies + msecs_to_jiffies(1000 * timeout)); 9578 } 9579 9580 /** 9581 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9582 * @vport: pointer to a host virtual N_Port data structure. 9583 * 9584 * This routine is used to clean up all the outstanding ELS commands on a 9585 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9586 * routine. After that, it walks the ELS transmit queue to remove all the 9587 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9588 * the IOCBs with a non-NULL completion callback function, the callback 9589 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9590 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9591 * callback function, the IOCB will simply be released. Finally, it walks 9592 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9593 * completion queue IOCB that is associated with the @vport and is not 9594 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9595 * part of the discovery state machine) out to HBA by invoking the 9596 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9597 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9598 * the IOCBs are aborted when this function returns. 9599 **/ 9600 void 9601 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9602 { 9603 LIST_HEAD(abort_list); 9604 LIST_HEAD(cancel_list); 9605 struct lpfc_hba *phba = vport->phba; 9606 struct lpfc_sli_ring *pring; 9607 struct lpfc_iocbq *tmp_iocb, *piocb; 9608 u32 ulp_command; 9609 unsigned long iflags = 0; 9610 bool mbx_tmo_err; 9611 9612 lpfc_fabric_abort_vport(vport); 9613 9614 /* 9615 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9616 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9617 * ultimately grabs the ring_lock, the driver must splice the list into 9618 * a working list and release the locks before calling the abort. 9619 */ 9620 spin_lock_irqsave(&phba->hbalock, iflags); 9621 pring = lpfc_phba_elsring(phba); 9622 9623 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9624 if (unlikely(!pring)) { 9625 spin_unlock_irqrestore(&phba->hbalock, iflags); 9626 return; 9627 } 9628 9629 if (phba->sli_rev == LPFC_SLI_REV4) 9630 spin_lock(&pring->ring_lock); 9631 9632 mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); 9633 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9634 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9635 if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err) 9636 continue; 9637 9638 if (piocb->vport != vport) 9639 continue; 9640 9641 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) 9642 continue; 9643 9644 /* On the ELS ring we can have ELS_REQUESTs or 9645 * GEN_REQUESTs waiting for a response. 9646 */ 9647 ulp_command = get_job_cmnd(phba, piocb); 9648 if (ulp_command == CMD_ELS_REQUEST64_CR) { 9649 list_add_tail(&piocb->dlist, &abort_list); 9650 9651 /* If the link is down when flushing ELS commands 9652 * the firmware will not complete them till after 9653 * the link comes back up. This may confuse 9654 * discovery for the new link up, so we need to 9655 * change the compl routine to just clean up the iocb 9656 * and avoid any retry logic. 9657 */ 9658 if (phba->link_state == LPFC_LINK_DOWN) 9659 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9660 } else if (ulp_command == CMD_GEN_REQUEST64_CR || 9661 mbx_tmo_err) 9662 list_add_tail(&piocb->dlist, &abort_list); 9663 } 9664 9665 if (phba->sli_rev == LPFC_SLI_REV4) 9666 spin_unlock(&pring->ring_lock); 9667 spin_unlock_irqrestore(&phba->hbalock, iflags); 9668 9669 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 9670 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9671 spin_lock_irqsave(&phba->hbalock, iflags); 9672 list_del_init(&piocb->dlist); 9673 if (mbx_tmo_err || !(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) 9674 list_move_tail(&piocb->list, &cancel_list); 9675 else 9676 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9677 9678 spin_unlock_irqrestore(&phba->hbalock, iflags); 9679 } 9680 if (!list_empty(&cancel_list)) 9681 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, 9682 IOERR_SLI_ABORTED); 9683 else 9684 /* Make sure HBA is alive */ 9685 lpfc_issue_hb_tmo(phba); 9686 9687 if (!list_empty(&abort_list)) 9688 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9689 "3387 abort list for txq not empty\n"); 9690 INIT_LIST_HEAD(&abort_list); 9691 9692 spin_lock_irqsave(&phba->hbalock, iflags); 9693 if (phba->sli_rev == LPFC_SLI_REV4) 9694 spin_lock(&pring->ring_lock); 9695 9696 /* No need to abort the txq list, 9697 * just queue them up for lpfc_sli_cancel_iocbs 9698 */ 9699 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9700 ulp_command = get_job_cmnd(phba, piocb); 9701 9702 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9703 continue; 9704 9705 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9706 if (ulp_command == CMD_QUE_RING_BUF_CN || 9707 ulp_command == CMD_QUE_RING_BUF64_CN || 9708 ulp_command == CMD_CLOSE_XRI_CN || 9709 ulp_command == CMD_ABORT_XRI_CN || 9710 ulp_command == CMD_ABORT_XRI_CX) 9711 continue; 9712 9713 if (piocb->vport != vport) 9714 continue; 9715 9716 list_del_init(&piocb->list); 9717 list_add_tail(&piocb->list, &abort_list); 9718 } 9719 9720 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9721 if (vport == phba->pport) { 9722 list_for_each_entry_safe(piocb, tmp_iocb, 9723 &phba->fabric_iocb_list, list) { 9724 list_del_init(&piocb->list); 9725 list_add_tail(&piocb->list, &abort_list); 9726 } 9727 } 9728 9729 if (phba->sli_rev == LPFC_SLI_REV4) 9730 spin_unlock(&pring->ring_lock); 9731 spin_unlock_irqrestore(&phba->hbalock, iflags); 9732 9733 /* Cancel all the IOCBs from the completions list */ 9734 lpfc_sli_cancel_iocbs(phba, &abort_list, 9735 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9736 9737 return; 9738 } 9739 9740 /** 9741 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9742 * @phba: pointer to lpfc hba data structure. 9743 * 9744 * This routine is used to clean up all the outstanding ELS commands on a 9745 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9746 * routine. After that, it walks the ELS transmit queue to remove all the 9747 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9748 * the IOCBs with the completion callback function associated, the callback 9749 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9750 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9751 * callback function associated, the IOCB will simply be released. Finally, 9752 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9753 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9754 * management plane IOCBs that are not part of the discovery state machine) 9755 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 
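 * This is done by walking the @phba port list under the port_list_lock and
 * invoking lpfc_els_flush_cmd() for each vport.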
9756 **/ 9757 void 9758 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9759 { 9760 struct lpfc_vport *vport; 9761 9762 spin_lock_irq(&phba->port_list_lock); 9763 list_for_each_entry(vport, &phba->port_list, listentry) 9764 lpfc_els_flush_cmd(vport); 9765 spin_unlock_irq(&phba->port_list_lock); 9766 9767 return; 9768 } 9769 9770 /** 9771 * lpfc_send_els_failure_event - Posts an ELS command failure event 9772 * @phba: Pointer to hba context object. 9773 * @cmdiocbp: Pointer to command iocb which reported error. 9774 * @rspiocbp: Pointer to response iocb which reported error. 9775 * 9776 * This function sends an event when there is an ELS command 9777 * failure. 9778 **/ 9779 void 9780 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9781 struct lpfc_iocbq *cmdiocbp, 9782 struct lpfc_iocbq *rspiocbp) 9783 { 9784 struct lpfc_vport *vport = cmdiocbp->vport; 9785 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9786 struct lpfc_lsrjt_event lsrjt_event; 9787 struct lpfc_fabric_event_header fabric_event; 9788 struct ls_rjt stat; 9789 struct lpfc_nodelist *ndlp; 9790 uint32_t *pcmd; 9791 u32 ulp_status, ulp_word4; 9792 9793 ndlp = cmdiocbp->ndlp; 9794 if (!ndlp) 9795 return; 9796 9797 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9798 ulp_word4 = get_job_word4(phba, rspiocbp); 9799 9800 if (ulp_status == IOSTAT_LS_RJT) { 9801 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9802 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9803 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9804 sizeof(struct lpfc_name)); 9805 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9806 sizeof(struct lpfc_name)); 9807 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9808 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9809 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9810 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9811 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9812 fc_host_post_vendor_event(shost, 9813 fc_get_event_number(), 9814 sizeof(lsrjt_event), 9815 (char *)&lsrjt_event, 9816 LPFC_NL_VENDOR_ID); 9817 return; 9818 } 9819 if (ulp_status == IOSTAT_NPORT_BSY || 9820 ulp_status == IOSTAT_FABRIC_BSY) { 9821 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9822 if (ulp_status == IOSTAT_NPORT_BSY) 9823 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9824 else 9825 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9826 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9827 sizeof(struct lpfc_name)); 9828 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9829 sizeof(struct lpfc_name)); 9830 fc_host_post_vendor_event(shost, 9831 fc_get_event_number(), 9832 sizeof(fabric_event), 9833 (char *)&fabric_event, 9834 LPFC_NL_VENDOR_ID); 9835 return; 9836 } 9837 9838 } 9839 9840 /** 9841 * lpfc_send_els_event - Posts unsolicited els event 9842 * @vport: Pointer to vport object. 9843 * @ndlp: Pointer FC node object. 9844 * @payload: ELS command code type. 9845 * 9846 * This function posts an event when there is an incoming 9847 * unsolicited ELS command. 
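 * PLOGI, PRLO, ADISC and LOGO commands are reported; a LOGO uses the larger
 * lpfc_logo_event so that the WWPN from the LOGO payload can be included.
 * The event is posted as an FC_REG_ELS_EVENT vendor event on the Scsi_Host.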
9848 **/ 9849 static void 9850 lpfc_send_els_event(struct lpfc_vport *vport, 9851 struct lpfc_nodelist *ndlp, 9852 uint32_t *payload) 9853 { 9854 struct lpfc_els_event_header *els_data = NULL; 9855 struct lpfc_logo_event *logo_data = NULL; 9856 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9857 9858 if (*payload == ELS_CMD_LOGO) { 9859 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9860 if (!logo_data) { 9861 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9862 "0148 Failed to allocate memory " 9863 "for LOGO event\n"); 9864 return; 9865 } 9866 els_data = &logo_data->header; 9867 } else { 9868 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9869 GFP_KERNEL); 9870 if (!els_data) { 9871 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9872 "0149 Failed to allocate memory " 9873 "for ELS event\n"); 9874 return; 9875 } 9876 } 9877 els_data->event_type = FC_REG_ELS_EVENT; 9878 switch (*payload) { 9879 case ELS_CMD_PLOGI: 9880 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9881 break; 9882 case ELS_CMD_PRLO: 9883 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9884 break; 9885 case ELS_CMD_ADISC: 9886 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9887 break; 9888 case ELS_CMD_LOGO: 9889 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9890 /* Copy the WWPN in the LOGO payload */ 9891 memcpy(logo_data->logo_wwpn, &payload[2], 9892 sizeof(struct lpfc_name)); 9893 break; 9894 default: 9895 kfree(els_data); 9896 return; 9897 } 9898 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9899 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9900 if (*payload == ELS_CMD_LOGO) { 9901 fc_host_post_vendor_event(shost, 9902 fc_get_event_number(), 9903 sizeof(struct lpfc_logo_event), 9904 (char *)logo_data, 9905 LPFC_NL_VENDOR_ID); 9906 kfree(logo_data); 9907 } else { 9908 fc_host_post_vendor_event(shost, 9909 fc_get_event_number(), 9910 sizeof(struct lpfc_els_event_header), 9911 (char *)els_data, 9912 LPFC_NL_VENDOR_ID); 9913 kfree(els_data); 9914 } 9915 9916 return; 9917 } 9918 9919 9920 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9921 FC_FPIN_LI_EVT_TYPES_INIT); 9922 9923 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9924 FC_FPIN_DELI_EVT_TYPES_INIT); 9925 9926 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9927 FC_FPIN_CONGN_EVT_TYPES_INIT); 9928 9929 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9930 fc_fpin_congn_severity_types, 9931 FC_FPIN_CONGN_SEVERITY_INIT); 9932 9933 9934 /** 9935 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9936 * @phba: Pointer to phba object. 9937 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9938 * @cnt: count of WWPNs in FPIN payload 9939 * 9940 * This routine is called by LI and PC descriptors. 
9941 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9942 */ 9943 static void 9944 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9945 { 9946 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9947 __be64 wwn; 9948 u64 wwpn; 9949 int i, len; 9950 int line = 0; 9951 int wcnt = 0; 9952 bool endit = false; 9953 9954 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9955 for (i = 0; i < cnt; i++) { 9956 /* Are we on the last WWPN */ 9957 if (i == (cnt - 1)) 9958 endit = true; 9959 9960 /* Extract the next WWPN from the payload */ 9961 wwn = *wwnlist++; 9962 wwpn = be64_to_cpu(wwn); 9963 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9964 " %016llx", wwpn); 9965 9966 /* Log a message if we are on the last WWPN 9967 * or if we hit the max allowed per message. 9968 */ 9969 wcnt++; 9970 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9971 buf[len] = 0; 9972 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9973 "4686 %s\n", buf); 9974 9975 /* Check if we reached the last WWPN */ 9976 if (endit) 9977 return; 9978 9979 /* Limit the number of log message displayed per FPIN */ 9980 line++; 9981 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9982 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9983 "4687 %d WWPNs Truncated\n", 9984 cnt - i - 1); 9985 return; 9986 } 9987 9988 /* Start over with next log message */ 9989 wcnt = 0; 9990 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9991 "Additional WWPNs:"); 9992 } 9993 } 9994 } 9995 9996 /** 9997 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9998 * @phba: Pointer to phba object. 9999 * @tlv: Pointer to the Link Integrity Notification Descriptor. 10000 * 10001 * This function processes a Link Integrity FPIN event by logging a message. 10002 **/ 10003 static void 10004 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10005 { 10006 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 10007 const char *li_evt_str; 10008 u32 li_evt, cnt; 10009 10010 li_evt = be16_to_cpu(li->event_type); 10011 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 10012 cnt = be32_to_cpu(li->pname_count); 10013 10014 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10015 "4680 FPIN Link Integrity %s (x%x) " 10016 "Detecting PN x%016llx Attached PN x%016llx " 10017 "Duration %d mSecs Count %d Port Cnt %d\n", 10018 li_evt_str, li_evt, 10019 be64_to_cpu(li->detecting_wwpn), 10020 be64_to_cpu(li->attached_wwpn), 10021 be32_to_cpu(li->event_threshold), 10022 be32_to_cpu(li->event_count), cnt); 10023 10024 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 10025 } 10026 10027 /** 10028 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 10029 * @phba: Pointer to hba object. 10030 * @tlv: Pointer to the Delivery Notification Descriptor TLV 10031 * 10032 * This function processes a Delivery FPIN event by logging a message. 
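 * The six payload words that follow the descriptor header fields are logged
 * as DiscHdr0 through DiscHdr5.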
10033 **/ 10034 static void 10035 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10036 { 10037 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 10038 const char *del_rsn_str; 10039 u32 del_rsn; 10040 __be32 *frame; 10041 10042 del_rsn = be16_to_cpu(del->deli_reason_code); 10043 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 10044 10045 /* Skip over desc_tag/desc_len header to payload */ 10046 frame = (__be32 *)(del + 1); 10047 10048 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10049 "4681 FPIN Delivery %s (x%x) " 10050 "Detecting PN x%016llx Attached PN x%016llx " 10051 "DiscHdr0 x%08x " 10052 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 10053 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 10054 del_rsn_str, del_rsn, 10055 be64_to_cpu(del->detecting_wwpn), 10056 be64_to_cpu(del->attached_wwpn), 10057 be32_to_cpu(frame[0]), 10058 be32_to_cpu(frame[1]), 10059 be32_to_cpu(frame[2]), 10060 be32_to_cpu(frame[3]), 10061 be32_to_cpu(frame[4]), 10062 be32_to_cpu(frame[5])); 10063 } 10064 10065 /** 10066 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 10067 * @phba: Pointer to hba object. 10068 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 10069 * 10070 * This function processes a Peer Congestion FPIN event by logging a message. 10071 **/ 10072 static void 10073 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10074 { 10075 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 10076 const char *pc_evt_str; 10077 u32 pc_evt, cnt; 10078 10079 pc_evt = be16_to_cpu(pc->event_type); 10080 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 10081 cnt = be32_to_cpu(pc->pname_count); 10082 10083 /* Capture FPIN frequency */ 10084 phba->cgn_fpin_frequency = be32_to_cpu(pc->event_period); 10085 10086 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 10087 "4684 FPIN Peer Congestion %s (x%x) " 10088 "Duration %d mSecs " 10089 "Detecting PN x%016llx Attached PN x%016llx " 10090 "Impacted Port Cnt %d\n", 10091 pc_evt_str, pc_evt, 10092 be32_to_cpu(pc->event_period), 10093 be64_to_cpu(pc->detecting_wwpn), 10094 be64_to_cpu(pc->attached_wwpn), 10095 cnt); 10096 10097 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 10098 } 10099 10100 /** 10101 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 10102 * @phba: Pointer to hba object. 10103 * @tlv: Pointer to the Congestion Notification Descriptor TLV 10104 * 10105 * This function processes an FPIN Congestion Notifiction. The notification 10106 * could be an Alarm or Warning. This routine feeds that data into driver's 10107 * running congestion algorithm. It also processes the FPIN by 10108 * logging a message. It returns 1 to indicate deliver this message 10109 * to the upper layer or 0 to indicate don't deliver it. 
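 * For Credit Stall and Oversubscription events with CMF active, the driver
 * updates its alarm/warning counters and the FPIN frequency and returns 0 so
 * the descriptor is not forwarded to the upper layer; other event types are
 * only logged.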
10110 **/ 10111 static int 10112 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10113 { 10114 struct lpfc_cgn_info *cp; 10115 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 10116 const char *cgn_evt_str; 10117 u32 cgn_evt; 10118 const char *cgn_sev_str; 10119 u32 cgn_sev; 10120 uint16_t value; 10121 u32 crc; 10122 bool nm_log = false; 10123 int rc = 1; 10124 10125 cgn_evt = be16_to_cpu(cgn->event_type); 10126 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 10127 cgn_sev = cgn->severity; 10128 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 10129 10130 /* The driver only takes action on a Credit Stall or Oversubscription 10131 * event type to engage the IO algorithm. The driver prints an 10132 * unmaskable message only for Lost Credit and Credit Stall. 10133 * TODO: Still need to have definition of host action on clear, 10134 * lost credit and device specific event types. 10135 */ 10136 switch (cgn_evt) { 10137 case FPIN_CONGN_LOST_CREDIT: 10138 nm_log = true; 10139 break; 10140 case FPIN_CONGN_CREDIT_STALL: 10141 nm_log = true; 10142 fallthrough; 10143 case FPIN_CONGN_OVERSUBSCRIPTION: 10144 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 10145 nm_log = false; 10146 switch (cgn_sev) { 10147 case FPIN_CONGN_SEVERITY_ERROR: 10148 /* Take action here for an Alarm event */ 10149 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10150 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 10151 /* Track of alarm cnt for SYNC_WQE */ 10152 atomic_inc(&phba->cgn_sync_alarm_cnt); 10153 } 10154 /* Track alarm cnt for cgn_info regardless 10155 * of whether CMF is configured for Signals 10156 * or FPINs. 10157 */ 10158 atomic_inc(&phba->cgn_fabric_alarm_cnt); 10159 goto cleanup; 10160 } 10161 break; 10162 case FPIN_CONGN_SEVERITY_WARNING: 10163 /* Take action here for a Warning event */ 10164 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10165 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 10166 /* Track of warning cnt for SYNC_WQE */ 10167 atomic_inc(&phba->cgn_sync_warn_cnt); 10168 } 10169 /* Track warning cnt and freq for cgn_info 10170 * regardless of whether CMF is configured for 10171 * Signals or FPINs. 10172 */ 10173 atomic_inc(&phba->cgn_fabric_warn_cnt); 10174 cleanup: 10175 /* Save frequency in ms */ 10176 phba->cgn_fpin_frequency = 10177 be32_to_cpu(cgn->event_period); 10178 value = phba->cgn_fpin_frequency; 10179 if (phba->cgn_i) { 10180 cp = (struct lpfc_cgn_info *) 10181 phba->cgn_i->virt; 10182 cp->cgn_alarm_freq = 10183 cpu_to_le16(value); 10184 cp->cgn_warn_freq = 10185 cpu_to_le16(value); 10186 crc = lpfc_cgn_calc_crc32 10187 (cp, 10188 LPFC_CGN_INFO_SZ, 10189 LPFC_CGN_CRC32_SEED); 10190 cp->cgn_info_crc = cpu_to_le32(crc); 10191 } 10192 10193 /* Don't deliver to upper layer since 10194 * driver took action on this tlv. 10195 */ 10196 rc = 0; 10197 } 10198 break; 10199 } 10200 break; 10201 } 10202 10203 /* Change the log level to unmaskable for the following event types. */ 10204 lpfc_printf_log(phba, (nm_log ? 
KERN_WARNING : KERN_INFO), 10205 LOG_CGN_MGMT | LOG_ELS, 10206 "4683 FPIN CONGESTION %s type %s (x%x) Event " 10207 "Duration %d mSecs\n", 10208 cgn_sev_str, cgn_evt_str, cgn_evt, 10209 be32_to_cpu(cgn->event_period)); 10210 return rc; 10211 } 10212 10213 void 10214 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10215 { 10216 struct lpfc_hba *phba = vport->phba; 10217 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10218 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10219 const char *dtag_nm; 10220 int desc_cnt = 0, bytes_remain, cnt; 10221 u32 dtag, deliver = 0; 10222 int len; 10223 10224 /* FPINs handled only if we are in the right discovery state */ 10225 if (vport->port_state < LPFC_DISC_AUTH) 10226 return; 10227 10228 /* make sure there is the full fpin header */ 10229 if (fpin_length < sizeof(struct fc_els_fpin)) 10230 return; 10231 10232 /* Sanity check descriptor length. The desc_len value does not 10233 * include space for the ELS command and the desc_len fields. 10234 */ 10235 len = be32_to_cpu(fpin->desc_len); 10236 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10237 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10238 "4671 Bad ELS FPIN length %d: %d\n", 10239 len, fpin_length); 10240 return; 10241 } 10242 10243 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10244 first_tlv = tlv; 10245 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10246 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10247 10248 /* process each descriptor separately */ 10249 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10250 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10251 dtag = be32_to_cpu(tlv->desc_tag); 10252 switch (dtag) { 10253 case ELS_DTAG_LNK_INTEGRITY: 10254 lpfc_els_rcv_fpin_li(phba, tlv); 10255 deliver = 1; 10256 break; 10257 case ELS_DTAG_DELIVERY: 10258 lpfc_els_rcv_fpin_del(phba, tlv); 10259 deliver = 1; 10260 break; 10261 case ELS_DTAG_PEER_CONGEST: 10262 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10263 deliver = 1; 10264 break; 10265 case ELS_DTAG_CONGESTION: 10266 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10267 break; 10268 default: 10269 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10270 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10271 "4678 unknown FPIN descriptor[%d]: " 10272 "tag x%x (%s)\n", 10273 desc_cnt, dtag, dtag_nm); 10274 10275 /* If descriptor is bad, drop the rest of the data */ 10276 return; 10277 } 10278 lpfc_cgn_update_stat(phba, dtag); 10279 cnt = be32_to_cpu(tlv->desc_len); 10280 10281 /* Sanity check descriptor length. The desc_len value does not 10282 * include space for the desc_tag and the desc_len fields. 10283 */ 10284 len -= (cnt + sizeof(struct fc_tlv_desc)); 10285 if (len < 0) { 10286 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10287 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10288 "4672 Bad FPIN descriptor TLV length " 10289 "%d: %d %d %s\n", 10290 cnt, len, fpin_length, dtag_nm); 10291 return; 10292 } 10293 10294 current_tlv = tlv; 10295 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10296 tlv = fc_tlv_next_desc(tlv); 10297 10298 /* Format payload such that the FPIN delivered to the 10299 * upper layer is a single descriptor FPIN. 10300 */ 10301 if (desc_cnt) 10302 memcpy(first_tlv, current_tlv, 10303 (cnt + sizeof(struct fc_els_fpin))); 10304 10305 /* Adjust the length so that it only reflects a 10306 * single descriptor FPIN. 
10307 */ 10308 fpin_length = cnt + sizeof(struct fc_els_fpin); 10309 fpin->desc_len = cpu_to_be32(fpin_length); 10310 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10311 10312 /* Send every descriptor individually to the upper layer */ 10313 if (deliver) 10314 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10315 fpin_length, (char *)fpin, 0); 10316 desc_cnt++; 10317 } 10318 } 10319 10320 /** 10321 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10322 * @phba: pointer to lpfc hba data structure. 10323 * @pring: pointer to a SLI ring. 10324 * @vport: pointer to a host virtual N_Port data structure. 10325 * @elsiocb: pointer to lpfc els command iocb data structure. 10326 * 10327 * This routine is used for processing the IOCB associated with a unsolicited 10328 * event. It first determines whether there is an existing ndlp that matches 10329 * the DID from the unsolicited IOCB. If not, it will create a new one with 10330 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 10331 * IOCB is then used to invoke the proper routine and to set up proper state 10332 * of the discovery state machine. 10333 **/ 10334 static void 10335 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10336 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10337 { 10338 struct lpfc_nodelist *ndlp; 10339 struct ls_rjt stat; 10340 u32 *payload, payload_len; 10341 u32 cmd = 0, did = 0, newnode, status = 0; 10342 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10343 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10344 LPFC_MBOXQ_t *mbox; 10345 10346 if (!vport || !elsiocb->cmd_dmabuf) 10347 goto dropit; 10348 10349 newnode = 0; 10350 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10351 payload = elsiocb->cmd_dmabuf->virt; 10352 if (phba->sli_rev == LPFC_SLI_REV4) 10353 payload_len = wcqe_cmpl->total_data_placed; 10354 else 10355 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10356 status = get_job_ulpstatus(phba, elsiocb); 10357 cmd = *payload; 10358 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10359 lpfc_sli3_post_buffer(phba, pring, 1); 10360 10361 did = get_job_els_rsp64_did(phba, elsiocb); 10362 if (status) { 10363 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10364 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10365 status, get_job_word4(phba, elsiocb), did); 10366 goto dropit; 10367 } 10368 10369 /* Check to see if link went down during discovery */ 10370 if (lpfc_els_chk_latt(vport)) 10371 goto dropit; 10372 10373 /* Ignore traffic received during vport shutdown. 
*/ 10374 if (test_bit(FC_UNLOADING, &vport->load_flag)) 10375 goto dropit; 10376 10377 /* If NPort discovery is delayed drop incoming ELS */ 10378 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag) && 10379 cmd != ELS_CMD_PLOGI) 10380 goto dropit; 10381 10382 ndlp = lpfc_findnode_did(vport, did); 10383 if (!ndlp) { 10384 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10385 ndlp = lpfc_nlp_init(vport, did); 10386 if (!ndlp) 10387 goto dropit; 10388 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10389 newnode = 1; 10390 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10391 ndlp->nlp_type |= NLP_FABRIC; 10392 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10393 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10394 newnode = 1; 10395 } 10396 10397 phba->fc_stat.elsRcvFrame++; 10398 10399 /* 10400 * Do not process any unsolicited ELS commands 10401 * if the ndlp is in DEV_LOSS 10402 */ 10403 spin_lock_irq(&ndlp->lock); 10404 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10405 spin_unlock_irq(&ndlp->lock); 10406 if (newnode) 10407 lpfc_nlp_put(ndlp); 10408 goto dropit; 10409 } 10410 spin_unlock_irq(&ndlp->lock); 10411 10412 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10413 if (!elsiocb->ndlp) 10414 goto dropit; 10415 elsiocb->vport = vport; 10416 10417 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10418 cmd &= ELS_CMD_MASK; 10419 } 10420 /* ELS command <elsCmd> received from NPORT <did> */ 10421 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10422 "0112 ELS command x%x received from NPORT x%x " 10423 "refcnt %d Data: x%x x%lx x%x x%x\n", 10424 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10425 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10426 10427 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10428 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10429 (cmd != ELS_CMD_FLOGI) && 10430 !((cmd == ELS_CMD_PLOGI) && test_bit(FC_PT2PT, &vport->fc_flag))) { 10431 rjt_err = LSRJT_LOGICAL_BSY; 10432 rjt_exp = LSEXP_NOTHING_MORE; 10433 goto lsrjt; 10434 } 10435 10436 switch (cmd) { 10437 case ELS_CMD_PLOGI: 10438 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10439 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 10440 did, vport->port_state, ndlp->nlp_flag); 10441 10442 phba->fc_stat.elsRcvPLOGI++; 10443 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10444 if (phba->sli_rev == LPFC_SLI_REV4 && 10445 test_bit(FC_PT2PT, &phba->pport->fc_flag)) { 10446 vport->fc_prevDID = vport->fc_myDID; 10447 /* Our DID needs to be updated before registering 10448 * the vfi. This is done in lpfc_rcv_plogi but 10449 * that is called after the reg_vfi. 
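 * Capture the assigned DID here from the WQE's els_rsp64_sid field so
 * it is in place before the reg_vfi.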
10450 */ 10451 vport->fc_myDID = 10452 bf_get(els_rsp64_sid, 10453 &elsiocb->wqe.xmit_els_rsp); 10454 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10455 "3312 Remote port assigned DID x%x " 10456 "%x\n", vport->fc_myDID, 10457 vport->fc_prevDID); 10458 } 10459 10460 lpfc_send_els_event(vport, ndlp, payload); 10461 10462 /* If Nport discovery is delayed, reject PLOGIs */ 10463 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) { 10464 rjt_err = LSRJT_UNABLE_TPC; 10465 rjt_exp = LSEXP_NOTHING_MORE; 10466 break; 10467 } 10468 10469 if (vport->port_state < LPFC_DISC_AUTH) { 10470 if (!test_bit(FC_PT2PT, &phba->pport->fc_flag) || 10471 test_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag)) { 10472 rjt_err = LSRJT_UNABLE_TPC; 10473 rjt_exp = LSEXP_NOTHING_MORE; 10474 break; 10475 } 10476 } 10477 10478 spin_lock_irq(&ndlp->lock); 10479 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 10480 spin_unlock_irq(&ndlp->lock); 10481 10482 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10483 NLP_EVT_RCV_PLOGI); 10484 10485 break; 10486 case ELS_CMD_FLOGI: 10487 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10488 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 10489 did, vport->port_state, ndlp->nlp_flag); 10490 10491 phba->fc_stat.elsRcvFLOGI++; 10492 10493 /* If the driver believes fabric discovery is done and is ready, 10494 * bounce the link. There is some descrepancy. 10495 */ 10496 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10497 test_bit(FC_PT2PT, &vport->fc_flag) && 10498 vport->rcv_flogi_cnt >= 1) { 10499 rjt_err = LSRJT_LOGICAL_BSY; 10500 rjt_exp = LSEXP_NOTHING_MORE; 10501 init_link++; 10502 goto lsrjt; 10503 } 10504 10505 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10506 /* retain node if our response is deferred */ 10507 if (phba->defer_flogi_acc_flag) 10508 break; 10509 if (newnode) 10510 lpfc_disc_state_machine(vport, ndlp, NULL, 10511 NLP_EVT_DEVICE_RM); 10512 break; 10513 case ELS_CMD_LOGO: 10514 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10515 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10516 did, vport->port_state, ndlp->nlp_flag); 10517 10518 phba->fc_stat.elsRcvLOGO++; 10519 lpfc_send_els_event(vport, ndlp, payload); 10520 if (vport->port_state < LPFC_DISC_AUTH) { 10521 rjt_err = LSRJT_UNABLE_TPC; 10522 rjt_exp = LSEXP_NOTHING_MORE; 10523 break; 10524 } 10525 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10526 if (newnode) 10527 lpfc_disc_state_machine(vport, ndlp, NULL, 10528 NLP_EVT_DEVICE_RM); 10529 break; 10530 case ELS_CMD_PRLO: 10531 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10532 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10533 did, vport->port_state, ndlp->nlp_flag); 10534 10535 phba->fc_stat.elsRcvPRLO++; 10536 lpfc_send_els_event(vport, ndlp, payload); 10537 if (vport->port_state < LPFC_DISC_AUTH) { 10538 rjt_err = LSRJT_UNABLE_TPC; 10539 rjt_exp = LSEXP_NOTHING_MORE; 10540 break; 10541 } 10542 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10543 break; 10544 case ELS_CMD_LCB: 10545 phba->fc_stat.elsRcvLCB++; 10546 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10547 break; 10548 case ELS_CMD_RDP: 10549 phba->fc_stat.elsRcvRDP++; 10550 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10551 break; 10552 case ELS_CMD_RSCN: 10553 phba->fc_stat.elsRcvRSCN++; 10554 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10555 if (newnode) 10556 lpfc_disc_state_machine(vport, ndlp, NULL, 10557 NLP_EVT_DEVICE_RM); 10558 break; 10559 case ELS_CMD_ADISC: 10560 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10561 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10562 did, vport->port_state, 
ndlp->nlp_flag); 10563 10564 lpfc_send_els_event(vport, ndlp, payload); 10565 phba->fc_stat.elsRcvADISC++; 10566 if (vport->port_state < LPFC_DISC_AUTH) { 10567 rjt_err = LSRJT_UNABLE_TPC; 10568 rjt_exp = LSEXP_NOTHING_MORE; 10569 break; 10570 } 10571 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10572 NLP_EVT_RCV_ADISC); 10573 break; 10574 case ELS_CMD_PDISC: 10575 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10576 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10577 did, vport->port_state, ndlp->nlp_flag); 10578 10579 phba->fc_stat.elsRcvPDISC++; 10580 if (vport->port_state < LPFC_DISC_AUTH) { 10581 rjt_err = LSRJT_UNABLE_TPC; 10582 rjt_exp = LSEXP_NOTHING_MORE; 10583 break; 10584 } 10585 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10586 NLP_EVT_RCV_PDISC); 10587 break; 10588 case ELS_CMD_FARPR: 10589 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10590 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10591 did, vport->port_state, ndlp->nlp_flag); 10592 10593 phba->fc_stat.elsRcvFARPR++; 10594 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10595 break; 10596 case ELS_CMD_FARP: 10597 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10598 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10599 did, vport->port_state, ndlp->nlp_flag); 10600 10601 phba->fc_stat.elsRcvFARP++; 10602 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10603 break; 10604 case ELS_CMD_FAN: 10605 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10606 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10607 did, vport->port_state, ndlp->nlp_flag); 10608 10609 phba->fc_stat.elsRcvFAN++; 10610 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10611 break; 10612 case ELS_CMD_PRLI: 10613 case ELS_CMD_NVMEPRLI: 10614 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10615 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10616 did, vport->port_state, ndlp->nlp_flag); 10617 10618 phba->fc_stat.elsRcvPRLI++; 10619 if ((vport->port_state < LPFC_DISC_AUTH) && 10620 test_bit(FC_FABRIC, &vport->fc_flag)) { 10621 rjt_err = LSRJT_UNABLE_TPC; 10622 rjt_exp = LSEXP_NOTHING_MORE; 10623 break; 10624 } 10625 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10626 break; 10627 case ELS_CMD_LIRR: 10628 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10629 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10630 did, vport->port_state, ndlp->nlp_flag); 10631 10632 phba->fc_stat.elsRcvLIRR++; 10633 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10634 if (newnode) 10635 lpfc_disc_state_machine(vport, ndlp, NULL, 10636 NLP_EVT_DEVICE_RM); 10637 break; 10638 case ELS_CMD_RLS: 10639 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10640 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10641 did, vport->port_state, ndlp->nlp_flag); 10642 10643 phba->fc_stat.elsRcvRLS++; 10644 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10645 if (newnode) 10646 lpfc_disc_state_machine(vport, ndlp, NULL, 10647 NLP_EVT_DEVICE_RM); 10648 break; 10649 case ELS_CMD_RPL: 10650 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10651 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10652 did, vport->port_state, ndlp->nlp_flag); 10653 10654 phba->fc_stat.elsRcvRPL++; 10655 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10656 if (newnode) 10657 lpfc_disc_state_machine(vport, ndlp, NULL, 10658 NLP_EVT_DEVICE_RM); 10659 break; 10660 case ELS_CMD_RNID: 10661 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10662 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10663 did, vport->port_state, ndlp->nlp_flag); 10664 10665 phba->fc_stat.elsRcvRNID++; 10666 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10667 if (newnode) 10668 lpfc_disc_state_machine(vport, ndlp, NULL, 
10669 NLP_EVT_DEVICE_RM); 10670 break; 10671 case ELS_CMD_RTV: 10672 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10673 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10674 did, vport->port_state, ndlp->nlp_flag); 10675 phba->fc_stat.elsRcvRTV++; 10676 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10677 if (newnode) 10678 lpfc_disc_state_machine(vport, ndlp, NULL, 10679 NLP_EVT_DEVICE_RM); 10680 break; 10681 case ELS_CMD_RRQ: 10682 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10683 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10684 did, vport->port_state, ndlp->nlp_flag); 10685 10686 phba->fc_stat.elsRcvRRQ++; 10687 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10688 if (newnode) 10689 lpfc_disc_state_machine(vport, ndlp, NULL, 10690 NLP_EVT_DEVICE_RM); 10691 break; 10692 case ELS_CMD_ECHO: 10693 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10694 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10695 did, vport->port_state, ndlp->nlp_flag); 10696 10697 phba->fc_stat.elsRcvECHO++; 10698 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10699 if (newnode) 10700 lpfc_disc_state_machine(vport, ndlp, NULL, 10701 NLP_EVT_DEVICE_RM); 10702 break; 10703 case ELS_CMD_REC: 10704 /* receive this due to exchange closed */ 10705 rjt_err = LSRJT_UNABLE_TPC; 10706 rjt_exp = LSEXP_INVALID_OX_RX; 10707 break; 10708 case ELS_CMD_FPIN: 10709 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10710 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10711 did, vport->port_state, ndlp->nlp_flag); 10712 10713 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10714 payload_len); 10715 10716 /* There are no replies, so no rjt codes */ 10717 break; 10718 case ELS_CMD_EDC: 10719 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10720 break; 10721 case ELS_CMD_RDF: 10722 phba->fc_stat.elsRcvRDF++; 10723 /* Accept RDF only from fabric controller */ 10724 if (did != Fabric_Cntl_DID) { 10725 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10726 "1115 Received RDF from invalid DID " 10727 "x%x\n", did); 10728 rjt_err = LSRJT_PROTOCOL_ERR; 10729 rjt_exp = LSEXP_NOTHING_MORE; 10730 goto lsrjt; 10731 } 10732 10733 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10734 break; 10735 default: 10736 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10737 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10738 cmd, did, vport->port_state); 10739 10740 /* Unsupported ELS command, reject */ 10741 rjt_err = LSRJT_CMD_UNSUPPORTED; 10742 rjt_exp = LSEXP_NOTHING_MORE; 10743 10744 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10745 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10746 "0115 Unknown ELS command x%x " 10747 "received from NPORT x%x\n", cmd, did); 10748 if (newnode) 10749 lpfc_disc_state_machine(vport, ndlp, NULL, 10750 NLP_EVT_DEVICE_RM); 10751 break; 10752 } 10753 10754 lsrjt: 10755 /* check if need to LS_RJT received ELS cmd */ 10756 if (rjt_err) { 10757 memset(&stat, 0, sizeof(stat)); 10758 stat.un.b.lsRjtRsnCode = rjt_err; 10759 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10760 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10761 NULL); 10762 /* Remove the reference from above for new nodes. */ 10763 if (newnode) 10764 lpfc_disc_state_machine(vport, ndlp, NULL, 10765 NLP_EVT_DEVICE_RM); 10766 } 10767 10768 /* Release the reference on this elsiocb, not the ndlp. */ 10769 lpfc_nlp_put(elsiocb->ndlp); 10770 elsiocb->ndlp = NULL; 10771 10772 /* Special case. Driver received an unsolicited command that 10773 * unsupportable given the driver's current state. Reset the 10774 * link and start over. 
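 * (The init_link path is currently taken only when an unexpected FLOGI
 * arrives in pt2pt mode after one has already been seen; see the
 * ELS_CMD_FLOGI case above.)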
10775 */ 10776 if (init_link) { 10777 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10778 if (!mbox) 10779 return; 10780 lpfc_linkdown(phba); 10781 lpfc_init_link(phba, mbox, 10782 phba->cfg_topology, 10783 phba->cfg_link_speed); 10784 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10785 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10786 mbox->vport = vport; 10787 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10788 MBX_NOT_FINISHED) 10789 mempool_free(mbox, phba->mbox_mem_pool); 10790 } 10791 10792 return; 10793 10794 dropit: 10795 if (vport && !test_bit(FC_UNLOADING, &vport->load_flag)) 10796 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10797 "0111 Dropping received ELS cmd " 10798 "Data: x%x x%x x%x x%x\n", 10799 cmd, status, get_job_word4(phba, elsiocb), did); 10800 10801 phba->fc_stat.elsRcvDrop++; 10802 } 10803 10804 /** 10805 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10806 * @phba: pointer to lpfc hba data structure. 10807 * @pring: pointer to a SLI ring. 10808 * @elsiocb: pointer to lpfc els iocb data structure. 10809 * 10810 * This routine is used to process an unsolicited event received from a SLI 10811 * (Service Level Interface) ring. The actual processing of the data buffer 10812 * associated with the unsolicited event is done by invoking the routine 10813 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10814 * SLI ring on which the unsolicited event was received. 10815 **/ 10816 void 10817 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10818 struct lpfc_iocbq *elsiocb) 10819 { 10820 struct lpfc_vport *vport = elsiocb->vport; 10821 u32 ulp_command, status, parameter, bde_count = 0; 10822 IOCB_t *icmd; 10823 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10824 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10825 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10826 dma_addr_t paddr; 10827 10828 elsiocb->cmd_dmabuf = NULL; 10829 elsiocb->rsp_dmabuf = NULL; 10830 elsiocb->bpl_dmabuf = NULL; 10831 10832 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10833 ulp_command = get_job_cmnd(phba, elsiocb); 10834 status = get_job_ulpstatus(phba, elsiocb); 10835 parameter = get_job_word4(phba, elsiocb); 10836 if (phba->sli_rev == LPFC_SLI_REV4) 10837 bde_count = wcqe_cmpl->word3; 10838 else 10839 bde_count = elsiocb->iocb.ulpBdeCount; 10840 10841 if (status == IOSTAT_NEED_BUFFER) { 10842 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10843 } else if (status == IOSTAT_LOCAL_REJECT && 10844 (parameter & IOERR_PARAM_MASK) == 10845 IOERR_RCV_BUFFER_WAITING) { 10846 phba->fc_stat.NoRcvBuf++; 10847 /* Not enough posted buffers; Try posting more buffers */ 10848 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10849 lpfc_sli3_post_buffer(phba, pring, 0); 10850 return; 10851 } 10852 10853 if (phba->sli_rev == LPFC_SLI_REV3) { 10854 icmd = &elsiocb->iocb; 10855 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10856 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10857 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10858 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10859 vport = phba->pport; 10860 else 10861 vport = lpfc_find_vport_by_vpid(phba, 10862 icmd->unsli3.rcvsli3.vpi); 10863 } 10864 } 10865 10866 /* If there are no BDEs associated 10867 * with this IOCB, there is nothing to do. 
10868 */ 10869 if (bde_count == 0) 10870 return; 10871 10872 /* Account for SLI2 or SLI3 and later unsolicited buffering */ 10873 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10874 elsiocb->cmd_dmabuf = bdeBuf1; 10875 if (bde_count == 2) 10876 elsiocb->bpl_dmabuf = bdeBuf2; 10877 } else { 10878 icmd = &elsiocb->iocb; 10879 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10880 icmd->un.cont64[0].addrLow); 10881 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, 10882 paddr); 10883 if (bde_count == 2) { 10884 paddr = getPaddr(icmd->un.cont64[1].addrHigh, 10885 icmd->un.cont64[1].addrLow); 10886 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba, 10887 pring, 10888 paddr); 10889 } 10890 } 10891 10892 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10893 /* 10894 * The different unsolicited event handlers would tell us 10895 * if they are done with "mp" by setting cmd_dmabuf to NULL. 10896 */ 10897 if (elsiocb->cmd_dmabuf) { 10898 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf); 10899 elsiocb->cmd_dmabuf = NULL; 10900 } 10901 10902 if (elsiocb->bpl_dmabuf) { 10903 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf); 10904 elsiocb->bpl_dmabuf = NULL; 10905 } 10906 10907 } 10908 10909 static void 10910 lpfc_start_fdmi(struct lpfc_vport *vport) 10911 { 10912 struct lpfc_nodelist *ndlp; 10913 10914 /* If this is the first time, allocate an ndlp and initialize 10915 * it. Otherwise, make sure the node is enabled and then do the 10916 * login. 10917 */ 10918 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10919 if (!ndlp) { 10920 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10921 if (ndlp) { 10922 ndlp->nlp_type |= NLP_FABRIC; 10923 } else { 10924 return; 10925 } 10926 } 10927 10928 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10929 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10930 } 10931 10932 /** 10933 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10934 * @phba: pointer to lpfc hba data structure. 10935 * @vport: pointer to a virtual N_Port data structure. 10936 * 10937 * This routine issues a Port Login (PLOGI) to the Name Server with 10938 * State Change Request (SCR) for a @vport. This routine will create an 10939 * ndlp for the Name Server associated to the @vport if such node does 10940 * not already exist. The PLOGI to Name Server is issued by invoking the 10941 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10942 * (FDMI) is configured to the @vport, a FDMI node will be created and 10943 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 10944 **/ 10945 void 10946 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10947 { 10948 struct lpfc_nodelist *ndlp; 10949 10950 /* 10951 * If lpfc_delay_discovery parameter is set and the clean address 10952 * bit is cleared and fc fabric parameters chenged, delay FC NPort 10953 * discovery. 
10954 */ 10955 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) { 10956 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10957 "3334 Delay fc port discovery for %d secs\n", 10958 phba->fc_ratov); 10959 mod_timer(&vport->delayed_disc_tmo, 10960 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10961 return; 10962 } 10963 10964 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10965 if (!ndlp) { 10966 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10967 if (!ndlp) { 10968 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10969 lpfc_disc_start(vport); 10970 return; 10971 } 10972 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10973 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10974 "0251 NameServer login: no memory\n"); 10975 return; 10976 } 10977 } 10978 10979 ndlp->nlp_type |= NLP_FABRIC; 10980 10981 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10982 10983 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10984 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10985 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10986 "0252 Cannot issue NameServer login\n"); 10987 return; 10988 } 10989 10990 if ((phba->cfg_enable_SmartSAN || 10991 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) && 10992 test_bit(FC_ALLOW_FDMI, &vport->load_flag)) 10993 lpfc_start_fdmi(vport); 10994 } 10995 10996 /** 10997 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10998 * @phba: pointer to lpfc hba data structure. 10999 * @pmb: pointer to the driver internal queue element for mailbox command. 11000 * 11001 * This routine is the completion callback function to register new vport 11002 * mailbox command. If the new vport mailbox command completes successfully, 11003 * the fabric registration login shall be performed on physical port (the 11004 * new vport created is actually a physical port, with VPI 0) or the port 11005 * login to Name Server for State Change Request (SCR) will be performed 11006 * on virtual port (real virtual port, with VPI greater than 0). 
11007 **/ 11008 static void 11009 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 11010 { 11011 struct lpfc_vport *vport = pmb->vport; 11012 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11013 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 11014 MAILBOX_t *mb = &pmb->u.mb; 11015 int rc; 11016 11017 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11018 11019 if (mb->mbxStatus) { 11020 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11021 "0915 Register VPI failed : Status: x%x" 11022 " upd bit: x%x \n", mb->mbxStatus, 11023 mb->un.varRegVpi.upd); 11024 if (phba->sli_rev == LPFC_SLI_REV4 && 11025 mb->un.varRegVpi.upd) 11026 goto mbox_err_exit ; 11027 11028 switch (mb->mbxStatus) { 11029 case 0x11: /* unsupported feature */ 11030 case 0x9603: /* max_vpi exceeded */ 11031 case 0x9602: /* Link event since CLEAR_LA */ 11032 /* giving up on vport registration */ 11033 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11034 clear_bit(FC_FABRIC, &vport->fc_flag); 11035 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 11036 lpfc_can_disctmo(vport); 11037 break; 11038 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 11039 case 0x20: 11040 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11041 lpfc_init_vpi(phba, pmb, vport->vpi); 11042 pmb->vport = vport; 11043 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 11044 rc = lpfc_sli_issue_mbox(phba, pmb, 11045 MBX_NOWAIT); 11046 if (rc == MBX_NOT_FINISHED) { 11047 lpfc_printf_vlog(vport, KERN_ERR, 11048 LOG_TRACE_EVENT, 11049 "2732 Failed to issue INIT_VPI" 11050 " mailbox command\n"); 11051 } else { 11052 lpfc_nlp_put(ndlp); 11053 return; 11054 } 11055 fallthrough; 11056 default: 11057 /* Try to recover from this error */ 11058 if (phba->sli_rev == LPFC_SLI_REV4) 11059 lpfc_sli4_unreg_all_rpis(vport); 11060 lpfc_mbx_unreg_vpi(vport); 11061 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11062 if (mb->mbxStatus == MBX_NOT_FINISHED) 11063 break; 11064 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 11065 !test_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag)) { 11066 if (phba->sli_rev == LPFC_SLI_REV4) 11067 lpfc_issue_init_vfi(vport); 11068 else 11069 lpfc_initial_flogi(vport); 11070 } else { 11071 lpfc_initial_fdisc(vport); 11072 } 11073 break; 11074 } 11075 } else { 11076 spin_lock_irq(shost->host_lock); 11077 vport->vpi_state |= LPFC_VPI_REGISTERED; 11078 spin_unlock_irq(shost->host_lock); 11079 if (vport == phba->pport) { 11080 if (phba->sli_rev < LPFC_SLI_REV4) 11081 lpfc_issue_fabric_reglogin(vport); 11082 else { 11083 /* 11084 * If the physical port is instantiated using 11085 * FDISC, do not start vport discovery. 11086 */ 11087 if (vport->port_state != LPFC_FDISC) 11088 lpfc_start_fdiscs(phba); 11089 lpfc_do_scr_ns_plogi(phba, vport); 11090 } 11091 } else { 11092 lpfc_do_scr_ns_plogi(phba, vport); 11093 } 11094 } 11095 mbox_err_exit: 11096 /* Now, we decrement the ndlp reference count held for this 11097 * callback function 11098 */ 11099 lpfc_nlp_put(ndlp); 11100 11101 mempool_free(pmb, phba->mbox_mem_pool); 11102 11103 /* reinitialize the VMID datastructure before returning. 11104 * this is specifically for vport 11105 */ 11106 if (lpfc_is_vmid_enabled(phba)) 11107 lpfc_reinit_vmid(vport); 11108 vport->vmid_flag = vport->phba->pport->vmid_flag; 11109 11110 return; 11111 } 11112 11113 /** 11114 * lpfc_register_new_vport - Register a new vport with a HBA 11115 * @phba: pointer to lpfc hba data structure. 11116 * @vport: pointer to a host virtual N_Port data structure. 11117 * @ndlp: pointer to a node-list data structure. 
11118 * 11119 * This routine registers the @vport as a new virtual port with a HBA. 11120 * It is done through a registering vpi mailbox command. 11121 **/ 11122 void 11123 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 11124 struct lpfc_nodelist *ndlp) 11125 { 11126 LPFC_MBOXQ_t *mbox; 11127 11128 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11129 if (mbox) { 11130 lpfc_reg_vpi(vport, mbox); 11131 mbox->vport = vport; 11132 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 11133 if (!mbox->ctx_ndlp) { 11134 mempool_free(mbox, phba->mbox_mem_pool); 11135 goto mbox_err_exit; 11136 } 11137 11138 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 11139 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 11140 == MBX_NOT_FINISHED) { 11141 /* mailbox command not success, decrement ndlp 11142 * reference count for this command 11143 */ 11144 lpfc_nlp_put(ndlp); 11145 mempool_free(mbox, phba->mbox_mem_pool); 11146 11147 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11148 "0253 Register VPI: Can't send mbox\n"); 11149 goto mbox_err_exit; 11150 } 11151 } else { 11152 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11153 "0254 Register VPI: no memory\n"); 11154 goto mbox_err_exit; 11155 } 11156 return; 11157 11158 mbox_err_exit: 11159 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11160 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11161 return; 11162 } 11163 11164 /** 11165 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 11166 * @phba: pointer to lpfc hba data structure. 11167 * 11168 * This routine cancels the retry delay timers to all the vports. 11169 **/ 11170 void 11171 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 11172 { 11173 struct lpfc_vport **vports; 11174 struct lpfc_nodelist *ndlp; 11175 uint32_t link_state; 11176 int i; 11177 11178 /* Treat this failure as linkdown for all vports */ 11179 link_state = phba->link_state; 11180 lpfc_linkdown(phba); 11181 phba->link_state = link_state; 11182 11183 vports = lpfc_create_vport_work_array(phba); 11184 11185 if (vports) { 11186 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11187 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 11188 if (ndlp) 11189 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 11190 lpfc_els_flush_cmd(vports[i]); 11191 } 11192 lpfc_destroy_vport_work_array(phba, vports); 11193 } 11194 } 11195 11196 /** 11197 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 11198 * @phba: pointer to lpfc hba data structure. 11199 * 11200 * This routine abort all pending discovery commands and 11201 * start a timer to retry FLOGI for the physical port 11202 * discovery. 11203 **/ 11204 void 11205 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 11206 { 11207 struct lpfc_nodelist *ndlp; 11208 11209 /* Cancel the all vports retry delay retry timers */ 11210 lpfc_cancel_all_vport_retry_delay_timer(phba); 11211 11212 /* If fabric require FLOGI, then re-instantiate physical login */ 11213 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11214 if (!ndlp) 11215 return; 11216 11217 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 11218 spin_lock_irq(&ndlp->lock); 11219 ndlp->nlp_flag |= NLP_DELAY_TMO; 11220 spin_unlock_irq(&ndlp->lock); 11221 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 11222 phba->pport->port_state = LPFC_FLOGI; 11223 return; 11224 } 11225 11226 /** 11227 * lpfc_fabric_login_reqd - Check if FLOGI required. 11228 * @phba: pointer to lpfc hba data structure. 11229 * @cmdiocb: pointer to FDISC command iocb. 
11230 * @rspiocb: pointer to FDISC response iocb.
11231 *
11232 * This routine checks if a FLOGI is required for FDISC
11233 * to succeed.
11234 **/
11235 static int
11236 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
11237 struct lpfc_iocbq *cmdiocb,
11238 struct lpfc_iocbq *rspiocb)
11239 {
11240 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11241 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11242
11243 if (ulp_status != IOSTAT_FABRIC_RJT ||
11244 ulp_word4 != RJT_LOGIN_REQUIRED)
11245 return 0;
11246 else
11247 return 1;
11248 }
11249
11250 /**
11251 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11252 * @phba: pointer to lpfc hba data structure.
11253 * @cmdiocb: pointer to lpfc command iocb data structure.
11254 * @rspiocb: pointer to lpfc response iocb data structure.
11255 *
11256 * This routine is the completion callback function to a Fabric Discover
11257 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11258 * single threaded, each FDISC completion callback function will reset
11259 * the discovery timer for all vports such that the timers will not
11260 * time out unnecessarily. The function checks the FDISC IOCB status. If an
11261 * error is detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,
11262 * the vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
11263 * assigned to the vport has been changed with the completion of the FDISC
11264 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
11265 * are unregistered from the HBA, and then the lpfc_register_new_vport()
11266 * routine is invoked to register the new vport with the HBA. Otherwise, the
11267 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
11268 * Server for State Change Request (SCR).
11269 **/
11270 static void
11271 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11272 struct lpfc_iocbq *rspiocb)
11273 {
11274 struct lpfc_vport *vport = cmdiocb->vport;
11275 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11276 struct lpfc_nodelist *np;
11277 struct lpfc_nodelist *next_np;
11278 struct lpfc_iocbq *piocb;
11279 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
11280 struct serv_parm *sp;
11281 uint8_t fabric_param_changed;
11282 u32 ulp_status, ulp_word4;
11283
11284 ulp_status = get_job_ulpstatus(phba, rspiocb);
11285 ulp_word4 = get_job_word4(phba, rspiocb);
11286
11287 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11288 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
11289 ulp_status, ulp_word4,
11290 vport->fc_prevDID);
11291 /* Since all FDISCs are being single threaded, we
11292 * must reset the discovery timer for ALL vports
11293 * waiting to send FDISC when one completes.
11294 */
11295 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
11296 lpfc_set_disctmo(piocb->vport);
11297 }
11298
11299 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11300 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
11301 ulp_status, ulp_word4, vport->fc_prevDID);
11302
11303 if (ulp_status) {
11304
11305 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
11306 lpfc_retry_pport_discovery(phba);
11307 goto out;
11308 }
11309
11310 /* Check for retry */
11311 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
11312 goto out;
11313 /* FDISC failed */
11314 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11315 "0126 FDISC failed.
(x%x/x%x)\n", 11316 ulp_status, ulp_word4); 11317 goto fdisc_failed; 11318 } 11319 11320 lpfc_check_nlp_post_devloss(vport, ndlp); 11321 11322 clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag); 11323 clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag); 11324 set_bit(FC_FABRIC, &vport->fc_flag); 11325 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11326 set_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 11327 11328 vport->fc_myDID = ulp_word4 & Mask_DID; 11329 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11330 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11331 if (!prsp) 11332 goto out; 11333 if (!lpfc_is_els_acc_rsp(prsp)) 11334 goto out; 11335 11336 sp = prsp->virt + sizeof(uint32_t); 11337 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11338 memcpy(&vport->fabric_portname, &sp->portName, 11339 sizeof(struct lpfc_name)); 11340 memcpy(&vport->fabric_nodename, &sp->nodeName, 11341 sizeof(struct lpfc_name)); 11342 if (fabric_param_changed && 11343 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) { 11344 /* If our NportID changed, we need to ensure all 11345 * remaining NPORTs get unreg_login'ed so we can 11346 * issue unreg_vpi. 11347 */ 11348 list_for_each_entry_safe(np, next_np, 11349 &vport->fc_nodes, nlp_listp) { 11350 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11351 !(np->nlp_flag & NLP_NPR_ADISC)) 11352 continue; 11353 spin_lock_irq(&ndlp->lock); 11354 np->nlp_flag &= ~NLP_NPR_ADISC; 11355 spin_unlock_irq(&ndlp->lock); 11356 lpfc_unreg_rpi(vport, np); 11357 } 11358 lpfc_cleanup_pending_mbox(vport); 11359 11360 if (phba->sli_rev == LPFC_SLI_REV4) 11361 lpfc_sli4_unreg_all_rpis(vport); 11362 11363 lpfc_mbx_unreg_vpi(vport); 11364 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11365 if (phba->sli_rev == LPFC_SLI_REV4) 11366 set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag); 11367 else 11368 set_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag); 11369 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11370 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) { 11371 /* 11372 * Driver needs to re-reg VPI in order for f/w 11373 * to update the MAC address. 11374 */ 11375 lpfc_register_new_vport(phba, vport, ndlp); 11376 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11377 goto out; 11378 } 11379 11380 if (test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag)) 11381 lpfc_issue_init_vpi(vport); 11382 else if (test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) 11383 lpfc_register_new_vport(phba, vport, ndlp); 11384 else 11385 lpfc_do_scr_ns_plogi(phba, vport); 11386 11387 /* The FDISC completed successfully. Move the fabric ndlp to 11388 * UNMAPPED state and register with the transport. 11389 */ 11390 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11391 goto out; 11392 11393 fdisc_failed: 11394 if (vport->fc_vport && 11395 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11396 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11397 /* Cancel discovery timer */ 11398 lpfc_can_disctmo(vport); 11399 out: 11400 lpfc_els_free_iocb(phba, cmdiocb); 11401 lpfc_nlp_put(ndlp); 11402 } 11403 11404 /** 11405 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11406 * @vport: pointer to a virtual N_Port data structure. 11407 * @ndlp: pointer to a node-list data structure. 11408 * @retry: number of retries to the command IOCB. 11409 * 11410 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11411 * a remote node (@ndlp) off a @vport. 
It uses the lpfc_issue_fabric_iocb() 11412 * routine to issue the IOCB, which makes sure only one outstanding fabric 11413 * IOCB will be sent off HBA at any given time. 11414 * 11415 * Note that the ndlp reference count will be incremented by 1 for holding the 11416 * ndlp and the reference to ndlp will be stored into the ndlp field of 11417 * the IOCB for the completion callback function to the FDISC ELS command. 11418 * 11419 * Return code 11420 * 0 - Successfully issued fdisc iocb command 11421 * 1 - Failed to issue fdisc iocb command 11422 **/ 11423 static int 11424 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11425 uint8_t retry) 11426 { 11427 struct lpfc_hba *phba = vport->phba; 11428 IOCB_t *icmd; 11429 union lpfc_wqe128 *wqe = NULL; 11430 struct lpfc_iocbq *elsiocb; 11431 struct serv_parm *sp; 11432 uint8_t *pcmd; 11433 uint16_t cmdsize; 11434 int did = ndlp->nlp_DID; 11435 int rc; 11436 11437 vport->port_state = LPFC_FDISC; 11438 vport->fc_myDID = 0; 11439 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11440 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11441 ELS_CMD_FDISC); 11442 if (!elsiocb) { 11443 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11444 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11445 "0255 Issue FDISC: no IOCB\n"); 11446 return 1; 11447 } 11448 11449 if (phba->sli_rev == LPFC_SLI_REV4) { 11450 wqe = &elsiocb->wqe; 11451 bf_set(els_req64_sid, &wqe->els_req, 0); 11452 bf_set(els_req64_sp, &wqe->els_req, 1); 11453 } else { 11454 icmd = &elsiocb->iocb; 11455 icmd->un.elsreq64.myID = 0; 11456 icmd->un.elsreq64.fl = 1; 11457 icmd->ulpCt_h = 1; 11458 icmd->ulpCt_l = 0; 11459 } 11460 11461 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11462 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11463 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11464 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11465 sp = (struct serv_parm *) pcmd; 11466 /* Setup CSPs accordingly for Fabric */ 11467 sp->cmn.e_d_tov = 0; 11468 sp->cmn.w2.r_a_tov = 0; 11469 sp->cmn.virtual_fabric_support = 0; 11470 sp->cls1.classValid = 0; 11471 sp->cls2.seqDelivery = 1; 11472 sp->cls3.seqDelivery = 1; 11473 11474 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11475 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11476 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11477 pcmd += sizeof(uint32_t); /* Port Name */ 11478 memcpy(pcmd, &vport->fc_portname, 8); 11479 pcmd += sizeof(uint32_t); /* Node Name */ 11480 pcmd += sizeof(uint32_t); /* Node Name */ 11481 memcpy(pcmd, &vport->fc_nodename, 8); 11482 sp->cmn.valid_vendor_ver_level = 0; 11483 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11484 lpfc_set_disctmo(vport); 11485 11486 phba->fc_stat.elsXmitFDISC++; 11487 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11488 11489 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11490 "Issue FDISC: did:x%x", 11491 did, 0, 0); 11492 11493 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11494 if (!elsiocb->ndlp) 11495 goto err_out; 11496 11497 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11498 if (rc == IOCB_ERROR) { 11499 lpfc_nlp_put(ndlp); 11500 goto err_out; 11501 } 11502 11503 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11504 return 0; 11505 11506 err_out: 11507 lpfc_els_free_iocb(phba, elsiocb); 11508 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11509 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11510 "0256 Issue FDISC: Cannot send IOCB\n"); 11511 return 1; 11512 } 11513 11514 /** 11515 * lpfc_cmpl_els_npiv_logo - Completion function 
with vport logo 11516 * @phba: pointer to lpfc hba data structure. 11517 * @cmdiocb: pointer to lpfc command iocb data structure. 11518 * @rspiocb: pointer to lpfc response iocb data structure. 11519 * 11520 * This routine is the completion callback function to the issuing of a LOGO 11521 * ELS command off a vport. It frees the command IOCB and then decrement the 11522 * reference count held on ndlp for this completion function, indicating that 11523 * the reference to the ndlp is no long needed. Note that the 11524 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 11525 * callback function and an additional explicit ndlp reference decrementation 11526 * will trigger the actual release of the ndlp. 11527 **/ 11528 static void 11529 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11530 struct lpfc_iocbq *rspiocb) 11531 { 11532 struct lpfc_vport *vport = cmdiocb->vport; 11533 IOCB_t *irsp; 11534 struct lpfc_nodelist *ndlp; 11535 u32 ulp_status, ulp_word4, did, tmo; 11536 11537 ndlp = cmdiocb->ndlp; 11538 11539 ulp_status = get_job_ulpstatus(phba, rspiocb); 11540 ulp_word4 = get_job_word4(phba, rspiocb); 11541 11542 if (phba->sli_rev == LPFC_SLI_REV4) { 11543 did = get_job_els_rsp64_did(phba, cmdiocb); 11544 tmo = get_wqe_tmo(cmdiocb); 11545 } else { 11546 irsp = &rspiocb->iocb; 11547 did = get_job_els_rsp64_did(phba, rspiocb); 11548 tmo = irsp->ulpTimeout; 11549 } 11550 11551 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11552 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 11553 ulp_status, ulp_word4, did); 11554 11555 /* NPIV LOGO completes to NPort <nlp_DID> */ 11556 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 11557 "2928 NPIV LOGO completes to NPort x%x " 11558 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 11559 ndlp->nlp_DID, ulp_status, ulp_word4, 11560 tmo, vport->num_disc_nodes, 11561 kref_read(&ndlp->kref), ndlp->nlp_flag, 11562 ndlp->fc4_xpt_flags); 11563 11564 if (ulp_status == IOSTAT_SUCCESS) { 11565 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 11566 clear_bit(FC_FABRIC, &vport->fc_flag); 11567 lpfc_can_disctmo(vport); 11568 } 11569 11570 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 11571 /* Wake up lpfc_vport_delete if waiting...*/ 11572 if (ndlp->logo_waitq) 11573 wake_up(ndlp->logo_waitq); 11574 spin_lock_irq(&ndlp->lock); 11575 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); 11576 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 11577 spin_unlock_irq(&ndlp->lock); 11578 } 11579 11580 /* Safe to release resources now. */ 11581 lpfc_els_free_iocb(phba, cmdiocb); 11582 lpfc_nlp_put(ndlp); 11583 } 11584 11585 /** 11586 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 11587 * @vport: pointer to a virtual N_Port data structure. 11588 * @ndlp: pointer to a node-list data structure. 11589 * 11590 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 11591 * 11592 * Note that the ndlp reference count will be incremented by 1 for holding the 11593 * ndlp and the reference to ndlp will be stored into the ndlp field of 11594 * the IOCB for the completion callback function to the LOGO ELS command. 
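 * The LOGO payload carries the @vport's own N_Port ID (fc_myDID)
 * followed by its port name (fc_portname); see the command setup below.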
11595 * 11596 * Return codes 11597 * 0 - Successfully issued logo off the @vport 11598 * 1 - Failed to issue logo off the @vport 11599 **/ 11600 int 11601 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11602 { 11603 int rc = 0; 11604 struct lpfc_hba *phba = vport->phba; 11605 struct lpfc_iocbq *elsiocb; 11606 uint8_t *pcmd; 11607 uint16_t cmdsize; 11608 11609 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11610 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11611 ELS_CMD_LOGO); 11612 if (!elsiocb) 11613 return 1; 11614 11615 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11616 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11617 pcmd += sizeof(uint32_t); 11618 11619 /* Fill in LOGO payload */ 11620 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11621 pcmd += sizeof(uint32_t); 11622 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11623 11624 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11625 "Issue LOGO npiv did:x%x flg:x%x", 11626 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11627 11628 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11629 spin_lock_irq(&ndlp->lock); 11630 ndlp->nlp_flag |= NLP_LOGO_SND; 11631 spin_unlock_irq(&ndlp->lock); 11632 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11633 if (!elsiocb->ndlp) { 11634 lpfc_els_free_iocb(phba, elsiocb); 11635 goto err; 11636 } 11637 11638 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11639 if (rc == IOCB_ERROR) { 11640 lpfc_els_free_iocb(phba, elsiocb); 11641 lpfc_nlp_put(ndlp); 11642 goto err; 11643 } 11644 return 0; 11645 11646 err: 11647 spin_lock_irq(&ndlp->lock); 11648 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11649 spin_unlock_irq(&ndlp->lock); 11650 return 1; 11651 } 11652 11653 /** 11654 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11655 * @t: timer context used to obtain the lpfc hba. 11656 * 11657 * This routine is invoked by the fabric iocb block timer after 11658 * timeout. It posts the fabric iocb block timeout event by setting the 11659 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11660 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11661 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11662 * posted event WORKER_FABRIC_BLOCK_TMO. 11663 **/ 11664 void 11665 lpfc_fabric_block_timeout(struct timer_list *t) 11666 { 11667 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11668 unsigned long iflags; 11669 uint32_t tmo_posted; 11670 11671 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11672 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11673 if (!tmo_posted) 11674 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11675 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11676 11677 if (!tmo_posted) 11678 lpfc_worker_wake_up(phba); 11679 return; 11680 } 11681 11682 /** 11683 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11684 * @phba: pointer to lpfc hba data structure. 11685 * 11686 * This routine issues one fabric iocb from the driver internal list to 11687 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11688 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11689 * remove one pending fabric iocb from the driver internal list and invokes 11690 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
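 * If the issue attempt fails with IOCB_ERROR, the iocb is completed
 * locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and the routine
 * moves on to the next pending fabric iocb on the list.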
11691 **/ 11692 static void 11693 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11694 { 11695 struct lpfc_iocbq *iocb; 11696 unsigned long iflags; 11697 int ret; 11698 11699 repeat: 11700 iocb = NULL; 11701 spin_lock_irqsave(&phba->hbalock, iflags); 11702 /* Post any pending iocb to the SLI layer */ 11703 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11704 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11705 list); 11706 if (iocb) 11707 /* Increment fabric iocb count to hold the position */ 11708 atomic_inc(&phba->fabric_iocb_count); 11709 } 11710 spin_unlock_irqrestore(&phba->hbalock, iflags); 11711 if (iocb) { 11712 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11713 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11714 iocb->cmd_flag |= LPFC_IO_FABRIC; 11715 11716 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11717 "Fabric sched1: ste:x%x", 11718 iocb->vport->port_state, 0, 0); 11719 11720 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11721 11722 if (ret == IOCB_ERROR) { 11723 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11724 iocb->fabric_cmd_cmpl = NULL; 11725 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11726 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11727 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11728 iocb->cmd_cmpl(phba, iocb, iocb); 11729 11730 atomic_dec(&phba->fabric_iocb_count); 11731 goto repeat; 11732 } 11733 } 11734 } 11735 11736 /** 11737 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11738 * @phba: pointer to lpfc hba data structure. 11739 * 11740 * This routine unblocks the issuing fabric iocb command. The function 11741 * will clear the fabric iocb block bit and then invoke the routine 11742 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11743 * from the driver internal fabric iocb list. 11744 **/ 11745 void 11746 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11747 { 11748 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11749 11750 lpfc_resume_fabric_iocbs(phba); 11751 return; 11752 } 11753 11754 /** 11755 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11756 * @phba: pointer to lpfc hba data structure. 11757 * 11758 * This routine blocks the issuing fabric iocb for a specified amount of 11759 * time (currently 100 ms). This is done by set the fabric iocb block bit 11760 * and set up a timeout timer for 100ms. When the block bit is set, no more 11761 * fabric iocb will be issued out of the HBA. 11762 **/ 11763 static void 11764 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11765 { 11766 int blocked; 11767 11768 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11769 /* Start a timer to unblock fabric iocbs after 100ms */ 11770 if (!blocked) 11771 mod_timer(&phba->fabric_block_timer, 11772 jiffies + msecs_to_jiffies(100)); 11773 11774 return; 11775 } 11776 11777 /** 11778 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11779 * @phba: pointer to lpfc hba data structure. 11780 * @cmdiocb: pointer to lpfc command iocb data structure. 11781 * @rspiocb: pointer to lpfc response iocb data structure. 11782 * 11783 * This routine is the callback function that is put to the fabric iocb's 11784 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11785 * function pointer has been stored in iocb->fabric_cmd_cmpl. 
This callback
11786 * function first restores and invokes the original iocb's callback function
11787 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11788 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11789 **/
11790 static void
11791 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11792 struct lpfc_iocbq *rspiocb)
11793 {
11794 struct ls_rjt stat;
11795 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11796 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11797
11798 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11799
11800 switch (ulp_status) {
11801 case IOSTAT_NPORT_RJT:
11802 case IOSTAT_FABRIC_RJT:
11803 if (ulp_word4 & RJT_UNAVAIL_TEMP)
11804 lpfc_block_fabric_iocbs(phba);
11805 break;
11806
11807 case IOSTAT_NPORT_BSY:
11808 case IOSTAT_FABRIC_BSY:
11809 lpfc_block_fabric_iocbs(phba);
11810 break;
11811
11812 case IOSTAT_LS_RJT:
11813 stat.un.ls_rjt_error_be =
11814 cpu_to_be32(ulp_word4);
11815 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11816 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11817 lpfc_block_fabric_iocbs(phba);
11818 break;
11819 }
11820
11821 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11822
11823 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
11824 cmdiocb->fabric_cmd_cmpl = NULL;
11825 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
11826 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
11827
11828 atomic_dec(&phba->fabric_iocb_count);
11829 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11830 /* Post any pending iocbs to HBA */
11831 lpfc_resume_fabric_iocbs(phba);
11832 }
11833 }
11834
11835 /**
11836 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11837 * @phba: pointer to lpfc hba data structure.
11838 * @iocb: pointer to lpfc command iocb data structure.
11839 *
11840 * This routine is used as the top-level API for issuing a fabric iocb command
11841 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11842 * function makes sure that only one fabric bound iocb will be outstanding at
11843 * any given time. As such, this function will first check to see whether there
11844 * is already an outstanding fabric iocb on the wire. If so, it will put the
11845 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11846 * issued later. Otherwise, it will issue the iocb on the wire and update the
11847 * fabric iocb count to indicate that there is one fabric iocb on the wire.
11848 *
11849 * Note that this implementation can potentially send fabric IOCBs out of
11850 * order. The problem is that the construction of the "ready" boolean does
11851 * not include the condition that the internal fabric IOCB list is empty. As
11852 * such, it is possible that a fabric IOCB issued by this routine might "jump"
11853 * ahead of the fabric IOCBs already waiting on the internal list.
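 * A stricter readiness test (a sketch only, not what this routine
 * currently does) would also require the pending list to be empty
 * before issuing:
 *
 *	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
 *		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags) &&
 *		list_empty(&phba->fabric_iocb_list);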
11854 * 11855 * Return code 11856 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11857 * IOCB_ERROR - failed to issue fabric iocb 11858 **/ 11859 static int 11860 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11861 { 11862 unsigned long iflags; 11863 int ready; 11864 int ret; 11865 11866 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11867 11868 spin_lock_irqsave(&phba->hbalock, iflags); 11869 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11870 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11871 11872 if (ready) 11873 /* Increment fabric iocb count to hold the position */ 11874 atomic_inc(&phba->fabric_iocb_count); 11875 spin_unlock_irqrestore(&phba->hbalock, iflags); 11876 if (ready) { 11877 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11878 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11879 iocb->cmd_flag |= LPFC_IO_FABRIC; 11880 11881 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11882 "Fabric sched2: ste:x%x", 11883 iocb->vport->port_state, 0, 0); 11884 11885 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11886 11887 if (ret == IOCB_ERROR) { 11888 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11889 iocb->fabric_cmd_cmpl = NULL; 11890 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11891 atomic_dec(&phba->fabric_iocb_count); 11892 } 11893 } else { 11894 spin_lock_irqsave(&phba->hbalock, iflags); 11895 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11896 spin_unlock_irqrestore(&phba->hbalock, iflags); 11897 ret = IOCB_SUCCESS; 11898 } 11899 return ret; 11900 } 11901 11902 /** 11903 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11904 * @vport: pointer to a virtual N_Port data structure. 11905 * 11906 * This routine aborts all the IOCBs associated with a @vport from the 11907 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11908 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11909 * list, removes each IOCB associated with the @vport off the list, set the 11910 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11911 * associated with the IOCB. 11912 **/ 11913 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11914 { 11915 LIST_HEAD(completions); 11916 struct lpfc_hba *phba = vport->phba; 11917 struct lpfc_iocbq *tmp_iocb, *piocb; 11918 11919 spin_lock_irq(&phba->hbalock); 11920 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11921 list) { 11922 11923 if (piocb->vport != vport) 11924 continue; 11925 11926 list_move_tail(&piocb->list, &completions); 11927 } 11928 spin_unlock_irq(&phba->hbalock); 11929 11930 /* Cancel all the IOCBs from the completions list */ 11931 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11932 IOERR_SLI_ABORTED); 11933 } 11934 11935 /** 11936 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11937 * @ndlp: pointer to a node-list data structure. 11938 * 11939 * This routine aborts all the IOCBs associated with an @ndlp from the 11940 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11941 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11942 * list, removes each IOCB associated with the @ndlp off the list, set the 11943 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11944 * associated with the IOCB. 
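 * Whether an IOCB belongs to the @ndlp is determined with
 * lpfc_check_sli_ndlp() against the ELS ring.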
11945 **/
11946 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
11947 {
11948 	LIST_HEAD(completions);
11949 	struct lpfc_hba *phba = ndlp->phba;
11950 	struct lpfc_iocbq *tmp_iocb, *piocb;
11951 	struct lpfc_sli_ring *pring;
11952
11953 	pring = lpfc_phba_elsring(phba);
11954
11955 	if (unlikely(!pring))
11956 		return;
11957
11958 	spin_lock_irq(&phba->hbalock);
11959 	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11960 				 list) {
11961 		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
11962
11963 			list_move_tail(&piocb->list, &completions);
11964 		}
11965 	}
11966 	spin_unlock_irq(&phba->hbalock);
11967
11968 	/* Cancel all the IOCBs from the completions list */
11969 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11970 			      IOERR_SLI_ABORTED);
11971 }
11972
11973 /**
11974 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
11975 * @phba: pointer to lpfc hba data structure.
11976 *
11977 * This routine aborts all the IOCBs currently on the driver internal
11978 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
11979 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
11980 * list, sets the status field of each IOCB to
11981 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
11982 * each IOCB.
11983 **/
11984 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
11985 {
11986 	LIST_HEAD(completions);
11987
11988 	spin_lock_irq(&phba->hbalock);
11989 	list_splice_init(&phba->fabric_iocb_list, &completions);
11990 	spin_unlock_irq(&phba->hbalock);
11991
11992 	/* Cancel all the IOCBs from the completions list */
11993 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11994 			      IOERR_SLI_ABORTED);
11995 }
11996
11997 /**
11998 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
11999 * @vport: pointer to lpfc vport data structure.
12000 *
12001 * This routine is invoked by the vport cleanup for deletions and the cleanup
12002 * for an ndlp on removal.
12003 **/
12004 void
12005 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
12006 {
12007 	struct lpfc_hba *phba = vport->phba;
12008 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
12009 	struct lpfc_nodelist *ndlp = NULL;
12010 	unsigned long iflag = 0;
12011
12012 	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
12013 	list_for_each_entry_safe(sglq_entry, sglq_next,
12014 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
12015 		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
12016 			lpfc_nlp_put(sglq_entry->ndlp);
12017 			ndlp = sglq_entry->ndlp;
12018 			sglq_entry->ndlp = NULL;
12019
12020 			/* If the xri on the abts_els_sgl list is for the Fport
12021 			 * node and the vport is unloading, the xri aborted wcqe
12022 			 * likely isn't coming back. Just release the sgl.
12023 			 */
12024 			if (test_bit(FC_UNLOADING, &vport->load_flag) &&
12025 			    ndlp->nlp_DID == Fabric_DID) {
12026 				list_del(&sglq_entry->list);
12027 				sglq_entry->state = SGL_FREED;
12028 				list_add_tail(&sglq_entry->list,
12029 					&phba->sli4_hba.lpfc_els_sgl_list);
12030 			}
12031 		}
12032 	}
12033 	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
12034 	return;
12035 }
12036
12037 /**
12038 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
12039 * @phba: pointer to lpfc hba data structure.
12040 * @axri: pointer to the els xri abort wcqe structure.
12041 *
12042 * This routine is invoked by the worker thread to process a SLI4 slow-path
12043 * ELS aborted xri.
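 *
 * If the aborted XRI is found on the driver's aborted-ELS sgl list, the sgl
 * is recycled back to lpfc_els_sgl_list; when the sgl is still tied to a
 * node, lpfc_set_rrq_active() is called for that exchange before the node
 * reference is dropped.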
12044 **/ 12045 void 12046 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 12047 struct sli4_wcqe_xri_aborted *axri) 12048 { 12049 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 12050 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 12051 uint16_t lxri = 0; 12052 12053 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 12054 unsigned long iflag = 0; 12055 struct lpfc_nodelist *ndlp; 12056 struct lpfc_sli_ring *pring; 12057 12058 pring = lpfc_phba_elsring(phba); 12059 12060 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 12061 list_for_each_entry_safe(sglq_entry, sglq_next, 12062 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 12063 if (sglq_entry->sli4_xritag == xri) { 12064 list_del(&sglq_entry->list); 12065 ndlp = sglq_entry->ndlp; 12066 sglq_entry->ndlp = NULL; 12067 list_add_tail(&sglq_entry->list, 12068 &phba->sli4_hba.lpfc_els_sgl_list); 12069 sglq_entry->state = SGL_FREED; 12070 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 12071 iflag); 12072 12073 if (ndlp) { 12074 lpfc_set_rrq_active(phba, ndlp, 12075 sglq_entry->sli4_lxritag, 12076 rxid, 1); 12077 lpfc_nlp_put(ndlp); 12078 } 12079 12080 /* Check if TXQ queue needs to be serviced */ 12081 if (pring && !list_empty(&pring->txq)) 12082 lpfc_worker_wake_up(phba); 12083 return; 12084 } 12085 } 12086 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 12087 lxri = lpfc_sli4_xri_inrange(phba, xri); 12088 if (lxri == NO_XRI) 12089 return; 12090 12091 spin_lock_irqsave(&phba->hbalock, iflag); 12092 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 12093 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 12094 spin_unlock_irqrestore(&phba->hbalock, iflag); 12095 return; 12096 } 12097 sglq_entry->state = SGL_XRI_ABORTED; 12098 spin_unlock_irqrestore(&phba->hbalock, iflag); 12099 return; 12100 } 12101 12102 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 12103 * @vport: pointer to virtual port object. 12104 * @ndlp: nodelist pointer for the impacted node. 12105 * 12106 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 12107 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 12108 * the driver is required to send a LOGO to the remote node before it 12109 * attempts to recover its login to the remote node. 12110 */ 12111 void 12112 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 12113 struct lpfc_nodelist *ndlp) 12114 { 12115 struct Scsi_Host *shost; 12116 struct lpfc_hba *phba; 12117 unsigned long flags = 0; 12118 12119 shost = lpfc_shost_from_vport(vport); 12120 phba = vport->phba; 12121 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 12122 lpfc_printf_log(phba, KERN_INFO, 12123 LOG_SLI, "3093 No rport recovery needed. " 12124 "rport in state 0x%x\n", ndlp->nlp_state); 12125 return; 12126 } 12127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12128 "3094 Start rport recovery on shost id 0x%x " 12129 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 12130 "flags 0x%x\n", 12131 shost->host_no, ndlp->nlp_DID, 12132 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 12133 ndlp->nlp_flag); 12134 /* 12135 * The rport is not responding. Remove the FCP-2 flag to prevent 12136 * an ADISC in the follow-up recovery code. 
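 * NLP_ISSUE_LOGO is also set so the LOGO required for recovery is sent as
 * part of unregistering the RPI below.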
12137 */ 12138 spin_lock_irqsave(&ndlp->lock, flags); 12139 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 12140 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 12141 spin_unlock_irqrestore(&ndlp->lock, flags); 12142 lpfc_unreg_rpi(vport, ndlp); 12143 } 12144 12145 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 12146 { 12147 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 12148 } 12149 12150 static void 12151 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 12152 { 12153 u32 i; 12154 12155 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 12156 return; 12157 12158 for (i = min; i <= max; i++) 12159 set_bit(i, vport->vmid_priority_range); 12160 } 12161 12162 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 12163 { 12164 set_bit(ctcl_vmid, vport->vmid_priority_range); 12165 } 12166 12167 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 12168 { 12169 u32 i; 12170 12171 i = find_first_bit(vport->vmid_priority_range, 12172 LPFC_VMID_MAX_PRIORITY_RANGE); 12173 12174 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 12175 return 0; 12176 12177 clear_bit(i, vport->vmid_priority_range); 12178 return i; 12179 } 12180 12181 #define MAX_PRIORITY_DESC 255 12182 12183 static void 12184 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 12185 struct lpfc_iocbq *rspiocb) 12186 { 12187 struct lpfc_vport *vport = cmdiocb->vport; 12188 struct priority_range_desc *desc; 12189 struct lpfc_dmabuf *prsp = NULL; 12190 struct lpfc_vmid_priority_range *vmid_range = NULL; 12191 u32 *data; 12192 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf; 12193 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12194 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12195 u8 *pcmd, max_desc; 12196 u32 len, i; 12197 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 12198 12199 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12200 if (!prsp) 12201 goto out; 12202 12203 pcmd = prsp->virt; 12204 data = (u32 *)pcmd; 12205 if (data[0] == ELS_CMD_LS_RJT) { 12206 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12207 "3277 QFPA LS_RJT x%x x%x\n", 12208 data[0], data[1]); 12209 goto out; 12210 } 12211 if (ulp_status) { 12212 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 12213 "6529 QFPA failed with status x%x x%x\n", 12214 ulp_status, ulp_word4); 12215 goto out; 12216 } 12217 12218 if (!vport->qfpa_res) { 12219 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 12220 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 12221 GFP_KERNEL); 12222 if (!vport->qfpa_res) 12223 goto out; 12224 } 12225 12226 len = *((u32 *)(pcmd + 4)); 12227 len = be32_to_cpu(len); 12228 memcpy(vport->qfpa_res, pcmd, len + 8); 12229 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 12230 12231 desc = (struct priority_range_desc *)(pcmd + 8); 12232 vmid_range = vport->vmid_priority.vmid_range; 12233 if (!vmid_range) { 12234 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 12235 GFP_KERNEL); 12236 if (!vmid_range) { 12237 kfree(vport->qfpa_res); 12238 goto out; 12239 } 12240 vport->vmid_priority.vmid_range = vmid_range; 12241 } 12242 vport->vmid_priority.num_descriptors = len; 12243 12244 for (i = 0; i < len; i++, vmid_range++, desc++) { 12245 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 12246 "6539 vmid values low=%d, high=%d, qos=%d, " 12247 "local ve id=%d\n", desc->lo_range, 12248 desc->hi_range, desc->qos_priority, 12249 desc->local_ve_id); 12250 12251 vmid_range->low = desc->lo_range << 1; 12252 if (desc->local_ve_id == QFPA_ODD_ONLY) 12253 vmid_range->low++; 
12254 if (desc->qos_priority) 12255 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; 12256 vmid_range->qos = desc->qos_priority; 12257 12258 vmid_range->high = desc->hi_range << 1; 12259 if ((desc->local_ve_id == QFPA_ODD_ONLY) || 12260 (desc->local_ve_id == QFPA_EVEN_ODD)) 12261 vmid_range->high++; 12262 } 12263 lpfc_init_cs_ctl_bitmap(vport); 12264 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { 12265 lpfc_vmid_set_cs_ctl_range(vport, 12266 vport->vmid_priority.vmid_range[i].low, 12267 vport->vmid_priority.vmid_range[i].high); 12268 } 12269 12270 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; 12271 out: 12272 lpfc_els_free_iocb(phba, cmdiocb); 12273 lpfc_nlp_put(ndlp); 12274 } 12275 12276 int lpfc_issue_els_qfpa(struct lpfc_vport *vport) 12277 { 12278 struct lpfc_hba *phba = vport->phba; 12279 struct lpfc_nodelist *ndlp; 12280 struct lpfc_iocbq *elsiocb; 12281 u8 *pcmd; 12282 int ret; 12283 12284 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 12285 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12286 return -ENXIO; 12287 12288 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, 12289 ndlp->nlp_DID, ELS_CMD_QFPA); 12290 if (!elsiocb) 12291 return -ENOMEM; 12292 12293 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 12294 12295 *((u32 *)(pcmd)) = ELS_CMD_QFPA; 12296 pcmd += 4; 12297 12298 elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa; 12299 12300 elsiocb->ndlp = lpfc_nlp_get(ndlp); 12301 if (!elsiocb->ndlp) { 12302 lpfc_els_free_iocb(vport->phba, elsiocb); 12303 return -ENXIO; 12304 } 12305 12306 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); 12307 if (ret != IOCB_SUCCESS) { 12308 lpfc_els_free_iocb(phba, elsiocb); 12309 lpfc_nlp_put(ndlp); 12310 return -EIO; 12311 } 12312 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; 12313 return 0; 12314 } 12315 12316 int 12317 lpfc_vmid_uvem(struct lpfc_vport *vport, 12318 struct lpfc_vmid *vmid, bool instantiated) 12319 { 12320 struct lpfc_vem_id_desc *vem_id_desc; 12321 struct lpfc_nodelist *ndlp; 12322 struct lpfc_iocbq *elsiocb; 12323 struct instantiated_ve_desc *inst_desc; 12324 struct lpfc_vmid_context *vmid_context; 12325 u8 *pcmd; 12326 u32 *len; 12327 int ret = 0; 12328 12329 ndlp = lpfc_findnode_did(vport, Fabric_DID); 12330 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12331 return -ENXIO; 12332 12333 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); 12334 if (!vmid_context) 12335 return -ENOMEM; 12336 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, 12337 ndlp, Fabric_DID, ELS_CMD_UVEM); 12338 if (!elsiocb) 12339 goto out; 12340 12341 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 12342 "3427 Host vmid %s %d\n", 12343 vmid->host_vmid, instantiated); 12344 vmid_context->vmp = vmid; 12345 vmid_context->nlp = ndlp; 12346 vmid_context->instantiated = instantiated; 12347 elsiocb->vmid_tag.vmid_context = vmid_context; 12348 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 12349 12350 if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0, 12351 sizeof(vport->lpfc_vmid_host_uuid))) 12352 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, 12353 sizeof(vport->lpfc_vmid_host_uuid)); 12354 12355 *((u32 *)(pcmd)) = ELS_CMD_UVEM; 12356 len = (u32 *)(pcmd + 4); 12357 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); 12358 12359 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); 12360 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); 12361 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); 12362 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, 12363 sizeof(vem_id_desc->vem_id)); 12364 12365 inst_desc = (struct 
instantiated_ve_desc *)(pcmd + 32); 12366 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 12367 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); 12368 memcpy(inst_desc->global_vem_id, vmid->host_vmid, 12369 sizeof(inst_desc->global_vem_id)); 12370 12371 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); 12372 bf_set(lpfc_instantiated_local_id, inst_desc, 12373 vmid->un.cs_ctl_vmid); 12374 if (instantiated) { 12375 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 12376 } else { 12377 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); 12378 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); 12379 } 12380 inst_desc->word6 = cpu_to_be32(inst_desc->word6); 12381 12382 elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem; 12383 12384 elsiocb->ndlp = lpfc_nlp_get(ndlp); 12385 if (!elsiocb->ndlp) { 12386 lpfc_els_free_iocb(vport->phba, elsiocb); 12387 goto out; 12388 } 12389 12390 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); 12391 if (ret != IOCB_SUCCESS) { 12392 lpfc_els_free_iocb(vport->phba, elsiocb); 12393 lpfc_nlp_put(ndlp); 12394 goto out; 12395 } 12396 12397 return 0; 12398 out: 12399 kfree(vmid_context); 12400 return -EIO; 12401 } 12402 12403 static void 12404 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, 12405 struct lpfc_iocbq *rspiocb) 12406 { 12407 struct lpfc_vport *vport = icmdiocb->vport; 12408 struct lpfc_dmabuf *prsp = NULL; 12409 struct lpfc_vmid_context *vmid_context = 12410 icmdiocb->vmid_tag.vmid_context; 12411 struct lpfc_nodelist *ndlp = icmdiocb->ndlp; 12412 u8 *pcmd; 12413 u32 *data; 12414 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 12415 u32 ulp_word4 = get_job_word4(phba, rspiocb); 12416 struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf; 12417 struct lpfc_vmid *vmid; 12418 12419 vmid = vmid_context->vmp; 12420 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 12421 ndlp = NULL; 12422 12423 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 12424 if (!prsp) 12425 goto out; 12426 pcmd = prsp->virt; 12427 data = (u32 *)pcmd; 12428 if (data[0] == ELS_CMD_LS_RJT) { 12429 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12430 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); 12431 goto out; 12432 } 12433 if (ulp_status) { 12434 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 12435 "4533 UVEM error status %x: %x\n", 12436 ulp_status, ulp_word4); 12437 goto out; 12438 } 12439 spin_lock(&phba->hbalock); 12440 /* Set IN USE flag */ 12441 vport->vmid_flag |= LPFC_VMID_IN_USE; 12442 phba->pport->vmid_flag |= LPFC_VMID_IN_USE; 12443 spin_unlock(&phba->hbalock); 12444 12445 if (vmid_context->instantiated) { 12446 write_lock(&vport->vmid_lock); 12447 vmid->flag |= LPFC_VMID_REGISTERED; 12448 vmid->flag &= ~LPFC_VMID_REQ_REGISTER; 12449 write_unlock(&vport->vmid_lock); 12450 } 12451 12452 out: 12453 kfree(vmid_context); 12454 lpfc_els_free_iocb(phba, icmdiocb); 12455 lpfc_nlp_put(ndlp); 12456 } 12457
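
/*
 * Illustrative usage sketch (editor's note, not part of the driver): with the
 * priority ranges learned from QFPA, a CS_CTL based VMID registration would
 * typically pick a value from the bitmap and register it over UVEM, e.g.:
 *
 *	vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
 *	rc = lpfc_vmid_uvem(vport, vmid, true);
 *
 * Deregistration calls lpfc_vmid_uvem(vport, vmid, false), which also returns
 * the CS_CTL value to the pool via lpfc_vmid_put_cs_ctl().
 */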