/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);
static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
			      struct lpfc_iocbq *cmdiocb,
			      struct lpfc_iocbq *rspiocb);
static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *);

static int lpfc_max_els_tries = 3;

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register.
 * If there are any host link attention events during this @vport's discovery
 * process, the @vport shall be marked as FC_ABORT_DISCOVERY, a host link
 * attention clear shall be issued if the link state is not already in the
 * host link cleared state, and a return code shall indicate whether the host
 * link attention event had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
 * state is in LPFC_VPORT_READY, the request for checking host link attention
 * event will be ignored and a return code shall indicate no host link
 * attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	set_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

static bool lpfc_is_els_acc_rsp(struct lpfc_dmabuf *buf)
{
	struct fc_els_ls_acc *rsp = buf->virt;

	if (rsp && rsp->la_cmd == ELS_LS_ACC)
		return true;
	return false;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expect_rsp: flag indicating whether response is expected.
 * @cmd_size: size of the ELS command.
 * @retry: number of retries to the command when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and preparing the IOCB with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines and the ELS command-specific fields will be later set up by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * ndlp of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
		   u16 cmd_size, u8 retry,
		   struct lpfc_nodelist *ndlp, u32 did,
		   u32 elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
	struct ulp_bde64_le *bpl;
	u32 timeout = 0;

	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);
	if (!elsiocb)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->cmd_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expect_rsp) {
		prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else {
		prsp = NULL;
	}

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expect_rsp) {
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			timeout = FF_DEF_RATOV * 2;
			break;
		case ELS_CMD_LOGO:
			timeout = phba->fc_ratov;
			break;
		default:
			timeout = phba->fc_ratov * 2;
		}

		/* Fill SGE for the num bde count */
		elsiocb->num_bdes = 2;
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		bmp = pcmd;
	else
		bmp = pbuflist;

	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
				  elscmd, timeout, expect_rsp);

	bpl = (struct ulp_bde64_le *)pbuflist->virt;
	bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
	bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
	bpl->type_size = cpu_to_le32(cmd_size);
	bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

	if (expect_rsp) {
		bpl++;
		bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
		bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
		bpl->type_size = cpu_to_le32(FCELSSIZE);
		bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
	}

	elsiocb->cmd_dmabuf = pcmd;
	elsiocb->bpl_dmabuf = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp)
		list_add(&prsp->list, &pcmd->list);
	if (expect_rsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%lx\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%lx\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmd_size, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}

	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expect_rsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_free_mbox;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_free_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !test_bit(FC_PT2PT, &vport->fc_flag)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if (test_bit(FC_FABRIC, &vport->fc_flag) ||
	    test_bit(FC_PT2PT, &vport->fc_flag)) {
		rc = lpfc_mbox_rsrc_prep(phba, mboxq);
		if (rc) {
			rc = -ENOMEM;
			goto fail_mbox;
		}
		dmabuf = mboxq->ctx_buf;
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf) {
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
		/* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
		mboxq->ctx_buf = dmabuf;
	} else {
		lpfc_reg_vfi(mboxq, vport, 0);
	}

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail_mbox;
	}
	return 0;

fail_mbox:
	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the FCID,
 * Fabric portname, or Fabric nodename has changed in the completion service
 * parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename, and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename, or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery))
		set_bit(FC_DISC_DELAYED, &vport->fc_flag);

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @ulp_word4: command response value
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology.
 * It properly sets up the parameters to the @ndlp from the IOCB response.
 * It also checks the newly assigned N_Port ID to the @vport against the
 * previously assigned N_Port ID. If it is different from the previously
 * assigned Destination ID (DID), the lpfc_unreg_rpi() routine is invoked
 * on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, uint32_t ulp_word4)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	set_bit(FC_FABRIC, &vport->fc_flag);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
		set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);

	vport->fc_myDID = ulp_word4 & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs */
		if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned.
 * Otherwise, this node shall just wait for the remote node to issue PLOGI
 * and assign N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	clear_bit(FC_FABRIC, &vport->fc_flag);
	clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
	set_bit(FC_PT2PT, &vport->fc_flag);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);
		clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		set_bit(FC_PT2PT_PLOGI, &vport->fc_flag);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* If not registered with a transport, decrement ndlp reference
		 * count indicating that ndlp can be safely released when other
		 * references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI. If not registered with
		 * a transport, decrement node reference count indicating that
		 * ndlp can be released when other references are removed.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
			lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI.
 * If a retry has been made (either immediately or delayed, with
 * lpfc_els_retry() returning 1), the command IOCB will be released and the
 * function returns. If the retry attempt has been given up (possibly after
 * reaching the maximum number of retries), one additional decrement of the
 * ndlp reference shall be invoked before going out after releasing the
 * command IOCB. This will actually release the remote node (Note,
 * lpfc_els_free_iocb() will also invoke one decrement of the ndlp reference
 * count). If no error is reported in the IOCB status, the command Port ID
 * field is used to determine whether this is a point-to-point topology or a
 * fabric topology: if the Port ID field is assigned, it is a fabric
 * topology; otherwise, it is a point-to-point topology. The routine
 * lpfc_cmpl_els_flogi_fabric() or lpfc_cmpl_els_flogi_nport() shall be
 * invoked accordingly to handle the specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	IOCB_t *irsp;
	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;
	u32 ulp_status, ulp_word4, tmo;
	bool flogi_in_retry = false;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);
		goto out;
	}

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		tmo = get_wqe_tmo(cmdiocb);
	} else {
		irsp = &rspiocb->iocb;
		tmo = irsp->ulpTimeout;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      ulp_status, ulp_word4,
			      vport->port_state);

	if (ulp_status) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (ulp_status == IOSTAT_LOCAL_REJECT) &&
			    ((ulp_word4 & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					ulp_status, ulp_word4, tmo);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 ulp_status, ulp_word4, tmo,
					 phba->hba_flag, phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* Address a timing race with dev_loss. If dev_loss
			 * is active on this FPort node, put the initial ref
			 * count back to stop premature node release actions.
			 */
			lpfc_check_nlp_post_devloss(vport, ndlp);
			flogi_in_retry = true;
			goto out;
		}

		/* The FLOGI will not be retried. If the FPort node is not
		 * registered with the SCSI transport, remove the initial
		 * reference to trigger node release.
		 */
		if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) &&
		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x refcnt %d\n",
				 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
				 tmo, kref_read(&ndlp->kref));

		/* If this is not a loop open failure, bail out */
		if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
		      ((ulp_word4 & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE))) {
			/* FLOGI failure */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0100 FLOGI failure Status:x%x/x%x "
					 "TMO:x%x\n",
					 ulp_status, ulp_word4, tmo);
			goto flogifail;
		}

		/* FLOGI failed, so there is no fabric */
		clear_bit(FC_FABRIC, &vport->fc_flag);
		clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
		clear_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!test_bit(FC_VFI_REGISTERED, &vport->fc_flag) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					clear_bit(FC_VFI_REGISTERED,
						  &vport->fc_flag);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
				lpfc_issue_reg_vfi(vport);

			goto out;
		}
		goto flogifail;
	}
	clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
	clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);

	/*
	 * The FLOGI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	if (!lpfc_is_els_acc_rsp(prsp))
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x "
			 "xri x%x Data: x%x x%x x%x x%x x%x x%lx x%x %d\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 ulp_word4, sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag,
			 sp->cmn.priority_tagging, kref_read(&ndlp->kref));

	/* reinitialize the VMID datastructure before returning */
	if (lpfc_is_vmid_enabled(phba))
		lpfc_reinit_vmid(vport);
	if (sp->cmn.priority_tagging)
		vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
						  LPFC_VMID_TYPE_PRIO);

	/*
	 * Address a timing race with dev_loss.
	 * If dev_loss is active on this FPort node, put the initial ref
	 * count back to stop premature node release actions.
	 */
	lpfc_check_nlp_post_devloss(vport, ndlp);
	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
							ulp_word4);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	} else if (vport->port_state > LPFC_FLOGI &&
		   test_bit(FC_PT2PT, &vport->fc_flag)) {
		/*
		 * In a p2p topology, it is possible that discovery has
		 * already progressed, and this completion can be ignored.
		 * Recheck the indicated topology.
		 */
		if (!sp->cmn.fPort)
			goto out;
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
		    (((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_ABORTED) &&
		     ((ulp_word4 & IOERR_PARAM_MASK) !=
		      IOERR_SLI_DOWN))) &&
		   (phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	if (!flogi_in_retry)
		phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;

	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 * aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	uint32_t *pcmd;
	uint32_t cmd;
	u32 ulp_status, ulp_word4;

	pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
	cmd = *pcmd;

	ulp_status = get_job_ulpstatus(phba, rspiocb);
	ulp_word4 = get_job_word4(phba, rspiocb);

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			ulp_status, ulp_word4, cmd,
			cmdiocb->cmd_flag);

	if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
		cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the ndlp field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	union lpfc_wqe128 *wqe = NULL;
	IOCB_t *icmd = NULL;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	u8 *pcmd, ct;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	wqe = &elsiocb->wqe;
	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
	icmd = &elsiocb->iocb;

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	/* Determine if switch supports priority tagging */
	if (phba->cfg_vmid_priority_tagging) {
		sp->cmn.priority_tagging = 1;
		/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
		if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
				sizeof(vport->lpfc_vmid_host_uuid))) {
			memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
			       sizeof(phba->wwpn));
			memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
			       sizeof(phba->wwnn));
		}
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			/* FLOGI needs to be 3 for WQE FCFI */
			ct = SLI4_CT_FCFI;
			bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);

			/* Set the fcfi to the fcfi we registered with */
			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
			       phba->fcf.fcfi);
		}

		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else {
			sp->cmn.request_multiple_Nport = 0;
		}

		if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
			icmd->un.elsreq64.myID = 0;
			icmd->un.elsreq64.fl = 1;
		}
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	/* Avoid race with FLOGI completion and hba_flags. */
	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		/* lookup ndlp for received FLOGI */
		ndlp = lpfc_findnode_did(vport, 0);
		if (!ndlp)
			return 0;

		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_ctxt_tag,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_rx_id);
			bf_set(wqe_rcvoxid,
			       &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
			       phba->defer_flogi_acc_ox_id);
		} else {
			icmd = &defer_flogi_acc.iocb;
			icmd->ulpContext = phba->defer_flogi_acc_rx_id;
			icmd->unsli3.rcvsli3.ox_id =
				phba->defer_flogi_acc_ox_id;
		}

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;
		vport->fc_myDID = did;

		/* Decrement ndlp reference count to indicate the node can be
		 * released when other references are removed.
		 */
		lpfc_nlp_put(ndlp);
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	u32 ulp_command;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		ulp_command = get_job_cmnd(phba, iocb);
		if (ulp_command == CMD_ELS_REQUEST64_CR) {
			ndlp = iocb->ndlp;
			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
				if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
				    !test_bit(FC_PT2PT_PLOGI,
					      &phba->pport->fc_flag))
					iocb->fabric_cmd_cmpl =
						lpfc_ignore_els_cmpl;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
		}
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Reset the Fabric flag, topology change may have happened */
	clear_bit(FC_FABRIC, &vport->fc_flag);
	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified.
 * It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* A node reference should be retained while registered with a
		 * transport or dev-loss-evt work is pending.
		 * Otherwise, decrement node reference to trigger release.
		 */
		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
		    !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
			lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%lx x%x\n",
			 vport->num_disc_nodes,
			 atomic_read(&vport->fc_plogi_cnt),
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (test_bit(FC_NLP_MORE, &vport->fc_flag))
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned.
 * If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from not-matching
 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
 * of the @ndlp. This is because the release of @ndlp is actually to put it
 * into an inactive state on the vport node list and the vport node list
 * management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t keepDID = 0, keep_nlp_flag = 0;
	int rc;
	uint32_t keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (new_ndlp == ndlp)
		return ndlp;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
			 "new_ndlp x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
		if (!new_ndlp) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
	} else {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    active_rrqs_xri_bitmap)
			memcpy(active_rrqs_xri_bitmap,
			       new_ndlp->active_rrqs_xri_bitmap,
			       phba->cfg_rrq_xri_bitmap_sz);

		/*
		 * Unregister from backend if not done yet.
Could have been 1729 * skipped due to ADISC 1730 */ 1731 lpfc_nlp_unreg_node(vport, new_ndlp); 1732 } 1733 1734 keepDID = new_ndlp->nlp_DID; 1735 1736 /* At this point in this routine, we know new_ndlp will be 1737 * returned. however, any previous GID_FTs that were done 1738 * would have updated nlp_fc4_type in ndlp, so we must ensure 1739 * new_ndlp has the right value. 1740 */ 1741 if (test_bit(FC_FABRIC, &vport->fc_flag)) { 1742 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1743 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1744 } 1745 1746 lpfc_unreg_rpi(vport, new_ndlp); 1747 new_ndlp->nlp_DID = ndlp->nlp_DID; 1748 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1749 if (phba->sli_rev == LPFC_SLI_REV4) 1750 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1751 ndlp->active_rrqs_xri_bitmap, 1752 phba->cfg_rrq_xri_bitmap_sz); 1753 1754 /* Lock both ndlps */ 1755 spin_lock_irq(&ndlp->lock); 1756 spin_lock_irq(&new_ndlp->lock); 1757 keep_new_nlp_flag = new_ndlp->nlp_flag; 1758 keep_nlp_flag = ndlp->nlp_flag; 1759 new_ndlp->nlp_flag = ndlp->nlp_flag; 1760 1761 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1762 if (keep_new_nlp_flag & NLP_UNREG_INP) 1763 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1764 else 1765 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1766 1767 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1768 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1769 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1770 else 1771 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1772 1773 /* 1774 * Retain the DROPPED flag. This will take care of the init 1775 * refcount when affecting the state change 1776 */ 1777 if (keep_new_nlp_flag & NLP_DROPPED) 1778 new_ndlp->nlp_flag |= NLP_DROPPED; 1779 else 1780 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1781 1782 ndlp->nlp_flag = keep_new_nlp_flag; 1783 1784 /* if ndlp had NLP_UNREG_INP set, keep it */ 1785 if (keep_nlp_flag & NLP_UNREG_INP) 1786 ndlp->nlp_flag |= NLP_UNREG_INP; 1787 else 1788 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1789 1790 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1791 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1792 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1793 else 1794 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1795 1796 /* 1797 * Retain the DROPPED flag. This will take care of the init 1798 * refcount when affecting the state change 1799 */ 1800 if (keep_nlp_flag & NLP_DROPPED) 1801 ndlp->nlp_flag |= NLP_DROPPED; 1802 else 1803 ndlp->nlp_flag &= ~NLP_DROPPED; 1804 1805 spin_unlock_irq(&new_ndlp->lock); 1806 spin_unlock_irq(&ndlp->lock); 1807 1808 /* Set nlp_states accordingly */ 1809 keep_nlp_state = new_ndlp->nlp_state; 1810 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1811 1812 /* interchange the nvme remoteport structs */ 1813 keep_nrport = new_ndlp->nrport; 1814 new_ndlp->nrport = ndlp->nrport; 1815 1816 /* Move this back to NPR state */ 1817 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1818 /* The ndlp doesn't have a portname yet, but does have an 1819 * NPort ID. The new_ndlp portname matches the Rport's 1820 * portname. Reinstantiate the new_ndlp and reset the ndlp. 1821 */ 1822 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1823 "3179 PLOGI confirm NEW: %x %x\n", 1824 new_ndlp->nlp_DID, keepDID); 1825 1826 /* Two ndlps cannot have the same did on the nodelist. 1827 * The KeepDID and keep_nlp_fc4_type need to be swapped 1828 * because ndlp is inflight with no WWPN. 
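 * For illustration only (hypothetical IDs, not taken from a trace): if
 * @ndlp is inflight holding DID 0x011200 and new_ndlp previously held DID
 * 0x011300, then after the assignments below new_ndlp carries 0x011200
 * (the live login) while ndlp is parked with keepDID 0x011300, so the
 * vport node list never holds two nodes with the same DID.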
1829 */ 1830 ndlp->nlp_DID = keepDID; 1831 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1832 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1833 if (phba->sli_rev == LPFC_SLI_REV4 && 1834 active_rrqs_xri_bitmap) 1835 memcpy(ndlp->active_rrqs_xri_bitmap, 1836 active_rrqs_xri_bitmap, 1837 phba->cfg_rrq_xri_bitmap_sz); 1838 1839 } else { 1840 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1841 "3180 PLOGI confirm SWAP: %x %x\n", 1842 new_ndlp->nlp_DID, keepDID); 1843 1844 lpfc_unreg_rpi(vport, ndlp); 1845 1846 /* The ndlp and new_ndlp both have WWPNs but are swapping 1847 * NPort Ids and attributes. 1848 */ 1849 ndlp->nlp_DID = keepDID; 1850 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1851 1852 if (phba->sli_rev == LPFC_SLI_REV4 && 1853 active_rrqs_xri_bitmap) 1854 memcpy(ndlp->active_rrqs_xri_bitmap, 1855 active_rrqs_xri_bitmap, 1856 phba->cfg_rrq_xri_bitmap_sz); 1857 1858 /* Since we are switching over to the new_ndlp, 1859 * reset the old ndlp state 1860 */ 1861 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1862 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1863 keep_nlp_state = NLP_STE_NPR_NODE; 1864 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1865 ndlp->nrport = keep_nrport; 1866 } 1867 1868 /* 1869 * If ndlp is not associated with any rport we can drop it here else 1870 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1871 */ 1872 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1873 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1874 1875 if (phba->sli_rev == LPFC_SLI_REV4 && 1876 active_rrqs_xri_bitmap) 1877 mempool_free(active_rrqs_xri_bitmap, 1878 phba->active_rrq_pool); 1879 1880 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1881 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1882 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1883 new_ndlp->nlp_fc4_type); 1884 1885 return new_ndlp; 1886 } 1887 1888 /** 1889 * lpfc_end_rscn - Check and handle more rscn for a vport 1890 * @vport: pointer to a host virtual N_Port data structure. 1891 * 1892 * This routine checks whether more Registration State Change 1893 * Notifications (RSCNs) came in while the discovery state machine was in 1894 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1895 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1896 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1897 * handling the RSCNs. 1898 **/ 1899 void 1900 lpfc_end_rscn(struct lpfc_vport *vport) 1901 { 1902 1903 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { 1904 /* 1905 * Check to see if more RSCNs came in while we were 1906 * processing this one. 1907 */ 1908 if (vport->fc_rscn_id_cnt || 1909 test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) 1910 lpfc_els_handle_rscn(vport); 1911 else 1912 clear_bit(FC_RSCN_MODE, &vport->fc_flag); 1913 } 1914 } 1915 1916 /** 1917 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1918 * @phba: pointer to lpfc hba data structure. 1919 * @cmdiocb: pointer to lpfc command iocb data structure. 1920 * @rspiocb: pointer to lpfc response iocb data structure. 1921 * 1922 * This routine will call the clear rrq function to free the rrq and 1923 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1924 * exist then the clear_rrq is still called because the rrq needs to 1925 * be freed. 
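 *
 * Note that, as in the other ELS completions in this file, the node
 * reference taken with lpfc_nlp_get() when the RRQ was issued is dropped
 * here. A condensed sketch of that pairing (mirroring the existing code,
 * not new behavior):
 *
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);	(at issue time)
 *	...
 *	lpfc_els_free_iocb(phba, cmdiocb);	(in this completion)
 *	lpfc_nlp_put(ndlp);			(drops the issue-time reference)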
1926 **/
1927
1928 static void
1929 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1930 struct lpfc_iocbq *rspiocb)
1931 {
1932 struct lpfc_vport *vport = cmdiocb->vport;
1933 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
1934 struct lpfc_node_rrq *rrq;
1935 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1936 u32 ulp_word4 = get_job_word4(phba, rspiocb);
1937
1938 /* we pass cmdiocb to state machine which needs rspiocb as well */
1939 rrq = cmdiocb->context_un.rrq;
1940 cmdiocb->rsp_iocb = rspiocb;
1941
1942 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1943 "RRQ cmpl: status:x%x/x%x did:x%x",
1944 ulp_status, ulp_word4,
1945 get_job_els_rsp64_did(phba, cmdiocb));
1946
1947
1948 /* rrq completes to NPort <nlp_DID> */
1949 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1950 "2880 RRQ completes to DID x%x "
1951 "Data: x%x x%x x%x x%x x%x\n",
1952 ndlp->nlp_DID, ulp_status, ulp_word4,
1953 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
1954
1955 if (ulp_status) {
1956 /* Check for retry */
1957 /* RRQ failed Don't print the vport to vport rjts */
1958 if (ulp_status != IOSTAT_LS_RJT ||
1959 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
1960 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
1961 (phba)->pport->cfg_log_verbose & LOG_ELS)
1962 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1963 "2881 RRQ failure DID:%06X Status:"
1964 "x%x/x%x\n",
1965 ndlp->nlp_DID, ulp_status,
1966 ulp_word4);
1967 }
1968
1969 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1970 lpfc_els_free_iocb(phba, cmdiocb);
1971 lpfc_nlp_put(ndlp);
1972 return;
1973 }
1974 /**
1975 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1976 * @phba: pointer to lpfc hba data structure.
1977 * @cmdiocb: pointer to lpfc command iocb data structure.
1978 * @rspiocb: pointer to lpfc response iocb data structure.
1979 *
1980 * This routine is the completion callback function for issuing the Port
1981 * Login (PLOGI) command. For PLOGI completion, there must be an active
1982 * ndlp on the vport node list that matches the remote node ID from the
1983 * PLOGI response IOCB. If such an ndlp does not exist, the PLOGI is simply
1984 * ignored and the command IOCB released. The PLOGI response IOCB status is
1985 * checked for error conditions. If there is error status reported, a PLOGI
1986 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1987 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1988 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discovery State
1989 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1990 * there are additional N_Port nodes on the vport that need to perform
1991 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1992 * PLOGIs.
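 *
 * A condensed sketch of the flow implemented below, for orientation only
 * (the function body is authoritative):
 *
 *	if (ulp_status) {
 *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *			goto out;	(retry is in progress)
 *		lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI);
 *	} else {
 *		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 *		lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI);
 *	}
 *	if (disc && vport->num_disc_nodes)
 *		lpfc_more_plogi(vport);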
1993 **/ 1994 static void 1995 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1996 struct lpfc_iocbq *rspiocb) 1997 { 1998 struct lpfc_vport *vport = cmdiocb->vport; 1999 IOCB_t *irsp; 2000 struct lpfc_nodelist *ndlp, *free_ndlp; 2001 struct lpfc_dmabuf *prsp; 2002 int disc; 2003 struct serv_parm *sp = NULL; 2004 u32 ulp_status, ulp_word4, did, iotag; 2005 bool release_node = false; 2006 2007 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2008 cmdiocb->rsp_iocb = rspiocb; 2009 2010 ulp_status = get_job_ulpstatus(phba, rspiocb); 2011 ulp_word4 = get_job_word4(phba, rspiocb); 2012 did = get_job_els_rsp64_did(phba, cmdiocb); 2013 2014 if (phba->sli_rev == LPFC_SLI_REV4) { 2015 iotag = get_wqe_reqtag(cmdiocb); 2016 } else { 2017 irsp = &rspiocb->iocb; 2018 iotag = irsp->ulpIoTag; 2019 } 2020 2021 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2022 "PLOGI cmpl: status:x%x/x%x did:x%x", 2023 ulp_status, ulp_word4, did); 2024 2025 ndlp = lpfc_findnode_did(vport, did); 2026 if (!ndlp) { 2027 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2028 "0136 PLOGI completes to NPort x%x " 2029 "with no ndlp. Data: x%x x%x x%x\n", 2030 did, ulp_status, ulp_word4, iotag); 2031 goto out_freeiocb; 2032 } 2033 2034 /* Since ndlp can be freed in the disc state machine, note if this node 2035 * is being used during discovery. 2036 */ 2037 spin_lock_irq(&ndlp->lock); 2038 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2039 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2040 spin_unlock_irq(&ndlp->lock); 2041 2042 /* PLOGI completes to NPort <nlp_DID> */ 2043 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2044 "0102 PLOGI completes to NPort x%06x " 2045 "IoTag x%x Data: x%x x%x x%x x%x x%x\n", 2046 ndlp->nlp_DID, iotag, 2047 ndlp->nlp_fc4_type, 2048 ulp_status, ulp_word4, 2049 disc, vport->num_disc_nodes); 2050 2051 /* Check to see if link went down during discovery */ 2052 if (lpfc_els_chk_latt(vport)) { 2053 spin_lock_irq(&ndlp->lock); 2054 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2055 spin_unlock_irq(&ndlp->lock); 2056 goto out; 2057 } 2058 2059 if (ulp_status) { 2060 /* Check for retry */ 2061 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2062 /* ELS command is being retried */ 2063 if (disc) { 2064 spin_lock_irq(&ndlp->lock); 2065 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2066 spin_unlock_irq(&ndlp->lock); 2067 } 2068 goto out; 2069 } 2070 /* PLOGI failed Don't print the vport to vport rjts */ 2071 if (ulp_status != IOSTAT_LS_RJT || 2072 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) && 2073 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) || 2074 (phba)->pport->cfg_log_verbose & LOG_ELS) 2075 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2076 "2753 PLOGI failure DID:%06X " 2077 "Status:x%x/x%x\n", 2078 ndlp->nlp_DID, ulp_status, 2079 ulp_word4); 2080 2081 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2082 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2083 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2084 NLP_EVT_CMPL_PLOGI); 2085 2086 /* If a PLOGI collision occurred, the node needs to continue 2087 * with the reglogin process. 2088 */ 2089 spin_lock_irq(&ndlp->lock); 2090 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2091 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2092 spin_unlock_irq(&ndlp->lock); 2093 goto out; 2094 } 2095 2096 /* No PLOGI collision and the node is not registered with the 2097 * scsi or nvme transport. It is no longer an active node. Just 2098 * start the device remove process. 
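 * The release decision is made while holding ndlp->lock and only recorded
 * in a local flag; the NLP_EVT_DEVICE_RM event itself is posted after the
 * lock is dropped. The same lock-then-post pattern is used in the PRLI,
 * ADISC and LOGO completions later in this file.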
2099 */ 2100 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2101 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2102 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2103 release_node = true; 2104 } 2105 spin_unlock_irq(&ndlp->lock); 2106 2107 if (release_node) 2108 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2109 NLP_EVT_DEVICE_RM); 2110 } else { 2111 /* Good status, call state machine */ 2112 prsp = list_get_first(&cmdiocb->cmd_dmabuf->list, 2113 struct lpfc_dmabuf, list); 2114 if (!prsp) 2115 goto out; 2116 if (!lpfc_is_els_acc_rsp(prsp)) 2117 goto out; 2118 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2119 2120 sp = (struct serv_parm *)((u8 *)prsp->virt + 2121 sizeof(u32)); 2122 2123 ndlp->vmid_support = 0; 2124 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2125 (phba->cfg_vmid_priority_tagging && 2126 sp->cmn.priority_tagging)) { 2127 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2128 "4018 app_hdr_support %d tagging %d DID x%x\n", 2129 sp->cmn.app_hdr_support, 2130 sp->cmn.priority_tagging, 2131 ndlp->nlp_DID); 2132 /* if the dest port supports VMID, mark it in ndlp */ 2133 ndlp->vmid_support = 1; 2134 } 2135 2136 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2137 NLP_EVT_CMPL_PLOGI); 2138 } 2139 2140 if (disc && vport->num_disc_nodes) { 2141 /* Check to see if there are more PLOGIs to be sent */ 2142 lpfc_more_plogi(vport); 2143 2144 if (vport->num_disc_nodes == 0) { 2145 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 2146 2147 lpfc_can_disctmo(vport); 2148 lpfc_end_rscn(vport); 2149 } 2150 } 2151 2152 out: 2153 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2154 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2155 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2156 2157 out_freeiocb: 2158 /* Release the reference on the original I/O request. */ 2159 free_ndlp = cmdiocb->ndlp; 2160 2161 lpfc_els_free_iocb(phba, cmdiocb); 2162 lpfc_nlp_put(free_ndlp); 2163 return; 2164 } 2165 2166 /** 2167 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2168 * @vport: pointer to a host virtual N_Port data structure. 2169 * @did: destination port identifier. 2170 * @retry: number of retries to the command IOCB. 2171 * 2172 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2173 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2174 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2175 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2176 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2177 * 2178 * Note that the ndlp reference count will be incremented by 1 for holding 2179 * the ndlp and the reference to ndlp will be stored into the ndlp field 2180 * of the IOCB for the completion callback function to the PLOGI ELS command. 2181 * 2182 * Return code 2183 * 0 - Successfully issued a plogi for @vport 2184 * 1 - failed to issue a plogi for @vport 2185 **/ 2186 int 2187 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2188 { 2189 struct lpfc_hba *phba = vport->phba; 2190 struct serv_parm *sp; 2191 struct lpfc_nodelist *ndlp; 2192 struct lpfc_iocbq *elsiocb; 2193 uint8_t *pcmd; 2194 uint16_t cmdsize; 2195 int ret; 2196 2197 ndlp = lpfc_findnode_did(vport, did); 2198 if (!ndlp) 2199 return 1; 2200 2201 /* Defer the processing of the issue PLOGI until after the 2202 * outstanding UNREG_RPI mbox command completes, unless we 2203 * are going offline. 
This logic does not apply for Fabric DIDs 2204 */ 2205 if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && 2206 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2207 !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { 2208 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2209 "4110 Issue PLOGI x%x deferred " 2210 "on NPort x%x rpi x%x flg x%x Data:" 2211 " x%px\n", 2212 ndlp->nlp_defer_did, ndlp->nlp_DID, 2213 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); 2214 2215 /* We can only defer 1st PLOGI */ 2216 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2217 ndlp->nlp_defer_did = did; 2218 return 0; 2219 } 2220 2221 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2222 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2223 ELS_CMD_PLOGI); 2224 if (!elsiocb) 2225 return 1; 2226 2227 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2228 2229 /* For PLOGI request, remainder of payload is service parameters */ 2230 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2231 pcmd += sizeof(uint32_t); 2232 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2233 sp = (struct serv_parm *) pcmd; 2234 2235 /* 2236 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2237 * to device on remote loops work. 2238 */ 2239 if (test_bit(FC_FABRIC, &vport->fc_flag) && 2240 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag)) 2241 sp->cmn.altBbCredit = 1; 2242 2243 if (sp->cmn.fcphLow < FC_PH_4_3) 2244 sp->cmn.fcphLow = FC_PH_4_3; 2245 2246 if (sp->cmn.fcphHigh < FC_PH3) 2247 sp->cmn.fcphHigh = FC_PH3; 2248 2249 sp->cmn.valid_vendor_ver_level = 0; 2250 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2251 sp->cmn.bbRcvSizeMsb &= 0xF; 2252 2253 /* Check if the destination port supports VMID */ 2254 ndlp->vmid_support = 0; 2255 if (vport->vmid_priority_tagging) 2256 sp->cmn.priority_tagging = 1; 2257 else if (phba->cfg_vmid_app_header && 2258 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2259 sp->cmn.app_hdr_support = 1; 2260 2261 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2262 "Issue PLOGI: did:x%x", 2263 did, 0, 0); 2264 2265 /* If our firmware supports this feature, convey that 2266 * information to the target using the vendor specific field. 2267 */ 2268 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2269 sp->cmn.valid_vendor_ver_level = 1; 2270 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2271 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2272 } 2273 2274 phba->fc_stat.elsXmitPLOGI++; 2275 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi; 2276 2277 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2278 "Issue PLOGI: did:x%x refcnt %d", 2279 did, kref_read(&ndlp->kref), 0); 2280 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2281 if (!elsiocb->ndlp) { 2282 lpfc_els_free_iocb(phba, elsiocb); 2283 return 1; 2284 } 2285 2286 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2287 if (ret) { 2288 lpfc_els_free_iocb(phba, elsiocb); 2289 lpfc_nlp_put(ndlp); 2290 return 1; 2291 } 2292 2293 return 0; 2294 } 2295 2296 /** 2297 * lpfc_cmpl_els_prli - Completion callback function for prli 2298 * @phba: pointer to lpfc hba data structure. 2299 * @cmdiocb: pointer to lpfc command iocb data structure. 2300 * @rspiocb: pointer to lpfc response iocb data structure. 2301 * 2302 * This routine is the completion callback function for a Process Login 2303 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2304 * status. If there is error status reported, PRLI retry shall be attempted 2305 * by invoking the lpfc_els_retry() routine. 
Otherwise, the state 2306 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2307 * ndlp to mark the PRLI completion. 2308 **/ 2309 static void 2310 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2311 struct lpfc_iocbq *rspiocb) 2312 { 2313 struct lpfc_vport *vport = cmdiocb->vport; 2314 struct lpfc_nodelist *ndlp; 2315 char *mode; 2316 u32 loglevel; 2317 u32 ulp_status; 2318 u32 ulp_word4; 2319 bool release_node = false; 2320 2321 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2322 cmdiocb->rsp_iocb = rspiocb; 2323 2324 ndlp = cmdiocb->ndlp; 2325 2326 ulp_status = get_job_ulpstatus(phba, rspiocb); 2327 ulp_word4 = get_job_word4(phba, rspiocb); 2328 2329 spin_lock_irq(&ndlp->lock); 2330 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2331 2332 /* Driver supports multiple FC4 types. Counters matter. */ 2333 vport->fc_prli_sent--; 2334 ndlp->fc4_prli_sent--; 2335 spin_unlock_irq(&ndlp->lock); 2336 2337 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2338 "PRLI cmpl: status:x%x/x%x did:x%x", 2339 ulp_status, ulp_word4, 2340 ndlp->nlp_DID); 2341 2342 /* PRLI completes to NPort <nlp_DID> */ 2343 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2344 "0103 PRLI completes to NPort x%06x " 2345 "Data: x%x x%x x%x x%x x%x\n", 2346 ndlp->nlp_DID, ulp_status, ulp_word4, 2347 vport->num_disc_nodes, ndlp->fc4_prli_sent, 2348 ndlp->fc4_xpt_flags); 2349 2350 /* Check to see if link went down during discovery */ 2351 if (lpfc_els_chk_latt(vport)) 2352 goto out; 2353 2354 if (ulp_status) { 2355 /* Check for retry */ 2356 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2357 /* ELS command is being retried */ 2358 goto out; 2359 } 2360 2361 /* If we don't send GFT_ID to Fabric, a PRLI error 2362 * could be expected. 2363 */ 2364 if (test_bit(FC_FABRIC, &vport->fc_flag) || 2365 vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) { 2366 mode = KERN_ERR; 2367 loglevel = LOG_TRACE_EVENT; 2368 } else { 2369 mode = KERN_INFO; 2370 loglevel = LOG_ELS; 2371 } 2372 2373 /* PRLI failed */ 2374 lpfc_printf_vlog(vport, mode, loglevel, 2375 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2376 "data: x%x x%x x%x\n", 2377 ndlp->nlp_DID, ulp_status, 2378 ulp_word4, ndlp->nlp_state, 2379 ndlp->fc4_prli_sent, ndlp->nlp_flag); 2380 2381 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2382 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 2383 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2384 NLP_EVT_CMPL_PRLI); 2385 2386 /* The following condition catches an inflight transition 2387 * mismatch typically caused by an RSCN. Skip any 2388 * processing to allow recovery. 2389 */ 2390 if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 2391 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || 2392 (ndlp->nlp_state == NLP_STE_NPR_NODE && 2393 ndlp->nlp_flag & NLP_DELAY_TMO)) { 2394 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 2395 "2784 PRLI cmpl: Allow Node recovery " 2396 "DID x%06x nstate x%x nflag x%x\n", 2397 ndlp->nlp_DID, ndlp->nlp_state, 2398 ndlp->nlp_flag); 2399 goto out; 2400 } 2401 2402 /* 2403 * For P2P topology, retain the node so that PLOGI can be 2404 * attempted on it again. 2405 */ 2406 if (test_bit(FC_PT2PT, &vport->fc_flag)) 2407 goto out; 2408 2409 /* As long as this node is not registered with the SCSI 2410 * or NVMe transport and no other PRLIs are outstanding, 2411 * it is no longer an active node. Otherwise devloss 2412 * handles the final cleanup. 
2413 */ 2414 spin_lock_irq(&ndlp->lock); 2415 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2416 !ndlp->fc4_prli_sent) { 2417 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2418 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2419 release_node = true; 2420 } 2421 spin_unlock_irq(&ndlp->lock); 2422 2423 if (release_node) 2424 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2425 NLP_EVT_DEVICE_RM); 2426 } else { 2427 /* Good status, call state machine. However, if another 2428 * PRLI is outstanding, don't call the state machine 2429 * because final disposition to Mapped or Unmapped is 2430 * completed there. 2431 */ 2432 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2433 NLP_EVT_CMPL_PRLI); 2434 } 2435 2436 out: 2437 lpfc_els_free_iocb(phba, cmdiocb); 2438 lpfc_nlp_put(ndlp); 2439 return; 2440 } 2441 2442 /** 2443 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2444 * @vport: pointer to a host virtual N_Port data structure. 2445 * @ndlp: pointer to a node-list data structure. 2446 * @retry: number of retries to the command IOCB. 2447 * 2448 * This routine issues a Process Login (PRLI) ELS command for the 2449 * @vport. The PRLI service parameters are set up in the payload of the 2450 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2451 * is put to the IOCB completion callback func field before invoking the 2452 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2453 * 2454 * Note that the ndlp reference count will be incremented by 1 for holding the 2455 * ndlp and the reference to ndlp will be stored into the ndlp field of 2456 * the IOCB for the completion callback function to the PRLI ELS command. 2457 * 2458 * Return code 2459 * 0 - successfully issued prli iocb command for @vport 2460 * 1 - failed to issue prli iocb command for @vport 2461 **/ 2462 int 2463 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2464 uint8_t retry) 2465 { 2466 int rc = 0; 2467 struct lpfc_hba *phba = vport->phba; 2468 PRLI *npr; 2469 struct lpfc_nvme_prli *npr_nvme; 2470 struct lpfc_iocbq *elsiocb; 2471 uint8_t *pcmd; 2472 uint16_t cmdsize; 2473 u32 local_nlp_type, elscmd; 2474 2475 /* 2476 * If we are in RSCN mode, the FC4 types supported from a 2477 * previous GFT_ID command may not be accurate. So, if we 2478 * are a NVME Initiator, always look for the possibility of 2479 * the remote NPort beng a NVME Target. 2480 */ 2481 if (phba->sli_rev == LPFC_SLI_REV4 && 2482 test_bit(FC_RSCN_MODE, &vport->fc_flag) && 2483 vport->nvmei_support) 2484 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2485 local_nlp_type = ndlp->nlp_fc4_type; 2486 2487 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2488 * fields here before any of them can complete. 2489 */ 2490 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2491 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2492 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2493 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2494 ndlp->nvme_fb_size = 0; 2495 2496 send_next_prli: 2497 if (local_nlp_type & NLP_FC4_FCP) { 2498 /* Payload is 4 + 16 = 20 x14 bytes. */ 2499 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2500 elscmd = ELS_CMD_PRLI; 2501 } else if (local_nlp_type & NLP_FC4_NVME) { 2502 /* Payload is 4 + 20 = 24 x18 bytes. 
*/ 2503 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2504 elscmd = ELS_CMD_NVMEPRLI; 2505 } else { 2506 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2507 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2508 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2509 return 1; 2510 } 2511 2512 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2513 * FC4 type, implicitly LOGO. 2514 */ 2515 if (phba->sli_rev == LPFC_SLI_REV3 && 2516 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2517 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2518 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2519 ndlp->nlp_type); 2520 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2521 return 1; 2522 } 2523 2524 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2525 ndlp->nlp_DID, elscmd); 2526 if (!elsiocb) 2527 return 1; 2528 2529 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 2530 2531 /* For PRLI request, remainder of payload is service parameters */ 2532 memset(pcmd, 0, cmdsize); 2533 2534 if (local_nlp_type & NLP_FC4_FCP) { 2535 /* Remainder of payload is FCP PRLI parameter page. 2536 * Note: this data structure is defined as 2537 * BE/LE in the structure definition so no 2538 * byte swap call is made. 2539 */ 2540 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2541 pcmd += sizeof(uint32_t); 2542 npr = (PRLI *)pcmd; 2543 2544 /* 2545 * If our firmware version is 3.20 or later, 2546 * set the following bits for FC-TAPE support. 2547 */ 2548 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2549 npr->ConfmComplAllowed = 1; 2550 npr->Retry = 1; 2551 npr->TaskRetryIdReq = 1; 2552 } 2553 npr->estabImagePair = 1; 2554 npr->readXferRdyDis = 1; 2555 if (vport->cfg_first_burst_size) 2556 npr->writeXferRdyDis = 1; 2557 2558 /* For FCP support */ 2559 npr->prliType = PRLI_FCP_TYPE; 2560 npr->initiatorFunc = 1; 2561 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ; 2562 2563 /* Remove FCP type - processed. */ 2564 local_nlp_type &= ~NLP_FC4_FCP; 2565 } else if (local_nlp_type & NLP_FC4_NVME) { 2566 /* Remainder of payload is NVME PRLI parameter page. 2567 * This data structure is the newer definition that 2568 * uses bf macros so a byte swap is required. 2569 */ 2570 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2571 pcmd += sizeof(uint32_t); 2572 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2573 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2574 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2575 if (phba->nsler) { 2576 bf_set(prli_nsler, npr_nvme, 1); 2577 bf_set(prli_conf, npr_nvme, 1); 2578 } 2579 2580 /* Only initiators request first burst. */ 2581 if ((phba->cfg_nvme_enable_fb) && 2582 !phba->nvmet_support) 2583 bf_set(prli_fba, npr_nvme, 1); 2584 2585 if (phba->nvmet_support) { 2586 bf_set(prli_tgt, npr_nvme, 1); 2587 bf_set(prli_disc, npr_nvme, 1); 2588 } else { 2589 bf_set(prli_init, npr_nvme, 1); 2590 bf_set(prli_conf, npr_nvme, 1); 2591 } 2592 2593 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2594 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2595 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ; 2596 2597 /* Remove NVME type - processed. 
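 * The statement below clears the FC4 type just handled from
 * local_nlp_type. On an SLI4 port that supports both FC4 types the code
 * further down loops back to send_next_prli while a type remains, so a
 * single call can emit two requests (sketch of the two passes, mirroring
 * the masks used above):
 *
 *	pass 1: NLP_FC4_FCP  -> ELS_CMD_PRLI     (FCP parameter page)
 *	pass 2: NLP_FC4_NVME -> ELS_CMD_NVMEPRLI (NVMe parameter page)
 *
 * with ndlp->fc4_prli_sent counting the outstanding PRLIs so that the
 * completion handler knows when the last one has finished.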
*/ 2598 local_nlp_type &= ~NLP_FC4_NVME; 2599 } 2600 2601 phba->fc_stat.elsXmitPRLI++; 2602 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli; 2603 2604 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2605 "Issue PRLI: did:x%x refcnt %d", 2606 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2607 elsiocb->ndlp = lpfc_nlp_get(ndlp); 2608 if (!elsiocb->ndlp) { 2609 lpfc_els_free_iocb(phba, elsiocb); 2610 return 1; 2611 } 2612 2613 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2614 if (rc == IOCB_ERROR) { 2615 lpfc_els_free_iocb(phba, elsiocb); 2616 lpfc_nlp_put(ndlp); 2617 return 1; 2618 } 2619 2620 /* The vport counters are used for lpfc_scan_finished, but 2621 * the ndlp is used to track outstanding PRLIs for different 2622 * FC4 types. 2623 */ 2624 spin_lock_irq(&ndlp->lock); 2625 ndlp->nlp_flag |= NLP_PRLI_SND; 2626 vport->fc_prli_sent++; 2627 ndlp->fc4_prli_sent++; 2628 spin_unlock_irq(&ndlp->lock); 2629 2630 /* The driver supports 2 FC4 types. Make sure 2631 * a PRLI is issued for all types before exiting. 2632 */ 2633 if (phba->sli_rev == LPFC_SLI_REV4 && 2634 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2635 goto send_next_prli; 2636 else 2637 return 0; 2638 } 2639 2640 /** 2641 * lpfc_rscn_disc - Perform rscn discovery for a vport 2642 * @vport: pointer to a host virtual N_Port data structure. 2643 * 2644 * This routine performs Registration State Change Notification (RSCN) 2645 * discovery for a @vport. If the @vport's node port recovery count is not 2646 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2647 * the nodes that need recovery. If none of the PLOGI were needed through 2648 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2649 * invoked to check and handle possible more RSCN came in during the period 2650 * of processing the current ones. 2651 **/ 2652 static void 2653 lpfc_rscn_disc(struct lpfc_vport *vport) 2654 { 2655 lpfc_can_disctmo(vport); 2656 2657 /* RSCN discovery */ 2658 /* go thru NPR nodes and issue ELS PLOGIs */ 2659 if (atomic_read(&vport->fc_npr_cnt)) 2660 if (lpfc_els_disc_plogi(vport)) 2661 return; 2662 2663 lpfc_end_rscn(vport); 2664 } 2665 2666 /** 2667 * lpfc_adisc_done - Complete the adisc phase of discovery 2668 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2669 * 2670 * This function is called when the final ADISC is completed during discovery. 2671 * This function handles clearing link attention or issuing reg_vpi depending 2672 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2673 * discovery. 2674 * This function is called with no locks held. 2675 **/ 2676 static void 2677 lpfc_adisc_done(struct lpfc_vport *vport) 2678 { 2679 struct lpfc_hba *phba = vport->phba; 2680 2681 /* 2682 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2683 * and continue discovery. 2684 */ 2685 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2686 !test_bit(FC_RSCN_MODE, &vport->fc_flag) && 2687 (phba->sli_rev < LPFC_SLI_REV4)) { 2688 2689 /* 2690 * If link is down, clear_la and reg_vpi will be done after 2691 * flogi following a link up event 2692 */ 2693 if (!lpfc_is_link_up(phba)) 2694 return; 2695 2696 /* The ADISCs are complete. Doesn't matter if they 2697 * succeeded or failed because the ADISC completion 2698 * routine guarantees to call the state machine and 2699 * the RPI is either unregistered (failed ADISC response) 2700 * or the RPI is still valid and the node is marked 2701 * mapped for a target. 
The exchanges should be in the 2702 * correct state. This code is specific to SLI3. 2703 */ 2704 lpfc_issue_clear_la(phba, vport); 2705 lpfc_issue_reg_vpi(phba, vport); 2706 return; 2707 } 2708 /* 2709 * For SLI2, we need to set port_state to READY 2710 * and continue discovery. 2711 */ 2712 if (vport->port_state < LPFC_VPORT_READY) { 2713 /* If we get here, there is nothing to ADISC */ 2714 lpfc_issue_clear_la(phba, vport); 2715 if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) { 2716 vport->num_disc_nodes = 0; 2717 /* go thru NPR list, issue ELS PLOGIs */ 2718 if (atomic_read(&vport->fc_npr_cnt)) 2719 lpfc_els_disc_plogi(vport); 2720 if (!vport->num_disc_nodes) { 2721 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 2722 lpfc_can_disctmo(vport); 2723 lpfc_end_rscn(vport); 2724 } 2725 } 2726 vport->port_state = LPFC_VPORT_READY; 2727 } else 2728 lpfc_rscn_disc(vport); 2729 } 2730 2731 /** 2732 * lpfc_more_adisc - Issue more adisc as needed 2733 * @vport: pointer to a host virtual N_Port data structure. 2734 * 2735 * This routine determines whether there are more ndlps on a @vport 2736 * node list need to have Address Discover (ADISC) issued. If so, it will 2737 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2738 * remaining nodes which need to have ADISC sent. 2739 **/ 2740 void 2741 lpfc_more_adisc(struct lpfc_vport *vport) 2742 { 2743 if (vport->num_disc_nodes) 2744 vport->num_disc_nodes--; 2745 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2746 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2747 "0210 Continue discovery with %d ADISCs to go " 2748 "Data: x%x x%lx x%x\n", 2749 vport->num_disc_nodes, 2750 atomic_read(&vport->fc_adisc_cnt), 2751 vport->fc_flag, vport->port_state); 2752 /* Check to see if there are more ADISCs to be sent */ 2753 if (test_bit(FC_NLP_MORE, &vport->fc_flag)) { 2754 lpfc_set_disctmo(vport); 2755 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2756 lpfc_els_disc_adisc(vport); 2757 } 2758 if (!vport->num_disc_nodes) 2759 lpfc_adisc_done(vport); 2760 return; 2761 } 2762 2763 /** 2764 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2765 * @phba: pointer to lpfc hba data structure. 2766 * @cmdiocb: pointer to lpfc command iocb data structure. 2767 * @rspiocb: pointer to lpfc response iocb data structure. 2768 * 2769 * This routine is the completion function for issuing the Address Discover 2770 * (ADISC) command. It first checks to see whether link went down during 2771 * the discovery process. If so, the node will be marked as node port 2772 * recovery for issuing discover IOCB by the link attention handler and 2773 * exit. Otherwise, the response status is checked. If error was reported 2774 * in the response status, the ADISC command shall be retried by invoking 2775 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2776 * the response status, the state machine is invoked to set transition 2777 * with respect to NLP_EVT_CMPL_ADISC event. 
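 *
 * A condensed sketch of the decision flow below (orientation only; the
 * function body is authoritative):
 *
 *	if (lpfc_els_chk_latt(vport))
 *		goto out;	(link bounced, rediscover later)
 *	if (ulp_status && lpfc_els_retry(phba, cmdiocb, rspiocb))
 *		goto out;	(retry is in progress)
 *	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC);
 *	if (disc && vport->num_disc_nodes)
 *		lpfc_more_adisc(vport);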
2778 **/ 2779 static void 2780 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2781 struct lpfc_iocbq *rspiocb) 2782 { 2783 struct lpfc_vport *vport = cmdiocb->vport; 2784 IOCB_t *irsp; 2785 struct lpfc_nodelist *ndlp; 2786 int disc; 2787 u32 ulp_status, ulp_word4, tmo, iotag; 2788 bool release_node = false; 2789 2790 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2791 cmdiocb->rsp_iocb = rspiocb; 2792 2793 ndlp = cmdiocb->ndlp; 2794 2795 ulp_status = get_job_ulpstatus(phba, rspiocb); 2796 ulp_word4 = get_job_word4(phba, rspiocb); 2797 2798 if (phba->sli_rev == LPFC_SLI_REV4) { 2799 tmo = get_wqe_tmo(cmdiocb); 2800 iotag = get_wqe_reqtag(cmdiocb); 2801 } else { 2802 irsp = &rspiocb->iocb; 2803 tmo = irsp->ulpTimeout; 2804 iotag = irsp->ulpIoTag; 2805 } 2806 2807 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2808 "ADISC cmpl: status:x%x/x%x did:x%x", 2809 ulp_status, ulp_word4, 2810 ndlp->nlp_DID); 2811 2812 /* Since ndlp can be freed in the disc state machine, note if this node 2813 * is being used during discovery. 2814 */ 2815 spin_lock_irq(&ndlp->lock); 2816 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2817 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2818 spin_unlock_irq(&ndlp->lock); 2819 /* ADISC completes to NPort <nlp_DID> */ 2820 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2821 "0104 ADISC completes to NPort x%x " 2822 "IoTag x%x Data: x%x x%x x%x x%x x%x\n", 2823 ndlp->nlp_DID, iotag, 2824 ulp_status, ulp_word4, 2825 tmo, disc, vport->num_disc_nodes); 2826 2827 /* Check to see if link went down during discovery */ 2828 if (lpfc_els_chk_latt(vport)) { 2829 spin_lock_irq(&ndlp->lock); 2830 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2831 spin_unlock_irq(&ndlp->lock); 2832 goto out; 2833 } 2834 2835 if (ulp_status) { 2836 /* Check for retry */ 2837 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2838 /* ELS command is being retried */ 2839 if (disc) { 2840 spin_lock_irq(&ndlp->lock); 2841 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2842 spin_unlock_irq(&ndlp->lock); 2843 lpfc_set_disctmo(vport); 2844 } 2845 goto out; 2846 } 2847 /* ADISC failed */ 2848 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2849 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2850 ndlp->nlp_DID, ulp_status, 2851 ulp_word4); 2852 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2853 NLP_EVT_CMPL_ADISC); 2854 2855 /* As long as this node is not registered with the SCSI or NVMe 2856 * transport, it is no longer an active node. Otherwise 2857 * devloss handles the final cleanup. 2858 */ 2859 spin_lock_irq(&ndlp->lock); 2860 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2861 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2862 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) 2863 release_node = true; 2864 } 2865 spin_unlock_irq(&ndlp->lock); 2866 2867 if (release_node) 2868 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2869 NLP_EVT_DEVICE_RM); 2870 } else 2871 /* Good status, call state machine */ 2872 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2873 NLP_EVT_CMPL_ADISC); 2874 2875 /* Check to see if there are more ADISCs to be sent */ 2876 if (disc && vport->num_disc_nodes) 2877 lpfc_more_adisc(vport); 2878 out: 2879 lpfc_els_free_iocb(phba, cmdiocb); 2880 lpfc_nlp_put(ndlp); 2881 return; 2882 } 2883 2884 /** 2885 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2886 * @vport: pointer to a virtual N_Port data structure. 2887 * @ndlp: pointer to a node-list data structure. 2888 * @retry: number of retries to the command IOCB. 
2889 *
2890 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2891 * @vport. It prepares the payload of the ADISC ELS command, updates the
2892 * nlp_flag and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2893 * to issue the ADISC ELS command.
2894 *
2895 * Note that the ndlp reference count will be incremented by 1 for holding the
2896 * ndlp and the reference to ndlp will be stored into the ndlp field of
2897 * the IOCB for the completion callback function to the ADISC ELS command.
2898 *
2899 * Return code
2900 * 0 - successfully issued adisc
2901 * 1 - failed to issue adisc
2902 **/
2903 int
2904 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2905 uint8_t retry)
2906 {
2907 int rc = 0;
2908 struct lpfc_hba *phba = vport->phba;
2909 ADISC *ap;
2910 struct lpfc_iocbq *elsiocb;
2911 uint8_t *pcmd;
2912 uint16_t cmdsize;
2913
2914 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2915 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2916 ndlp->nlp_DID, ELS_CMD_ADISC);
2917 if (!elsiocb)
2918 return 1;
2919
2920 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2921
2922 /* For ADISC request, remainder of payload is service parameters */
2923 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2924 pcmd += sizeof(uint32_t);
2925
2926 /* Fill in ADISC payload */
2927 ap = (ADISC *) pcmd;
2928 ap->hardAL_PA = phba->fc_pref_ALPA;
2929 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2930 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2931 ap->DID = be32_to_cpu(vport->fc_myDID);
2932
2933 phba->fc_stat.elsXmitADISC++;
2934 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
2935 spin_lock_irq(&ndlp->lock);
2936 ndlp->nlp_flag |= NLP_ADISC_SND;
2937 spin_unlock_irq(&ndlp->lock);
2938 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2939 if (!elsiocb->ndlp) {
2940 lpfc_els_free_iocb(phba, elsiocb);
2941 goto err;
2942 }
2943
2944 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2945 "Issue ADISC: did:x%x refcnt %d",
2946 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2947
2948 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2949 if (rc == IOCB_ERROR) {
2950 lpfc_els_free_iocb(phba, elsiocb);
2951 lpfc_nlp_put(ndlp);
2952 goto err;
2953 }
2954
2955 return 0;
2956
2957 err:
2958 spin_lock_irq(&ndlp->lock);
2959 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2960 spin_unlock_irq(&ndlp->lock);
2961 return 1;
2962 }
2963
2964 /**
2965 * lpfc_cmpl_els_logo - Completion callback function for logo
2966 * @phba: pointer to lpfc hba data structure.
2967 * @cmdiocb: pointer to lpfc command iocb data structure.
2968 * @rspiocb: pointer to lpfc response iocb data structure.
2969 *
2970 * This routine is the completion function for issuing the ELS Logout (LOGO)
2971 * command. If no error status was reported from the LOGO response, the
2972 * state machine of the associated ndlp shall be invoked for transition with
2973 * respect to the NLP_EVT_CMPL_LOGO event.
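 *
 * A LOGO is never retried here; if it fails, recovery is left to a later
 * PLOGI. A caller that must block until the LOGO completes can use the
 * NLP_WAIT_FOR_LOGO / logo_waitq hooks consumed by this routine. A minimal
 * sketch, assuming the caller supplies its own wait queue and timeout
 * (illustration only, not a fixed driver interface):
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *
 *	ndlp->logo_waitq = &wq;
 *	spin_lock_irq(&ndlp->lock);
 *	ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
 *	spin_unlock_irq(&ndlp->lock);
 *	lpfc_issue_els_logo(vport, ndlp, 0);
 *	wait_event_timeout(wq, !(ndlp->save_flags & NLP_WAIT_FOR_LOGO),
 *			   msecs_to_jiffies(30 * 1000));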
2974 **/ 2975 static void 2976 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2977 struct lpfc_iocbq *rspiocb) 2978 { 2979 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 2980 struct lpfc_vport *vport = ndlp->vport; 2981 IOCB_t *irsp; 2982 unsigned long flags; 2983 uint32_t skip_recovery = 0; 2984 int wake_up_waiter = 0; 2985 u32 ulp_status; 2986 u32 ulp_word4; 2987 u32 tmo, iotag; 2988 2989 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2990 cmdiocb->rsp_iocb = rspiocb; 2991 2992 ulp_status = get_job_ulpstatus(phba, rspiocb); 2993 ulp_word4 = get_job_word4(phba, rspiocb); 2994 2995 if (phba->sli_rev == LPFC_SLI_REV4) { 2996 tmo = get_wqe_tmo(cmdiocb); 2997 iotag = get_wqe_reqtag(cmdiocb); 2998 } else { 2999 irsp = &rspiocb->iocb; 3000 tmo = irsp->ulpTimeout; 3001 iotag = irsp->ulpIoTag; 3002 } 3003 3004 spin_lock_irq(&ndlp->lock); 3005 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3006 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 3007 wake_up_waiter = 1; 3008 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 3009 } 3010 spin_unlock_irq(&ndlp->lock); 3011 3012 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3013 "LOGO cmpl: status:x%x/x%x did:x%x", 3014 ulp_status, ulp_word4, 3015 ndlp->nlp_DID); 3016 3017 /* LOGO completes to NPort <nlp_DID> */ 3018 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3019 "0105 LOGO completes to NPort x%x " 3020 "IoTag x%x refcnt %d nflags x%x xflags x%x " 3021 "Data: x%x x%x x%x x%x\n", 3022 ndlp->nlp_DID, iotag, 3023 kref_read(&ndlp->kref), ndlp->nlp_flag, 3024 ndlp->fc4_xpt_flags, ulp_status, ulp_word4, 3025 tmo, vport->num_disc_nodes); 3026 3027 if (lpfc_els_chk_latt(vport)) { 3028 skip_recovery = 1; 3029 goto out; 3030 } 3031 3032 /* The LOGO will not be retried on failure. A LOGO was 3033 * issued to the remote rport and a ACC or RJT or no Answer are 3034 * all acceptable. Note the failure and move forward with 3035 * discovery. The PLOGI will retry. 3036 */ 3037 if (ulp_status) { 3038 /* LOGO failed */ 3039 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3040 "2756 LOGO failure, No Retry DID:%06X " 3041 "Status:x%x/x%x\n", 3042 ndlp->nlp_DID, ulp_status, 3043 ulp_word4); 3044 3045 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) 3046 skip_recovery = 1; 3047 } 3048 3049 /* Call state machine. This will unregister the rpi if needed. */ 3050 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 3051 3052 if (skip_recovery) 3053 goto out; 3054 3055 /* The driver sets this flag for an NPIV instance that doesn't want to 3056 * log into the remote port. 3057 */ 3058 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 3059 spin_lock_irq(&ndlp->lock); 3060 if (phba->sli_rev == LPFC_SLI_REV4) 3061 ndlp->nlp_flag |= NLP_RELEASE_RPI; 3062 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3063 spin_unlock_irq(&ndlp->lock); 3064 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3065 NLP_EVT_DEVICE_RM); 3066 goto out_rsrc_free; 3067 } 3068 3069 out: 3070 /* At this point, the LOGO processing is complete. NOTE: For a 3071 * pt2pt topology, we are assuming the NPortID will only change 3072 * on link up processing. For a LOGO / PLOGI initiated by the 3073 * Initiator, we are assuming the NPortID is not going to change. 3074 */ 3075 3076 if (wake_up_waiter && ndlp->logo_waitq) 3077 wake_up(ndlp->logo_waitq); 3078 /* 3079 * If the node is a target, the handling attempts to recover the port. 3080 * For any other port type, the rpi is unregistered as an implicit 3081 * LOGO. 
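 * In short: when recovery is not being skipped, FCP/NVMe target nodes are
 * flagged NLP_NPR_2B_DISC and handed to lpfc_disc_start(), while any other
 * node that is not registered with either transport is sent
 * NLP_EVT_DEVICE_RM instead.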
3082 */
3083 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3084 skip_recovery == 0) {
3085 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3086 spin_lock_irqsave(&ndlp->lock, flags);
3087 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3088 spin_unlock_irqrestore(&ndlp->lock, flags);
3089
3090 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3091 "3187 LOGO completes to NPort x%x: Start "
3092 "Recovery Data: x%x x%x x%x x%x\n",
3093 ndlp->nlp_DID, ulp_status,
3094 ulp_word4, tmo,
3095 vport->num_disc_nodes);
3096
3097 lpfc_els_free_iocb(phba, cmdiocb);
3098 lpfc_nlp_put(ndlp);
3099
3100 lpfc_disc_start(vport);
3101 return;
3102 }
3103
3104 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3105 * driver sends a LOGO to the rport to cleanup. For fabric and
3106 * initiator ports, clean up the node as long as the node is not
3107 * registered with the transport.
3108 */
3109 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3110 spin_lock_irq(&ndlp->lock);
3111 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3112 spin_unlock_irq(&ndlp->lock);
3113 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3114 NLP_EVT_DEVICE_RM);
3115 }
3116 out_rsrc_free:
3117 /* Driver is done with the I/O. */
3118 lpfc_els_free_iocb(phba, cmdiocb);
3119 lpfc_nlp_put(ndlp);
3120 }
3121
3122 /**
3123 * lpfc_issue_els_logo - Issue a logo to a node on a vport
3124 * @vport: pointer to a virtual N_Port data structure.
3125 * @ndlp: pointer to a node-list data structure.
3126 * @retry: number of retries to the command IOCB.
3127 *
3128 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3129 * to a remote node, referred to by an @ndlp on a @vport. It constructs the
3130 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3131 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3132 *
3133 * Note that the ndlp reference count will be incremented by 1 for holding the
3134 * ndlp and the reference to ndlp will be stored into the ndlp field of
3135 * the IOCB for the completion callback function to the LOGO ELS command.
3136 * 3137 * Callers of this routine are expected to unregister the RPI first 3138 * 3139 * Return code 3140 * 0 - successfully issued logo 3141 * 1 - failed to issue logo 3142 **/ 3143 int 3144 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3145 uint8_t retry) 3146 { 3147 struct lpfc_hba *phba = vport->phba; 3148 struct lpfc_iocbq *elsiocb; 3149 uint8_t *pcmd; 3150 uint16_t cmdsize; 3151 int rc; 3152 3153 spin_lock_irq(&ndlp->lock); 3154 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3155 spin_unlock_irq(&ndlp->lock); 3156 return 0; 3157 } 3158 spin_unlock_irq(&ndlp->lock); 3159 3160 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3161 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3162 ndlp->nlp_DID, ELS_CMD_LOGO); 3163 if (!elsiocb) 3164 return 1; 3165 3166 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3167 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3168 pcmd += sizeof(uint32_t); 3169 3170 /* Fill in LOGO payload */ 3171 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3172 pcmd += sizeof(uint32_t); 3173 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3174 3175 phba->fc_stat.elsXmitLOGO++; 3176 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; 3177 spin_lock_irq(&ndlp->lock); 3178 ndlp->nlp_flag |= NLP_LOGO_SND; 3179 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3180 spin_unlock_irq(&ndlp->lock); 3181 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3182 if (!elsiocb->ndlp) { 3183 lpfc_els_free_iocb(phba, elsiocb); 3184 goto err; 3185 } 3186 3187 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3188 "Issue LOGO: did:x%x refcnt %d", 3189 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3190 3191 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3192 if (rc == IOCB_ERROR) { 3193 lpfc_els_free_iocb(phba, elsiocb); 3194 lpfc_nlp_put(ndlp); 3195 goto err; 3196 } 3197 3198 spin_lock_irq(&ndlp->lock); 3199 ndlp->nlp_prev_state = ndlp->nlp_state; 3200 spin_unlock_irq(&ndlp->lock); 3201 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3202 return 0; 3203 3204 err: 3205 spin_lock_irq(&ndlp->lock); 3206 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3207 spin_unlock_irq(&ndlp->lock); 3208 return 1; 3209 } 3210 3211 /** 3212 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3213 * @phba: pointer to lpfc hba data structure. 3214 * @cmdiocb: pointer to lpfc command iocb data structure. 3215 * @rspiocb: pointer to lpfc response iocb data structure. 3216 * 3217 * This routine is a generic completion callback function for ELS commands. 3218 * Specifically, it is the callback function which does not need to perform 3219 * any command specific operations. It is currently used by the ELS command 3220 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3221 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3222 * Other than certain debug loggings, this callback function simply invokes the 3223 * lpfc_els_chk_latt() routine to check whether link went down during the 3224 * discovery process. 
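 *
 * An issuing routine that needs no command-specific completion handling
 * simply wires this callback before sending, e.g. (sketch mirroring
 * lpfc_issue_els_rscn() later in this file):
 *
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	if (!elsiocb->ndlp) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);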
3225 **/ 3226 static void 3227 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3228 struct lpfc_iocbq *rspiocb) 3229 { 3230 struct lpfc_vport *vport = cmdiocb->vport; 3231 struct lpfc_nodelist *free_ndlp; 3232 IOCB_t *irsp; 3233 u32 ulp_status, ulp_word4, tmo, did, iotag; 3234 3235 ulp_status = get_job_ulpstatus(phba, rspiocb); 3236 ulp_word4 = get_job_word4(phba, rspiocb); 3237 did = get_job_els_rsp64_did(phba, cmdiocb); 3238 3239 if (phba->sli_rev == LPFC_SLI_REV4) { 3240 tmo = get_wqe_tmo(cmdiocb); 3241 iotag = get_wqe_reqtag(cmdiocb); 3242 } else { 3243 irsp = &rspiocb->iocb; 3244 tmo = irsp->ulpTimeout; 3245 iotag = irsp->ulpIoTag; 3246 } 3247 3248 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3249 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3250 ulp_status, ulp_word4, did); 3251 3252 /* ELS cmd tag <ulpIoTag> completes */ 3253 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3254 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3255 iotag, ulp_status, ulp_word4, tmo); 3256 3257 /* Check to see if link went down during discovery */ 3258 lpfc_els_chk_latt(vport); 3259 3260 free_ndlp = cmdiocb->ndlp; 3261 3262 lpfc_els_free_iocb(phba, cmdiocb); 3263 lpfc_nlp_put(free_ndlp); 3264 } 3265 3266 /** 3267 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3268 * @vport: pointer to lpfc_vport data structure. 3269 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3270 * 3271 * This routine registers the rpi assigned to the fabric controller 3272 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3273 * state triggering a registration with the SCSI transport. 3274 * 3275 * This routine is single out because the fabric controller node 3276 * does not receive a PLOGI. This routine is consumed by the 3277 * SCR and RDF ELS commands. Callers are expected to qualify 3278 * with SLI4 first. 3279 **/ 3280 static int 3281 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3282 { 3283 int rc = 0; 3284 struct lpfc_hba *phba = vport->phba; 3285 struct lpfc_nodelist *ns_ndlp; 3286 LPFC_MBOXQ_t *mbox; 3287 3288 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3289 return rc; 3290 3291 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3292 if (!ns_ndlp) 3293 return -ENODEV; 3294 3295 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3296 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3297 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3298 ns_ndlp->nlp_state); 3299 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3300 return -ENODEV; 3301 3302 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3303 if (!mbox) { 3304 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3305 "0936 %s: no memory for reg_login " 3306 "Data: x%x x%x x%x x%x\n", __func__, 3307 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3308 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3309 return -ENOMEM; 3310 } 3311 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3312 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3313 if (rc) { 3314 rc = -EACCES; 3315 goto out; 3316 } 3317 3318 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3319 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3320 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3321 if (!mbox->ctx_ndlp) { 3322 rc = -ENOMEM; 3323 goto out; 3324 } 3325 3326 mbox->vport = vport; 3327 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3328 if (rc == MBX_NOT_FINISHED) { 3329 rc = -ENODEV; 3330 lpfc_nlp_put(fc_ndlp); 3331 goto out; 3332 } 3333 /* Success path. Exit. 
*/ 3334 lpfc_nlp_set_state(vport, fc_ndlp, 3335 NLP_STE_REG_LOGIN_ISSUE); 3336 return 0; 3337 3338 out: 3339 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 3340 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3341 "0938 %s: failed to format reg_login " 3342 "Data: x%x x%x x%x x%x\n", __func__, 3343 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3344 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3345 return rc; 3346 } 3347 3348 /** 3349 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3350 * @phba: pointer to lpfc hba data structure. 3351 * @cmdiocb: pointer to lpfc command iocb data structure. 3352 * @rspiocb: pointer to lpfc response iocb data structure. 3353 * 3354 * This routine is a generic completion callback function for Discovery ELS cmd. 3355 * Currently used by the ELS command issuing routines for the ELS State Change 3356 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3357 * These commands will be retried once only for ELS timeout errors. 3358 **/ 3359 static void 3360 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3361 struct lpfc_iocbq *rspiocb) 3362 { 3363 struct lpfc_vport *vport = cmdiocb->vport; 3364 IOCB_t *irsp; 3365 struct lpfc_els_rdf_rsp *prdf; 3366 struct lpfc_dmabuf *pcmd, *prsp; 3367 u32 *pdata; 3368 u32 cmd; 3369 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 3370 u32 ulp_status, ulp_word4, tmo, did, iotag; 3371 3372 ulp_status = get_job_ulpstatus(phba, rspiocb); 3373 ulp_word4 = get_job_word4(phba, rspiocb); 3374 did = get_job_els_rsp64_did(phba, cmdiocb); 3375 3376 if (phba->sli_rev == LPFC_SLI_REV4) { 3377 tmo = get_wqe_tmo(cmdiocb); 3378 iotag = get_wqe_reqtag(cmdiocb); 3379 } else { 3380 irsp = &rspiocb->iocb; 3381 tmo = irsp->ulpTimeout; 3382 iotag = irsp->ulpIoTag; 3383 } 3384 3385 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3386 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3387 ulp_status, ulp_word4, did); 3388 3389 /* ELS cmd tag <ulpIoTag> completes */ 3390 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3391 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n", 3392 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry); 3393 3394 pcmd = cmdiocb->cmd_dmabuf; 3395 if (!pcmd) 3396 goto out; 3397 3398 pdata = (u32 *)pcmd->virt; 3399 if (!pdata) 3400 goto out; 3401 cmd = *pdata; 3402 3403 /* Only 1 retry for ELS Timeout only */ 3404 if (ulp_status == IOSTAT_LOCAL_REJECT && 3405 ((ulp_word4 & IOERR_PARAM_MASK) == 3406 IOERR_SEQUENCE_TIMEOUT)) { 3407 cmdiocb->retry++; 3408 if (cmdiocb->retry <= 1) { 3409 switch (cmd) { 3410 case ELS_CMD_SCR: 3411 lpfc_issue_els_scr(vport, cmdiocb->retry); 3412 break; 3413 case ELS_CMD_EDC: 3414 lpfc_issue_els_edc(vport, cmdiocb->retry); 3415 break; 3416 case ELS_CMD_RDF: 3417 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3418 break; 3419 } 3420 goto out; 3421 } 3422 phba->fc_stat.elsRetryExceeded++; 3423 } 3424 if (cmd == ELS_CMD_EDC) { 3425 /* must be called before checking uplStatus and returning */ 3426 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3427 return; 3428 } 3429 if (ulp_status) { 3430 /* ELS discovery cmd completes with error */ 3431 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 3432 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3433 ulp_status, ulp_word4); 3434 goto out; 3435 } 3436 3437 /* The RDF response doesn't have any impact on the running driver 3438 * but the notification descriptors are dumped here for support. 
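 * The loop below walks at most
 *	min(ELS_RDF_REG_TAG_CNT, be32_to_cpu(prdf->reg_d1.reg_desc.count))
 * registration descriptors, logging each granted tag together with the
 * currently registered congestion signal and FPIN settings.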
3439 */ 3440 if (cmd == ELS_CMD_RDF) { 3441 int i; 3442 3443 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3444 if (!prsp) 3445 goto out; 3446 3447 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3448 if (!prdf) 3449 goto out; 3450 if (!lpfc_is_els_acc_rsp(prsp)) 3451 goto out; 3452 3453 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3454 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3455 lpfc_printf_vlog(vport, KERN_INFO, 3456 LOG_ELS | LOG_CGN_MGMT, 3457 "4677 Fabric RDF Notification Grant " 3458 "Data: 0x%08x Reg: %x %x\n", 3459 be32_to_cpu( 3460 prdf->reg_d1.desc_tags[i]), 3461 phba->cgn_reg_signal, 3462 phba->cgn_reg_fpin); 3463 } 3464 3465 out: 3466 /* Check to see if link went down during discovery */ 3467 lpfc_els_chk_latt(vport); 3468 lpfc_els_free_iocb(phba, cmdiocb); 3469 lpfc_nlp_put(ndlp); 3470 return; 3471 } 3472 3473 /** 3474 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3475 * @vport: pointer to a host virtual N_Port data structure. 3476 * @retry: retry counter for the command IOCB. 3477 * 3478 * This routine issues a State Change Request (SCR) to a fabric node 3479 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3480 * first search the @vport node list to find the matching ndlp. If no such 3481 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3482 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3483 * routine is invoked to send the SCR IOCB. 3484 * 3485 * Note that the ndlp reference count will be incremented by 1 for holding the 3486 * ndlp and the reference to ndlp will be stored into the ndlp field of 3487 * the IOCB for the completion callback function to the SCR ELS command. 3488 * 3489 * Return code 3490 * 0 - Successfully issued scr command 3491 * 1 - Failed to issue scr command 3492 **/ 3493 int 3494 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3495 { 3496 int rc = 0; 3497 struct lpfc_hba *phba = vport->phba; 3498 struct lpfc_iocbq *elsiocb; 3499 uint8_t *pcmd; 3500 uint16_t cmdsize; 3501 struct lpfc_nodelist *ndlp; 3502 3503 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3504 3505 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3506 if (!ndlp) { 3507 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3508 if (!ndlp) 3509 return 1; 3510 lpfc_enqueue_node(vport, ndlp); 3511 } 3512 3513 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3514 ndlp->nlp_DID, ELS_CMD_SCR); 3515 if (!elsiocb) 3516 return 1; 3517 3518 if (phba->sli_rev == LPFC_SLI_REV4) { 3519 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3520 if (rc) { 3521 lpfc_els_free_iocb(phba, elsiocb); 3522 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3523 "0937 %s: Failed to reg fc node, rc %d\n", 3524 __func__, rc); 3525 return 1; 3526 } 3527 } 3528 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 3529 3530 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3531 pcmd += sizeof(uint32_t); 3532 3533 /* For SCR, remainder of payload is SCR parameter page */ 3534 memset(pcmd, 0, sizeof(SCR)); 3535 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3536 3537 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3538 "Issue SCR: did:x%x", 3539 ndlp->nlp_DID, 0, 0); 3540 3541 phba->fc_stat.elsXmitSCR++; 3542 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3543 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3544 if (!elsiocb->ndlp) { 3545 lpfc_els_free_iocb(phba, elsiocb); 3546 return 1; 3547 } 3548 3549 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3550 "Issue SCR: did:x%x refcnt %d", 3551 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3552 3553 rc = 
lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3554 if (rc == IOCB_ERROR) { 3555 lpfc_els_free_iocb(phba, elsiocb); 3556 lpfc_nlp_put(ndlp); 3557 return 1; 3558 } 3559 3560 return 0; 3561 } 3562 3563 /** 3564 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3565 * or the other nport (pt2pt). 3566 * @vport: pointer to a host virtual N_Port data structure. 3567 * @retry: number of retries to the command IOCB. 3568 * 3569 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3570 * when connected to a fabric, or to the remote port when connected 3571 * in point-to-point mode. When sent to the Fabric Controller, it will 3572 * replay the RSCN to registered recipients. 3573 * 3574 * Note that the ndlp reference count will be incremented by 1 for holding the 3575 * ndlp and the reference to ndlp will be stored into the ndlp field of 3576 * the IOCB for the completion callback function to the RSCN ELS command. 3577 * 3578 * Return code 3579 * 0 - Successfully issued RSCN command 3580 * 1 - Failed to issue RSCN command 3581 **/ 3582 int 3583 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3584 { 3585 int rc = 0; 3586 struct lpfc_hba *phba = vport->phba; 3587 struct lpfc_iocbq *elsiocb; 3588 struct lpfc_nodelist *ndlp; 3589 struct { 3590 struct fc_els_rscn rscn; 3591 struct fc_els_rscn_page portid; 3592 } *event; 3593 uint32_t nportid; 3594 uint16_t cmdsize = sizeof(*event); 3595 3596 /* Not supported for private loop */ 3597 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3598 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag)) 3599 return 1; 3600 3601 if (test_bit(FC_PT2PT, &vport->fc_flag)) { 3602 /* find any mapped nport - that would be the other nport */ 3603 ndlp = lpfc_findnode_mapped(vport); 3604 if (!ndlp) 3605 return 1; 3606 } else { 3607 nportid = FC_FID_FCTRL; 3608 /* find the fabric controller node */ 3609 ndlp = lpfc_findnode_did(vport, nportid); 3610 if (!ndlp) { 3611 /* if one didn't exist, make one */ 3612 ndlp = lpfc_nlp_init(vport, nportid); 3613 if (!ndlp) 3614 return 1; 3615 lpfc_enqueue_node(vport, ndlp); 3616 } 3617 } 3618 3619 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3620 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3621 3622 if (!elsiocb) 3623 return 1; 3624 3625 event = elsiocb->cmd_dmabuf->virt; 3626 3627 event->rscn.rscn_cmd = ELS_RSCN; 3628 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3629 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3630 3631 nportid = vport->fc_myDID; 3632 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3633 event->portid.rscn_page_flags = 0; 3634 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3635 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3636 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3637 3638 phba->fc_stat.elsXmitRSCN++; 3639 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd; 3640 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3641 if (!elsiocb->ndlp) { 3642 lpfc_els_free_iocb(phba, elsiocb); 3643 return 1; 3644 } 3645 3646 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3647 "Issue RSCN: did:x%x", 3648 ndlp->nlp_DID, 0, 0); 3649 3650 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3651 if (rc == IOCB_ERROR) { 3652 lpfc_els_free_iocb(phba, elsiocb); 3653 lpfc_nlp_put(ndlp); 3654 return 1; 3655 } 3656 3657 return 0; 3658 } 3659 3660 /** 3661 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3662 * @vport: pointer to a host virtual N_Port data structure. 3663 * @nportid: N_Port identifier to the remote node. 
3664 * @retry: number of retries to the command IOCB.
3665 *
3666 * This routine issues a Fibre Channel Address Resolution Response
3667 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3668 * is passed into the function. It first searches the @vport node list to find
3669 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3670 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3671 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3672 *
3673 * Note that the ndlp reference count will be incremented by 1 for holding the
3674 * ndlp and the reference to ndlp will be stored into the ndlp field of
3675 * the IOCB for the completion callback function to the FARPR ELS command.
3676 *
3677 * Return code
3678 * 0 - Successfully issued farpr command
3679 * 1 - Failed to issue farpr command
3680 **/
3681 static int
3682 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3683 {
3684 int rc = 0;
3685 struct lpfc_hba *phba = vport->phba;
3686 struct lpfc_iocbq *elsiocb;
3687 FARP *fp;
3688 uint8_t *pcmd;
3689 uint32_t *lp;
3690 uint16_t cmdsize;
3691 struct lpfc_nodelist *ondlp;
3692 struct lpfc_nodelist *ndlp;
3693
3694 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3695
3696 ndlp = lpfc_findnode_did(vport, nportid);
3697 if (!ndlp) {
3698 ndlp = lpfc_nlp_init(vport, nportid);
3699 if (!ndlp)
3700 return 1;
3701 lpfc_enqueue_node(vport, ndlp);
3702 }
3703
3704 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3705 ndlp->nlp_DID, ELS_CMD_FARPR);
3706 if (!elsiocb)
3707 return 1;
3708
3709 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3710
3711 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3712 pcmd += sizeof(uint32_t);
3713
3714 /* Fill in FARPR payload */
3715 fp = (FARP *) (pcmd);
3716 memset(fp, 0, sizeof(FARP));
3717 lp = (uint32_t *) pcmd;
3718 *lp++ = be32_to_cpu(nportid);
3719 *lp++ = be32_to_cpu(vport->fc_myDID);
3720 fp->Rflags = 0;
3721 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3722
3723 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3724 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3725 ondlp = lpfc_findnode_did(vport, nportid);
3726 if (ondlp) {
3727 memcpy(&fp->OportName, &ondlp->nlp_portname,
3728 sizeof(struct lpfc_name));
3729 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3730 sizeof(struct lpfc_name));
3731 }
3732
3733 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3734 "Issue FARPR: did:x%x",
3735 ndlp->nlp_DID, 0, 0);
3736
3737 phba->fc_stat.elsXmitFARPR++;
3738 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3739 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3740 if (!elsiocb->ndlp) {
3741 lpfc_els_free_iocb(phba, elsiocb);
3742 return 1;
3743 }
3744
3745 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3746 if (rc == IOCB_ERROR) {
3747 /* The additional lpfc_nlp_put will cause the following
3748 * lpfc_els_free_iocb routine to trigger the release of
3749 * the node.
3750 */
3751 lpfc_els_free_iocb(phba, elsiocb);
3752 lpfc_nlp_put(ndlp);
3753 return 1;
3754 }
3755 /* On success, the ndlp reference taken above is kept; the completion
3756 * callback function lpfc_cmpl_els_cmd triggers the release of
3757 * the node.
3758 */
3759 return 0;
3760 }
3761
3762 /**
3763 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
3764 * @vport: pointer to a host virtual N_Port data structure.
3765 * @retry: retry counter for the command IOCB.
3766 * 3767 * This routine issues an ELS RDF to the Fabric Controller to register 3768 * for diagnostic functions. 3769 * 3770 * Note that the ndlp reference count will be incremented by 1 for holding the 3771 * ndlp and the reference to ndlp will be stored into the ndlp field of 3772 * the IOCB for the completion callback function to the RDF ELS command. 3773 * 3774 * Return code 3775 * 0 - Successfully issued rdf command 3776 * 1 - Failed to issue rdf command 3777 **/ 3778 int 3779 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3780 { 3781 struct lpfc_hba *phba = vport->phba; 3782 struct lpfc_iocbq *elsiocb; 3783 struct lpfc_els_rdf_req *prdf; 3784 struct lpfc_nodelist *ndlp; 3785 uint16_t cmdsize; 3786 int rc; 3787 3788 cmdsize = sizeof(*prdf); 3789 3790 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3791 if (!ndlp) { 3792 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3793 if (!ndlp) 3794 return -ENODEV; 3795 lpfc_enqueue_node(vport, ndlp); 3796 } 3797 3798 /* RDF ELS is not required on an NPIV VN_Port. */ 3799 if (vport->port_type == LPFC_NPIV_PORT) 3800 return -EACCES; 3801 3802 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3803 ndlp->nlp_DID, ELS_CMD_RDF); 3804 if (!elsiocb) 3805 return -ENOMEM; 3806 3807 /* Configure the payload for the supported FPIN events. */ 3808 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt; 3809 memset(prdf, 0, cmdsize); 3810 prdf->rdf.fpin_cmd = ELS_RDF; 3811 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3812 sizeof(struct fc_els_rdf)); 3813 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3814 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3815 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3816 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3817 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3818 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3819 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3820 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3821 3822 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3823 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3824 ndlp->nlp_DID, phba->cgn_reg_signal, 3825 phba->cgn_reg_fpin); 3826 3827 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3828 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd; 3829 elsiocb->ndlp = lpfc_nlp_get(ndlp); 3830 if (!elsiocb->ndlp) { 3831 lpfc_els_free_iocb(phba, elsiocb); 3832 return -EIO; 3833 } 3834 3835 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3836 "Issue RDF: did:x%x refcnt %d", 3837 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3838 3839 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3840 if (rc == IOCB_ERROR) { 3841 lpfc_els_free_iocb(phba, elsiocb); 3842 lpfc_nlp_put(ndlp); 3843 return -EIO; 3844 } 3845 return 0; 3846 } 3847 3848 /** 3849 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3850 * @vport: pointer to a host virtual N_Port data structure. 3851 * @cmdiocb: pointer to lpfc command iocb data structure. 3852 * @ndlp: pointer to a node-list data structure. 3853 * 3854 * A received RDF implies a possible change to fabric supported diagnostic 3855 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3856 * RDF request to reregister for supported diagnostic functions. 
3857 *
3858 * Return code
3859 * 0 - Success
3860 * -EIO - Failed to process received RDF
3861 **/
3862 static int
3863 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3864 struct lpfc_nodelist *ndlp)
3865 {
3866 /* Send LS_ACC */
3867 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3868 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3869 "1623 Failed to RDF_ACC from x%x for x%x\n",
3870 ndlp->nlp_DID, vport->fc_myDID);
3871 return -EIO;
3872 }
3873
3874 /* Issue new RDF for reregistering */
3875 if (lpfc_issue_els_rdf(vport, 0)) {
3876 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3877 "2623 Failed to re register RDF for x%x\n",
3878 vport->fc_myDID);
3879 return -EIO;
3880 }
3881
3882 return 0;
3883 }
3884
3885 /**
3886 * lpfc_least_capable_settings - helper function for EDC rsp processing
3887 * @phba: pointer to lpfc hba data structure.
3888 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3889 *
3890 * This helper routine determines the least capable setting for the
3891 * congestion signals and signal frequency, including scale, from the
3892 * congestion detection descriptor in the EDC rsp. The routine
3893 * sets @phba values in preparation for a set_features mailbox.
3894 **/
3895 static void
3896 lpfc_least_capable_settings(struct lpfc_hba *phba,
3897 struct fc_diag_cg_sig_desc *pcgd)
3898 {
3899 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3900 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3901
3902 /* Get rsp signal and frequency capabilities. */
3903 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3904 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3905 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3906
3907 /* If the FPort does not support signals, set FPIN only */
3908 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3909 goto out_no_support;
3910
3911 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3912 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3913 * to milliseconds.
3914 */
3915 switch (rsp_sig_freq_scale) {
3916 case EDC_CG_SIGFREQ_SEC:
3917 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3918 break;
3919 case EDC_CG_SIGFREQ_MSEC:
3920 rsp_sig_freq_cyc = 1;
3921 break;
3922 default:
3923 goto out_no_support;
3924 }
3925
3926 /* Convenient shorthand. */
3927 drv_sig_cap = phba->cgn_reg_signal;
3928
3929 /* Choose the least capable frequency. */
3930 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3931 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3932
3933 /* There should be some common signal support. Settle on the least
3934 * capable signal and adjust FPIN values. Initialize defaults to ease the
3935 * decision.
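 * For example, if the FPort offers EDC_CG_SIG_WARN_ONLY while the driver
 * registered EDC_CG_SIG_WARN_ALARM, warnings are taken as congestion
 * signals and alarms remain FPIN only; only when both sides offer
 * EDC_CG_SIG_WARN_ALARM is FPIN disabled entirely (LPFC_CGN_FPIN_NONE).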
3936 */ 3937 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3938 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3939 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3940 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3941 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3942 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3943 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3944 } 3945 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3946 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3947 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3948 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3949 } 3950 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3951 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3952 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3953 } 3954 } 3955 3956 /* We are NOT recording signal frequency in congestion info buffer */ 3957 return; 3958 3959 out_no_support: 3960 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3961 phba->cgn_sig_freq = 0; 3962 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3963 } 3964 3965 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3966 FC_LS_TLV_DTAG_INIT); 3967 3968 /** 3969 * lpfc_cmpl_els_edc - Completion callback function for EDC 3970 * @phba: pointer to lpfc hba data structure. 3971 * @cmdiocb: pointer to lpfc command iocb data structure. 3972 * @rspiocb: pointer to lpfc response iocb data structure. 3973 * 3974 * This routine is the completion callback function for issuing the Exchange 3975 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3976 * notify the FPort of its Congestion and Link Fault capabilities. This 3977 * routine parses the FPort's response and decides on the least common 3978 * values applicable to both FPort and NPort for Warnings and Alarms that 3979 * are communicated via hardware signals. 3980 **/ 3981 static void 3982 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3983 struct lpfc_iocbq *rspiocb) 3984 { 3985 IOCB_t *irsp_iocb; 3986 struct fc_els_edc_resp *edc_rsp; 3987 struct fc_tlv_desc *tlv; 3988 struct fc_diag_cg_sig_desc *pcgd; 3989 struct fc_diag_lnkflt_desc *plnkflt; 3990 struct lpfc_dmabuf *pcmd, *prsp; 3991 const char *dtag_nm; 3992 u32 *pdata, dtag; 3993 int desc_cnt = 0, bytes_remain; 3994 bool rcv_cap_desc = false; 3995 struct lpfc_nodelist *ndlp; 3996 u32 ulp_status, ulp_word4, tmo, did, iotag; 3997 3998 ndlp = cmdiocb->ndlp; 3999 4000 ulp_status = get_job_ulpstatus(phba, rspiocb); 4001 ulp_word4 = get_job_word4(phba, rspiocb); 4002 did = get_job_els_rsp64_did(phba, rspiocb); 4003 4004 if (phba->sli_rev == LPFC_SLI_REV4) { 4005 tmo = get_wqe_tmo(rspiocb); 4006 iotag = get_wqe_reqtag(rspiocb); 4007 } else { 4008 irsp_iocb = &rspiocb->iocb; 4009 tmo = irsp_iocb->ulpTimeout; 4010 iotag = irsp_iocb->ulpIoTag; 4011 } 4012 4013 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4014 "EDC cmpl: status:x%x/x%x did:x%x", 4015 ulp_status, ulp_word4, did); 4016 4017 /* ELS cmd tag <ulpIoTag> completes */ 4018 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4019 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 4020 iotag, ulp_status, ulp_word4, tmo); 4021 4022 pcmd = cmdiocb->cmd_dmabuf; 4023 if (!pcmd) 4024 goto out; 4025 4026 pdata = (u32 *)pcmd->virt; 4027 if (!pdata) 4028 goto out; 4029 4030 /* Need to clear signal values, send features MB and RDF with FPIN. 
*/ 4031 if (ulp_status) 4032 goto out; 4033 4034 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 4035 if (!prsp) 4036 goto out; 4037 4038 edc_rsp = prsp->virt; 4039 if (!edc_rsp) 4040 goto out; 4041 4042 /* ELS cmd tag <ulpIoTag> completes */ 4043 lpfc_printf_log(phba, KERN_INFO, 4044 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4045 "4676 Fabric EDC Rsp: " 4046 "0x%02x, 0x%08x\n", 4047 edc_rsp->acc_hdr.la_cmd, 4048 be32_to_cpu(edc_rsp->desc_list_len)); 4049 4050 if (!lpfc_is_els_acc_rsp(prsp)) 4051 goto out; 4052 4053 /* 4054 * Payload length in bytes is the response descriptor list 4055 * length minus the 12 bytes of Link Service Request 4056 * Information descriptor in the reply. 4057 */ 4058 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 4059 sizeof(struct fc_els_lsri_desc); 4060 if (bytes_remain <= 0) 4061 goto out; 4062 4063 tlv = edc_rsp->desc; 4064 4065 /* 4066 * cycle through EDC diagnostic descriptors to find the 4067 * congestion signaling capability descriptor 4068 */ 4069 while (bytes_remain) { 4070 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 4071 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4072 "6461 Truncated TLV hdr on " 4073 "Diagnostic descriptor[%d]\n", 4074 desc_cnt); 4075 goto out; 4076 } 4077 4078 dtag = be32_to_cpu(tlv->desc_tag); 4079 switch (dtag) { 4080 case ELS_DTAG_LNK_FAULT_CAP: 4081 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4082 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4083 sizeof(struct fc_diag_lnkflt_desc)) { 4084 lpfc_printf_log(phba, KERN_WARNING, 4085 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 4086 "6462 Truncated Link Fault Diagnostic " 4087 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4088 desc_cnt, bytes_remain, 4089 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4090 sizeof(struct fc_diag_lnkflt_desc)); 4091 goto out; 4092 } 4093 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 4094 lpfc_printf_log(phba, KERN_INFO, 4095 LOG_ELS | LOG_LDS_EVENT, 4096 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 4097 "0x%08x 0x%08x 0x%08x\n", 4098 be32_to_cpu(plnkflt->desc_tag), 4099 be32_to_cpu(plnkflt->desc_len), 4100 be32_to_cpu( 4101 plnkflt->degrade_activate_threshold), 4102 be32_to_cpu( 4103 plnkflt->degrade_deactivate_threshold), 4104 be32_to_cpu(plnkflt->fec_degrade_interval)); 4105 break; 4106 case ELS_DTAG_CG_SIGNAL_CAP: 4107 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 4108 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 4109 sizeof(struct fc_diag_cg_sig_desc)) { 4110 lpfc_printf_log( 4111 phba, KERN_WARNING, LOG_CGN_MGMT, 4112 "6463 Truncated Cgn Signal Diagnostic " 4113 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4114 desc_cnt, bytes_remain, 4115 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4116 sizeof(struct fc_diag_cg_sig_desc)); 4117 goto out; 4118 } 4119 4120 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4121 lpfc_printf_log( 4122 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4123 "4616 CGN Desc Data: 0x%08x 0x%08x " 4124 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4125 be32_to_cpu(pcgd->desc_tag), 4126 be32_to_cpu(pcgd->desc_len), 4127 be32_to_cpu(pcgd->xmt_signal_capability), 4128 be16_to_cpu(pcgd->xmt_signal_frequency.count), 4129 be16_to_cpu(pcgd->xmt_signal_frequency.units), 4130 be32_to_cpu(pcgd->rcv_signal_capability), 4131 be16_to_cpu(pcgd->rcv_signal_frequency.count), 4132 be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4133 4134 /* Compare driver and Fport capabilities and choose 4135 * least common. 
4136 */ 4137 lpfc_least_capable_settings(phba, pcgd); 4138 rcv_cap_desc = true; 4139 break; 4140 default: 4141 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4142 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4143 "4919 unknown Diagnostic " 4144 "Descriptor[%d]: tag x%x (%s)\n", 4145 desc_cnt, dtag, dtag_nm); 4146 } 4147 4148 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4149 tlv = fc_tlv_next_desc(tlv); 4150 desc_cnt++; 4151 } 4152 4153 out: 4154 if (!rcv_cap_desc) { 4155 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4156 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4157 phba->cgn_sig_freq = 0; 4158 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4159 "4202 EDC rsp error - sending RDF " 4160 "for FPIN only.\n"); 4161 } 4162 4163 lpfc_config_cgn_signal(phba); 4164 4165 /* Check to see if link went down during discovery */ 4166 lpfc_els_chk_latt(phba->pport); 4167 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4168 "EDC Cmpl: did:x%x refcnt %d", 4169 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4170 lpfc_els_free_iocb(phba, cmdiocb); 4171 lpfc_nlp_put(ndlp); 4172 } 4173 4174 static void 4175 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4176 { 4177 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv; 4178 4179 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP); 4180 lft->desc_len = cpu_to_be32( 4181 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc)); 4182 4183 lft->degrade_activate_threshold = 4184 cpu_to_be32(phba->degrade_activate_threshold); 4185 lft->degrade_deactivate_threshold = 4186 cpu_to_be32(phba->degrade_deactivate_threshold); 4187 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval); 4188 } 4189 4190 static void 4191 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 4192 { 4193 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv; 4194 4195 /* We are assuming cgd was zero'ed before calling this routine */ 4196 4197 /* Configure the congestion detection capability */ 4198 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4199 4200 /* Descriptor len doesn't include the tag or len fields. */ 4201 cgd->desc_len = cpu_to_be32( 4202 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4203 4204 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4205 * xmt_signal_frequency.count already set to 0. 4206 * xmt_signal_frequency.units already set to 0. 4207 */ 4208 4209 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4210 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4211 * rcv_signal_frequency.count already set to 0. 4212 * rcv_signal_frequency.units already set to 0. 4213 */ 4214 phba->cgn_sig_freq = 0; 4215 return; 4216 } 4217 switch (phba->cgn_reg_signal) { 4218 case EDC_CG_SIG_WARN_ONLY: 4219 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4220 break; 4221 case EDC_CG_SIG_WARN_ALARM: 4222 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4223 break; 4224 default: 4225 /* rcv_signal_capability left 0 thus no support */ 4226 break; 4227 } 4228 4229 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4230 * the completion we settle on the higher frequency. 
4231 */ 4232 cgd->rcv_signal_frequency.count = 4233 cpu_to_be16(lpfc_fabric_cgn_frequency); 4234 cgd->rcv_signal_frequency.units = 4235 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4236 } 4237 4238 static bool 4239 lpfc_link_is_lds_capable(struct lpfc_hba *phba) 4240 { 4241 if (!(phba->lmt & LMT_64Gb)) 4242 return false; 4243 if (phba->sli_rev != LPFC_SLI_REV4) 4244 return false; 4245 4246 if (phba->sli4_hba.conf_trunk) { 4247 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G) 4248 return true; 4249 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) { 4250 return true; 4251 } 4252 return false; 4253 } 4254 4255 /** 4256 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4257 * @vport: pointer to a host virtual N_Port data structure. 4258 * @retry: retry counter for the command iocb. 4259 * 4260 * This routine issues an ELS EDC to the F-Port Controller to communicate 4261 * this N_Port's support of hardware signals in its Congestion 4262 * Capabilities Descriptor. 4263 * 4264 * Note: This routine does not check if one or more signals are 4265 * set in the cgn_reg_signal parameter. The caller makes the 4266 * decision to enforce cgn_reg_signal as nonzero or zero depending 4267 * on the conditions. During Fabric requests, the driver 4268 * requires cgn_reg_signals to be nonzero. But a dynamic request 4269 * to set the congestion mode to OFF from Monitor or Manage 4270 * would correctly issue an EDC with no signals enabled to 4271 * turn off switch functionality and then update the FW. 4272 * 4273 * Return code 4274 * 0 - Successfully issued edc command 4275 * 1 - Failed to issue edc command 4276 **/ 4277 int 4278 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry) 4279 { 4280 struct lpfc_hba *phba = vport->phba; 4281 struct lpfc_iocbq *elsiocb; 4282 struct fc_els_edc *edc_req; 4283 struct fc_tlv_desc *tlv; 4284 u16 cmdsize; 4285 struct lpfc_nodelist *ndlp; 4286 u8 *pcmd = NULL; 4287 u32 cgn_desc_size, lft_desc_size; 4288 int rc; 4289 4290 if (vport->port_type == LPFC_NPIV_PORT) 4291 return -EACCES; 4292 4293 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4294 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 4295 return -ENODEV; 4296 4297 cgn_desc_size = (phba->cgn_init_reg_signal) ? 4298 sizeof(struct fc_diag_cg_sig_desc) : 0; 4299 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 4300 sizeof(struct fc_diag_lnkflt_desc) : 0; 4301 cmdsize = cgn_desc_size + lft_desc_size; 4302 4303 /* Skip EDC if no applicable descriptors */ 4304 if (!cmdsize) 4305 goto try_rdf; 4306 4307 cmdsize += sizeof(struct fc_els_edc); 4308 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 4309 ndlp->nlp_DID, ELS_CMD_EDC); 4310 if (!elsiocb) 4311 goto try_rdf; 4312 4313 /* Configure the payload for the supported Diagnostics capabilities. 
 */
4314 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
4315 memset(pcmd, 0, cmdsize);
4316 edc_req = (struct fc_els_edc *)pcmd;
4317 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
4318 edc_req->edc_cmd = ELS_EDC;
4319 tlv = edc_req->desc;
4320
4321 if (cgn_desc_size) {
4322 lpfc_format_edc_cgn_desc(phba, tlv);
4323 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
4324 tlv = fc_tlv_next_desc(tlv);
4325 }
4326
4327 if (lft_desc_size)
4328 lpfc_format_edc_lft_desc(phba, tlv);
4329
4330 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4331 "4623 Xmit EDC to remote "
4332 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
4333 ndlp->nlp_DID, phba->cgn_reg_signal,
4334 phba->cgn_reg_fpin);
4335
4336 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
4337 elsiocb->ndlp = lpfc_nlp_get(ndlp);
4338 if (!elsiocb->ndlp) {
4339 lpfc_els_free_iocb(phba, elsiocb);
4340 return -EIO;
4341 }
4342
4343 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4344 "Issue EDC: did:x%x refcnt %d",
4345 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4346 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4347 if (rc == IOCB_ERROR) {
4348 /* The additional lpfc_nlp_put will cause the following
4349 * lpfc_els_free_iocb routine to trigger the release of
4350 * the node.
4351 */
4352 lpfc_els_free_iocb(phba, elsiocb);
4353 lpfc_nlp_put(ndlp);
4354 goto try_rdf;
4355 }
4356 return 0;
4357 try_rdf:
4358 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
4359 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4360 rc = lpfc_issue_els_rdf(vport, 0);
4361 return rc;
4362 }
4363
4364 /**
4365 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
4366 * @vport: pointer to a host virtual N_Port data structure.
4367 * @nlp: pointer to a node-list data structure.
4368 *
4369 * This routine cancels the timer with a delayed IOCB-command retry for
4370 * the @vport's @nlp. It stops the timer for the delayed function retry and
4371 * removes the ELS retry event if one is present. In addition, if the
4372 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
4373 * commands are sent for the @vport's nodes that require issuing discovery
4374 * ADISC.
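 * When no more discovery nodes remain, discovery is wound down via
 * lpfc_can_disctmo() and lpfc_end_rscn().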
4375 **/ 4376 void 4377 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4378 { 4379 struct lpfc_work_evt *evtp; 4380 4381 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4382 return; 4383 spin_lock_irq(&nlp->lock); 4384 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4385 spin_unlock_irq(&nlp->lock); 4386 del_timer_sync(&nlp->nlp_delayfunc); 4387 nlp->nlp_last_elscmd = 0; 4388 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4389 list_del_init(&nlp->els_retry_evt.evt_listp); 4390 /* Decrement nlp reference count held for the delayed retry */ 4391 evtp = &nlp->els_retry_evt; 4392 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4393 } 4394 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4395 spin_lock_irq(&nlp->lock); 4396 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4397 spin_unlock_irq(&nlp->lock); 4398 if (vport->num_disc_nodes) { 4399 if (vport->port_state < LPFC_VPORT_READY) { 4400 /* Check if there are more ADISCs to be sent */ 4401 lpfc_more_adisc(vport); 4402 } else { 4403 /* Check if there are more PLOGIs to be sent */ 4404 lpfc_more_plogi(vport); 4405 if (vport->num_disc_nodes == 0) { 4406 clear_bit(FC_NDISC_ACTIVE, 4407 &vport->fc_flag); 4408 lpfc_can_disctmo(vport); 4409 lpfc_end_rscn(vport); 4410 } 4411 } 4412 } 4413 } 4414 return; 4415 } 4416 4417 /** 4418 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4419 * @t: pointer to the timer function associated data (ndlp). 4420 * 4421 * This routine is invoked by the ndlp delayed-function timer to check 4422 * whether there is any pending ELS retry event(s) with the node. If not, it 4423 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4424 * adds the delayed events to the HBA work list and invokes the 4425 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4426 * event. Note that lpfc_nlp_get() is called before posting the event to 4427 * the work list to hold reference count of ndlp so that it guarantees the 4428 * reference to ndlp will still be available when the worker thread gets 4429 * to the event associated with the ndlp. 4430 **/ 4431 void 4432 lpfc_els_retry_delay(struct timer_list *t) 4433 { 4434 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4435 struct lpfc_vport *vport = ndlp->vport; 4436 struct lpfc_hba *phba = vport->phba; 4437 unsigned long flags; 4438 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4439 4440 /* Hold a node reference for outstanding queued work */ 4441 if (!lpfc_nlp_get(ndlp)) 4442 return; 4443 4444 spin_lock_irqsave(&phba->hbalock, flags); 4445 if (!list_empty(&evtp->evt_listp)) { 4446 spin_unlock_irqrestore(&phba->hbalock, flags); 4447 lpfc_nlp_put(ndlp); 4448 return; 4449 } 4450 4451 evtp->evt_arg1 = ndlp; 4452 evtp->evt = LPFC_EVT_ELS_RETRY; 4453 list_add_tail(&evtp->evt_listp, &phba->work_list); 4454 spin_unlock_irqrestore(&phba->hbalock, flags); 4455 4456 lpfc_worker_wake_up(phba); 4457 } 4458 4459 /** 4460 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4461 * @ndlp: pointer to a node-list data structure. 4462 * 4463 * This routine is the worker-thread handler for processing the @ndlp delayed 4464 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves 4465 * the last ELS command from the associated ndlp and invokes the proper ELS 4466 * function according to the delayed ELS command to retry the command. 
4467 **/ 4468 void 4469 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4470 { 4471 struct lpfc_vport *vport = ndlp->vport; 4472 uint32_t cmd, retry; 4473 4474 spin_lock_irq(&ndlp->lock); 4475 cmd = ndlp->nlp_last_elscmd; 4476 ndlp->nlp_last_elscmd = 0; 4477 4478 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4479 spin_unlock_irq(&ndlp->lock); 4480 return; 4481 } 4482 4483 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4484 spin_unlock_irq(&ndlp->lock); 4485 /* 4486 * If a discovery event readded nlp_delayfunc after timer 4487 * firing and before processing the timer, cancel the 4488 * nlp_delayfunc. 4489 */ 4490 del_timer_sync(&ndlp->nlp_delayfunc); 4491 retry = ndlp->nlp_retry; 4492 ndlp->nlp_retry = 0; 4493 4494 switch (cmd) { 4495 case ELS_CMD_FLOGI: 4496 lpfc_issue_els_flogi(vport, ndlp, retry); 4497 break; 4498 case ELS_CMD_PLOGI: 4499 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4500 ndlp->nlp_prev_state = ndlp->nlp_state; 4501 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4502 } 4503 break; 4504 case ELS_CMD_ADISC: 4505 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4506 ndlp->nlp_prev_state = ndlp->nlp_state; 4507 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4508 } 4509 break; 4510 case ELS_CMD_PRLI: 4511 case ELS_CMD_NVMEPRLI: 4512 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4513 ndlp->nlp_prev_state = ndlp->nlp_state; 4514 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4515 } 4516 break; 4517 case ELS_CMD_LOGO: 4518 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4519 ndlp->nlp_prev_state = ndlp->nlp_state; 4520 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4521 } 4522 break; 4523 case ELS_CMD_FDISC: 4524 if (!test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag)) 4525 lpfc_issue_els_fdisc(vport, ndlp, retry); 4526 break; 4527 } 4528 return; 4529 } 4530 4531 /** 4532 * lpfc_link_reset - Issue link reset 4533 * @vport: pointer to a virtual N_Port data structure. 4534 * 4535 * This routine performs link reset by sending INIT_LINK mailbox command. 4536 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4537 * INIT_LINK mailbox command. 
4538 *
4539 * Return code
4540 * 0 - Link reset initiated successfully
4541 * 1 - Failed to initiate link reset
4542 **/
4543 int
4544 lpfc_link_reset(struct lpfc_vport *vport)
4545 {
4546 struct lpfc_hba *phba = vport->phba;
4547 LPFC_MBOXQ_t *mbox;
4548 uint32_t control;
4549 int rc;
4550
4551 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4552 "2851 Attempt link reset\n");
4553 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4554 if (!mbox) {
4555 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4556 "2852 Failed to allocate mbox memory");
4557 return 1;
4558 }
4559
4560 /* Enable Link attention interrupts */
4561 if (phba->sli_rev <= LPFC_SLI_REV3) {
4562 spin_lock_irq(&phba->hbalock);
4563 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4564 control = readl(phba->HCregaddr);
4565 control |= HC_LAINT_ENA;
4566 writel(control, phba->HCregaddr);
4567 readl(phba->HCregaddr); /* flush */
4568 spin_unlock_irq(&phba->hbalock);
4569 }
4570
4571 lpfc_init_link(phba, mbox, phba->cfg_topology,
4572 phba->cfg_link_speed);
4573 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4574 mbox->vport = vport;
4575 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4576 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4577 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4578 "2853 Failed to issue INIT_LINK "
4579 "mbox command, rc:x%x\n", rc);
4580 mempool_free(mbox, phba->mbox_mem_pool);
4581 return 1;
4582 }
4583
4584 return 0;
4585 }
4586
4587 /**
4588 * lpfc_els_retry - Make retry decision on an els command iocb
4589 * @phba: pointer to lpfc hba data structure.
4590 * @cmdiocb: pointer to lpfc command iocb data structure.
4591 * @rspiocb: pointer to lpfc response iocb data structure.
4592 *
4593 * This routine makes a retry decision on an ELS command IOCB, which has
4594 * failed. The following ELS IOCBs use this function for retrying the command
4595 * when a previously issued command responded with an error status: FLOGI,
4596 * PLOGI, PRLI, ADISC and FDISC. Based on the ELS command type and the
4597 * returned error status, it makes the decision whether a retry shall be
4598 * issued for the command, and whether a retry shall be made immediately or
4599 * delayed. In the former case, the corresponding ELS command issuing function
4600 * is called to retry the command. In the latter case, the ELS command shall
4601 * be posted to the ndlp delayed event and the delayed function timer set on
4602 * the ndlp for the delayed command issuing.
4603 *
4604 * Return code
4605 * 0 - No retry of els command is made
4606 * 1 - Immediate or delayed retry of els command is made
4607 **/
4608 static int
4609 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4610 struct lpfc_iocbq *rspiocb)
4611 {
4612 struct lpfc_vport *vport = cmdiocb->vport;
4613 union lpfc_wqe128 *irsp = &rspiocb->wqe;
4614 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
4615 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
4616 uint32_t *elscmd;
4617 struct ls_rjt stat;
4618 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4619 int logerr = 0;
4620 uint32_t cmd = 0;
4621 uint32_t did;
4622 int link_reset = 0, rc;
4623 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
4624 u32 ulp_word4 = get_job_word4(phba, rspiocb);
4625
4626
4627 /* Note: cmd_dmabuf may be 0 for internal driver abort
4628 * of a delayed ELS command.
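 * In that case cmd stays 0 and the retry decision is made from the
 * completion status alone.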
4629 */ 4630 4631 if (pcmd && pcmd->virt) { 4632 elscmd = (uint32_t *) (pcmd->virt); 4633 cmd = *elscmd++; 4634 } 4635 4636 if (ndlp) 4637 did = ndlp->nlp_DID; 4638 else { 4639 /* We should only hit this case for retrying PLOGI */ 4640 did = get_job_els_rsp64_did(phba, rspiocb); 4641 ndlp = lpfc_findnode_did(vport, did); 4642 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4643 return 0; 4644 } 4645 4646 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4647 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4648 *(((uint32_t *)irsp) + 7), ulp_word4, did); 4649 4650 switch (ulp_status) { 4651 case IOSTAT_FCP_RSP_ERROR: 4652 break; 4653 case IOSTAT_REMOTE_STOP: 4654 if (phba->sli_rev == LPFC_SLI_REV4) { 4655 /* This IO was aborted by the target, we don't 4656 * know the rxid and because we did not send the 4657 * ABTS we cannot generate and RRQ. 4658 */ 4659 lpfc_set_rrq_active(phba, ndlp, 4660 cmdiocb->sli4_lxritag, 0, 0); 4661 } 4662 break; 4663 case IOSTAT_LOCAL_REJECT: 4664 switch ((ulp_word4 & IOERR_PARAM_MASK)) { 4665 case IOERR_LOOP_OPEN_FAILURE: 4666 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4667 delay = 1000; 4668 retry = 1; 4669 break; 4670 4671 case IOERR_ILLEGAL_COMMAND: 4672 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4673 "0124 Retry illegal cmd x%x " 4674 "retry:x%x delay:x%x\n", 4675 cmd, cmdiocb->retry, delay); 4676 retry = 1; 4677 /* All command's retry policy */ 4678 maxretry = 8; 4679 if (cmdiocb->retry > 2) 4680 delay = 1000; 4681 break; 4682 4683 case IOERR_NO_RESOURCES: 4684 logerr = 1; /* HBA out of resources */ 4685 retry = 1; 4686 if (cmdiocb->retry > 100) 4687 delay = 100; 4688 maxretry = 250; 4689 break; 4690 4691 case IOERR_ILLEGAL_FRAME: 4692 delay = 100; 4693 retry = 1; 4694 break; 4695 4696 case IOERR_INVALID_RPI: 4697 if (cmd == ELS_CMD_PLOGI && 4698 did == NameServer_DID) { 4699 /* Continue forever if plogi to */ 4700 /* the nameserver fails */ 4701 maxretry = 0; 4702 delay = 100; 4703 } else if (cmd == ELS_CMD_PRLI && 4704 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 4705 /* State-command disagreement. The PRLI was 4706 * failed with an invalid rpi meaning there 4707 * some unexpected state change. Don't retry. 4708 */ 4709 maxretry = 0; 4710 retry = 0; 4711 break; 4712 } 4713 retry = 1; 4714 break; 4715 4716 case IOERR_SEQUENCE_TIMEOUT: 4717 if (cmd == ELS_CMD_PLOGI && 4718 did == NameServer_DID && 4719 (cmdiocb->retry + 1) == maxretry) { 4720 /* Reset the Link */ 4721 link_reset = 1; 4722 break; 4723 } 4724 retry = 1; 4725 delay = 100; 4726 break; 4727 case IOERR_SLI_ABORTED: 4728 /* Retry ELS PLOGI command? 4729 * Possibly the rport just wasn't ready. 
4730 */ 4731 if (cmd == ELS_CMD_PLOGI) { 4732 /* No retry if state change */ 4733 if (ndlp && 4734 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4735 goto out_retry; 4736 retry = 1; 4737 maxretry = 2; 4738 } 4739 break; 4740 } 4741 break; 4742 4743 case IOSTAT_NPORT_RJT: 4744 case IOSTAT_FABRIC_RJT: 4745 if (ulp_word4 & RJT_UNAVAIL_TEMP) { 4746 retry = 1; 4747 break; 4748 } 4749 break; 4750 4751 case IOSTAT_NPORT_BSY: 4752 case IOSTAT_FABRIC_BSY: 4753 logerr = 1; /* Fabric / Remote NPort out of resources */ 4754 retry = 1; 4755 break; 4756 4757 case IOSTAT_LS_RJT: 4758 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 4759 /* Added for Vendor specifc support 4760 * Just keep retrying for these Rsn / Exp codes 4761 */ 4762 if (test_bit(FC_PT2PT, &vport->fc_flag) && 4763 cmd == ELS_CMD_NVMEPRLI) { 4764 switch (stat.un.b.lsRjtRsnCode) { 4765 case LSRJT_UNABLE_TPC: 4766 case LSRJT_INVALID_CMD: 4767 case LSRJT_LOGICAL_ERR: 4768 case LSRJT_CMD_UNSUPPORTED: 4769 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 4770 "0168 NVME PRLI LS_RJT " 4771 "reason %x port doesn't " 4772 "support NVME, disabling NVME\n", 4773 stat.un.b.lsRjtRsnCode); 4774 retry = 0; 4775 set_bit(FC_PT2PT_NO_NVME, &vport->fc_flag); 4776 goto out_retry; 4777 } 4778 } 4779 switch (stat.un.b.lsRjtRsnCode) { 4780 case LSRJT_UNABLE_TPC: 4781 /* Special case for PRLI LS_RJTs. Recall that lpfc 4782 * uses a single routine to issue both PRLI FC4 types. 4783 * If the PRLI is rejected because that FC4 type 4784 * isn't really supported, don't retry and cause 4785 * multiple transport registrations. Otherwise, parse 4786 * the reason code/reason code explanation and take the 4787 * appropriate action. 4788 */ 4789 lpfc_printf_vlog(vport, KERN_INFO, 4790 LOG_DISCOVERY | LOG_ELS | LOG_NODE, 4791 "0153 ELS cmd x%x LS_RJT by x%x. " 4792 "RsnCode x%x RsnCodeExp x%x\n", 4793 cmd, did, stat.un.b.lsRjtRsnCode, 4794 stat.un.b.lsRjtRsnCodeExp); 4795 4796 switch (stat.un.b.lsRjtRsnCodeExp) { 4797 case LSEXP_CANT_GIVE_DATA: 4798 case LSEXP_CMD_IN_PROGRESS: 4799 if (cmd == ELS_CMD_PLOGI) { 4800 delay = 1000; 4801 maxretry = 48; 4802 } 4803 retry = 1; 4804 break; 4805 case LSEXP_REQ_UNSUPPORTED: 4806 case LSEXP_NO_RSRC_ASSIGN: 4807 /* These explanation codes get no retry. */ 4808 if (cmd == ELS_CMD_PRLI || 4809 cmd == ELS_CMD_NVMEPRLI) 4810 break; 4811 fallthrough; 4812 default: 4813 /* Limit the delay and retry action to a limited 4814 * cmd set. There are other ELS commands where 4815 * a retry is not expected. 4816 */ 4817 if (cmd == ELS_CMD_PLOGI || 4818 cmd == ELS_CMD_PRLI || 4819 cmd == ELS_CMD_NVMEPRLI) { 4820 delay = 1000; 4821 maxretry = lpfc_max_els_tries + 1; 4822 retry = 1; 4823 } 4824 break; 4825 } 4826 4827 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4828 (cmd == ELS_CMD_FDISC) && 4829 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4830 lpfc_printf_vlog(vport, KERN_ERR, 4831 LOG_TRACE_EVENT, 4832 "0125 FDISC Failed (x%x). 
" 4833 "Fabric out of resources\n", 4834 stat.un.lsRjtError); 4835 lpfc_vport_set_state(vport, 4836 FC_VPORT_NO_FABRIC_RSCS); 4837 } 4838 break; 4839 4840 case LSRJT_LOGICAL_BSY: 4841 if ((cmd == ELS_CMD_PLOGI) || 4842 (cmd == ELS_CMD_PRLI) || 4843 (cmd == ELS_CMD_NVMEPRLI)) { 4844 delay = 1000; 4845 maxretry = 48; 4846 } else if (cmd == ELS_CMD_FDISC) { 4847 /* FDISC retry policy */ 4848 maxretry = 48; 4849 if (cmdiocb->retry >= 32) 4850 delay = 1000; 4851 } 4852 retry = 1; 4853 break; 4854 4855 case LSRJT_LOGICAL_ERR: 4856 /* There are some cases where switches return this 4857 * error when they are not ready and should be returning 4858 * Logical Busy. We should delay every time. 4859 */ 4860 if (cmd == ELS_CMD_FDISC && 4861 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4862 maxretry = 3; 4863 delay = 1000; 4864 retry = 1; 4865 } else if (cmd == ELS_CMD_FLOGI && 4866 stat.un.b.lsRjtRsnCodeExp == 4867 LSEXP_NOTHING_MORE) { 4868 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4869 retry = 1; 4870 lpfc_printf_vlog(vport, KERN_ERR, 4871 LOG_TRACE_EVENT, 4872 "0820 FLOGI Failed (x%x). " 4873 "BBCredit Not Supported\n", 4874 stat.un.lsRjtError); 4875 } 4876 break; 4877 4878 case LSRJT_PROTOCOL_ERR: 4879 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4880 (cmd == ELS_CMD_FDISC) && 4881 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4882 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4883 ) { 4884 lpfc_printf_vlog(vport, KERN_ERR, 4885 LOG_TRACE_EVENT, 4886 "0122 FDISC Failed (x%x). " 4887 "Fabric Detected Bad WWN\n", 4888 stat.un.lsRjtError); 4889 lpfc_vport_set_state(vport, 4890 FC_VPORT_FABRIC_REJ_WWN); 4891 } 4892 break; 4893 case LSRJT_VENDOR_UNIQUE: 4894 if ((stat.un.b.vendorUnique == 0x45) && 4895 (cmd == ELS_CMD_FLOGI)) { 4896 goto out_retry; 4897 } 4898 break; 4899 case LSRJT_CMD_UNSUPPORTED: 4900 /* lpfc nvmet returns this type of LS_RJT when it 4901 * receives an FCP PRLI because lpfc nvmet only 4902 * support NVME. ELS request is terminated for FCP4 4903 * on this rport. 4904 */ 4905 if (stat.un.b.lsRjtRsnCodeExp == 4906 LSEXP_REQ_UNSUPPORTED) { 4907 if (cmd == ELS_CMD_PRLI) 4908 goto out_retry; 4909 } 4910 break; 4911 } 4912 break; 4913 4914 case IOSTAT_INTERMED_RSP: 4915 case IOSTAT_BA_RJT: 4916 break; 4917 4918 default: 4919 break; 4920 } 4921 4922 if (link_reset) { 4923 rc = lpfc_link_reset(vport); 4924 if (rc) { 4925 /* Do not give up. Retry PLOGI one more time and attempt 4926 * link reset if PLOGI fails again. 
4927 */ 4928 retry = 1; 4929 delay = 100; 4930 goto out_retry; 4931 } 4932 return 1; 4933 } 4934 4935 if (did == FDMI_DID) 4936 retry = 1; 4937 4938 if ((cmd == ELS_CMD_FLOGI) && 4939 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4940 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4941 /* FLOGI retry policy */ 4942 retry = 1; 4943 /* retry FLOGI forever */ 4944 if (phba->link_flag != LS_LOOPBACK_MODE) 4945 maxretry = 0; 4946 else 4947 maxretry = 2; 4948 4949 if (cmdiocb->retry >= 100) 4950 delay = 5000; 4951 else if (cmdiocb->retry >= 32) 4952 delay = 1000; 4953 } else if ((cmd == ELS_CMD_FDISC) && 4954 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 4955 /* retry FDISCs every second up to devloss */ 4956 retry = 1; 4957 maxretry = vport->cfg_devloss_tmo; 4958 delay = 1000; 4959 } 4960 4961 cmdiocb->retry++; 4962 if (maxretry && (cmdiocb->retry >= maxretry)) { 4963 phba->fc_stat.elsRetryExceeded++; 4964 retry = 0; 4965 } 4966 4967 if (test_bit(FC_UNLOADING, &vport->load_flag)) 4968 retry = 0; 4969 4970 out_retry: 4971 if (retry) { 4972 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4973 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4974 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4975 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4976 "2849 Stop retry ELS command " 4977 "x%x to remote NPORT x%x, " 4978 "Data: x%x x%x\n", cmd, did, 4979 cmdiocb->retry, delay); 4980 return 0; 4981 } 4982 } 4983 4984 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4985 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4986 "0107 Retry ELS command x%x to remote " 4987 "NPORT x%x Data: x%x x%x\n", 4988 cmd, did, cmdiocb->retry, delay); 4989 4990 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4991 ((ulp_status != IOSTAT_LOCAL_REJECT) || 4992 ((ulp_word4 & IOERR_PARAM_MASK) != 4993 IOERR_NO_RESOURCES))) { 4994 /* Don't reset timer for no resources */ 4995 4996 /* If discovery / RSCN timer is running, reset it */ 4997 if (timer_pending(&vport->fc_disctmo) || 4998 test_bit(FC_RSCN_MODE, &vport->fc_flag)) 4999 lpfc_set_disctmo(vport); 5000 } 5001 5002 phba->fc_stat.elsXmitRetry++; 5003 if (ndlp && delay) { 5004 phba->fc_stat.elsDelayRetry++; 5005 ndlp->nlp_retry = cmdiocb->retry; 5006 5007 /* delay is specified in milliseconds */ 5008 mod_timer(&ndlp->nlp_delayfunc, 5009 jiffies + msecs_to_jiffies(delay)); 5010 spin_lock_irq(&ndlp->lock); 5011 ndlp->nlp_flag |= NLP_DELAY_TMO; 5012 spin_unlock_irq(&ndlp->lock); 5013 5014 ndlp->nlp_prev_state = ndlp->nlp_state; 5015 if ((cmd == ELS_CMD_PRLI) || 5016 (cmd == ELS_CMD_NVMEPRLI)) 5017 lpfc_nlp_set_state(vport, ndlp, 5018 NLP_STE_PRLI_ISSUE); 5019 else if (cmd != ELS_CMD_ADISC) 5020 lpfc_nlp_set_state(vport, ndlp, 5021 NLP_STE_NPR_NODE); 5022 ndlp->nlp_last_elscmd = cmd; 5023 5024 return 1; 5025 } 5026 switch (cmd) { 5027 case ELS_CMD_FLOGI: 5028 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 5029 return 1; 5030 case ELS_CMD_FDISC: 5031 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 5032 return 1; 5033 case ELS_CMD_PLOGI: 5034 if (ndlp) { 5035 ndlp->nlp_prev_state = ndlp->nlp_state; 5036 lpfc_nlp_set_state(vport, ndlp, 5037 NLP_STE_PLOGI_ISSUE); 5038 } 5039 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 5040 return 1; 5041 case ELS_CMD_ADISC: 5042 ndlp->nlp_prev_state = ndlp->nlp_state; 5043 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 5044 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 5045 return 1; 5046 case ELS_CMD_PRLI: 5047 case ELS_CMD_NVMEPRLI: 5048 ndlp->nlp_prev_state = ndlp->nlp_state; 5049 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 5050 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 5051 return 1; 5052 case ELS_CMD_LOGO: 5053 ndlp->nlp_prev_state = ndlp->nlp_state; 5054 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 5055 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 5056 return 1; 5057 } 5058 } 5059 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 5060 if (logerr) { 5061 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5062 "0137 No retry ELS command x%x to remote " 5063 "NPORT x%x: Out of Resources: Error:x%x/%x " 5064 "IoTag x%x\n", 5065 cmd, did, ulp_status, ulp_word4, 5066 cmdiocb->iotag); 5067 } 5068 else { 5069 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5070 "0108 No retry ELS command x%x to remote " 5071 "NPORT x%x Retried:%d Error:x%x/%x " 5072 "IoTag x%x nflags x%x\n", 5073 cmd, did, cmdiocb->retry, ulp_status, 5074 ulp_word4, cmdiocb->iotag, 5075 (ndlp ? ndlp->nlp_flag : 0)); 5076 } 5077 return 0; 5078 } 5079 5080 /** 5081 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 5082 * @phba: pointer to lpfc hba data structure. 5083 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 5084 * 5085 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 5086 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 5087 * checks to see whether there is a lpfc DMA buffer associated with the 5088 * response of the command IOCB. If so, it will be released before releasing 5089 * the lpfc DMA buffer associated with the IOCB itself. 5090 * 5091 * Return code 5092 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5093 **/ 5094 static int 5095 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 5096 { 5097 struct lpfc_dmabuf *buf_ptr; 5098 5099 /* Free the response before processing the command. */ 5100 if (!list_empty(&buf_ptr1->list)) { 5101 list_remove_head(&buf_ptr1->list, buf_ptr, 5102 struct lpfc_dmabuf, 5103 list); 5104 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5105 kfree(buf_ptr); 5106 } 5107 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 5108 kfree(buf_ptr1); 5109 return 0; 5110 } 5111 5112 /** 5113 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 5114 * @phba: pointer to lpfc hba data structure. 5115 * @buf_ptr: pointer to the lpfc dma buffer data structure. 5116 * 5117 * This routine releases the lpfc Direct Memory Access (DMA) buffer 5118 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 5119 * pool. 5120 * 5121 * Return code 5122 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 5123 **/ 5124 static int 5125 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 5126 { 5127 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 5128 kfree(buf_ptr); 5129 return 0; 5130 } 5131 5132 /** 5133 * lpfc_els_free_iocb - Free a command iocb and its associated resources 5134 * @phba: pointer to lpfc hba data structure. 5135 * @elsiocb: pointer to lpfc els command iocb data structure. 5136 * 5137 * This routine frees a command IOCB and its associated resources. The 5138 * command IOCB data structure contains the reference to various associated 5139 * resources, these fields must be set to NULL if the associated reference 5140 * not present: 5141 * cmd_dmabuf - reference to cmd. 
5142 * cmd_dmabuf->next - reference to rsp
5143 * rsp_dmabuf - unused
5144 * bpl_dmabuf - reference to bpl
5145 *
5146 * It first properly decrements the reference count held on ndlp for the
5147 * IOCB completion callback function. If the LPFC_DELAY_MEM_FREE flag is not
5148 * set, it invokes the lpfc_els_free_data() routine to release the Direct
5149 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
5150 * adds the DMA buffer to the @phba data structure for the delayed release.
5151 * If a reference to the Buffer Pointer List (BPL) is present, the
5152 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
5153 * associated with the BPL. Finally, the lpfc_sli_release_iocbq() routine is
5154 * invoked to release the IOCB data structure back to the @phba IOCBQ list.
5155 *
5156 * Return code
5157 * 0 - Success (currently, always return 0)
5158 **/
5159 int
5160 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
5161 {
5162 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
5163
5164 /* The I/O iocb is complete. Clear the node and first dmabuf */
5165 elsiocb->ndlp = NULL;
5166
5167 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
5168 if (elsiocb->cmd_dmabuf) {
5169 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
5170 /* Firmware could still be in progress of DMAing
5171 * payload, so don't free the data buffer until after
5172 * a heartbeat.
5173 */
5174 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
5175 buf_ptr = elsiocb->cmd_dmabuf;
5176 elsiocb->cmd_dmabuf = NULL;
5177 if (buf_ptr) {
5178 buf_ptr1 = NULL;
5179 spin_lock_irq(&phba->hbalock);
5180 if (!list_empty(&buf_ptr->list)) {
5181 list_remove_head(&buf_ptr->list,
5182 buf_ptr1, struct lpfc_dmabuf,
5183 list);
5184 INIT_LIST_HEAD(&buf_ptr1->list);
5185 list_add_tail(&buf_ptr1->list,
5186 &phba->elsbuf);
5187 phba->elsbuf_cnt++;
5188 }
5189 INIT_LIST_HEAD(&buf_ptr->list);
5190 list_add_tail(&buf_ptr->list, &phba->elsbuf);
5191 phba->elsbuf_cnt++;
5192 spin_unlock_irq(&phba->hbalock);
5193 }
5194 } else {
5195 buf_ptr1 = elsiocb->cmd_dmabuf;
5196 lpfc_els_free_data(phba, buf_ptr1);
5197 elsiocb->cmd_dmabuf = NULL;
5198 }
5199 }
5200
5201 if (elsiocb->bpl_dmabuf) {
5202 buf_ptr = elsiocb->bpl_dmabuf;
5203 lpfc_els_free_bpl(phba, buf_ptr);
5204 elsiocb->bpl_dmabuf = NULL;
5205 }
5206 lpfc_sli_release_iocbq(phba, elsiocb);
5207 return 0;
5208 }
5209
5210 /**
5211 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
5212 * @phba: pointer to lpfc hba data structure.
5213 * @cmdiocb: pointer to lpfc command iocb data structure.
5214 * @rspiocb: pointer to lpfc response iocb data structure.
5215 *
5216 * This routine is the completion callback function to the Logout (LOGO)
5217 * Accept (ACC) Response ELS command. This routine is invoked to indicate
5218 * the completion of the LOGO process. If the node has transitioned to NPR,
5219 * this routine unregisters the RPI if it is still registered. The
5220 * lpfc_els_free_iocb() routine is invoked to release the IOCB data structure.
5221 **/ 5222 static void 5223 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5224 struct lpfc_iocbq *rspiocb) 5225 { 5226 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5227 struct lpfc_vport *vport = cmdiocb->vport; 5228 u32 ulp_status, ulp_word4; 5229 5230 ulp_status = get_job_ulpstatus(phba, rspiocb); 5231 ulp_word4 = get_job_word4(phba, rspiocb); 5232 5233 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5234 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5235 ulp_status, ulp_word4, ndlp->nlp_DID); 5236 /* ACC to LOGO completes to NPort <nlp_DID> */ 5237 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5238 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5239 "Data: x%x x%x x%x\n", 5240 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5241 ndlp->nlp_state, ndlp->nlp_rpi); 5242 5243 /* This clause allows the LOGO ACC to complete and free resources 5244 * for the Fabric Domain Controller. It does deliberately skip 5245 * the unreg_rpi and release rpi because some fabrics send RDP 5246 * requests after logging out from the initiator. 5247 */ 5248 if (ndlp->nlp_type & NLP_FABRIC && 5249 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5250 goto out; 5251 5252 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5253 /* If PLOGI is being retried, PLOGI completion will cleanup the 5254 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5255 * progress on nodes discovered from last RSCN. 5256 */ 5257 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5258 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5259 goto out; 5260 5261 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) 5262 lpfc_unreg_rpi(vport, ndlp); 5263 5264 } 5265 out: 5266 /* 5267 * The driver received a LOGO from the rport and has ACK'd it. 5268 * At this point, the driver is done so release the IOCB 5269 */ 5270 lpfc_els_free_iocb(phba, cmdiocb); 5271 lpfc_nlp_put(ndlp); 5272 } 5273 5274 /** 5275 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5276 * @phba: pointer to lpfc hba data structure. 5277 * @pmb: pointer to the driver internal queue element for mailbox command. 5278 * 5279 * This routine is the completion callback function for unregister default 5280 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5281 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5282 * decrements the ndlp reference count held for this completion callback 5283 * function. After that, it invokes the lpfc_drop_node to check 5284 * whether it is appropriate to release the node. 5285 **/ 5286 void 5287 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5288 { 5289 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 5290 u32 mbx_flag = pmb->mbox_flag; 5291 u32 mbx_cmd = pmb->u.mb.mbxCommand; 5292 5293 if (ndlp) { 5294 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5295 "0006 rpi x%x DID:%x flg:%x %d x%px " 5296 "mbx_cmd x%x mbx_flag x%x x%px\n", 5297 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5298 kref_read(&ndlp->kref), ndlp, mbx_cmd, 5299 mbx_flag, pmb); 5300 5301 /* This ends the default/temporary RPI cleanup logic for this 5302 * ndlp and the node and rpi needs to be released. Free the rpi 5303 * first on an UNREG_LOGIN and then release the final 5304 * references. 
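 * The lpfc_nlp_put() below drops the reference held for this mailbox
 * command, and lpfc_drop_node() then decides whether the node itself can
 * be released.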
5305 */ 5306 spin_lock_irq(&ndlp->lock); 5307 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5308 if (mbx_cmd == MBX_UNREG_LOGIN) 5309 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5310 spin_unlock_irq(&ndlp->lock); 5311 lpfc_nlp_put(ndlp); 5312 lpfc_drop_node(ndlp->vport, ndlp); 5313 } 5314 5315 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 5316 } 5317 5318 /** 5319 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 5320 * @phba: pointer to lpfc hba data structure. 5321 * @cmdiocb: pointer to lpfc command iocb data structure. 5322 * @rspiocb: pointer to lpfc response iocb data structure. 5323 * 5324 * This routine is the completion callback function for ELS Response IOCB 5325 * command. In normal case, this callback function just properly sets the 5326 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference 5327 * field in the command IOCB is not NULL, the referred mailbox command will 5328 * be send out, and then invokes the lpfc_els_free_iocb() routine to release 5329 * the IOCB. 5330 **/ 5331 static void 5332 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5333 struct lpfc_iocbq *rspiocb) 5334 { 5335 struct lpfc_nodelist *ndlp = cmdiocb->ndlp; 5336 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 5337 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 5338 IOCB_t *irsp; 5339 LPFC_MBOXQ_t *mbox = NULL; 5340 u32 ulp_status, ulp_word4, tmo, did, iotag; 5341 5342 if (!vport) { 5343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5344 "3177 ELS response failed\n"); 5345 goto out; 5346 } 5347 if (cmdiocb->context_un.mbox) 5348 mbox = cmdiocb->context_un.mbox; 5349 5350 ulp_status = get_job_ulpstatus(phba, rspiocb); 5351 ulp_word4 = get_job_word4(phba, rspiocb); 5352 did = get_job_els_rsp64_did(phba, cmdiocb); 5353 5354 if (phba->sli_rev == LPFC_SLI_REV4) { 5355 tmo = get_wqe_tmo(cmdiocb); 5356 iotag = get_wqe_reqtag(cmdiocb); 5357 } else { 5358 irsp = &rspiocb->iocb; 5359 tmo = irsp->ulpTimeout; 5360 iotag = irsp->ulpIoTag; 5361 } 5362 5363 /* Check to see if link went down during discovery */ 5364 if (!ndlp || lpfc_els_chk_latt(vport)) { 5365 if (mbox) 5366 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5367 goto out; 5368 } 5369 5370 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5371 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5372 ulp_status, ulp_word4, did); 5373 /* ELS response tag <ulpIoTag> completes */ 5374 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5375 "0110 ELS response tag x%x completes " 5376 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", 5377 iotag, ulp_status, ulp_word4, tmo, 5378 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5379 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); 5380 if (mbox) { 5381 if (ulp_status == 0 5382 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5383 if (!lpfc_unreg_rpi(vport, ndlp) && 5384 !test_bit(FC_PT2PT, &vport->fc_flag)) { 5385 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5386 ndlp->nlp_state == 5387 NLP_STE_REG_LOGIN_ISSUE) { 5388 lpfc_printf_vlog(vport, KERN_INFO, 5389 LOG_DISCOVERY, 5390 "0314 PLOGI recov " 5391 "DID x%x " 5392 "Data: x%x x%x x%x\n", 5393 ndlp->nlp_DID, 5394 ndlp->nlp_state, 5395 ndlp->nlp_rpi, 5396 ndlp->nlp_flag); 5397 goto out_free_mbox; 5398 } 5399 } 5400 5401 /* Increment reference count to ndlp to hold the 5402 * reference to ndlp for the callback function. 
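 * lpfc_nlp_get() returns NULL if the node is already being released; in
 * that case the mailbox is cleaned up without being issued.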
5403 */ 5404 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5405 if (!mbox->ctx_ndlp) 5406 goto out_free_mbox; 5407 5408 mbox->vport = vport; 5409 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5410 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5411 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5412 } 5413 else { 5414 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5415 ndlp->nlp_prev_state = ndlp->nlp_state; 5416 lpfc_nlp_set_state(vport, ndlp, 5417 NLP_STE_REG_LOGIN_ISSUE); 5418 } 5419 5420 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5421 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5422 != MBX_NOT_FINISHED) 5423 goto out; 5424 5425 /* Decrement the ndlp reference count we 5426 * set for this failed mailbox command. 5427 */ 5428 lpfc_nlp_put(ndlp); 5429 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5430 5431 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5432 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5433 "0138 ELS rsp: Cannot issue reg_login for x%x " 5434 "Data: x%x x%x x%x\n", 5435 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5436 ndlp->nlp_rpi); 5437 } 5438 out_free_mbox: 5439 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 5440 } 5441 out: 5442 if (ndlp && shost) { 5443 spin_lock_irq(&ndlp->lock); 5444 if (mbox) 5445 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5446 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5447 spin_unlock_irq(&ndlp->lock); 5448 } 5449 5450 /* An SLI4 NPIV instance wants to drop the node at this point under 5451 * these conditions and release the RPI. 5452 */ 5453 if (phba->sli_rev == LPFC_SLI_REV4 && 5454 vport && vport->port_type == LPFC_NPIV_PORT && 5455 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5456 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 5457 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5458 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 5459 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5460 spin_lock_irq(&ndlp->lock); 5461 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5462 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5463 spin_unlock_irq(&ndlp->lock); 5464 } 5465 lpfc_drop_node(vport, ndlp); 5466 } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && 5467 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && 5468 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { 5469 /* Drop ndlp if there is no planned or outstanding 5470 * issued PRLI. 5471 * 5472 * In cases when the ndlp is acting as both an initiator 5473 * and target function, let our issued PRLI determine 5474 * the final ndlp kref drop. 5475 */ 5476 lpfc_drop_node(vport, ndlp); 5477 } 5478 } 5479 5480 /* Release the originating I/O reference. */ 5481 lpfc_els_free_iocb(phba, cmdiocb); 5482 lpfc_nlp_put(ndlp); 5483 return; 5484 } 5485 5486 /** 5487 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5488 * @vport: pointer to a host virtual N_Port data structure. 5489 * @flag: the els command code to be accepted. 5490 * @oldiocb: pointer to the original lpfc command iocb data structure. 5491 * @ndlp: pointer to a node-list data structure. 5492 * @mbox: pointer to the driver internal queue element for mailbox command. 5493 * 5494 * This routine prepares and issues an Accept (ACC) response IOCB 5495 * command. It uses the @flag to properly set up the IOCB field for the 5496 * specific ACC response command to be issued and invokes the 5497 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5498 * @mbox pointer is passed in, it will be put into the context_un.mbox 5499 * field of the IOCB for the completion callback function to issue the 5500 * mailbox command to the HBA later when callback is invoked. 
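 *
 * The @flag selects the ACC format: ELS_CMD_ACC issues a bare ACC,
 * ELS_CMD_FLOGI and ELS_CMD_PLOGI issue an ACC carrying service parameters,
 * ELS_CMD_PRLO issues a PRLO ACC, and ELS_CMD_RDF issues an RDF LS_ACC with
 * an LS Request Information descriptor; any other value causes the routine
 * to return failure.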
5501 * 5502 * Note that the ndlp reference count will be incremented by 1 for holding the 5503 * ndlp and the reference to ndlp will be stored into the ndlp field of 5504 * the IOCB for the completion callback function to the corresponding 5505 * response ELS IOCB command. 5506 * 5507 * Return code 5508 * 0 - Successfully issued acc response 5509 * 1 - Failed to issue acc response 5510 **/ 5511 int 5512 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5513 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5514 LPFC_MBOXQ_t *mbox) 5515 { 5516 struct lpfc_hba *phba = vport->phba; 5517 IOCB_t *icmd; 5518 IOCB_t *oldcmd; 5519 union lpfc_wqe128 *wqe; 5520 union lpfc_wqe128 *oldwqe = &oldiocb->wqe; 5521 struct lpfc_iocbq *elsiocb; 5522 uint8_t *pcmd; 5523 struct serv_parm *sp; 5524 uint16_t cmdsize; 5525 int rc; 5526 ELS_PKT *els_pkt_ptr; 5527 struct fc_els_rdf_resp *rdf_resp; 5528 5529 switch (flag) { 5530 case ELS_CMD_ACC: 5531 cmdsize = sizeof(uint32_t); 5532 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5533 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5534 if (!elsiocb) { 5535 spin_lock_irq(&ndlp->lock); 5536 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5537 spin_unlock_irq(&ndlp->lock); 5538 return 1; 5539 } 5540 5541 if (phba->sli_rev == LPFC_SLI_REV4) { 5542 wqe = &elsiocb->wqe; 5543 /* XRI / rx_id */ 5544 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5545 bf_get(wqe_ctxt_tag, 5546 &oldwqe->xmit_els_rsp.wqe_com)); 5547 5548 /* oxid */ 5549 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5550 bf_get(wqe_rcvoxid, 5551 &oldwqe->xmit_els_rsp.wqe_com)); 5552 } else { 5553 icmd = &elsiocb->iocb; 5554 oldcmd = &oldiocb->iocb; 5555 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5556 icmd->unsli3.rcvsli3.ox_id = 5557 oldcmd->unsli3.rcvsli3.ox_id; 5558 } 5559 5560 pcmd = elsiocb->cmd_dmabuf->virt; 5561 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5562 pcmd += sizeof(uint32_t); 5563 5564 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5565 "Issue ACC: did:x%x flg:x%x", 5566 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5567 break; 5568 case ELS_CMD_FLOGI: 5569 case ELS_CMD_PLOGI: 5570 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5571 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5572 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5573 if (!elsiocb) 5574 return 1; 5575 5576 if (phba->sli_rev == LPFC_SLI_REV4) { 5577 wqe = &elsiocb->wqe; 5578 /* XRI / rx_id */ 5579 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5580 bf_get(wqe_ctxt_tag, 5581 &oldwqe->xmit_els_rsp.wqe_com)); 5582 5583 /* oxid */ 5584 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5585 bf_get(wqe_rcvoxid, 5586 &oldwqe->xmit_els_rsp.wqe_com)); 5587 } else { 5588 icmd = &elsiocb->iocb; 5589 oldcmd = &oldiocb->iocb; 5590 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5591 icmd->unsli3.rcvsli3.ox_id = 5592 oldcmd->unsli3.rcvsli3.ox_id; 5593 } 5594 5595 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5596 5597 if (mbox) 5598 elsiocb->context_un.mbox = mbox; 5599 5600 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5601 pcmd += sizeof(uint32_t); 5602 sp = (struct serv_parm *)pcmd; 5603 5604 if (flag == ELS_CMD_FLOGI) { 5605 /* Copy the received service parameters back */ 5606 memcpy(sp, &phba->fc_fabparam, 5607 sizeof(struct serv_parm)); 5608 5609 /* Clear the F_Port bit */ 5610 sp->cmn.fPort = 0; 5611 5612 /* Mark all class service parameters as invalid */ 5613 sp->cls1.classValid = 0; 5614 sp->cls2.classValid = 0; 5615 sp->cls3.classValid = 0; 5616 sp->cls4.classValid = 0; 5617 5618 /* Copy our worldwide names */ 
5619 memcpy(&sp->portName, &vport->fc_sparam.portName, 5620 sizeof(struct lpfc_name)); 5621 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5622 sizeof(struct lpfc_name)); 5623 } else { 5624 memcpy(pcmd, &vport->fc_sparam, 5625 sizeof(struct serv_parm)); 5626 5627 sp->cmn.valid_vendor_ver_level = 0; 5628 memset(sp->un.vendorVersion, 0, 5629 sizeof(sp->un.vendorVersion)); 5630 sp->cmn.bbRcvSizeMsb &= 0xF; 5631 5632 /* If our firmware supports this feature, convey that 5633 * info to the target using the vendor specific field. 5634 */ 5635 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5636 sp->cmn.valid_vendor_ver_level = 1; 5637 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5638 sp->un.vv.flags = 5639 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5640 } 5641 } 5642 5643 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5644 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5645 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5646 break; 5647 case ELS_CMD_PRLO: 5648 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5649 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5650 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5651 if (!elsiocb) 5652 return 1; 5653 5654 if (phba->sli_rev == LPFC_SLI_REV4) { 5655 wqe = &elsiocb->wqe; 5656 /* XRI / rx_id */ 5657 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5658 bf_get(wqe_ctxt_tag, 5659 &oldwqe->xmit_els_rsp.wqe_com)); 5660 5661 /* oxid */ 5662 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5663 bf_get(wqe_rcvoxid, 5664 &oldwqe->xmit_els_rsp.wqe_com)); 5665 } else { 5666 icmd = &elsiocb->iocb; 5667 oldcmd = &oldiocb->iocb; 5668 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5669 icmd->unsli3.rcvsli3.ox_id = 5670 oldcmd->unsli3.rcvsli3.ox_id; 5671 } 5672 5673 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt; 5674 5675 memcpy(pcmd, oldiocb->cmd_dmabuf->virt, 5676 sizeof(uint32_t) + sizeof(PRLO)); 5677 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5678 els_pkt_ptr = (ELS_PKT *) pcmd; 5679 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5680 5681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5682 "Issue ACC PRLO: did:x%x flg:x%x", 5683 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5684 break; 5685 case ELS_CMD_RDF: 5686 cmdsize = sizeof(*rdf_resp); 5687 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5688 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5689 if (!elsiocb) 5690 return 1; 5691 5692 if (phba->sli_rev == LPFC_SLI_REV4) { 5693 wqe = &elsiocb->wqe; 5694 /* XRI / rx_id */ 5695 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 5696 bf_get(wqe_ctxt_tag, 5697 &oldwqe->xmit_els_rsp.wqe_com)); 5698 5699 /* oxid */ 5700 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5701 bf_get(wqe_rcvoxid, 5702 &oldwqe->xmit_els_rsp.wqe_com)); 5703 } else { 5704 icmd = &elsiocb->iocb; 5705 oldcmd = &oldiocb->iocb; 5706 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5707 icmd->unsli3.rcvsli3.ox_id = 5708 oldcmd->unsli3.rcvsli3.ox_id; 5709 } 5710 5711 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt; 5712 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5713 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5714 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5715 5716 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5717 rdf_resp->desc_list_len = cpu_to_be32(12); 5718 5719 /* FC-LS-5 specifies LS REQ Information descriptor */ 5720 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5721 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5722 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5723 break; 5724 default: 5725 return 1; 5726 } 5727 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5728 spin_lock_irq(&ndlp->lock); 5729 if 
(!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
5730 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
5731 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5732 spin_unlock_irq(&ndlp->lock);
5733 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
5734 } else {
5735 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5736 }
5737
5738 phba->fc_stat.elsXmitACC++;
5739 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5740 if (!elsiocb->ndlp) {
5741 lpfc_els_free_iocb(phba, elsiocb);
5742 return 1;
5743 }
5744
5745 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5746 if (rc == IOCB_ERROR) {
5747 lpfc_els_free_iocb(phba, elsiocb);
5748 lpfc_nlp_put(ndlp);
5749 return 1;
5750 }
5751
5752 /* Xmit ELS ACC response tag <ulpIoTag> */
5753 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5754 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
5755 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5756 "RPI: x%x, fc_flag x%lx refcnt %d\n",
5757 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5758 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5759 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
5760 return 0;
5761 }
5762
5763 /**
5764 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
5765 * @vport: pointer to a virtual N_Port data structure.
5766 * @rejectError: reject response to issue
5767 * @oldiocb: pointer to the original lpfc command iocb data structure.
5768 * @ndlp: pointer to a node-list data structure.
5769 * @mbox: pointer to the driver internal queue element for mailbox command.
5770 *
5771 * This routine prepares and issues a Reject (RJT) response IOCB
5772 * command. If a @mbox pointer is passed in, it will be put into the
5773 * context_un.mbox field of the IOCB for the completion callback function
5774 * to issue to the HBA later.
5775 *
5776 * Note that the ndlp reference count will be incremented by 1 for holding the
5777 * ndlp and the reference to ndlp will be stored into the ndlp field of
5778 * the IOCB for the completion callback function to the reject response
5779 * ELS IOCB command.
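 *
 * The @rejectError word carries the LS_RJT reason code and reason code
 * explanation returned to the remote port; callers typically build it from
 * the lsRjtError word of a struct ls_rjt.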
5780 * 5781 * Return code 5782 * 0 - Successfully issued reject response 5783 * 1 - Failed to issue reject response 5784 **/ 5785 int 5786 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5787 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5788 LPFC_MBOXQ_t *mbox) 5789 { 5790 int rc; 5791 struct lpfc_hba *phba = vport->phba; 5792 IOCB_t *icmd; 5793 IOCB_t *oldcmd; 5794 union lpfc_wqe128 *wqe; 5795 struct lpfc_iocbq *elsiocb; 5796 uint8_t *pcmd; 5797 uint16_t cmdsize; 5798 5799 cmdsize = 2 * sizeof(uint32_t); 5800 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5801 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5802 if (!elsiocb) 5803 return 1; 5804 5805 if (phba->sli_rev == LPFC_SLI_REV4) { 5806 wqe = &elsiocb->wqe; 5807 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5808 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 5809 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5810 get_job_rcvoxid(phba, oldiocb)); 5811 } else { 5812 icmd = &elsiocb->iocb; 5813 oldcmd = &oldiocb->iocb; 5814 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5815 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5816 } 5817 5818 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 5819 5820 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5821 pcmd += sizeof(uint32_t); 5822 *((uint32_t *) (pcmd)) = rejectError; 5823 5824 if (mbox) 5825 elsiocb->context_un.mbox = mbox; 5826 5827 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5828 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5829 "0129 Xmit ELS RJT x%x response tag x%x " 5830 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5831 "rpi x%x\n", 5832 rejectError, elsiocb->iotag, 5833 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, 5834 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5835 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5836 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5837 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5838 5839 phba->fc_stat.elsXmitLSRJT++; 5840 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5841 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5842 if (!elsiocb->ndlp) { 5843 lpfc_els_free_iocb(phba, elsiocb); 5844 return 1; 5845 } 5846 5847 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5848 * node's assigned RPI gets released provided this node is not already 5849 * registered with the transport. 5850 */ 5851 if (phba->sli_rev == LPFC_SLI_REV4 && 5852 vport->port_type == LPFC_NPIV_PORT && 5853 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5854 spin_lock_irq(&ndlp->lock); 5855 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5856 spin_unlock_irq(&ndlp->lock); 5857 } 5858 5859 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5860 if (rc == IOCB_ERROR) { 5861 lpfc_els_free_iocb(phba, elsiocb); 5862 lpfc_nlp_put(ndlp); 5863 return 1; 5864 } 5865 5866 return 0; 5867 } 5868 5869 /** 5870 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5871 * @vport: pointer to a host virtual N_Port data structure. 5872 * @cmdiocb: pointer to the original lpfc command iocb data structure. 5873 * @ndlp: NPort to where rsp is directed 5874 * 5875 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5876 * this N_Port's support of hardware signals in its Congestion 5877 * Capabilities Descriptor. 
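 *
 * When the link is LDS capable (lpfc_link_is_lds_capable()), a link fault
 * descriptor (struct fc_diag_lnkflt_desc) is appended after the congestion
 * signaling descriptor.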
5878 * 5879 * Return code 5880 * 0 - Successfully issued edc rsp command 5881 * 1 - Failed to issue edc rsp command 5882 **/ 5883 static int 5884 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5885 struct lpfc_nodelist *ndlp) 5886 { 5887 struct lpfc_hba *phba = vport->phba; 5888 struct fc_els_edc_resp *edc_rsp; 5889 struct fc_tlv_desc *tlv; 5890 struct lpfc_iocbq *elsiocb; 5891 IOCB_t *icmd, *cmd; 5892 union lpfc_wqe128 *wqe; 5893 u32 cgn_desc_size, lft_desc_size; 5894 u16 cmdsize; 5895 uint8_t *pcmd; 5896 int rc; 5897 5898 cmdsize = sizeof(struct fc_els_edc_resp); 5899 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc); 5900 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ? 5901 sizeof(struct fc_diag_lnkflt_desc) : 0; 5902 cmdsize += cgn_desc_size + lft_desc_size; 5903 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5904 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5905 if (!elsiocb) 5906 return 1; 5907 5908 if (phba->sli_rev == LPFC_SLI_REV4) { 5909 wqe = &elsiocb->wqe; 5910 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 5911 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */ 5912 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 5913 get_job_rcvoxid(phba, cmdiocb)); 5914 } else { 5915 icmd = &elsiocb->iocb; 5916 cmd = &cmdiocb->iocb; 5917 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5918 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5919 } 5920 5921 pcmd = elsiocb->cmd_dmabuf->virt; 5922 memset(pcmd, 0, cmdsize); 5923 5924 edc_rsp = (struct fc_els_edc_resp *)pcmd; 5925 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC; 5926 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) + 5927 cgn_desc_size + lft_desc_size); 5928 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5929 edc_rsp->lsri.desc_len = cpu_to_be32( 5930 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5931 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC; 5932 tlv = edc_rsp->desc; 5933 lpfc_format_edc_cgn_desc(phba, tlv); 5934 tlv = fc_tlv_next_desc(tlv); 5935 if (lft_desc_size) 5936 lpfc_format_edc_lft_desc(phba, tlv); 5937 5938 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5939 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5940 ndlp->nlp_DID, ndlp->nlp_flag, 5941 kref_read(&ndlp->kref)); 5942 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 5943 5944 phba->fc_stat.elsXmitACC++; 5945 elsiocb->ndlp = lpfc_nlp_get(ndlp); 5946 if (!elsiocb->ndlp) { 5947 lpfc_els_free_iocb(phba, elsiocb); 5948 return 1; 5949 } 5950 5951 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5952 if (rc == IOCB_ERROR) { 5953 lpfc_els_free_iocb(phba, elsiocb); 5954 lpfc_nlp_put(ndlp); 5955 return 1; 5956 } 5957 5958 /* Xmit ELS ACC response tag <ulpIoTag> */ 5959 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5960 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5961 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5962 "RPI: x%x, fc_flag x%lx\n", 5963 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5964 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5965 ndlp->nlp_rpi, vport->fc_flag); 5966 5967 return 0; 5968 } 5969 5970 /** 5971 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5972 * @vport: pointer to a virtual N_Port data structure. 5973 * @oldiocb: pointer to the original lpfc command iocb data structure. 5974 * @ndlp: pointer to a node-list data structure. 5975 * 5976 * This routine prepares and issues an Accept (ACC) response to Address 5977 * Discover (ADISC) ELS command. 
It simply prepares the payload of the IOCB 5978 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5979 * 5980 * Note that the ndlp reference count will be incremented by 1 for holding the 5981 * ndlp and the reference to ndlp will be stored into the ndlp field of 5982 * the IOCB for the completion callback function to the ADISC Accept response 5983 * ELS IOCB command. 5984 * 5985 * Return code 5986 * 0 - Successfully issued acc adisc response 5987 * 1 - Failed to issue adisc acc response 5988 **/ 5989 int 5990 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5991 struct lpfc_nodelist *ndlp) 5992 { 5993 struct lpfc_hba *phba = vport->phba; 5994 ADISC *ap; 5995 IOCB_t *icmd, *oldcmd; 5996 union lpfc_wqe128 *wqe; 5997 struct lpfc_iocbq *elsiocb; 5998 uint8_t *pcmd; 5999 uint16_t cmdsize; 6000 int rc; 6001 u32 ulp_context; 6002 6003 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 6004 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6005 ndlp->nlp_DID, ELS_CMD_ACC); 6006 if (!elsiocb) 6007 return 1; 6008 6009 if (phba->sli_rev == LPFC_SLI_REV4) { 6010 wqe = &elsiocb->wqe; 6011 /* XRI / rx_id */ 6012 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6013 get_job_ulpcontext(phba, oldiocb)); 6014 ulp_context = get_job_ulpcontext(phba, elsiocb); 6015 /* oxid */ 6016 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6017 get_job_rcvoxid(phba, oldiocb)); 6018 } else { 6019 icmd = &elsiocb->iocb; 6020 oldcmd = &oldiocb->iocb; 6021 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6022 ulp_context = elsiocb->iocb.ulpContext; 6023 icmd->unsli3.rcvsli3.ox_id = 6024 oldcmd->unsli3.rcvsli3.ox_id; 6025 } 6026 6027 /* Xmit ADISC ACC response tag <ulpIoTag> */ 6028 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6029 "0130 Xmit ADISC ACC response iotag x%x xri: " 6030 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 6031 elsiocb->iotag, ulp_context, 6032 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6033 ndlp->nlp_rpi); 6034 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6035 6036 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6037 pcmd += sizeof(uint32_t); 6038 6039 ap = (ADISC *) (pcmd); 6040 ap->hardAL_PA = phba->fc_pref_ALPA; 6041 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6042 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6043 ap->DID = be32_to_cpu(vport->fc_myDID); 6044 6045 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6046 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 6047 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6048 6049 phba->fc_stat.elsXmitACC++; 6050 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6051 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6052 if (!elsiocb->ndlp) { 6053 lpfc_els_free_iocb(phba, elsiocb); 6054 return 1; 6055 } 6056 6057 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6058 if (rc == IOCB_ERROR) { 6059 lpfc_els_free_iocb(phba, elsiocb); 6060 lpfc_nlp_put(ndlp); 6061 return 1; 6062 } 6063 6064 return 0; 6065 } 6066 6067 /** 6068 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 6069 * @vport: pointer to a virtual N_Port data structure. 6070 * @oldiocb: pointer to the original lpfc command iocb data structure. 6071 * @ndlp: pointer to a node-list data structure. 6072 * 6073 * This routine prepares and issues an Accept (ACC) response to Process 6074 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 6075 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 
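 *
 * The FC-4 type in word 1 of the received PRLI payload determines whether
 * an FCP PRLI accept or an NVMe PRLI accept is built; any other FC-4 type
 * causes the routine to return failure.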
6076 * 6077 * Note that the ndlp reference count will be incremented by 1 for holding the 6078 * ndlp and the reference to ndlp will be stored into the ndlp field of 6079 * the IOCB for the completion callback function to the PRLI Accept response 6080 * ELS IOCB command. 6081 * 6082 * Return code 6083 * 0 - Successfully issued acc prli response 6084 * 1 - Failed to issue acc prli response 6085 **/ 6086 int 6087 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 6088 struct lpfc_nodelist *ndlp) 6089 { 6090 struct lpfc_hba *phba = vport->phba; 6091 PRLI *npr; 6092 struct lpfc_nvme_prli *npr_nvme; 6093 lpfc_vpd_t *vpd; 6094 IOCB_t *icmd; 6095 IOCB_t *oldcmd; 6096 union lpfc_wqe128 *wqe; 6097 struct lpfc_iocbq *elsiocb; 6098 uint8_t *pcmd; 6099 uint16_t cmdsize; 6100 uint32_t prli_fc4_req, *req_payload; 6101 struct lpfc_dmabuf *req_buf; 6102 int rc; 6103 u32 elsrspcmd, ulp_context; 6104 6105 /* Need the incoming PRLI payload to determine if the ACC is for an 6106 * FC4 or NVME PRLI type. The PRLI type is at word 1. 6107 */ 6108 req_buf = oldiocb->cmd_dmabuf; 6109 req_payload = (((uint32_t *)req_buf->virt) + 1); 6110 6111 /* PRLI type payload is at byte 3 for FCP or NVME. */ 6112 prli_fc4_req = be32_to_cpu(*req_payload); 6113 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 6114 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6115 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 6116 prli_fc4_req, *((uint32_t *)req_payload)); 6117 6118 if (prli_fc4_req == PRLI_FCP_TYPE) { 6119 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 6120 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 6121 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6122 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 6123 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 6124 } else { 6125 return 1; 6126 } 6127 6128 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6129 ndlp->nlp_DID, elsrspcmd); 6130 if (!elsiocb) 6131 return 1; 6132 6133 if (phba->sli_rev == LPFC_SLI_REV4) { 6134 wqe = &elsiocb->wqe; 6135 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6136 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6137 ulp_context = get_job_ulpcontext(phba, elsiocb); 6138 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6139 get_job_rcvoxid(phba, oldiocb)); 6140 } else { 6141 icmd = &elsiocb->iocb; 6142 oldcmd = &oldiocb->iocb; 6143 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6144 ulp_context = elsiocb->iocb.ulpContext; 6145 icmd->unsli3.rcvsli3.ox_id = 6146 oldcmd->unsli3.rcvsli3.ox_id; 6147 } 6148 6149 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6150 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6151 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 6152 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6153 elsiocb->iotag, ulp_context, 6154 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6155 ndlp->nlp_rpi); 6156 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6157 memset(pcmd, 0, cmdsize); 6158 6159 *((uint32_t *)(pcmd)) = elsrspcmd; 6160 pcmd += sizeof(uint32_t); 6161 6162 /* For PRLI, remainder of payload is PRLI parameter page */ 6163 vpd = &phba->vpd; 6164 6165 if (prli_fc4_req == PRLI_FCP_TYPE) { 6166 /* 6167 * If the remote port is a target and our firmware version 6168 * is 3.20 or later, set the following bits for FC-TAPE 6169 * support. 
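 * The bits set are ConfmComplAllowed, Retry and TaskRetryIdReq, which
 * together advertise FC-TAPE (FCP-2 error recovery) support.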
6170 */ 6171 npr = (PRLI *) pcmd; 6172 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 6173 (vpd->rev.feaLevelHigh >= 0x02)) { 6174 npr->ConfmComplAllowed = 1; 6175 npr->Retry = 1; 6176 npr->TaskRetryIdReq = 1; 6177 } 6178 npr->acceptRspCode = PRLI_REQ_EXECUTED; 6179 6180 /* Set image pair for complementary pairs only. */ 6181 if (ndlp->nlp_type & NLP_FCP_TARGET) 6182 npr->estabImagePair = 1; 6183 else 6184 npr->estabImagePair = 0; 6185 npr->readXferRdyDis = 1; 6186 npr->ConfmComplAllowed = 1; 6187 npr->prliType = PRLI_FCP_TYPE; 6188 npr->initiatorFunc = 1; 6189 6190 /* Xmit PRLI ACC response tag <ulpIoTag> */ 6191 lpfc_printf_vlog(vport, KERN_INFO, 6192 LOG_ELS | LOG_NODE | LOG_DISCOVERY, 6193 "6014 FCP issue PRLI ACC imgpair %d " 6194 "retry %d task %d\n", 6195 npr->estabImagePair, 6196 npr->Retry, npr->TaskRetryIdReq); 6197 6198 } else if (prli_fc4_req == PRLI_NVME_TYPE) { 6199 /* Respond with an NVME PRLI Type */ 6200 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 6201 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 6202 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 6203 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 6204 if (phba->nvmet_support) { 6205 bf_set(prli_tgt, npr_nvme, 1); 6206 bf_set(prli_disc, npr_nvme, 1); 6207 if (phba->cfg_nvme_enable_fb) { 6208 bf_set(prli_fba, npr_nvme, 1); 6209 6210 /* TBD. Target mode needs to post buffers 6211 * that support the configured first burst 6212 * byte size. 6213 */ 6214 bf_set(prli_fb_sz, npr_nvme, 6215 phba->cfg_nvmet_fb_size); 6216 } 6217 } else { 6218 bf_set(prli_init, npr_nvme, 1); 6219 } 6220 6221 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 6222 "6015 NVME issue PRLI ACC word1 x%08x " 6223 "word4 x%08x word5 x%08x flag x%x, " 6224 "fcp_info x%x nlp_type x%x\n", 6225 npr_nvme->word1, npr_nvme->word4, 6226 npr_nvme->word5, ndlp->nlp_flag, 6227 ndlp->nlp_fcp_info, ndlp->nlp_type); 6228 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 6229 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 6230 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 6231 } else 6232 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6233 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 6234 prli_fc4_req, ndlp->nlp_fc4_type, 6235 ndlp->nlp_DID); 6236 6237 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6238 "Issue ACC PRLI: did:x%x flg:x%x", 6239 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6240 6241 phba->fc_stat.elsXmitACC++; 6242 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6243 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6244 if (!elsiocb->ndlp) { 6245 lpfc_els_free_iocb(phba, elsiocb); 6246 return 1; 6247 } 6248 6249 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6250 if (rc == IOCB_ERROR) { 6251 lpfc_els_free_iocb(phba, elsiocb); 6252 lpfc_nlp_put(ndlp); 6253 return 1; 6254 } 6255 6256 return 0; 6257 } 6258 6259 /** 6260 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 6261 * @vport: pointer to a virtual N_Port data structure. 6262 * @format: rnid command format. 6263 * @oldiocb: pointer to the original lpfc command iocb data structure. 6264 * @ndlp: pointer to a node-list data structure. 6265 * 6266 * This routine issues a Request Node Identification Data (RNID) Accept 6267 * (ACC) response. It constructs the RNID ACC response command according to 6268 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 6269 * issue the response. 
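 *
 * With @format 0 only the common identification data (the local port and
 * node names) is returned; with RNID_TOPOLOGY_DISC a topology discovery
 * specific data block is appended as well. Any other format returns
 * zero-length common and specific data.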
6270 * 6271 * Note that the ndlp reference count will be incremented by 1 for holding the 6272 * ndlp and the reference to ndlp will be stored into the ndlp field of 6273 * the IOCB for the completion callback function. 6274 * 6275 * Return code 6276 * 0 - Successfully issued acc rnid response 6277 * 1 - Failed to issue acc rnid response 6278 **/ 6279 static int 6280 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 6281 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6282 { 6283 struct lpfc_hba *phba = vport->phba; 6284 RNID *rn; 6285 IOCB_t *icmd, *oldcmd; 6286 union lpfc_wqe128 *wqe; 6287 struct lpfc_iocbq *elsiocb; 6288 uint8_t *pcmd; 6289 uint16_t cmdsize; 6290 int rc; 6291 u32 ulp_context; 6292 6293 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 6294 + (2 * sizeof(struct lpfc_name)); 6295 if (format) 6296 cmdsize += sizeof(RNID_TOP_DISC); 6297 6298 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6299 ndlp->nlp_DID, ELS_CMD_ACC); 6300 if (!elsiocb) 6301 return 1; 6302 6303 if (phba->sli_rev == LPFC_SLI_REV4) { 6304 wqe = &elsiocb->wqe; 6305 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 6306 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */ 6307 ulp_context = get_job_ulpcontext(phba, elsiocb); 6308 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6309 get_job_rcvoxid(phba, oldiocb)); 6310 } else { 6311 icmd = &elsiocb->iocb; 6312 oldcmd = &oldiocb->iocb; 6313 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6314 ulp_context = elsiocb->iocb.ulpContext; 6315 icmd->unsli3.rcvsli3.ox_id = 6316 oldcmd->unsli3.rcvsli3.ox_id; 6317 } 6318 6319 /* Xmit RNID ACC response tag <ulpIoTag> */ 6320 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6321 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6322 elsiocb->iotag, ulp_context); 6323 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 6324 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6325 pcmd += sizeof(uint32_t); 6326 6327 memset(pcmd, 0, sizeof(RNID)); 6328 rn = (RNID *) (pcmd); 6329 rn->Format = format; 6330 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6331 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6332 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6333 switch (format) { 6334 case 0: 6335 rn->SpecificLen = 0; 6336 break; 6337 case RNID_TOPOLOGY_DISC: 6338 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6339 memcpy(&rn->un.topologyDisc.portName, 6340 &vport->fc_portname, sizeof(struct lpfc_name)); 6341 rn->un.topologyDisc.unitType = RNID_HBA; 6342 rn->un.topologyDisc.physPort = 0; 6343 rn->un.topologyDisc.attachedNodes = 0; 6344 break; 6345 default: 6346 rn->CommonLen = 0; 6347 rn->SpecificLen = 0; 6348 break; 6349 } 6350 6351 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6352 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6353 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6354 6355 phba->fc_stat.elsXmitACC++; 6356 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 6357 elsiocb->ndlp = lpfc_nlp_get(ndlp); 6358 if (!elsiocb->ndlp) { 6359 lpfc_els_free_iocb(phba, elsiocb); 6360 return 1; 6361 } 6362 6363 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6364 if (rc == IOCB_ERROR) { 6365 lpfc_els_free_iocb(phba, elsiocb); 6366 lpfc_nlp_put(ndlp); 6367 return 1; 6368 } 6369 6370 return 0; 6371 } 6372 6373 /** 6374 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6375 * @vport: pointer to a virtual N_Port data structure. 6376 * @iocb: pointer to the lpfc command iocb data structure. 6377 * @ndlp: pointer to a node-list data structure. 
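 *
 * This routine handles a received Reinstate Recovery Qualifier (RRQ). It
 * decodes the RRQ payload, selects the local exchange identifier (the OXID
 * when this port originated the exchange, otherwise the RXID), looks up the
 * matching active RRQ for the @ndlp and, if found, clears it.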
6378 * 6379 * Return 6380 **/ 6381 static void 6382 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6383 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6384 { 6385 struct lpfc_hba *phba = vport->phba; 6386 uint8_t *pcmd; 6387 struct RRQ *rrq; 6388 uint16_t rxid; 6389 uint16_t xri; 6390 struct lpfc_node_rrq *prrq; 6391 6392 6393 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt; 6394 pcmd += sizeof(uint32_t); 6395 rrq = (struct RRQ *)pcmd; 6396 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6397 rxid = bf_get(rrq_rxid, rrq); 6398 6399 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6400 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6401 " x%x x%x\n", 6402 be32_to_cpu(bf_get(rrq_did, rrq)), 6403 bf_get(rrq_oxid, rrq), 6404 rxid, 6405 get_wqe_reqtag(iocb), 6406 get_job_ulpcontext(phba, iocb)); 6407 6408 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6409 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6410 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6411 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6412 xri = bf_get(rrq_oxid, rrq); 6413 else 6414 xri = rxid; 6415 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6416 if (prrq) 6417 lpfc_clr_rrq_active(phba, xri, prrq); 6418 return; 6419 } 6420 6421 /** 6422 * lpfc_els_rsp_echo_acc - Issue echo acc response 6423 * @vport: pointer to a virtual N_Port data structure. 6424 * @data: pointer to echo data to return in the accept. 6425 * @oldiocb: pointer to the original lpfc command iocb data structure. 6426 * @ndlp: pointer to a node-list data structure. 6427 * 6428 * Return code 6429 * 0 - Successfully issued acc echo response 6430 * 1 - Failed to issue acc echo response 6431 **/ 6432 static int 6433 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6434 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6435 { 6436 struct lpfc_hba *phba = vport->phba; 6437 IOCB_t *icmd, *oldcmd; 6438 union lpfc_wqe128 *wqe; 6439 struct lpfc_iocbq *elsiocb; 6440 uint8_t *pcmd; 6441 uint16_t cmdsize; 6442 int rc; 6443 u32 ulp_context; 6444 6445 if (phba->sli_rev == LPFC_SLI_REV4) 6446 cmdsize = oldiocb->wcqe_cmpl.total_data_placed; 6447 else 6448 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6449 6450 /* The accumulated length can exceed the BPL_SIZE. 
For
6451 * now, use this as the limit
6452 */
6453 if (cmdsize > LPFC_BPL_SIZE)
6454 cmdsize = LPFC_BPL_SIZE;
6455 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6456 ndlp->nlp_DID, ELS_CMD_ACC);
6457 if (!elsiocb)
6458 return 1;
6459
6460 if (phba->sli_rev == LPFC_SLI_REV4) {
6461 wqe = &elsiocb->wqe;
6462 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6463 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6464 ulp_context = get_job_ulpcontext(phba, elsiocb);
6465 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6466 get_job_rcvoxid(phba, oldiocb));
6467 } else {
6468 icmd = &elsiocb->iocb;
6469 oldcmd = &oldiocb->iocb;
6470 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6471 ulp_context = elsiocb->iocb.ulpContext;
6472 icmd->unsli3.rcvsli3.ox_id =
6473 oldcmd->unsli3.rcvsli3.ox_id;
6474 }
6475
6476 /* Xmit ECHO ACC response tag <ulpIoTag> */
6477 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6478 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
6479 elsiocb->iotag, ulp_context);
6480 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6481 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6482 pcmd += sizeof(uint32_t);
6483 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
6484
6485 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6486 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
6487 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6488
6489 phba->fc_stat.elsXmitACC++;
6490 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6491 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6492 if (!elsiocb->ndlp) {
6493 lpfc_els_free_iocb(phba, elsiocb);
6494 return 1;
6495 }
6496
6497 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6498 if (rc == IOCB_ERROR) {
6499 lpfc_els_free_iocb(phba, elsiocb);
6500 lpfc_nlp_put(ndlp);
6501 return 1;
6502 }
6503
6504 return 0;
6505 }
6506
6507 /**
6508 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
6509 * @vport: pointer to a host virtual N_Port data structure.
6510 *
6511 * This routine issues Address Discover (ADISC) ELS commands to those
6512 * N_Ports of the @vport that are in node port recovery state and for which
6513 * ADISC has not yet been issued. Each time an ELS ADISC IOCB is issued by
6514 * invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery
6515 * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
6516 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
6517 * set in the @vport fc_flag and issuing of the remaining ADISC IOCBs is
6518 * deferred to be picked up later. On the other hand, if no ADISC IOCB was
6519 * issued after walking through all the ndlps of the @vport, the FC_NLP_MORE
6520 * bit is cleared in the @vport fc_flag, indicating that no more ADISCs
6521 * need to be sent.
6522 *
6523 * Return code
6524 * The number of N_Ports with ADISC issued.
6525 **/
6526 int
6527 lpfc_els_disc_adisc(struct lpfc_vport *vport)
6528 {
6529 struct lpfc_nodelist *ndlp, *next_ndlp;
6530 int sentadisc = 0;
6531
6532 /* go thru NPR nodes and issue any remaining ELS ADISCs */
6533 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6534
6535 if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
6536 !(ndlp->nlp_flag & NLP_NPR_ADISC))
6537 continue;
6538
6539 spin_lock_irq(&ndlp->lock);
6540 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
6541 spin_unlock_irq(&ndlp->lock);
6542
6543 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
6544 /* This node was marked for ADISC but was not picked
6545 * for discovery.
This is possible if the node was 6546 * missing in gidft response. 6547 * 6548 * At time of marking node for ADISC, we skipped unreg 6549 * from backend 6550 */ 6551 lpfc_nlp_unreg_node(vport, ndlp); 6552 lpfc_unreg_rpi(vport, ndlp); 6553 continue; 6554 } 6555 6556 ndlp->nlp_prev_state = ndlp->nlp_state; 6557 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6558 lpfc_issue_els_adisc(vport, ndlp, 0); 6559 sentadisc++; 6560 vport->num_disc_nodes++; 6561 if (vport->num_disc_nodes >= 6562 vport->cfg_discovery_threads) { 6563 set_bit(FC_NLP_MORE, &vport->fc_flag); 6564 break; 6565 } 6566 6567 } 6568 if (sentadisc == 0) 6569 clear_bit(FC_NLP_MORE, &vport->fc_flag); 6570 return sentadisc; 6571 } 6572 6573 /** 6574 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6575 * @vport: pointer to a host virtual N_Port data structure. 6576 * 6577 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6578 * which are in node port recovery state, with a @vport. Each time an ELS 6579 * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6580 * the per @vport number of discover count (num_disc_nodes) shall be 6581 * incremented. If the num_disc_nodes reaches a pre-configured threshold 6582 * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE 6583 * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for 6584 * later pick up. On the other hand, after walking through all the ndlps with 6585 * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag 6586 * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC 6587 * PLOGI need to be sent. 6588 * 6589 * Return code 6590 * The number of N_Ports with plogi issued. 6591 **/ 6592 int 6593 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6594 { 6595 struct lpfc_nodelist *ndlp, *next_ndlp; 6596 int sentplogi = 0; 6597 6598 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6599 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6600 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6601 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6602 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6603 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6604 ndlp->nlp_prev_state = ndlp->nlp_state; 6605 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6606 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6607 sentplogi++; 6608 vport->num_disc_nodes++; 6609 if (vport->num_disc_nodes >= 6610 vport->cfg_discovery_threads) { 6611 set_bit(FC_NLP_MORE, &vport->fc_flag); 6612 break; 6613 } 6614 } 6615 } 6616 6617 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6618 "6452 Discover PLOGI %d flag x%lx\n", 6619 sentplogi, vport->fc_flag); 6620 6621 if (sentplogi) 6622 lpfc_set_disctmo(vport); 6623 else 6624 clear_bit(FC_NLP_MORE, &vport->fc_flag); 6625 return sentplogi; 6626 } 6627 6628 static uint32_t 6629 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6630 uint32_t word0) 6631 { 6632 6633 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6634 desc->payload.els_req = word0; 6635 desc->length = cpu_to_be32(sizeof(desc->payload)); 6636 6637 return sizeof(struct fc_rdp_link_service_desc); 6638 } 6639 6640 static uint32_t 6641 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6642 uint8_t *page_a0, uint8_t *page_a2) 6643 { 6644 uint16_t wavelength; 6645 uint16_t temperature; 6646 uint16_t rx_power; 6647 uint16_t tx_bias; 6648 uint16_t tx_power; 6649 uint16_t vcc; 6650 uint16_t flag = 0; 6651 struct 
sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6652 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6653 6654 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6655 6656 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6657 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6658 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6659 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6660 6661 if ((trasn_code_byte4->fc_sw_laser) || 6662 (trasn_code_byte5->fc_sw_laser_sl) || 6663 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6664 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6665 } else if (trasn_code_byte4->fc_lw_laser) { 6666 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6667 page_a0[SSF_WAVELENGTH_B0]; 6668 if (wavelength == SFP_WAVELENGTH_LC1310) 6669 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6670 if (wavelength == SFP_WAVELENGTH_LL1550) 6671 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6672 } 6673 /* check if its SFP+ */ 6674 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6675 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6676 << SFP_FLAG_CT_SHIFT; 6677 6678 /* check if its OPTICAL */ 6679 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 6680 SFP_FLAG_IS_OPTICAL_PORT : 0) 6681 << SFP_FLAG_IS_OPTICAL_SHIFT; 6682 6683 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6684 page_a2[SFF_TEMPERATURE_B0]); 6685 vcc = (page_a2[SFF_VCC_B1] << 8 | 6686 page_a2[SFF_VCC_B0]); 6687 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6688 page_a2[SFF_TXPOWER_B0]); 6689 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6690 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6691 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6692 page_a2[SFF_RXPOWER_B0]); 6693 desc->sfp_info.temperature = cpu_to_be16(temperature); 6694 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6695 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6696 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6697 desc->sfp_info.vcc = cpu_to_be16(vcc); 6698 6699 desc->sfp_info.flags = cpu_to_be16(flag); 6700 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6701 6702 return sizeof(struct fc_rdp_sfp_desc); 6703 } 6704 6705 static uint32_t 6706 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6707 READ_LNK_VAR *stat) 6708 { 6709 uint32_t type; 6710 6711 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6712 6713 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6714 6715 desc->info.port_type = cpu_to_be32(type); 6716 6717 desc->info.link_status.link_failure_cnt = 6718 cpu_to_be32(stat->linkFailureCnt); 6719 desc->info.link_status.loss_of_synch_cnt = 6720 cpu_to_be32(stat->lossSyncCnt); 6721 desc->info.link_status.loss_of_signal_cnt = 6722 cpu_to_be32(stat->lossSignalCnt); 6723 desc->info.link_status.primitive_seq_proto_err = 6724 cpu_to_be32(stat->primSeqErrCnt); 6725 desc->info.link_status.invalid_trans_word = 6726 cpu_to_be32(stat->invalidXmitWord); 6727 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6728 6729 desc->length = cpu_to_be32(sizeof(desc->info)); 6730 6731 return sizeof(struct fc_rdp_link_error_status_desc); 6732 } 6733 6734 static uint32_t 6735 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6736 struct lpfc_vport *vport) 6737 { 6738 uint32_t bbCredit; 6739 6740 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6741 6742 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6743 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6744 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6745 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6746 bbCredit = 
vport->phba->fc_fabparam.cmn.bbCreditLsb | 6747 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6748 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6749 } else { 6750 desc->bbc_info.attached_port_bbc = 0; 6751 } 6752 6753 desc->bbc_info.rtt = 0; 6754 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6755 6756 return sizeof(struct fc_rdp_bbc_desc); 6757 } 6758 6759 static uint32_t 6760 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6761 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6762 { 6763 uint32_t flags = 0; 6764 6765 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6766 6767 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6768 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6769 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6770 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6771 6772 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6773 flags |= RDP_OET_HIGH_ALARM; 6774 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6775 flags |= RDP_OET_LOW_ALARM; 6776 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6777 flags |= RDP_OET_HIGH_WARNING; 6778 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6779 flags |= RDP_OET_LOW_WARNING; 6780 6781 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6782 desc->oed_info.function_flags = cpu_to_be32(flags); 6783 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6784 return sizeof(struct fc_rdp_oed_sfp_desc); 6785 } 6786 6787 static uint32_t 6788 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6789 struct fc_rdp_oed_sfp_desc *desc, 6790 uint8_t *page_a2) 6791 { 6792 uint32_t flags = 0; 6793 6794 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6795 6796 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6797 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6798 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6799 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6800 6801 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6802 flags |= RDP_OET_HIGH_ALARM; 6803 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6804 flags |= RDP_OET_LOW_ALARM; 6805 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6806 flags |= RDP_OET_HIGH_WARNING; 6807 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6808 flags |= RDP_OET_LOW_WARNING; 6809 6810 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6811 desc->oed_info.function_flags = cpu_to_be32(flags); 6812 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6813 return sizeof(struct fc_rdp_oed_sfp_desc); 6814 } 6815 6816 static uint32_t 6817 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6818 struct fc_rdp_oed_sfp_desc *desc, 6819 uint8_t *page_a2) 6820 { 6821 uint32_t flags = 0; 6822 6823 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6824 6825 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6826 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6827 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6828 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6829 6830 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6831 flags |= RDP_OET_HIGH_ALARM; 6832 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6833 flags |= RDP_OET_LOW_ALARM; 6834 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6835 flags |= RDP_OET_HIGH_WARNING; 6836 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6837 flags |= RDP_OET_LOW_WARNING; 6838 6839 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6840 
desc->oed_info.function_flags = cpu_to_be32(flags); 6841 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6842 return sizeof(struct fc_rdp_oed_sfp_desc); 6843 } 6844 6845 static uint32_t 6846 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6847 struct fc_rdp_oed_sfp_desc *desc, 6848 uint8_t *page_a2) 6849 { 6850 uint32_t flags = 0; 6851 6852 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6853 6854 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6855 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6856 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6857 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6858 6859 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6860 flags |= RDP_OET_HIGH_ALARM; 6861 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6862 flags |= RDP_OET_LOW_ALARM; 6863 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6864 flags |= RDP_OET_HIGH_WARNING; 6865 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6866 flags |= RDP_OET_LOW_WARNING; 6867 6868 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6869 desc->oed_info.function_flags = cpu_to_be32(flags); 6870 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6871 return sizeof(struct fc_rdp_oed_sfp_desc); 6872 } 6873 6874 6875 static uint32_t 6876 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6877 struct fc_rdp_oed_sfp_desc *desc, 6878 uint8_t *page_a2) 6879 { 6880 uint32_t flags = 0; 6881 6882 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6883 6884 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6885 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6886 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6887 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6888 6889 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6890 flags |= RDP_OET_HIGH_ALARM; 6891 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6892 flags |= RDP_OET_LOW_ALARM; 6893 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6894 flags |= RDP_OET_HIGH_WARNING; 6895 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6896 flags |= RDP_OET_LOW_WARNING; 6897 6898 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6899 desc->oed_info.function_flags = cpu_to_be32(flags); 6900 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6901 return sizeof(struct fc_rdp_oed_sfp_desc); 6902 } 6903 6904 static uint32_t 6905 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6906 uint8_t *page_a0, struct lpfc_vport *vport) 6907 { 6908 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6909 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6910 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6911 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6912 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6913 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6914 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6915 return sizeof(struct fc_rdp_opd_sfp_desc); 6916 } 6917 6918 static uint32_t 6919 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6920 { 6921 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6922 return 0; 6923 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6924 6925 desc->info.CorrectedBlocks = 6926 cpu_to_be32(stat->fecCorrBlkCount); 6927 desc->info.UncorrectableBlocks = 6928 cpu_to_be32(stat->fecUncorrBlkCount); 6929 6930 desc->length = cpu_to_be32(sizeof(desc->info)); 6931 6932 return sizeof(struct fc_fec_rdp_desc); 
6933 } 6934 6935 static uint32_t 6936 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6937 { 6938 uint16_t rdp_cap = 0; 6939 uint16_t rdp_speed; 6940 6941 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6942 6943 switch (phba->fc_linkspeed) { 6944 case LPFC_LINK_SPEED_1GHZ: 6945 rdp_speed = RDP_PS_1GB; 6946 break; 6947 case LPFC_LINK_SPEED_2GHZ: 6948 rdp_speed = RDP_PS_2GB; 6949 break; 6950 case LPFC_LINK_SPEED_4GHZ: 6951 rdp_speed = RDP_PS_4GB; 6952 break; 6953 case LPFC_LINK_SPEED_8GHZ: 6954 rdp_speed = RDP_PS_8GB; 6955 break; 6956 case LPFC_LINK_SPEED_10GHZ: 6957 rdp_speed = RDP_PS_10GB; 6958 break; 6959 case LPFC_LINK_SPEED_16GHZ: 6960 rdp_speed = RDP_PS_16GB; 6961 break; 6962 case LPFC_LINK_SPEED_32GHZ: 6963 rdp_speed = RDP_PS_32GB; 6964 break; 6965 case LPFC_LINK_SPEED_64GHZ: 6966 rdp_speed = RDP_PS_64GB; 6967 break; 6968 case LPFC_LINK_SPEED_128GHZ: 6969 rdp_speed = RDP_PS_128GB; 6970 break; 6971 case LPFC_LINK_SPEED_256GHZ: 6972 rdp_speed = RDP_PS_256GB; 6973 break; 6974 default: 6975 rdp_speed = RDP_PS_UNKNOWN; 6976 break; 6977 } 6978 6979 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6980 6981 if (phba->lmt & LMT_256Gb) 6982 rdp_cap |= RDP_PS_256GB; 6983 if (phba->lmt & LMT_128Gb) 6984 rdp_cap |= RDP_PS_128GB; 6985 if (phba->lmt & LMT_64Gb) 6986 rdp_cap |= RDP_PS_64GB; 6987 if (phba->lmt & LMT_32Gb) 6988 rdp_cap |= RDP_PS_32GB; 6989 if (phba->lmt & LMT_16Gb) 6990 rdp_cap |= RDP_PS_16GB; 6991 if (phba->lmt & LMT_10Gb) 6992 rdp_cap |= RDP_PS_10GB; 6993 if (phba->lmt & LMT_8Gb) 6994 rdp_cap |= RDP_PS_8GB; 6995 if (phba->lmt & LMT_4Gb) 6996 rdp_cap |= RDP_PS_4GB; 6997 if (phba->lmt & LMT_2Gb) 6998 rdp_cap |= RDP_PS_2GB; 6999 if (phba->lmt & LMT_1Gb) 7000 rdp_cap |= RDP_PS_1GB; 7001 7002 if (rdp_cap == 0) 7003 rdp_cap = RDP_CAP_UNKNOWN; 7004 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 7005 rdp_cap |= RDP_CAP_USER_CONFIGURED; 7006 7007 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 7008 desc->length = cpu_to_be32(sizeof(desc->info)); 7009 return sizeof(struct fc_rdp_port_speed_desc); 7010 } 7011 7012 static uint32_t 7013 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 7014 struct lpfc_vport *vport) 7015 { 7016 7017 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7018 7019 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 7020 sizeof(desc->port_names.wwnn)); 7021 7022 memcpy(desc->port_names.wwpn, &vport->fc_portname, 7023 sizeof(desc->port_names.wwpn)); 7024 7025 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7026 return sizeof(struct fc_rdp_port_name_desc); 7027 } 7028 7029 static uint32_t 7030 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 7031 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 7032 { 7033 7034 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 7035 if (test_bit(FC_FABRIC, &vport->fc_flag)) { 7036 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 7037 sizeof(desc->port_names.wwnn)); 7038 7039 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 7040 sizeof(desc->port_names.wwpn)); 7041 } else { /* Point to Point */ 7042 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 7043 sizeof(desc->port_names.wwnn)); 7044 7045 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 7046 sizeof(desc->port_names.wwpn)); 7047 } 7048 7049 desc->length = cpu_to_be32(sizeof(desc->port_names)); 7050 return sizeof(struct fc_rdp_port_name_desc); 7051 } 7052 7053 static void 7054 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 7055 int status) 
7056 { 7057 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 7058 struct lpfc_vport *vport = ndlp->vport; 7059 struct lpfc_iocbq *elsiocb; 7060 struct ulp_bde64 *bpl; 7061 IOCB_t *icmd; 7062 union lpfc_wqe128 *wqe; 7063 uint8_t *pcmd; 7064 struct ls_rjt *stat; 7065 struct fc_rdp_res_frame *rdp_res; 7066 uint32_t cmdsize, len; 7067 uint16_t *flag_ptr; 7068 int rc; 7069 u32 ulp_context; 7070 7071 if (status != SUCCESS) 7072 goto error; 7073 7074 /* This will change once we know the true size of the RDP payload */ 7075 cmdsize = sizeof(struct fc_rdp_res_frame); 7076 7077 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 7078 lpfc_max_els_tries, rdp_context->ndlp, 7079 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 7080 if (!elsiocb) 7081 goto free_rdp_context; 7082 7083 ulp_context = get_job_ulpcontext(phba, elsiocb); 7084 if (phba->sli_rev == LPFC_SLI_REV4) { 7085 wqe = &elsiocb->wqe; 7086 /* ox-id of the frame */ 7087 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7088 rdp_context->ox_id); 7089 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7090 rdp_context->rx_id); 7091 } else { 7092 icmd = &elsiocb->iocb; 7093 icmd->ulpContext = rdp_context->rx_id; 7094 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7095 } 7096 7097 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7098 "2171 Xmit RDP response tag x%x xri x%x, " 7099 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 7100 elsiocb->iotag, ulp_context, 7101 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7102 ndlp->nlp_rpi); 7103 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt; 7104 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7105 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 7106 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7107 7108 /* Update Alarm and Warning */ 7109 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 7110 phba->sfp_alarm |= *flag_ptr; 7111 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 7112 phba->sfp_warning |= *flag_ptr; 7113 7114 /* For RDP payload */ 7115 len = 8; 7116 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 7117 (len + pcmd), ELS_CMD_RDP); 7118 7119 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 7120 rdp_context->page_a0, rdp_context->page_a2); 7121 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 7122 phba); 7123 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 7124 (len + pcmd), &rdp_context->link_stat); 7125 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 7126 (len + pcmd), vport); 7127 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 7128 (len + pcmd), vport, ndlp); 7129 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 7130 &rdp_context->link_stat); 7131 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 7132 &rdp_context->link_stat, vport); 7133 len += lpfc_rdp_res_oed_temp_desc(phba, 7134 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7135 rdp_context->page_a2); 7136 len += lpfc_rdp_res_oed_voltage_desc(phba, 7137 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7138 rdp_context->page_a2); 7139 len += lpfc_rdp_res_oed_txbias_desc(phba, 7140 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7141 rdp_context->page_a2); 7142 len += lpfc_rdp_res_oed_txpower_desc(phba, 7143 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7144 rdp_context->page_a2); 7145 len += lpfc_rdp_res_oed_rxpower_desc(phba, 7146 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 7147 rdp_context->page_a2); 7148 len += lpfc_rdp_res_opd_desc((struct 
fc_rdp_opd_sfp_desc *)(len + pcmd), 7149 rdp_context->page_a0, vport); 7150 7151 rdp_res->length = cpu_to_be32(len - 8); 7152 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7153 7154 /* Now that we know the true size of the payload, update the BPL */ 7155 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt; 7156 bpl->tus.f.bdeSize = len; 7157 bpl->tus.f.bdeFlags = 0; 7158 bpl->tus.w = le32_to_cpu(bpl->tus.w); 7159 7160 phba->fc_stat.elsXmitACC++; 7161 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7162 if (!elsiocb->ndlp) { 7163 lpfc_els_free_iocb(phba, elsiocb); 7164 goto free_rdp_context; 7165 } 7166 7167 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7168 if (rc == IOCB_ERROR) { 7169 lpfc_els_free_iocb(phba, elsiocb); 7170 lpfc_nlp_put(ndlp); 7171 } 7172 7173 goto free_rdp_context; 7174 7175 error: 7176 cmdsize = 2 * sizeof(uint32_t); 7177 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 7178 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 7179 if (!elsiocb) 7180 goto free_rdp_context; 7181 7182 if (phba->sli_rev == LPFC_SLI_REV4) { 7183 wqe = &elsiocb->wqe; 7184 /* ox-id of the frame */ 7185 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7186 rdp_context->ox_id); 7187 bf_set(wqe_ctxt_tag, 7188 &wqe->xmit_els_rsp.wqe_com, 7189 rdp_context->rx_id); 7190 } else { 7191 icmd = &elsiocb->iocb; 7192 icmd->ulpContext = rdp_context->rx_id; 7193 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 7194 } 7195 7196 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7197 7198 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 7199 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7200 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7201 7202 phba->fc_stat.elsXmitLSRJT++; 7203 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7204 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7205 if (!elsiocb->ndlp) { 7206 lpfc_els_free_iocb(phba, elsiocb); 7207 goto free_rdp_context; 7208 } 7209 7210 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7211 if (rc == IOCB_ERROR) { 7212 lpfc_els_free_iocb(phba, elsiocb); 7213 lpfc_nlp_put(ndlp); 7214 } 7215 7216 free_rdp_context: 7217 /* This reference put is for the original unsolicited RDP. If the 7218 * prep failed, there is no reference to remove. 
7219 */ 7220 lpfc_nlp_put(ndlp); 7221 kfree(rdp_context); 7222 } 7223 7224 static int 7225 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 7226 { 7227 LPFC_MBOXQ_t *mbox = NULL; 7228 int rc; 7229 7230 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7231 if (!mbox) { 7232 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7233 "7105 failed to allocate mailbox memory"); 7234 return 1; 7235 } 7236 7237 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7238 goto rdp_fail; 7239 mbox->vport = rdp_context->ndlp->vport; 7240 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 7241 mbox->ctx_u.rdp = rdp_context; 7242 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7243 if (rc == MBX_NOT_FINISHED) { 7244 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); 7245 return 1; 7246 } 7247 7248 return 0; 7249 7250 rdp_fail: 7251 mempool_free(mbox, phba->mbox_mem_pool); 7252 return 1; 7253 } 7254 7255 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, 7256 struct lpfc_rdp_context *rdp_context) 7257 { 7258 LPFC_MBOXQ_t *mbox = NULL; 7259 int rc; 7260 struct lpfc_dmabuf *mp; 7261 struct lpfc_dmabuf *mpsave; 7262 void *virt; 7263 MAILBOX_t *mb; 7264 7265 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7266 if (!mbox) { 7267 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 7268 "7205 failed to allocate mailbox memory"); 7269 return 1; 7270 } 7271 7272 if (lpfc_sli4_dump_page_a0(phba, mbox)) 7273 goto sfp_fail; 7274 mp = mbox->ctx_buf; 7275 mpsave = mp; 7276 virt = mp->virt; 7277 if (phba->sli_rev < LPFC_SLI_REV4) { 7278 mb = &mbox->u.mb; 7279 mb->un.varDmp.cv = 1; 7280 mb->un.varDmp.co = 1; 7281 mb->un.varWords[2] = 0; 7282 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; 7283 mb->un.varWords[4] = 0; 7284 mb->un.varWords[5] = 0; 7285 mb->un.varWords[6] = 0; 7286 mb->un.varWords[7] = 0; 7287 mb->un.varWords[8] = 0; 7288 mb->un.varWords[9] = 0; 7289 mb->un.varWords[10] = 0; 7290 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7291 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; 7292 mbox->mbox_offset_word = 5; 7293 mbox->ext_buf = virt; 7294 } else { 7295 bf_set(lpfc_mbx_memory_dump_type3_length, 7296 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); 7297 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); 7298 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 7299 } 7300 mbox->vport = phba->pport; 7301 7302 rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); 7303 if (rc == MBX_NOT_FINISHED) { 7304 rc = 1; 7305 goto error; 7306 } 7307 7308 if (phba->sli_rev == LPFC_SLI_REV4) 7309 mp = mbox->ctx_buf; 7310 else 7311 mp = mpsave; 7312 7313 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { 7314 rc = 1; 7315 goto error; 7316 } 7317 7318 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, 7319 DMP_SFF_PAGE_A0_SIZE); 7320 7321 memset(mbox, 0, sizeof(*mbox)); 7322 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); 7323 INIT_LIST_HEAD(&mp->list); 7324 7325 /* save address for completion */ 7326 mbox->ctx_buf = mp; 7327 mbox->vport = phba->pport; 7328 7329 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 7330 bf_set(lpfc_mbx_memory_dump_type3_type, 7331 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); 7332 bf_set(lpfc_mbx_memory_dump_type3_link, 7333 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); 7334 bf_set(lpfc_mbx_memory_dump_type3_page_no, 7335 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); 7336 if (phba->sli_rev < LPFC_SLI_REV4) { 7337 mb = &mbox->u.mb; 7338 mb->un.varDmp.cv = 1; 7339 mb->un.varDmp.co = 1; 7340 mb->un.varWords[2] = 0; 7341 
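		/* Word 3 of the DUMP request holds the page A2 length in
		 * 32-bit words; the remaining request words are simply
		 * cleared.
		 */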
		mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4;
		mb->un.varWords[4] = 0;
		mb->un.varWords[5] = 0;
		mb->un.varWords[6] = 0;
		mb->un.varWords[7] = 0;
		mb->un.varWords[8] = 0;
		mb->un.varWords[9] = 0;
		mb->un.varWords[10] = 0;
		mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
		mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
		mbox->mbox_offset_word = 5;
		mbox->ext_buf = virt;
	} else {
		bf_set(lpfc_mbx_memory_dump_type3_length,
		       &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
		mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
		mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
	}
	mbox->vport = phba->pport;

	rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
	if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
		rc = 1;
		goto error;
	}
	rc = 0;

	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
			     DMP_SFF_PAGE_A2_SIZE);

error:
	mbox->ctx_buf = mpsave;
	lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);

	return rc;

sfp_fail:
	mempool_free(mbox, phba->mbox_mem_pool);
	return 1;
}

/*
 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
 * IOCB. First, the payload of the unsolicited RDP is checked.
 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
 * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
 * lpfc_els_rdp_cmpl() to gather all the data and send the RDP response.
 *
 * Return code
 * 0 - Sent the acc response
 * 1 - Sent the reject response.
7397 */ 7398 static int 7399 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7400 struct lpfc_nodelist *ndlp) 7401 { 7402 struct lpfc_hba *phba = vport->phba; 7403 struct lpfc_dmabuf *pcmd; 7404 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 7405 struct fc_rdp_req_frame *rdp_req; 7406 struct lpfc_rdp_context *rdp_context; 7407 union lpfc_wqe128 *cmd = NULL; 7408 struct ls_rjt stat; 7409 7410 if (phba->sli_rev < LPFC_SLI_REV4 || 7411 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7412 LPFC_SLI_INTF_IF_TYPE_2) { 7413 rjt_err = LSRJT_UNABLE_TPC; 7414 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7415 goto error; 7416 } 7417 7418 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 7419 rjt_err = LSRJT_UNABLE_TPC; 7420 rjt_expl = LSEXP_REQ_UNSUPPORTED; 7421 goto error; 7422 } 7423 7424 pcmd = cmdiocb->cmd_dmabuf; 7425 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 7426 7427 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7428 "2422 ELS RDP Request " 7429 "dec len %d tag x%x port_id %d len %d\n", 7430 be32_to_cpu(rdp_req->rdp_des_length), 7431 be32_to_cpu(rdp_req->nport_id_desc.tag), 7432 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 7433 be32_to_cpu(rdp_req->nport_id_desc.length)); 7434 7435 if (sizeof(struct fc_rdp_nport_desc) != 7436 be32_to_cpu(rdp_req->rdp_des_length)) 7437 goto rjt_logerr; 7438 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 7439 goto rjt_logerr; 7440 if (RDP_NPORT_ID_SIZE != 7441 be32_to_cpu(rdp_req->nport_id_desc.length)) 7442 goto rjt_logerr; 7443 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 7444 if (!rdp_context) { 7445 rjt_err = LSRJT_UNABLE_TPC; 7446 goto error; 7447 } 7448 7449 cmd = &cmdiocb->wqe; 7450 rdp_context->ndlp = lpfc_nlp_get(ndlp); 7451 if (!rdp_context->ndlp) { 7452 kfree(rdp_context); 7453 rjt_err = LSRJT_UNABLE_TPC; 7454 goto error; 7455 } 7456 rdp_context->ox_id = bf_get(wqe_rcvoxid, 7457 &cmd->xmit_els_rsp.wqe_com); 7458 rdp_context->rx_id = bf_get(wqe_ctxt_tag, 7459 &cmd->xmit_els_rsp.wqe_com); 7460 rdp_context->cmpl = lpfc_els_rdp_cmpl; 7461 if (lpfc_get_rdp_info(phba, rdp_context)) { 7462 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 7463 "2423 Unable to send mailbox"); 7464 kfree(rdp_context); 7465 rjt_err = LSRJT_UNABLE_TPC; 7466 lpfc_nlp_put(ndlp); 7467 goto error; 7468 } 7469 7470 return 0; 7471 7472 rjt_logerr: 7473 rjt_err = LSRJT_LOGICAL_ERR; 7474 7475 error: 7476 memset(&stat, 0, sizeof(stat)); 7477 stat.un.b.lsRjtRsnCode = rjt_err; 7478 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7479 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7480 return 1; 7481 } 7482 7483 7484 static void 7485 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7486 { 7487 MAILBOX_t *mb; 7488 IOCB_t *icmd; 7489 union lpfc_wqe128 *wqe; 7490 uint8_t *pcmd; 7491 struct lpfc_iocbq *elsiocb; 7492 struct lpfc_nodelist *ndlp; 7493 struct ls_rjt *stat; 7494 union lpfc_sli4_cfg_shdr *shdr; 7495 struct lpfc_lcb_context *lcb_context; 7496 struct fc_lcb_res_frame *lcb_res; 7497 uint32_t cmdsize, shdr_status, shdr_add_status; 7498 int rc; 7499 7500 mb = &pmb->u.mb; 7501 lcb_context = pmb->ctx_u.lcb; 7502 ndlp = lcb_context->ndlp; 7503 memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u)); 7504 pmb->ctx_buf = NULL; 7505 7506 shdr = (union lpfc_sli4_cfg_shdr *) 7507 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7508 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7509 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7510 7511 
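	/* The SET_BEACON_CONFIG result is reported both in the mailbox
	 * status and in the SLI4 config header status/add_status fields,
	 * so log and check all of them before building the LCB response.
	 */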
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7512 "0194 SET_BEACON_CONFIG mailbox " 7513 "completed with status x%x add_status x%x," 7514 " mbx status x%x\n", 7515 shdr_status, shdr_add_status, mb->mbxStatus); 7516 7517 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7518 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7519 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7520 mempool_free(pmb, phba->mbox_mem_pool); 7521 goto error; 7522 } 7523 7524 mempool_free(pmb, phba->mbox_mem_pool); 7525 cmdsize = sizeof(struct fc_lcb_res_frame); 7526 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7527 lpfc_max_els_tries, ndlp, 7528 ndlp->nlp_DID, ELS_CMD_ACC); 7529 7530 /* Decrement the ndlp reference count from previous mbox command */ 7531 lpfc_nlp_put(ndlp); 7532 7533 if (!elsiocb) 7534 goto free_lcb_context; 7535 7536 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt; 7537 7538 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7539 7540 if (phba->sli_rev == LPFC_SLI_REV4) { 7541 wqe = &elsiocb->wqe; 7542 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7543 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7544 lcb_context->ox_id); 7545 } else { 7546 icmd = &elsiocb->iocb; 7547 icmd->ulpContext = lcb_context->rx_id; 7548 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7549 } 7550 7551 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7552 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7553 lcb_res->lcb_sub_command = lcb_context->sub_command; 7554 lcb_res->lcb_type = lcb_context->type; 7555 lcb_res->capability = lcb_context->capability; 7556 lcb_res->lcb_frequency = lcb_context->frequency; 7557 lcb_res->lcb_duration = lcb_context->duration; 7558 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7559 phba->fc_stat.elsXmitACC++; 7560 7561 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7562 if (!elsiocb->ndlp) { 7563 lpfc_els_free_iocb(phba, elsiocb); 7564 goto out; 7565 } 7566 7567 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7568 if (rc == IOCB_ERROR) { 7569 lpfc_els_free_iocb(phba, elsiocb); 7570 lpfc_nlp_put(ndlp); 7571 } 7572 out: 7573 kfree(lcb_context); 7574 return; 7575 7576 error: 7577 cmdsize = sizeof(struct fc_lcb_res_frame); 7578 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7579 lpfc_max_els_tries, ndlp, 7580 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7581 lpfc_nlp_put(ndlp); 7582 if (!elsiocb) 7583 goto free_lcb_context; 7584 7585 if (phba->sli_rev == LPFC_SLI_REV4) { 7586 wqe = &elsiocb->wqe; 7587 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id); 7588 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7589 lcb_context->ox_id); 7590 } else { 7591 icmd = &elsiocb->iocb; 7592 icmd->ulpContext = lcb_context->rx_id; 7593 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7594 } 7595 7596 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 7597 7598 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7599 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7600 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7601 7602 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7603 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7604 7605 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 7606 phba->fc_stat.elsXmitLSRJT++; 7607 elsiocb->ndlp = lpfc_nlp_get(ndlp); 7608 if (!elsiocb->ndlp) { 7609 lpfc_els_free_iocb(phba, elsiocb); 7610 goto free_lcb_context; 7611 } 7612 7613 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7614 if (rc == IOCB_ERROR) { 7615 lpfc_els_free_iocb(phba, elsiocb); 7616 lpfc_nlp_put(ndlp); 7617 } 7618 free_lcb_context: 7619 kfree(lcb_context); 7620 } 
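
/**
 * lpfc_sli4_set_beacon - Issue a SET_BEACON_CONFIG mailbox command
 * @vport: pointer to a host virtual N_Port data structure.
 * @lcb_context: pointer to the LCB context saved from the unsolicited LCB.
 * @beacon_state: 1 to turn the beacon on, 0 to turn it off.
 *
 * This routine builds a COMMON_SET_BEACON_CONFIG mailbox command (V1 when
 * the bv1s capability is reported, otherwise V0, where a non-zero beacon
 * duration is not supported) and issues it with lpfc_els_lcb_rsp() as the
 * completion handler, which in turn sends the LCB ELS response.
 *
 * Return code
 * 0 - Mailbox command issued successfully
 * 1 - Failed to build or issue the mailbox command
 **/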
7621 7622 static int 7623 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7624 struct lpfc_lcb_context *lcb_context, 7625 uint32_t beacon_state) 7626 { 7627 struct lpfc_hba *phba = vport->phba; 7628 union lpfc_sli4_cfg_shdr *cfg_shdr; 7629 LPFC_MBOXQ_t *mbox = NULL; 7630 uint32_t len; 7631 int rc; 7632 7633 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7634 if (!mbox) 7635 return 1; 7636 7637 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7638 len = sizeof(struct lpfc_mbx_set_beacon_config) - 7639 sizeof(struct lpfc_sli4_cfg_mhdr); 7640 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7641 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7642 LPFC_SLI4_MBX_EMBED); 7643 mbox->ctx_u.lcb = lcb_context; 7644 mbox->vport = phba->pport; 7645 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7646 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7647 phba->sli4_hba.physical_port); 7648 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7649 beacon_state); 7650 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7651 7652 /* 7653 * Check bv1s bit before issuing the mailbox 7654 * if bv1s == 1, LCB V1 supported 7655 * else, LCB V0 supported 7656 */ 7657 7658 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7659 /* COMMON_SET_BEACON_CONFIG_V1 */ 7660 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7661 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7662 bf_set(lpfc_mbx_set_beacon_port_type, 7663 &mbox->u.mqe.un.beacon_config, 0); 7664 bf_set(lpfc_mbx_set_beacon_duration_v1, 7665 &mbox->u.mqe.un.beacon_config, 7666 be16_to_cpu(lcb_context->duration)); 7667 } else { 7668 /* COMMON_SET_BEACON_CONFIG_V0 */ 7669 if (be16_to_cpu(lcb_context->duration) != 0) { 7670 mempool_free(mbox, phba->mbox_mem_pool); 7671 return 1; 7672 } 7673 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7674 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7675 bf_set(lpfc_mbx_set_beacon_state, 7676 &mbox->u.mqe.un.beacon_config, beacon_state); 7677 bf_set(lpfc_mbx_set_beacon_port_type, 7678 &mbox->u.mqe.un.beacon_config, 1); 7679 bf_set(lpfc_mbx_set_beacon_duration, 7680 &mbox->u.mqe.un.beacon_config, 7681 be16_to_cpu(lcb_context->duration)); 7682 } 7683 7684 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7685 if (rc == MBX_NOT_FINISHED) { 7686 mempool_free(mbox, phba->mbox_mem_pool); 7687 return 1; 7688 } 7689 7690 return 0; 7691 } 7692 7693 7694 /** 7695 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7696 * @vport: pointer to a host virtual N_Port data structure. 7697 * @cmdiocb: pointer to lpfc command iocb data structure. 7698 * @ndlp: pointer to a node-list data structure. 7699 * 7700 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7701 * First, the payload of the unsolicited LCB is checked. 7702 * Then based on Subcommand beacon will either turn on or off. 7703 * 7704 * Return code 7705 * 0 - Sent the acc response 7706 * 1 - Sent the reject response. 
7707 **/ 7708 static int 7709 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7710 struct lpfc_nodelist *ndlp) 7711 { 7712 struct lpfc_hba *phba = vport->phba; 7713 struct lpfc_dmabuf *pcmd; 7714 uint8_t *lp; 7715 struct fc_lcb_request_frame *beacon; 7716 struct lpfc_lcb_context *lcb_context; 7717 u8 state, rjt_err = 0; 7718 struct ls_rjt stat; 7719 7720 pcmd = cmdiocb->cmd_dmabuf; 7721 lp = (uint8_t *)pcmd->virt; 7722 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7723 7724 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7725 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7726 "type x%x frequency %x duration x%x\n", 7727 lp[0], lp[1], lp[2], 7728 beacon->lcb_command, 7729 beacon->lcb_sub_command, 7730 beacon->lcb_type, 7731 beacon->lcb_frequency, 7732 be16_to_cpu(beacon->lcb_duration)); 7733 7734 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7735 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7736 rjt_err = LSRJT_CMD_UNSUPPORTED; 7737 goto rjt; 7738 } 7739 7740 if (phba->sli_rev < LPFC_SLI_REV4 || 7741 phba->hba_flag & HBA_FCOE_MODE || 7742 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7743 LPFC_SLI_INTF_IF_TYPE_2)) { 7744 rjt_err = LSRJT_CMD_UNSUPPORTED; 7745 goto rjt; 7746 } 7747 7748 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7749 if (!lcb_context) { 7750 rjt_err = LSRJT_UNABLE_TPC; 7751 goto rjt; 7752 } 7753 7754 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7755 lcb_context->sub_command = beacon->lcb_sub_command; 7756 lcb_context->capability = 0; 7757 lcb_context->type = beacon->lcb_type; 7758 lcb_context->frequency = beacon->lcb_frequency; 7759 lcb_context->duration = beacon->lcb_duration; 7760 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb); 7761 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb); 7762 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7763 if (!lcb_context->ndlp) { 7764 rjt_err = LSRJT_UNABLE_TPC; 7765 goto rjt_free; 7766 } 7767 7768 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7769 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7770 "0193 failed to send mail box"); 7771 lpfc_nlp_put(ndlp); 7772 rjt_err = LSRJT_UNABLE_TPC; 7773 goto rjt_free; 7774 } 7775 return 0; 7776 7777 rjt_free: 7778 kfree(lcb_context); 7779 rjt: 7780 memset(&stat, 0, sizeof(stat)); 7781 stat.un.b.lsRjtRsnCode = rjt_err; 7782 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7783 return 1; 7784 } 7785 7786 7787 /** 7788 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7789 * @vport: pointer to a host virtual N_Port data structure. 7790 * 7791 * This routine cleans up any Registration State Change Notification 7792 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7793 * @vport together with the host_lock is used to prevent multiple thread 7794 * trying to access the RSCN array on a same @vport at the same time. 
7795 **/ 7796 void 7797 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7798 { 7799 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7800 struct lpfc_hba *phba = vport->phba; 7801 int i; 7802 7803 spin_lock_irq(shost->host_lock); 7804 if (vport->fc_rscn_flush) { 7805 /* Another thread is walking fc_rscn_id_list on this vport */ 7806 spin_unlock_irq(shost->host_lock); 7807 return; 7808 } 7809 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7810 vport->fc_rscn_flush = 1; 7811 spin_unlock_irq(shost->host_lock); 7812 7813 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7814 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7815 vport->fc_rscn_id_list[i] = NULL; 7816 } 7817 clear_bit(FC_RSCN_MODE, &vport->fc_flag); 7818 clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag); 7819 spin_lock_irq(shost->host_lock); 7820 vport->fc_rscn_id_cnt = 0; 7821 spin_unlock_irq(shost->host_lock); 7822 lpfc_can_disctmo(vport); 7823 /* Indicate we are done walking this fc_rscn_id_list */ 7824 vport->fc_rscn_flush = 0; 7825 } 7826 7827 /** 7828 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7829 * @vport: pointer to a host virtual N_Port data structure. 7830 * @did: remote destination port identifier. 7831 * 7832 * This routine checks whether there is any pending Registration State 7833 * Configuration Notification (RSCN) to a @did on @vport. 7834 * 7835 * Return code 7836 * None zero - The @did matched with a pending rscn 7837 * 0 - not able to match @did with a pending rscn 7838 **/ 7839 int 7840 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7841 { 7842 D_ID ns_did; 7843 D_ID rscn_did; 7844 uint32_t *lp; 7845 uint32_t payload_len, i; 7846 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7847 7848 ns_did.un.word = did; 7849 7850 /* Never match fabric nodes for RSCNs */ 7851 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7852 return 0; 7853 7854 /* If we are doing a FULL RSCN rediscovery, match everything */ 7855 if (test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) 7856 return did; 7857 7858 spin_lock_irq(shost->host_lock); 7859 if (vport->fc_rscn_flush) { 7860 /* Another thread is walking fc_rscn_id_list on this vport */ 7861 spin_unlock_irq(shost->host_lock); 7862 return 0; 7863 } 7864 /* Indicate we are walking fc_rscn_id_list on this vport */ 7865 vport->fc_rscn_flush = 1; 7866 spin_unlock_irq(shost->host_lock); 7867 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7868 lp = vport->fc_rscn_id_list[i]->virt; 7869 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7870 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7871 while (payload_len) { 7872 rscn_did.un.word = be32_to_cpu(*lp++); 7873 payload_len -= sizeof(uint32_t); 7874 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7875 case RSCN_ADDRESS_FORMAT_PORT: 7876 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7877 && (ns_did.un.b.area == rscn_did.un.b.area) 7878 && (ns_did.un.b.id == rscn_did.un.b.id)) 7879 goto return_did_out; 7880 break; 7881 case RSCN_ADDRESS_FORMAT_AREA: 7882 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7883 && (ns_did.un.b.area == rscn_did.un.b.area)) 7884 goto return_did_out; 7885 break; 7886 case RSCN_ADDRESS_FORMAT_DOMAIN: 7887 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7888 goto return_did_out; 7889 break; 7890 case RSCN_ADDRESS_FORMAT_FABRIC: 7891 goto return_did_out; 7892 } 7893 } 7894 } 7895 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7896 vport->fc_rscn_flush = 0; 7897 return 0; 7898 return_did_out: 7899 /* Indicate we 
are done with walking fc_rscn_id_list on this vport */ 7900 vport->fc_rscn_flush = 0; 7901 return did; 7902 } 7903 7904 /** 7905 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7906 * @vport: pointer to a host virtual N_Port data structure. 7907 * 7908 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7909 * state machine for a @vport's nodes that are with pending RSCN (Registration 7910 * State Change Notification). 7911 * 7912 * Return code 7913 * 0 - Successful (currently alway return 0) 7914 **/ 7915 static int 7916 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7917 { 7918 struct lpfc_nodelist *ndlp = NULL, *n; 7919 7920 /* Move all affected nodes by pending RSCNs to NPR state. */ 7921 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) { 7922 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7923 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7924 continue; 7925 7926 /* NVME Target mode does not do RSCN Recovery. */ 7927 if (vport->phba->nvmet_support) 7928 continue; 7929 7930 /* If we are in the process of doing discovery on this 7931 * NPort, let it continue on its own. 7932 */ 7933 switch (ndlp->nlp_state) { 7934 case NLP_STE_PLOGI_ISSUE: 7935 case NLP_STE_ADISC_ISSUE: 7936 case NLP_STE_REG_LOGIN_ISSUE: 7937 case NLP_STE_PRLI_ISSUE: 7938 case NLP_STE_LOGO_ISSUE: 7939 continue; 7940 } 7941 7942 lpfc_disc_state_machine(vport, ndlp, NULL, 7943 NLP_EVT_DEVICE_RECOVERY); 7944 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7945 } 7946 return 0; 7947 } 7948 7949 /** 7950 * lpfc_send_rscn_event - Send an RSCN event to management application 7951 * @vport: pointer to a host virtual N_Port data structure. 7952 * @cmdiocb: pointer to lpfc command iocb data structure. 7953 * 7954 * lpfc_send_rscn_event sends an RSCN netlink event to management 7955 * applications. 7956 */ 7957 static void 7958 lpfc_send_rscn_event(struct lpfc_vport *vport, 7959 struct lpfc_iocbq *cmdiocb) 7960 { 7961 struct lpfc_dmabuf *pcmd; 7962 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7963 uint32_t *payload_ptr; 7964 uint32_t payload_len; 7965 struct lpfc_rscn_event_header *rscn_event_data; 7966 7967 pcmd = cmdiocb->cmd_dmabuf; 7968 payload_ptr = (uint32_t *) pcmd->virt; 7969 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7970 7971 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7972 payload_len, GFP_KERNEL); 7973 if (!rscn_event_data) { 7974 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7975 "0147 Failed to allocate memory for RSCN event\n"); 7976 return; 7977 } 7978 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7979 rscn_event_data->payload_length = payload_len; 7980 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7981 payload_len); 7982 7983 fc_host_post_vendor_event(shost, 7984 fc_get_event_number(), 7985 sizeof(struct lpfc_rscn_event_header) + payload_len, 7986 (char *)rscn_event_data, 7987 LPFC_NL_VENDOR_ID); 7988 7989 kfree(rscn_event_data); 7990 } 7991 7992 /** 7993 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7994 * @vport: pointer to a host virtual N_Port data structure. 7995 * @cmdiocb: pointer to lpfc command iocb data structure. 7996 * @ndlp: pointer to a node-list data structure. 7997 * 7998 * This routine processes an unsolicited RSCN (Registration State Change 7999 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 8000 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 8001 * discover state machine is about to begin discovery, it just accepts the 8002 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 8003 * contains N_Port IDs for other vports on this HBA, it just accepts the 8004 * RSCN and ignore processing it. If the state machine is in the recovery 8005 * state, the fc_rscn_id_list of this @vport is walked and the 8006 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 8007 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 8008 * routine is invoked to handle the RSCN event. 8009 * 8010 * Return code 8011 * 0 - Just sent the acc response 8012 * 1 - Sent the acc response and waited for name server completion 8013 **/ 8014 static int 8015 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8016 struct lpfc_nodelist *ndlp) 8017 { 8018 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8019 struct lpfc_hba *phba = vport->phba; 8020 struct lpfc_dmabuf *pcmd; 8021 uint32_t *lp, *datap; 8022 uint32_t payload_len, length, nportid, *cmd; 8023 int rscn_cnt; 8024 int rscn_id = 0, hba_id = 0; 8025 int i, tmo; 8026 8027 pcmd = cmdiocb->cmd_dmabuf; 8028 lp = (uint32_t *) pcmd->virt; 8029 8030 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 8031 payload_len -= sizeof(uint32_t); /* take off word 0 */ 8032 /* RSCN received */ 8033 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8034 "0214 RSCN received Data: x%lx x%x x%x x%x\n", 8035 vport->fc_flag, payload_len, *lp, 8036 vport->fc_rscn_id_cnt); 8037 8038 /* Send an RSCN event to the management application */ 8039 lpfc_send_rscn_event(vport, cmdiocb); 8040 8041 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 8042 fc_host_post_event(shost, fc_get_event_number(), 8043 FCH_EVT_RSCN, lp[i]); 8044 8045 /* Check if RSCN is coming from a direct-connected remote NPort */ 8046 if (test_bit(FC_PT2PT, &vport->fc_flag)) { 8047 /* If so, just ACC it, no other action needed for now */ 8048 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8049 "2024 pt2pt RSCN %08x Data: x%lx x%x\n", 8050 *lp, vport->fc_flag, payload_len); 8051 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8052 8053 /* Check to see if we need to NVME rescan this target 8054 * remoteport. 8055 */ 8056 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 8057 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 8058 lpfc_nvme_rescan_port(vport, ndlp); 8059 return 0; 8060 } 8061 8062 /* If we are about to begin discovery, just ACC the RSCN. 8063 * Discovery processing will satisfy it. 8064 */ 8065 if (vport->port_state <= LPFC_NS_QRY) { 8066 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8067 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 8068 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8069 8070 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8071 return 0; 8072 } 8073 8074 /* If this RSCN just contains NPortIDs for other vports on this HBA, 8075 * just ACC and ignore it. 
8076 */ 8077 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8078 !(vport->cfg_peer_port_login)) { 8079 i = payload_len; 8080 datap = lp; 8081 while (i > 0) { 8082 nportid = *datap++; 8083 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 8084 i -= sizeof(uint32_t); 8085 rscn_id++; 8086 if (lpfc_find_vport_by_did(phba, nportid)) 8087 hba_id++; 8088 } 8089 if (rscn_id == hba_id) { 8090 /* ALL NPortIDs in RSCN are on HBA */ 8091 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 8092 "0219 Ignore RSCN " 8093 "Data: x%lx x%x x%x x%x\n", 8094 vport->fc_flag, payload_len, 8095 *lp, vport->fc_rscn_id_cnt); 8096 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8097 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 8098 ndlp->nlp_DID, vport->port_state, 8099 ndlp->nlp_flag); 8100 8101 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 8102 ndlp, NULL); 8103 /* Restart disctmo if its already running */ 8104 if (test_bit(FC_DISC_TMO, &vport->fc_flag)) { 8105 tmo = ((phba->fc_ratov * 3) + 3); 8106 mod_timer(&vport->fc_disctmo, 8107 jiffies + 8108 msecs_to_jiffies(1000 * tmo)); 8109 } 8110 return 0; 8111 } 8112 } 8113 8114 spin_lock_irq(shost->host_lock); 8115 if (vport->fc_rscn_flush) { 8116 /* Another thread is walking fc_rscn_id_list on this vport */ 8117 spin_unlock_irq(shost->host_lock); 8118 set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag); 8119 /* Send back ACC */ 8120 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8121 return 0; 8122 } 8123 /* Indicate we are walking fc_rscn_id_list on this vport */ 8124 vport->fc_rscn_flush = 1; 8125 spin_unlock_irq(shost->host_lock); 8126 /* Get the array count after successfully have the token */ 8127 rscn_cnt = vport->fc_rscn_id_cnt; 8128 /* If we are already processing an RSCN, save the received 8129 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later. 8130 */ 8131 if (test_bit(FC_RSCN_MODE, &vport->fc_flag) || 8132 test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { 8133 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8134 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 8135 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 8136 8137 set_bit(FC_RSCN_DEFERRED, &vport->fc_flag); 8138 8139 /* Restart disctmo if its already running */ 8140 if (test_bit(FC_DISC_TMO, &vport->fc_flag)) { 8141 tmo = ((phba->fc_ratov * 3) + 3); 8142 mod_timer(&vport->fc_disctmo, 8143 jiffies + msecs_to_jiffies(1000 * tmo)); 8144 } 8145 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 8146 !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) { 8147 set_bit(FC_RSCN_MODE, &vport->fc_flag); 8148 if (rscn_cnt) { 8149 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 8150 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 8151 } 8152 if ((rscn_cnt) && 8153 (payload_len + length <= LPFC_BPL_SIZE)) { 8154 *cmd &= ELS_CMD_MASK; 8155 *cmd |= cpu_to_be32(payload_len + length); 8156 memcpy(((uint8_t *)cmd) + length, lp, 8157 payload_len); 8158 } else { 8159 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 8160 vport->fc_rscn_id_cnt++; 8161 /* If we zero, cmdiocb->cmd_dmabuf, the calling 8162 * routine will not try to free it. 
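				 * Ownership of the buffer moves to
				 * fc_rscn_id_list and it is released later by
				 * lpfc_els_flush_rscn().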
				 */
				cmdiocb->cmd_dmabuf = NULL;
			}
			/* Deferred RSCN */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0235 Deferred RSCN "
					 "Data: x%x x%lx x%x\n",
					 vport->fc_rscn_id_cnt, vport->fc_flag,
					 vport->port_state);
		} else {
			set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
			/* ReDiscovery RSCN */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0234 ReDiscovery RSCN "
					 "Data: x%x x%lx x%x\n",
					 vport->fc_rscn_id_cnt, vport->fc_flag,
					 vport->port_state);
		}
		/* Indicate we are done walking fc_rscn_id_list on this vport */
		vport->fc_rscn_flush = 0;
		/* Send back ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
		/* send RECOVERY event for ALL nodes that match RSCN payload */
		lpfc_rscn_recovery_check(vport);
		return 0;
	}
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
		"RCV RSCN: did:x%x/ste:x%x flg:x%x",
		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);

	set_bit(FC_RSCN_MODE, &vport->fc_flag);
	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
	/* Indicate we are done walking fc_rscn_id_list on this vport */
	vport->fc_rscn_flush = 0;
	/*
	 * If we zero, cmdiocb->cmd_dmabuf, the calling routine will
	 * not try to free it.
	 */
	cmdiocb->cmd_dmabuf = NULL;
	lpfc_set_disctmo(vport);
	/* Send back ACC */
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	/* send RECOVERY event for ALL nodes that match RSCN payload */
	lpfc_rscn_recovery_check(vport);
	return lpfc_els_handle_rscn(vport);
}

/**
 * lpfc_els_handle_rscn - Handle rscn for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine handles the Registration State Change Notification (RSCN)
 * for a @vport. If a login to the NameServer does not exist, a new ndlp shall
 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
 * if the ndlp to the NameServer exists, a Common Transport (CT) command to
 * the NameServer shall be issued. If the CT command to the NameServer fails
 * to be issued, the lpfc_els_flush_rscn() routine shall be invoked to clean
 * up any RSCN activities with the @vport.
 *
 * Return code
 * 0 - Cleaned up rscn on the @vport
 * 1 - Wait for plogi to name server before proceeding
 **/
int
lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba = vport->phba;

	/* Ignore RSCN if the port is being torn down. */
	if (test_bit(FC_UNLOADING, &vport->load_flag)) {
		lpfc_els_flush_rscn(vport);
		return 0;
	}

	/* Start timer for RSCN processing */
	lpfc_set_disctmo(vport);

	/* RSCN processed */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0215 RSCN processed Data: x%lx x%x x%x x%x x%x x%x\n",
			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
			 vport->port_state, vport->num_disc_nodes,
			 vport->gidft_inp);

	/* To process RSCN, first compare RSCN data with NameServer */
	vport->fc_ns_retry = 0;
	vport->num_disc_nodes = 0;

	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		/* Good ndlp, issue CT Request to NameServer. Need to
		 * know how many gidfts were issued. If none, then just
		 * flush the RSCN. Otherwise, the outstanding requests
		 * need to complete.
		 */
		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
			if (lpfc_issue_gidft(vport) > 0)
				return 1;
		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
			if (lpfc_issue_gidpt(vport) > 0)
				return 1;
		} else {
			return 1;
		}
	} else {
		/* Nameserver login in question. Revalidate. */
		if (ndlp) {
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		} else {
			ndlp = lpfc_nlp_init(vport, NameServer_DID);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		}
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
		/* Wait for NameServer login cmpl before we can
		 * continue
		 */
		return 1;
	}

	lpfc_els_flush_rscn(vport);
	return 0;
}

/**
 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
 * unsolicited event. An unsolicited FLOGI can be received in a point-to-point
 * topology. As an unsolicited FLOGI should not be received in loop mode, any
 * unsolicited FLOGI received in loop mode shall be ignored. The
 * lpfc_check_sparm() routine is invoked to check the parameters in the
 * unsolicited FLOGI. If parameter validation fails, the lpfc_els_rsp_reject()
 * routine shall be called with the reject reason code set to
 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
 * FLOGI shall be compared with the Port WWN of the @vport to determine who
 * will initiate PLOGI. The party with the higher lexicographical value shall
 * have higher priority (as the winning port) and will initiate PLOGI and
 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
8314 * 8315 * Return code 8316 * 0 - Successfully processed the unsolicited flogi 8317 * 1 - Failed to process the unsolicited flogi 8318 **/ 8319 static int 8320 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8321 struct lpfc_nodelist *ndlp) 8322 { 8323 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8324 struct lpfc_hba *phba = vport->phba; 8325 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf; 8326 uint32_t *lp = (uint32_t *) pcmd->virt; 8327 union lpfc_wqe128 *wqe = &cmdiocb->wqe; 8328 struct serv_parm *sp; 8329 LPFC_MBOXQ_t *mbox; 8330 uint32_t cmd, did; 8331 int rc; 8332 unsigned long fc_flag = 0; 8333 uint32_t port_state = 0; 8334 8335 /* Clear external loopback plug detected flag */ 8336 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; 8337 8338 cmd = *lp++; 8339 sp = (struct serv_parm *) lp; 8340 8341 /* FLOGI received */ 8342 8343 lpfc_set_disctmo(vport); 8344 8345 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8346 /* We should never receive a FLOGI in loop mode, ignore it */ 8347 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest); 8348 8349 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 8350 Loop Mode */ 8351 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8352 "0113 An FLOGI ELS command x%x was " 8353 "received from DID x%x in Loop Mode\n", 8354 cmd, did); 8355 return 1; 8356 } 8357 8358 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 8359 8360 /* 8361 * If our portname is greater than the remote portname, 8362 * then we initiate Nport login. 8363 */ 8364 8365 rc = memcmp(&vport->fc_portname, &sp->portName, 8366 sizeof(struct lpfc_name)); 8367 8368 if (!rc) { 8369 if (phba->sli_rev < LPFC_SLI_REV4) { 8370 mbox = mempool_alloc(phba->mbox_mem_pool, 8371 GFP_KERNEL); 8372 if (!mbox) 8373 return 1; 8374 lpfc_linkdown(phba); 8375 lpfc_init_link(phba, mbox, 8376 phba->cfg_topology, 8377 phba->cfg_link_speed); 8378 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8379 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8380 mbox->vport = vport; 8381 rc = lpfc_sli_issue_mbox(phba, mbox, 8382 MBX_NOWAIT); 8383 lpfc_set_loopback_flag(phba); 8384 if (rc == MBX_NOT_FINISHED) 8385 mempool_free(mbox, phba->mbox_mem_pool); 8386 return 1; 8387 } 8388 8389 /* External loopback plug insertion detected */ 8390 phba->link_flag |= LS_EXTERNAL_LOOPBACK; 8391 8392 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC, 8393 "1119 External Loopback plug detected\n"); 8394 8395 /* abort the flogi coming back to ourselves 8396 * due to external loopback on the port. 8397 */ 8398 lpfc_els_abort_flogi(phba); 8399 return 0; 8400 8401 } else if (rc > 0) { /* greater than */ 8402 set_bit(FC_PT2PT_PLOGI, &vport->fc_flag); 8403 8404 /* If we have the high WWPN we can assign our own 8405 * myDID; otherwise, we have to WAIT for a PLOGI 8406 * from the remote NPort to find out what it 8407 * will be. 8408 */ 8409 vport->fc_myDID = PT2PT_LocalID; 8410 } else { 8411 vport->fc_myDID = PT2PT_RemoteID; 8412 } 8413 8414 /* 8415 * The vport state should go to LPFC_FLOGI only 8416 * AFTER we issue a FLOGI, not receive one. 8417 */ 8418 spin_lock_irq(shost->host_lock); 8419 fc_flag = vport->fc_flag; 8420 port_state = vport->port_state; 8421 /* Acking an unsol FLOGI. Count 1 for link bounce 8422 * work-around. 
8423 */ 8424 vport->rcv_flogi_cnt++; 8425 spin_unlock_irq(shost->host_lock); 8426 set_bit(FC_PT2PT, &vport->fc_flag); 8427 clear_bit(FC_FABRIC, &vport->fc_flag); 8428 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 8429 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8430 "3311 Rcv Flogi PS x%x new PS x%x " 8431 "fc_flag x%lx new fc_flag x%lx\n", 8432 port_state, vport->port_state, 8433 fc_flag, vport->fc_flag); 8434 8435 /* 8436 * We temporarily set fc_myDID to make it look like we are 8437 * a Fabric. This is done just so we end up with the right 8438 * did / sid on the FLOGI ACC rsp. 8439 */ 8440 did = vport->fc_myDID; 8441 vport->fc_myDID = Fabric_DID; 8442 8443 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 8444 8445 /* Defer ACC response until AFTER we issue a FLOGI */ 8446 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 8447 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, 8448 &wqe->xmit_els_rsp.wqe_com); 8449 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, 8450 &wqe->xmit_els_rsp.wqe_com); 8451 8452 vport->fc_myDID = did; 8453 8454 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8455 "3344 Deferring FLOGI ACC: rx_id: x%x," 8456 " ox_id: x%x, hba_flag x%x\n", 8457 phba->defer_flogi_acc_rx_id, 8458 phba->defer_flogi_acc_ox_id, phba->hba_flag); 8459 8460 phba->defer_flogi_acc_flag = true; 8461 8462 return 0; 8463 } 8464 8465 /* Send back ACC */ 8466 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 8467 8468 /* Now lets put fc_myDID back to what its supposed to be */ 8469 vport->fc_myDID = did; 8470 8471 return 0; 8472 } 8473 8474 /** 8475 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 8476 * @vport: pointer to a host virtual N_Port data structure. 8477 * @cmdiocb: pointer to lpfc command iocb data structure. 8478 * @ndlp: pointer to a node-list data structure. 8479 * 8480 * This routine processes Request Node Identification Data (RNID) IOCB 8481 * received as an ELS unsolicited event. Only when the RNID specified format 8482 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 8483 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 8484 * Accept (ACC) the RNID ELS command. All the other RNID formats are 8485 * rejected by invoking the lpfc_els_rsp_reject() routine. 8486 * 8487 * Return code 8488 * 0 - Successfully processed rnid iocb (currently always return 0) 8489 **/ 8490 static int 8491 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8492 struct lpfc_nodelist *ndlp) 8493 { 8494 struct lpfc_dmabuf *pcmd; 8495 uint32_t *lp; 8496 RNID *rn; 8497 struct ls_rjt stat; 8498 8499 pcmd = cmdiocb->cmd_dmabuf; 8500 lp = (uint32_t *) pcmd->virt; 8501 8502 lp++; 8503 rn = (RNID *) lp; 8504 8505 /* RNID received */ 8506 8507 switch (rn->Format) { 8508 case 0: 8509 case RNID_TOPOLOGY_DISC: 8510 /* Send back ACC */ 8511 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8512 break; 8513 default: 8514 /* Reject this request because format not supported */ 8515 stat.un.b.lsRjtRsvd0 = 0; 8516 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8517 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8518 stat.un.b.vendorUnique = 0; 8519 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8520 NULL); 8521 } 8522 return 0; 8523 } 8524 8525 /** 8526 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8527 * @vport: pointer to a host virtual N_Port data structure. 8528 * @cmdiocb: pointer to lpfc command iocb data structure. 8529 * @ndlp: pointer to a node-list data structure. 
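 *
 * This routine processes an ECHO ELS command received as an unsolicited
 * event: it skips over the first word of the payload and hands the echo
 * data to lpfc_els_rsp_echo_acc() so that an ACC carrying the same data is
 * returned to the sender.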
8530 * 8531 * Return code 8532 * 0 - Successfully processed echo iocb (currently always return 0) 8533 **/ 8534 static int 8535 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8536 struct lpfc_nodelist *ndlp) 8537 { 8538 uint8_t *pcmd; 8539 8540 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt; 8541 8542 /* skip over first word of echo command to find echo data */ 8543 pcmd += sizeof(uint32_t); 8544 8545 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8546 return 0; 8547 } 8548 8549 /** 8550 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8551 * @vport: pointer to a host virtual N_Port data structure. 8552 * @cmdiocb: pointer to lpfc command iocb data structure. 8553 * @ndlp: pointer to a node-list data structure. 8554 * 8555 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8556 * received as an ELS unsolicited event. Currently, this function just invokes 8557 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8558 * 8559 * Return code 8560 * 0 - Successfully processed lirr iocb (currently always return 0) 8561 **/ 8562 static int 8563 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8564 struct lpfc_nodelist *ndlp) 8565 { 8566 struct ls_rjt stat; 8567 8568 /* For now, unconditionally reject this command */ 8569 stat.un.b.lsRjtRsvd0 = 0; 8570 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8571 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8572 stat.un.b.vendorUnique = 0; 8573 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8574 return 0; 8575 } 8576 8577 /** 8578 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8579 * @vport: pointer to a host virtual N_Port data structure. 8580 * @cmdiocb: pointer to lpfc command iocb data structure. 8581 * @ndlp: pointer to a node-list data structure. 8582 * 8583 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8584 * received as an ELS unsolicited event. A request to RRQ shall only 8585 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8586 * Nx_Port N_Port_ID of the target Exchange is the same as the 8587 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8588 * not accepted, an LS_RJT with reason code "Unable to perform 8589 * command request" and reason code explanation "Invalid Originator 8590 * S_ID" shall be returned. For now, we just unconditionally accept 8591 * RRQ from the target. 8592 **/ 8593 static void 8594 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8595 struct lpfc_nodelist *ndlp) 8596 { 8597 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8598 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8599 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8600 } 8601 8602 /** 8603 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8604 * @phba: pointer to lpfc hba data structure. 8605 * @pmb: pointer to the driver internal queue element for mailbox command. 8606 * 8607 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8608 * mailbox command. This callback function is to actually send the Accept 8609 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8610 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8611 * mailbox command, constructs the RLS response with the link statistics 8612 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8613 * response to the RLS. 
8614 * 8615 * Note that the ndlp reference count will be incremented by 1 for holding the 8616 * ndlp and the reference to ndlp will be stored into the ndlp field of 8617 * the IOCB for the completion callback function to the RLS Accept Response 8618 * ELS IOCB command. 8619 * 8620 **/ 8621 static void 8622 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8623 { 8624 int rc = 0; 8625 MAILBOX_t *mb; 8626 IOCB_t *icmd; 8627 union lpfc_wqe128 *wqe; 8628 struct RLS_RSP *rls_rsp; 8629 uint8_t *pcmd; 8630 struct lpfc_iocbq *elsiocb; 8631 struct lpfc_nodelist *ndlp; 8632 uint16_t oxid; 8633 uint16_t rxid; 8634 uint32_t cmdsize; 8635 u32 ulp_context; 8636 8637 mb = &pmb->u.mb; 8638 8639 ndlp = pmb->ctx_ndlp; 8640 rxid = (uint16_t)(pmb->ctx_u.ox_rx_id & 0xffff); 8641 oxid = (uint16_t)((pmb->ctx_u.ox_rx_id >> 16) & 0xffff); 8642 memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u)); 8643 pmb->ctx_ndlp = NULL; 8644 8645 if (mb->mbxStatus) { 8646 mempool_free(pmb, phba->mbox_mem_pool); 8647 return; 8648 } 8649 8650 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8651 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8652 lpfc_max_els_tries, ndlp, 8653 ndlp->nlp_DID, ELS_CMD_ACC); 8654 8655 /* Decrement the ndlp reference count from previous mbox command */ 8656 lpfc_nlp_put(ndlp); 8657 8658 if (!elsiocb) { 8659 mempool_free(pmb, phba->mbox_mem_pool); 8660 return; 8661 } 8662 8663 ulp_context = get_job_ulpcontext(phba, elsiocb); 8664 if (phba->sli_rev == LPFC_SLI_REV4) { 8665 wqe = &elsiocb->wqe; 8666 /* Xri / rx_id */ 8667 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid); 8668 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid); 8669 } else { 8670 icmd = &elsiocb->iocb; 8671 icmd->ulpContext = rxid; 8672 icmd->unsli3.rcvsli3.ox_id = oxid; 8673 } 8674 8675 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8676 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8677 pcmd += sizeof(uint32_t); /* Skip past command */ 8678 rls_rsp = (struct RLS_RSP *)pcmd; 8679 8680 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8681 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8682 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8683 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8684 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8685 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8686 mempool_free(pmb, phba->mbox_mem_pool); 8687 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8688 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8689 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8690 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8691 elsiocb->iotag, ulp_context, 8692 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8693 ndlp->nlp_rpi); 8694 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 8695 phba->fc_stat.elsXmitACC++; 8696 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8697 if (!elsiocb->ndlp) { 8698 lpfc_els_free_iocb(phba, elsiocb); 8699 return; 8700 } 8701 8702 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8703 if (rc == IOCB_ERROR) { 8704 lpfc_els_free_iocb(phba, elsiocb); 8705 lpfc_nlp_put(ndlp); 8706 } 8707 return; 8708 } 8709 8710 /** 8711 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8712 * @vport: pointer to a host virtual N_Port data structure. 8713 * @cmdiocb: pointer to lpfc command iocb data structure. 8714 * @ndlp: pointer to a node-list data structure. 8715 * 8716 * This routine processes Read Link Status (RLS) IOCB received as an 8717 * ELS unsolicited event. 
It first checks the remote port state. If the
8718 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8719 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8720 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8721 * for reading the HBA link statistics. The callback function,
8722 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
8723 * actually sends out the RLS Accept (ACC) response.
8724 *
8725 * Return codes
8726 * 0 - Successfully processed rls iocb (currently always return 0)
8727 **/
8728 static int
8729 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8730 struct lpfc_nodelist *ndlp)
8731 {
8732 struct lpfc_hba *phba = vport->phba;
8733 LPFC_MBOXQ_t *mbox;
8734 struct ls_rjt stat;
8735 u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8736 u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8737
8738 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8739 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8740 /* reject the unsolicited RLS request and done with it */
8741 goto reject_out;
8742
8743 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8744 if (mbox) {
8745 lpfc_read_lnk_stat(phba, mbox);
8746 mbox->ctx_u.ox_rx_id = ox_id << 16 | ctx;
8747 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8748 if (!mbox->ctx_ndlp)
8749 goto node_err;
8750 mbox->vport = vport;
8751 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8752 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8753 != MBX_NOT_FINISHED)
8754 /* Mbox completion will send ELS Response */
8755 return 0;
8756 /* Decrement reference count used for the failed mbox
8757 * command.
8758 */
8759 lpfc_nlp_put(ndlp);
8760 node_err:
8761 mempool_free(mbox, phba->mbox_mem_pool);
8762 }
8763 reject_out:
8764 /* issue rejection response */
8765 stat.un.b.lsRjtRsvd0 = 0;
8766 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8767 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8768 stat.un.b.vendorUnique = 0;
8769 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8770 return 0;
8771 }
8772
8773 /**
8774 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8775 * @vport: pointer to a host virtual N_Port data structure.
8776 * @cmdiocb: pointer to lpfc command iocb data structure.
8777 * @ndlp: pointer to a node-list data structure.
8778 *
8779 * This routine processes Read Timeout Value (RTV) IOCB received as an
8780 * ELS unsolicited event. It first checks the remote port state. If the
8781 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8782 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8783 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
8784 * Value (RTV) unsolicited IOCB event.
8785 *
8786 * Note that the ndlp reference count will be incremented by 1 for holding the
8787 * ndlp and the reference to ndlp will be stored into the ndlp field of
8788 * the IOCB for the completion callback function to the RTV Accept Response
8789 * ELS IOCB command.
8790 *
8791 * Return codes
8792 * 0 - Successfully processed rtv iocb (currently always return 0)
8793 **/
8794 static int
8795 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8796 struct lpfc_nodelist *ndlp)
8797 {
8798 int rc = 0;
8799 IOCB_t *icmd;
8800 union lpfc_wqe128 *wqe;
8801 struct lpfc_hba *phba = vport->phba;
8802 struct ls_rjt stat;
8803 struct RTV_RSP *rtv_rsp;
8804 uint8_t *pcmd;
8805 struct lpfc_iocbq *elsiocb;
8806 uint32_t cmdsize;
8807 u32 ulp_context;
8808
8809 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8810 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8811 /* reject the unsolicited RTV request and done with it */
8812 goto reject_out;
8813
8814 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8815 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8816 lpfc_max_els_tries, ndlp,
8817 ndlp->nlp_DID, ELS_CMD_ACC);
8818
8819 if (!elsiocb)
8820 return 1;
8821
8822 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8823 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8824 pcmd += sizeof(uint32_t); /* Skip past command */
8825
8826 ulp_context = get_job_ulpcontext(phba, elsiocb);
8827 /* use the command's xri in the response */
8828 if (phba->sli_rev == LPFC_SLI_REV4) {
8829 wqe = &elsiocb->wqe;
8830 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8831 get_job_ulpcontext(phba, cmdiocb));
8832 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8833 get_job_rcvoxid(phba, cmdiocb));
8834 } else {
8835 icmd = &elsiocb->iocb;
8836 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8837 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
8838 }
8839
8840 rtv_rsp = (struct RTV_RSP *)pcmd;
8841
8842 /* populate RTV payload */
8843 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8844 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8845 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8846 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
8847 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
8848
8849 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
8850 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8851 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8852 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8853 "Data: x%x x%x x%x\n",
8854 elsiocb->iotag, ulp_context,
8855 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8856 ndlp->nlp_rpi,
8857 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8858 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8859 phba->fc_stat.elsXmitACC++;
8860 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8861 if (!elsiocb->ndlp) {
8862 lpfc_els_free_iocb(phba, elsiocb);
8863 return 0;
8864 }
8865
8866 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8867 if (rc == IOCB_ERROR) {
8868 lpfc_els_free_iocb(phba, elsiocb);
8869 lpfc_nlp_put(ndlp);
8870 }
8871 return 0;
8872
8873 reject_out:
8874 /* issue rejection response */
8875 stat.un.b.lsRjtRsvd0 = 0;
8876 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8877 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8878 stat.un.b.vendorUnique = 0;
8879 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8880 return 0;
8881 }
8882
8883 /* lpfc_issue_els_rrq - Issue an els rrq iocb command
8884 * @vport: pointer to a host virtual N_Port data structure.
8885 * @ndlp: pointer to a node-list data structure.
8886 * @did: DID of the target.
8887 * @rrq: Pointer to the rrq struct.
8888 *
8889 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8890 * successful, the completion handler will clear the RRQ.
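 *
 * RRQ (Reinstate Recovery Qualifier, see FC-LS) is typically sent after an
 * exchange has been aborted so that the recovery qualifier can be released
 * and the exchange identifiers reused before R_A_TOV would otherwise expire.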
8891 * 8892 * Return codes 8893 * 0 - Successfully sent rrq els iocb. 8894 * 1 - Failed to send rrq els iocb. 8895 **/ 8896 static int 8897 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8898 uint32_t did, struct lpfc_node_rrq *rrq) 8899 { 8900 struct lpfc_hba *phba = vport->phba; 8901 struct RRQ *els_rrq; 8902 struct lpfc_iocbq *elsiocb; 8903 uint8_t *pcmd; 8904 uint16_t cmdsize; 8905 int ret; 8906 8907 if (!ndlp) 8908 return 1; 8909 8910 /* If ndlp is not NULL, we will bump the reference count on it */ 8911 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8912 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8913 ELS_CMD_RRQ); 8914 if (!elsiocb) 8915 return 1; 8916 8917 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 8918 8919 /* For RRQ request, remainder of payload is Exchange IDs */ 8920 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8921 pcmd += sizeof(uint32_t); 8922 els_rrq = (struct RRQ *) pcmd; 8923 8924 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8925 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8926 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8927 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8928 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8929 8930 8931 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8932 "Issue RRQ: did:x%x", 8933 did, rrq->xritag, rrq->rxid); 8934 elsiocb->context_un.rrq = rrq; 8935 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq; 8936 8937 elsiocb->ndlp = lpfc_nlp_get(ndlp); 8938 if (!elsiocb->ndlp) 8939 goto io_err; 8940 8941 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8942 if (ret == IOCB_ERROR) { 8943 lpfc_nlp_put(ndlp); 8944 goto io_err; 8945 } 8946 return 0; 8947 8948 io_err: 8949 lpfc_els_free_iocb(phba, elsiocb); 8950 return 1; 8951 } 8952 8953 /** 8954 * lpfc_send_rrq - Sends ELS RRQ if needed. 8955 * @phba: pointer to lpfc hba data structure. 8956 * @rrq: pointer to the active rrq. 8957 * 8958 * This routine will call the lpfc_issue_els_rrq if the rrq is 8959 * still active for the xri. If this function returns a failure then 8960 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8961 * 8962 * Returns 0 Success. 8963 * 1 Failure. 8964 **/ 8965 int 8966 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8967 { 8968 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8969 rrq->nlp_DID); 8970 if (!ndlp) 8971 return 1; 8972 8973 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8974 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8975 rrq->nlp_DID, rrq); 8976 else 8977 return 1; 8978 } 8979 8980 /** 8981 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8982 * @vport: pointer to a host virtual N_Port data structure. 8983 * @cmdsize: size of the ELS command. 8984 * @oldiocb: pointer to the original lpfc command iocb data structure. 8985 * @ndlp: pointer to a node-list data structure. 8986 * 8987 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8988 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8989 * 8990 * Note that the ndlp reference count will be incremented by 1 for holding the 8991 * ndlp and the reference to ndlp will be stored into the ndlp field of 8992 * the IOCB for the completion callback function to the RPL Accept Response 8993 * ELS command. 
8994 * 8995 * Return code 8996 * 0 - Successfully issued ACC RPL ELS command 8997 * 1 - Failed to issue ACC RPL ELS command 8998 **/ 8999 static int 9000 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 9001 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 9002 { 9003 int rc = 0; 9004 struct lpfc_hba *phba = vport->phba; 9005 IOCB_t *icmd; 9006 union lpfc_wqe128 *wqe; 9007 RPL_RSP rpl_rsp; 9008 struct lpfc_iocbq *elsiocb; 9009 uint8_t *pcmd; 9010 u32 ulp_context; 9011 9012 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 9013 ndlp->nlp_DID, ELS_CMD_ACC); 9014 9015 if (!elsiocb) 9016 return 1; 9017 9018 ulp_context = get_job_ulpcontext(phba, elsiocb); 9019 if (phba->sli_rev == LPFC_SLI_REV4) { 9020 wqe = &elsiocb->wqe; 9021 /* Xri / rx_id */ 9022 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 9023 get_job_ulpcontext(phba, oldiocb)); 9024 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9025 get_job_rcvoxid(phba, oldiocb)); 9026 } else { 9027 icmd = &elsiocb->iocb; 9028 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb); 9029 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb); 9030 } 9031 9032 pcmd = elsiocb->cmd_dmabuf->virt; 9033 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 9034 pcmd += sizeof(uint16_t); 9035 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 9036 pcmd += sizeof(uint16_t); 9037 9038 /* Setup the RPL ACC payload */ 9039 rpl_rsp.listLen = be32_to_cpu(1); 9040 rpl_rsp.index = 0; 9041 rpl_rsp.port_num_blk.portNum = 0; 9042 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 9043 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 9044 sizeof(struct lpfc_name)); 9045 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 9046 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 9047 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9048 "0120 Xmit ELS RPL ACC response tag x%x " 9049 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 9050 "rpi x%x\n", 9051 elsiocb->iotag, ulp_context, 9052 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 9053 ndlp->nlp_rpi); 9054 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; 9055 phba->fc_stat.elsXmitACC++; 9056 elsiocb->ndlp = lpfc_nlp_get(ndlp); 9057 if (!elsiocb->ndlp) { 9058 lpfc_els_free_iocb(phba, elsiocb); 9059 return 1; 9060 } 9061 9062 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9063 if (rc == IOCB_ERROR) { 9064 lpfc_els_free_iocb(phba, elsiocb); 9065 lpfc_nlp_put(ndlp); 9066 return 1; 9067 } 9068 9069 return 0; 9070 } 9071 9072 /** 9073 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 9074 * @vport: pointer to a host virtual N_Port data structure. 9075 * @cmdiocb: pointer to lpfc command iocb data structure. 9076 * @ndlp: pointer to a node-list data structure. 9077 * 9078 * This routine processes Read Port List (RPL) IOCB received as an ELS 9079 * unsolicited event. It first checks the remote port state. If the remote 9080 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 9081 * invokes the lpfc_els_rsp_reject() routine to send reject response. 9082 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 9083 * to accept the RPL. 
9084 * 9085 * Return code 9086 * 0 - Successfully processed rpl iocb (currently always return 0) 9087 **/ 9088 static int 9089 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9090 struct lpfc_nodelist *ndlp) 9091 { 9092 struct lpfc_dmabuf *pcmd; 9093 uint32_t *lp; 9094 uint32_t maxsize; 9095 uint16_t cmdsize; 9096 RPL *rpl; 9097 struct ls_rjt stat; 9098 9099 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 9100 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 9101 /* issue rejection response */ 9102 stat.un.b.lsRjtRsvd0 = 0; 9103 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 9104 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 9105 stat.un.b.vendorUnique = 0; 9106 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 9107 NULL); 9108 /* rejected the unsolicited RPL request and done with it */ 9109 return 0; 9110 } 9111 9112 pcmd = cmdiocb->cmd_dmabuf; 9113 lp = (uint32_t *) pcmd->virt; 9114 rpl = (RPL *) (lp + 1); 9115 maxsize = be32_to_cpu(rpl->maxsize); 9116 9117 /* We support only one port */ 9118 if ((rpl->index == 0) && 9119 ((maxsize == 0) || 9120 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 9121 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 9122 } else { 9123 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 9124 } 9125 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 9126 9127 return 0; 9128 } 9129 9130 /** 9131 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 9132 * @vport: pointer to a virtual N_Port data structure. 9133 * @cmdiocb: pointer to lpfc command iocb data structure. 9134 * @ndlp: pointer to a node-list data structure. 9135 * 9136 * This routine processes Fibre Channel Address Resolution Protocol 9137 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 9138 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 9139 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 9140 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 9141 * remote PortName is compared against the FC PortName stored in the @vport 9142 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 9143 * compared against the FC NodeName stored in the @vport data structure. 9144 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 9145 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 9146 * invoked to send out FARP Response to the remote node. Before sending the 9147 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 9148 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 9149 * routine is invoked to log into the remote port first. 
9150 * 9151 * Return code 9152 * 0 - Either the FARP Match Mode not supported or successfully processed 9153 **/ 9154 static int 9155 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9156 struct lpfc_nodelist *ndlp) 9157 { 9158 struct lpfc_dmabuf *pcmd; 9159 uint32_t *lp; 9160 FARP *fp; 9161 uint32_t cnt, did; 9162 9163 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9164 pcmd = cmdiocb->cmd_dmabuf; 9165 lp = (uint32_t *) pcmd->virt; 9166 9167 lp++; 9168 fp = (FARP *) lp; 9169 /* FARP-REQ received from DID <did> */ 9170 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9171 "0601 FARP-REQ received from DID x%x\n", did); 9172 /* We will only support match on WWPN or WWNN */ 9173 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 9174 return 0; 9175 } 9176 9177 cnt = 0; 9178 /* If this FARP command is searching for my portname */ 9179 if (fp->Mflags & FARP_MATCH_PORT) { 9180 if (memcmp(&fp->RportName, &vport->fc_portname, 9181 sizeof(struct lpfc_name)) == 0) 9182 cnt = 1; 9183 } 9184 9185 /* If this FARP command is searching for my nodename */ 9186 if (fp->Mflags & FARP_MATCH_NODE) { 9187 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 9188 sizeof(struct lpfc_name)) == 0) 9189 cnt = 1; 9190 } 9191 9192 if (cnt) { 9193 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 9194 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 9195 /* Log back into the node before sending the FARP. */ 9196 if (fp->Rflags & FARP_REQUEST_PLOGI) { 9197 ndlp->nlp_prev_state = ndlp->nlp_state; 9198 lpfc_nlp_set_state(vport, ndlp, 9199 NLP_STE_PLOGI_ISSUE); 9200 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9201 } 9202 9203 /* Send a FARP response to that node */ 9204 if (fp->Rflags & FARP_REQUEST_FARPR) 9205 lpfc_issue_els_farpr(vport, did, 0); 9206 } 9207 } 9208 return 0; 9209 } 9210 9211 /** 9212 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 9213 * @vport: pointer to a host virtual N_Port data structure. 9214 * @cmdiocb: pointer to lpfc command iocb data structure. 9215 * @ndlp: pointer to a node-list data structure. 9216 * 9217 * This routine processes Fibre Channel Address Resolution Protocol 9218 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 9219 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 9220 * the FARP response request. 9221 * 9222 * Return code 9223 * 0 - Successfully processed FARPR IOCB (currently always return 0) 9224 **/ 9225 static int 9226 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9227 struct lpfc_nodelist *ndlp) 9228 { 9229 uint32_t did; 9230 9231 did = get_job_els_rsp64_did(vport->phba, cmdiocb); 9232 9233 /* FARP-RSP received from DID <did> */ 9234 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9235 "0600 FARP-RSP received from DID x%x\n", did); 9236 /* ACCEPT the Farp resp request */ 9237 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 9238 9239 return 0; 9240 } 9241 9242 /** 9243 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 9244 * @vport: pointer to a host virtual N_Port data structure. 9245 * @cmdiocb: pointer to lpfc command iocb data structure. 9246 * @fan_ndlp: pointer to a node-list data structure. 9247 * 9248 * This routine processes a Fabric Address Notification (FAN) IOCB 9249 * command received as an ELS unsolicited event. The FAN ELS command will 9250 * only be processed on a physical port (i.e., the @vport represents the 9251 * physical port). 
The fabric NodeName and PortName from the FAN IOCB are
9252 * compared against those in the phba data structure. If either of them
9253 * differs, the port has switched fabrics and the lpfc_issue_init_vfi()
9254 * routine is invoked to restart Fabric Login (FLOGI) and discovery.
9255 * Otherwise, if both are identical, the previous DID is restored and the
9256 * lpfc_issue_fabric_reglogin() routine (lpfc_issue_reg_vfi() on SLI-4) is invoked to register login to the fabric.
9257 *
9258 * Return code
9259 * 0 - Successfully processed fan iocb (currently always return 0).
9260 **/
9261 static int
9262 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9263 struct lpfc_nodelist *fan_ndlp)
9264 {
9265 struct lpfc_hba *phba = vport->phba;
9266 uint32_t *lp;
9267 FAN *fp;
9268
9269 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9270 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9271 fp = (FAN *) ++lp;
9272 /* FAN received; Fan does not have a reply sequence */
9273 if ((vport == phba->pport) &&
9274 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9275 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9276 sizeof(struct lpfc_name))) ||
9277 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9278 sizeof(struct lpfc_name)))) {
9279 /* This port has switched fabrics. FLOGI is required */
9280 lpfc_issue_init_vfi(vport);
9281 } else {
9282 /* FAN verified - skip FLOGI */
9283 vport->fc_myDID = vport->fc_prevDID;
9284 if (phba->sli_rev < LPFC_SLI_REV4)
9285 lpfc_issue_fabric_reglogin(vport);
9286 else {
9287 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9288 "3138 Need register VFI: (x%x/%x)\n",
9289 vport->fc_prevDID, vport->fc_myDID);
9290 lpfc_issue_reg_vfi(vport);
9291 }
9292 }
9293 }
9294 return 0;
9295 }
9296
9297 /**
9298 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9299 * @vport: pointer to a host virtual N_Port data structure.
9300 * @cmdiocb: pointer to lpfc command iocb data structure.
9301 * @ndlp: pointer to a node-list data structure.
9302 * 9303 * Return code 9304 * 0 - Successfully processed echo iocb (currently always return 0) 9305 **/ 9306 static int 9307 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 9308 struct lpfc_nodelist *ndlp) 9309 { 9310 struct lpfc_hba *phba = vport->phba; 9311 struct fc_els_edc *edc_req; 9312 struct fc_tlv_desc *tlv; 9313 uint8_t *payload; 9314 uint32_t *ptr, dtag; 9315 const char *dtag_nm; 9316 int desc_cnt = 0, bytes_remain; 9317 struct fc_diag_lnkflt_desc *plnkflt; 9318 9319 payload = cmdiocb->cmd_dmabuf->virt; 9320 9321 edc_req = (struct fc_els_edc *)payload; 9322 bytes_remain = be32_to_cpu(edc_req->desc_len); 9323 9324 ptr = (uint32_t *)payload; 9325 lpfc_printf_vlog(vport, KERN_INFO, 9326 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9327 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 9328 bytes_remain, be32_to_cpu(*ptr), 9329 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 9330 9331 /* No signal support unless there is a congestion descriptor */ 9332 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 9333 phba->cgn_sig_freq = 0; 9334 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 9335 9336 if (bytes_remain <= 0) 9337 goto out; 9338 9339 tlv = edc_req->desc; 9340 9341 /* 9342 * cycle through EDC diagnostic descriptors to find the 9343 * congestion signaling capability descriptor 9344 */ 9345 while (bytes_remain) { 9346 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 9347 lpfc_printf_log(phba, KERN_WARNING, 9348 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9349 "6464 Truncated TLV hdr on " 9350 "Diagnostic descriptor[%d]\n", 9351 desc_cnt); 9352 goto out; 9353 } 9354 9355 dtag = be32_to_cpu(tlv->desc_tag); 9356 switch (dtag) { 9357 case ELS_DTAG_LNK_FAULT_CAP: 9358 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9359 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9360 sizeof(struct fc_diag_lnkflt_desc)) { 9361 lpfc_printf_log(phba, KERN_WARNING, 9362 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT, 9363 "6465 Truncated Link Fault Diagnostic " 9364 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9365 desc_cnt, bytes_remain, 9366 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9367 sizeof(struct fc_diag_lnkflt_desc)); 9368 goto out; 9369 } 9370 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 9371 lpfc_printf_log(phba, KERN_INFO, 9372 LOG_ELS | LOG_LDS_EVENT, 9373 "4626 Link Fault Desc Data: x%08x len x%x " 9374 "da x%x dd x%x interval x%x\n", 9375 be32_to_cpu(plnkflt->desc_tag), 9376 be32_to_cpu(plnkflt->desc_len), 9377 be32_to_cpu( 9378 plnkflt->degrade_activate_threshold), 9379 be32_to_cpu( 9380 plnkflt->degrade_deactivate_threshold), 9381 be32_to_cpu(plnkflt->fec_degrade_interval)); 9382 break; 9383 case ELS_DTAG_CG_SIGNAL_CAP: 9384 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 9385 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 9386 sizeof(struct fc_diag_cg_sig_desc)) { 9387 lpfc_printf_log( 9388 phba, KERN_WARNING, LOG_CGN_MGMT, 9389 "6466 Truncated cgn signal Diagnostic " 9390 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 9391 desc_cnt, bytes_remain, 9392 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 9393 sizeof(struct fc_diag_cg_sig_desc)); 9394 goto out; 9395 } 9396 9397 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 9398 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 9399 9400 /* We start negotiation with lpfc_fabric_cgn_frequency. 9401 * When we process the EDC, we will settle on the 9402 * higher frequency. 
9403 */
9404 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9405
9406 lpfc_least_capable_settings(
9407 phba, (struct fc_diag_cg_sig_desc *)tlv);
9408 break;
9409 default:
9410 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
9411 lpfc_printf_log(phba, KERN_WARNING,
9412 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9413 "6467 unknown Diagnostic "
9414 "Descriptor[%d]: tag x%x (%s)\n",
9415 desc_cnt, dtag, dtag_nm);
9416 }
9417 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
9418 tlv = fc_tlv_next_desc(tlv);
9419 desc_cnt++;
9420 }
9421 out:
9422 /* Need to send back an ACC */
9423 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
9424
9425 lpfc_config_cgn_signal(phba);
9426 return 0;
9427 }
9428
9429 /**
9430 * lpfc_els_timeout - Handler function for the els timer
9431 * @t: timer context used to obtain the vport.
9432 *
9433 * This routine is invoked by the ELS timer after timeout. It posts the ELS
9434 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
9435 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
9436 * up the worker thread. The worker thread then invokes the routine
9437 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
9438 **/
9439 void
9440 lpfc_els_timeout(struct timer_list *t)
9441 {
9442 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
9443 struct lpfc_hba *phba = vport->phba;
9444 uint32_t tmo_posted;
9445 unsigned long iflag;
9446
9447 spin_lock_irqsave(&vport->work_port_lock, iflag);
9448 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
9449 if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
9450 vport->work_port_events |= WORKER_ELS_TMO;
9451 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
9452
9453 if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
9454 lpfc_worker_wake_up(phba);
9455 return;
9456 }
9457
9458
9459 /**
9460 * lpfc_els_timeout_handler - Process an els timeout event
9461 * @vport: pointer to a virtual N_Port data structure.
9462 *
9463 * This routine is the actual handler function that processes an ELS timeout
9464 * event. It walks the ELS ring and aborts all timed-out IOCBs associated
9465 * with the @vport (except ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by
9466 * invoking the lpfc_sli_issue_abort_iotag() routine.
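 *
 * An IOCB is queued for abort only after its drvrTimeout has counted down
 * to zero; otherwise the timeout interval is subtracted from it and the
 * IOCB is skipped on this pass.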
9467 **/ 9468 void 9469 lpfc_els_timeout_handler(struct lpfc_vport *vport) 9470 { 9471 struct lpfc_hba *phba = vport->phba; 9472 struct lpfc_sli_ring *pring; 9473 struct lpfc_iocbq *tmp_iocb, *piocb; 9474 IOCB_t *cmd = NULL; 9475 struct lpfc_dmabuf *pcmd; 9476 uint32_t els_command = 0; 9477 uint32_t timeout; 9478 uint32_t remote_ID = 0xffffffff; 9479 LIST_HEAD(abort_list); 9480 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0; 9481 9482 9483 timeout = (uint32_t)(phba->fc_ratov << 1); 9484 9485 pring = lpfc_phba_elsring(phba); 9486 if (unlikely(!pring)) 9487 return; 9488 9489 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) 9490 return; 9491 9492 spin_lock_irq(&phba->hbalock); 9493 if (phba->sli_rev == LPFC_SLI_REV4) 9494 spin_lock(&pring->ring_lock); 9495 9496 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9497 ulp_command = get_job_cmnd(phba, piocb); 9498 ulp_context = get_job_ulpcontext(phba, piocb); 9499 did = get_job_els_rsp64_did(phba, piocb); 9500 9501 if (phba->sli_rev == LPFC_SLI_REV4) { 9502 iotag = get_wqe_reqtag(piocb); 9503 } else { 9504 cmd = &piocb->iocb; 9505 iotag = cmd->ulpIoTag; 9506 } 9507 9508 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 || 9509 ulp_command == CMD_ABORT_XRI_CX || 9510 ulp_command == CMD_ABORT_XRI_CN || 9511 ulp_command == CMD_CLOSE_XRI_CN) 9512 continue; 9513 9514 if (piocb->vport != vport) 9515 continue; 9516 9517 pcmd = piocb->cmd_dmabuf; 9518 if (pcmd) 9519 els_command = *(uint32_t *) (pcmd->virt); 9520 9521 if (els_command == ELS_CMD_FARP || 9522 els_command == ELS_CMD_FARPR || 9523 els_command == ELS_CMD_FDISC) 9524 continue; 9525 9526 if (piocb->drvrTimeout > 0) { 9527 if (piocb->drvrTimeout >= timeout) 9528 piocb->drvrTimeout -= timeout; 9529 else 9530 piocb->drvrTimeout = 0; 9531 continue; 9532 } 9533 9534 remote_ID = 0xffffffff; 9535 if (ulp_command != CMD_GEN_REQUEST64_CR) { 9536 remote_ID = did; 9537 } else { 9538 struct lpfc_nodelist *ndlp; 9539 ndlp = __lpfc_findnode_rpi(vport, ulp_context); 9540 if (ndlp) 9541 remote_ID = ndlp->nlp_DID; 9542 } 9543 list_add_tail(&piocb->dlist, &abort_list); 9544 } 9545 if (phba->sli_rev == LPFC_SLI_REV4) 9546 spin_unlock(&pring->ring_lock); 9547 spin_unlock_irq(&phba->hbalock); 9548 9549 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9550 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9551 "0127 ELS timeout Data: x%x x%x x%x " 9552 "x%x\n", els_command, 9553 remote_ID, ulp_command, iotag); 9554 9555 spin_lock_irq(&phba->hbalock); 9556 list_del_init(&piocb->dlist); 9557 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9558 spin_unlock_irq(&phba->hbalock); 9559 } 9560 9561 /* Make sure HBA is alive */ 9562 lpfc_issue_hb_tmo(phba); 9563 9564 if (!list_empty(&pring->txcmplq)) 9565 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 9566 mod_timer(&vport->els_tmofunc, 9567 jiffies + msecs_to_jiffies(1000 * timeout)); 9568 } 9569 9570 /** 9571 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9572 * @vport: pointer to a host virtual N_Port data structure. 9573 * 9574 * This routine is used to clean up all the outstanding ELS commands on a 9575 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9576 * routine. After that, it walks the ELS transmit queue to remove all the 9577 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For 9578 * the IOCBs with a non-NULL completion callback function, the callback 9579 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9580 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9581 * callback function, the IOCB will simply be released. Finally, it walks 9582 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9583 * completion queue IOCB that is associated with the @vport and is not 9584 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9585 * part of the discovery state machine) out to HBA by invoking the 9586 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9587 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9588 * the IOCBs are aborted when this function returns. 9589 **/ 9590 void 9591 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9592 { 9593 LIST_HEAD(abort_list); 9594 LIST_HEAD(cancel_list); 9595 struct lpfc_hba *phba = vport->phba; 9596 struct lpfc_sli_ring *pring; 9597 struct lpfc_iocbq *tmp_iocb, *piocb; 9598 u32 ulp_command; 9599 unsigned long iflags = 0; 9600 bool mbx_tmo_err; 9601 9602 lpfc_fabric_abort_vport(vport); 9603 9604 /* 9605 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9606 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9607 * ultimately grabs the ring_lock, the driver must splice the list into 9608 * a working list and release the locks before calling the abort. 9609 */ 9610 spin_lock_irqsave(&phba->hbalock, iflags); 9611 pring = lpfc_phba_elsring(phba); 9612 9613 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9614 if (unlikely(!pring)) { 9615 spin_unlock_irqrestore(&phba->hbalock, iflags); 9616 return; 9617 } 9618 9619 if (phba->sli_rev == LPFC_SLI_REV4) 9620 spin_lock(&pring->ring_lock); 9621 9622 mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags); 9623 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9624 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9625 if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err) 9626 continue; 9627 9628 if (piocb->vport != vport) 9629 continue; 9630 9631 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err) 9632 continue; 9633 9634 /* On the ELS ring we can have ELS_REQUESTs or 9635 * GEN_REQUESTs waiting for a response. 9636 */ 9637 ulp_command = get_job_cmnd(phba, piocb); 9638 if (ulp_command == CMD_ELS_REQUEST64_CR) { 9639 list_add_tail(&piocb->dlist, &abort_list); 9640 9641 /* If the link is down when flushing ELS commands 9642 * the firmware will not complete them till after 9643 * the link comes back up. This may confuse 9644 * discovery for the new link up, so we need to 9645 * change the compl routine to just clean up the iocb 9646 * and avoid any retry logic. 9647 */ 9648 if (phba->link_state == LPFC_LINK_DOWN) 9649 piocb->cmd_cmpl = lpfc_cmpl_els_link_down; 9650 } else if (ulp_command == CMD_GEN_REQUEST64_CR || 9651 mbx_tmo_err) 9652 list_add_tail(&piocb->dlist, &abort_list); 9653 } 9654 9655 if (phba->sli_rev == LPFC_SLI_REV4) 9656 spin_unlock(&pring->ring_lock); 9657 spin_unlock_irqrestore(&phba->hbalock, iflags); 9658 9659 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
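 * When a mailbox timeout error (MBX_TMO_ERR) is active, the iocbs are
 * instead moved to a local cancel list and completed with
 * IOSTAT_LOCAL_REJECT rather than issuing hardware aborts.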
*/ 9660 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9661 spin_lock_irqsave(&phba->hbalock, iflags); 9662 list_del_init(&piocb->dlist); 9663 if (mbx_tmo_err) 9664 list_move_tail(&piocb->list, &cancel_list); 9665 else 9666 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9667 9668 spin_unlock_irqrestore(&phba->hbalock, iflags); 9669 } 9670 if (!list_empty(&cancel_list)) 9671 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT, 9672 IOERR_SLI_ABORTED); 9673 else 9674 /* Make sure HBA is alive */ 9675 lpfc_issue_hb_tmo(phba); 9676 9677 if (!list_empty(&abort_list)) 9678 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9679 "3387 abort list for txq not empty\n"); 9680 INIT_LIST_HEAD(&abort_list); 9681 9682 spin_lock_irqsave(&phba->hbalock, iflags); 9683 if (phba->sli_rev == LPFC_SLI_REV4) 9684 spin_lock(&pring->ring_lock); 9685 9686 /* No need to abort the txq list, 9687 * just queue them up for lpfc_sli_cancel_iocbs 9688 */ 9689 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9690 ulp_command = get_job_cmnd(phba, piocb); 9691 9692 if (piocb->cmd_flag & LPFC_IO_LIBDFC) 9693 continue; 9694 9695 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9696 if (ulp_command == CMD_QUE_RING_BUF_CN || 9697 ulp_command == CMD_QUE_RING_BUF64_CN || 9698 ulp_command == CMD_CLOSE_XRI_CN || 9699 ulp_command == CMD_ABORT_XRI_CN || 9700 ulp_command == CMD_ABORT_XRI_CX) 9701 continue; 9702 9703 if (piocb->vport != vport) 9704 continue; 9705 9706 list_del_init(&piocb->list); 9707 list_add_tail(&piocb->list, &abort_list); 9708 } 9709 9710 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9711 if (vport == phba->pport) { 9712 list_for_each_entry_safe(piocb, tmp_iocb, 9713 &phba->fabric_iocb_list, list) { 9714 list_del_init(&piocb->list); 9715 list_add_tail(&piocb->list, &abort_list); 9716 } 9717 } 9718 9719 if (phba->sli_rev == LPFC_SLI_REV4) 9720 spin_unlock(&pring->ring_lock); 9721 spin_unlock_irqrestore(&phba->hbalock, iflags); 9722 9723 /* Cancel all the IOCBs from the completions list */ 9724 lpfc_sli_cancel_iocbs(phba, &abort_list, 9725 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9726 9727 return; 9728 } 9729 9730 /** 9731 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9732 * @phba: pointer to lpfc hba data structure. 9733 * 9734 * This routine is used to clean up all the outstanding ELS commands on a 9735 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9736 * routine. After that, it walks the ELS transmit queue to remove all the 9737 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9738 * the IOCBs with the completion callback function associated, the callback 9739 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9740 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9741 * callback function associated, the IOCB will simply be released. Finally, 9742 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9743 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9744 * management plane IOCBs that are not part of the discovery state machine) 9745 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 
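 *
 * This is done by walking the @phba port list and invoking
 * lpfc_els_flush_cmd() on each vport.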
9746 **/ 9747 void 9748 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9749 { 9750 struct lpfc_vport *vport; 9751 9752 spin_lock_irq(&phba->port_list_lock); 9753 list_for_each_entry(vport, &phba->port_list, listentry) 9754 lpfc_els_flush_cmd(vport); 9755 spin_unlock_irq(&phba->port_list_lock); 9756 9757 return; 9758 } 9759 9760 /** 9761 * lpfc_send_els_failure_event - Posts an ELS command failure event 9762 * @phba: Pointer to hba context object. 9763 * @cmdiocbp: Pointer to command iocb which reported error. 9764 * @rspiocbp: Pointer to response iocb which reported error. 9765 * 9766 * This function sends an event when there is an ELS command 9767 * failure. 9768 **/ 9769 void 9770 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9771 struct lpfc_iocbq *cmdiocbp, 9772 struct lpfc_iocbq *rspiocbp) 9773 { 9774 struct lpfc_vport *vport = cmdiocbp->vport; 9775 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9776 struct lpfc_lsrjt_event lsrjt_event; 9777 struct lpfc_fabric_event_header fabric_event; 9778 struct ls_rjt stat; 9779 struct lpfc_nodelist *ndlp; 9780 uint32_t *pcmd; 9781 u32 ulp_status, ulp_word4; 9782 9783 ndlp = cmdiocbp->ndlp; 9784 if (!ndlp) 9785 return; 9786 9787 ulp_status = get_job_ulpstatus(phba, rspiocbp); 9788 ulp_word4 = get_job_word4(phba, rspiocbp); 9789 9790 if (ulp_status == IOSTAT_LS_RJT) { 9791 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9792 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9793 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9794 sizeof(struct lpfc_name)); 9795 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9796 sizeof(struct lpfc_name)); 9797 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt; 9798 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9799 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4); 9800 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9801 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9802 fc_host_post_vendor_event(shost, 9803 fc_get_event_number(), 9804 sizeof(lsrjt_event), 9805 (char *)&lsrjt_event, 9806 LPFC_NL_VENDOR_ID); 9807 return; 9808 } 9809 if (ulp_status == IOSTAT_NPORT_BSY || 9810 ulp_status == IOSTAT_FABRIC_BSY) { 9811 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9812 if (ulp_status == IOSTAT_NPORT_BSY) 9813 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9814 else 9815 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9816 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9817 sizeof(struct lpfc_name)); 9818 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9819 sizeof(struct lpfc_name)); 9820 fc_host_post_vendor_event(shost, 9821 fc_get_event_number(), 9822 sizeof(fabric_event), 9823 (char *)&fabric_event, 9824 LPFC_NL_VENDOR_ID); 9825 return; 9826 } 9827 9828 } 9829 9830 /** 9831 * lpfc_send_els_event - Posts unsolicited els event 9832 * @vport: Pointer to vport object. 9833 * @ndlp: Pointer FC node object. 9834 * @payload: ELS command code type. 9835 * 9836 * This function posts an event when there is an incoming 9837 * unsolicited ELS command. 
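 *
 * Only PLOGI, PRLO, ADISC and LOGO commands generate an event; for a LOGO
 * the remote port WWPN carried in the payload is also copied into the
 * event data.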
9838 **/ 9839 static void 9840 lpfc_send_els_event(struct lpfc_vport *vport, 9841 struct lpfc_nodelist *ndlp, 9842 uint32_t *payload) 9843 { 9844 struct lpfc_els_event_header *els_data = NULL; 9845 struct lpfc_logo_event *logo_data = NULL; 9846 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9847 9848 if (*payload == ELS_CMD_LOGO) { 9849 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9850 if (!logo_data) { 9851 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9852 "0148 Failed to allocate memory " 9853 "for LOGO event\n"); 9854 return; 9855 } 9856 els_data = &logo_data->header; 9857 } else { 9858 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9859 GFP_KERNEL); 9860 if (!els_data) { 9861 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9862 "0149 Failed to allocate memory " 9863 "for ELS event\n"); 9864 return; 9865 } 9866 } 9867 els_data->event_type = FC_REG_ELS_EVENT; 9868 switch (*payload) { 9869 case ELS_CMD_PLOGI: 9870 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9871 break; 9872 case ELS_CMD_PRLO: 9873 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9874 break; 9875 case ELS_CMD_ADISC: 9876 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9877 break; 9878 case ELS_CMD_LOGO: 9879 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9880 /* Copy the WWPN in the LOGO payload */ 9881 memcpy(logo_data->logo_wwpn, &payload[2], 9882 sizeof(struct lpfc_name)); 9883 break; 9884 default: 9885 kfree(els_data); 9886 return; 9887 } 9888 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9889 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9890 if (*payload == ELS_CMD_LOGO) { 9891 fc_host_post_vendor_event(shost, 9892 fc_get_event_number(), 9893 sizeof(struct lpfc_logo_event), 9894 (char *)logo_data, 9895 LPFC_NL_VENDOR_ID); 9896 kfree(logo_data); 9897 } else { 9898 fc_host_post_vendor_event(shost, 9899 fc_get_event_number(), 9900 sizeof(struct lpfc_els_event_header), 9901 (char *)els_data, 9902 LPFC_NL_VENDOR_ID); 9903 kfree(els_data); 9904 } 9905 9906 return; 9907 } 9908 9909 9910 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9911 FC_FPIN_LI_EVT_TYPES_INIT); 9912 9913 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9914 FC_FPIN_DELI_EVT_TYPES_INIT); 9915 9916 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9917 FC_FPIN_CONGN_EVT_TYPES_INIT); 9918 9919 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9920 fc_fpin_congn_severity_types, 9921 FC_FPIN_CONGN_SEVERITY_INIT); 9922 9923 9924 /** 9925 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9926 * @phba: Pointer to phba object. 9927 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9928 * @cnt: count of WWPNs in FPIN payload 9929 * 9930 * This routine is called by LI and PC descriptors. 
9931 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9932 */ 9933 static void 9934 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9935 { 9936 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9937 __be64 wwn; 9938 u64 wwpn; 9939 int i, len; 9940 int line = 0; 9941 int wcnt = 0; 9942 bool endit = false; 9943 9944 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9945 for (i = 0; i < cnt; i++) { 9946 /* Are we on the last WWPN */ 9947 if (i == (cnt - 1)) 9948 endit = true; 9949 9950 /* Extract the next WWPN from the payload */ 9951 wwn = *wwnlist++; 9952 wwpn = be64_to_cpu(wwn); 9953 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9954 " %016llx", wwpn); 9955 9956 /* Log a message if we are on the last WWPN 9957 * or if we hit the max allowed per message. 9958 */ 9959 wcnt++; 9960 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9961 buf[len] = 0; 9962 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9963 "4686 %s\n", buf); 9964 9965 /* Check if we reached the last WWPN */ 9966 if (endit) 9967 return; 9968 9969 /* Limit the number of log message displayed per FPIN */ 9970 line++; 9971 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9972 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9973 "4687 %d WWPNs Truncated\n", 9974 cnt - i - 1); 9975 return; 9976 } 9977 9978 /* Start over with next log message */ 9979 wcnt = 0; 9980 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9981 "Additional WWPNs:"); 9982 } 9983 } 9984 } 9985 9986 /** 9987 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9988 * @phba: Pointer to phba object. 9989 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9990 * 9991 * This function processes a Link Integrity FPIN event by logging a message. 9992 **/ 9993 static void 9994 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9995 { 9996 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9997 const char *li_evt_str; 9998 u32 li_evt, cnt; 9999 10000 li_evt = be16_to_cpu(li->event_type); 10001 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 10002 cnt = be32_to_cpu(li->pname_count); 10003 10004 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10005 "4680 FPIN Link Integrity %s (x%x) " 10006 "Detecting PN x%016llx Attached PN x%016llx " 10007 "Duration %d mSecs Count %d Port Cnt %d\n", 10008 li_evt_str, li_evt, 10009 be64_to_cpu(li->detecting_wwpn), 10010 be64_to_cpu(li->attached_wwpn), 10011 be32_to_cpu(li->event_threshold), 10012 be32_to_cpu(li->event_count), cnt); 10013 10014 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 10015 } 10016 10017 /** 10018 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 10019 * @phba: Pointer to hba object. 10020 * @tlv: Pointer to the Delivery Notification Descriptor TLV 10021 * 10022 * This function processes a Delivery FPIN event by logging a message. 
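 *
 * The six payload words that follow the descriptor header (the header of
 * the discarded frame) are logged as DiscHdr0 through DiscHdr5.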
10023 **/
10024 static void
10025 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10026 {
10027 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv;
10028 const char *del_rsn_str;
10029 u32 del_rsn;
10030 __be32 *frame;
10031
10032 del_rsn = be16_to_cpu(del->deli_reason_code);
10033 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn);
10034
10035 /* Skip over desc_tag/desc_len header to payload */
10036 frame = (__be32 *)(del + 1);
10037
10038 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10039 "4681 FPIN Delivery %s (x%x) "
10040 "Detecting PN x%016llx Attached PN x%016llx "
10041 "DiscHdr0 x%08x "
10042 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x "
10043 "DiscHdr4 x%08x DiscHdr5 x%08x\n",
10044 del_rsn_str, del_rsn,
10045 be64_to_cpu(del->detecting_wwpn),
10046 be64_to_cpu(del->attached_wwpn),
10047 be32_to_cpu(frame[0]),
10048 be32_to_cpu(frame[1]),
10049 be32_to_cpu(frame[2]),
10050 be32_to_cpu(frame[3]),
10051 be32_to_cpu(frame[4]),
10052 be32_to_cpu(frame[5]));
10053 }
10054
10055 /**
10056 * lpfc_els_rcv_fpin_peer_cgn - Process an FPIN Peer Congestion Event.
10057 * @phba: Pointer to hba object.
10058 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV
10059 *
10060 * This function processes a Peer Congestion FPIN event by logging a message.
10061 **/
10062 static void
10063 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10064 {
10065 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv;
10066 const char *pc_evt_str;
10067 u32 pc_evt, cnt;
10068
10069 pc_evt = be16_to_cpu(pc->event_type);
10070 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
10071 cnt = be32_to_cpu(pc->pname_count);
10072
10073 /* Capture FPIN frequency */
10074 phba->cgn_fpin_frequency = be32_to_cpu(pc->event_period);
10075
10076 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
10077 "4684 FPIN Peer Congestion %s (x%x) "
10078 "Duration %d mSecs "
10079 "Detecting PN x%016llx Attached PN x%016llx "
10080 "Impacted Port Cnt %d\n",
10081 pc_evt_str, pc_evt,
10082 be32_to_cpu(pc->event_period),
10083 be64_to_cpu(pc->detecting_wwpn),
10084 be64_to_cpu(pc->attached_wwpn),
10085 cnt);
10086
10087 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt);
10088 }
10089
10090 /**
10091 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification
10092 * @phba: Pointer to hba object.
10093 * @tlv: Pointer to the Congestion Notification Descriptor TLV
10094 *
10095 * This function processes an FPIN Congestion Notification. The notification
10096 * can be an Alarm or a Warning. This routine feeds that data into the
10097 * driver's running congestion algorithm and also logs the FPIN.
10098 * It returns 1 if the message should be delivered to the upper layer
10099 * or 0 if it should not.
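 *
 * For Credit Stall and Oversubscription events of Alarm or Warning
 * severity, if congestion management is enabled, the driver updates its
 * congestion counters and FPIN frequency and returns 0 (not delivered).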
10100 **/ 10101 static int 10102 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 10103 { 10104 struct lpfc_cgn_info *cp; 10105 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 10106 const char *cgn_evt_str; 10107 u32 cgn_evt; 10108 const char *cgn_sev_str; 10109 u32 cgn_sev; 10110 uint16_t value; 10111 u32 crc; 10112 bool nm_log = false; 10113 int rc = 1; 10114 10115 cgn_evt = be16_to_cpu(cgn->event_type); 10116 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 10117 cgn_sev = cgn->severity; 10118 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 10119 10120 /* The driver only takes action on a Credit Stall or Oversubscription 10121 * event type to engage the IO algorithm. The driver prints an 10122 * unmaskable message only for Lost Credit and Credit Stall. 10123 * TODO: Still need to have definition of host action on clear, 10124 * lost credit and device specific event types. 10125 */ 10126 switch (cgn_evt) { 10127 case FPIN_CONGN_LOST_CREDIT: 10128 nm_log = true; 10129 break; 10130 case FPIN_CONGN_CREDIT_STALL: 10131 nm_log = true; 10132 fallthrough; 10133 case FPIN_CONGN_OVERSUBSCRIPTION: 10134 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 10135 nm_log = false; 10136 switch (cgn_sev) { 10137 case FPIN_CONGN_SEVERITY_ERROR: 10138 /* Take action here for an Alarm event */ 10139 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10140 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 10141 /* Track of alarm cnt for SYNC_WQE */ 10142 atomic_inc(&phba->cgn_sync_alarm_cnt); 10143 } 10144 /* Track alarm cnt for cgn_info regardless 10145 * of whether CMF is configured for Signals 10146 * or FPINs. 10147 */ 10148 atomic_inc(&phba->cgn_fabric_alarm_cnt); 10149 goto cleanup; 10150 } 10151 break; 10152 case FPIN_CONGN_SEVERITY_WARNING: 10153 /* Take action here for a Warning event */ 10154 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 10155 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 10156 /* Track of warning cnt for SYNC_WQE */ 10157 atomic_inc(&phba->cgn_sync_warn_cnt); 10158 } 10159 /* Track warning cnt and freq for cgn_info 10160 * regardless of whether CMF is configured for 10161 * Signals or FPINs. 10162 */ 10163 atomic_inc(&phba->cgn_fabric_warn_cnt); 10164 cleanup: 10165 /* Save frequency in ms */ 10166 phba->cgn_fpin_frequency = 10167 be32_to_cpu(cgn->event_period); 10168 value = phba->cgn_fpin_frequency; 10169 if (phba->cgn_i) { 10170 cp = (struct lpfc_cgn_info *) 10171 phba->cgn_i->virt; 10172 cp->cgn_alarm_freq = 10173 cpu_to_le16(value); 10174 cp->cgn_warn_freq = 10175 cpu_to_le16(value); 10176 crc = lpfc_cgn_calc_crc32 10177 (cp, 10178 LPFC_CGN_INFO_SZ, 10179 LPFC_CGN_CRC32_SEED); 10180 cp->cgn_info_crc = cpu_to_le32(crc); 10181 } 10182 10183 /* Don't deliver to upper layer since 10184 * driver took action on this tlv. 10185 */ 10186 rc = 0; 10187 } 10188 break; 10189 } 10190 break; 10191 } 10192 10193 /* Change the log level to unmaskable for the following event types. */ 10194 lpfc_printf_log(phba, (nm_log ? 
KERN_WARNING : KERN_INFO), 10195 LOG_CGN_MGMT | LOG_ELS, 10196 "4683 FPIN CONGESTION %s type %s (x%x) Event " 10197 "Duration %d mSecs\n", 10198 cgn_sev_str, cgn_evt_str, cgn_evt, 10199 be32_to_cpu(cgn->event_period)); 10200 return rc; 10201 } 10202 10203 void 10204 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 10205 { 10206 struct lpfc_hba *phba = vport->phba; 10207 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 10208 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 10209 const char *dtag_nm; 10210 int desc_cnt = 0, bytes_remain, cnt; 10211 u32 dtag, deliver = 0; 10212 int len; 10213 10214 /* FPINs handled only if we are in the right discovery state */ 10215 if (vport->port_state < LPFC_DISC_AUTH) 10216 return; 10217 10218 /* make sure there is the full fpin header */ 10219 if (fpin_length < sizeof(struct fc_els_fpin)) 10220 return; 10221 10222 /* Sanity check descriptor length. The desc_len value does not 10223 * include space for the ELS command and the desc_len fields. 10224 */ 10225 len = be32_to_cpu(fpin->desc_len); 10226 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 10227 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10228 "4671 Bad ELS FPIN length %d: %d\n", 10229 len, fpin_length); 10230 return; 10231 } 10232 10233 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 10234 first_tlv = tlv; 10235 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 10236 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 10237 10238 /* process each descriptor separately */ 10239 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 10240 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 10241 dtag = be32_to_cpu(tlv->desc_tag); 10242 switch (dtag) { 10243 case ELS_DTAG_LNK_INTEGRITY: 10244 lpfc_els_rcv_fpin_li(phba, tlv); 10245 deliver = 1; 10246 break; 10247 case ELS_DTAG_DELIVERY: 10248 lpfc_els_rcv_fpin_del(phba, tlv); 10249 deliver = 1; 10250 break; 10251 case ELS_DTAG_PEER_CONGEST: 10252 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 10253 deliver = 1; 10254 break; 10255 case ELS_DTAG_CONGESTION: 10256 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 10257 break; 10258 default: 10259 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10260 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10261 "4678 unknown FPIN descriptor[%d]: " 10262 "tag x%x (%s)\n", 10263 desc_cnt, dtag, dtag_nm); 10264 10265 /* If descriptor is bad, drop the rest of the data */ 10266 return; 10267 } 10268 lpfc_cgn_update_stat(phba, dtag); 10269 cnt = be32_to_cpu(tlv->desc_len); 10270 10271 /* Sanity check descriptor length. The desc_len value does not 10272 * include space for the desc_tag and the desc_len fields. 10273 */ 10274 len -= (cnt + sizeof(struct fc_tlv_desc)); 10275 if (len < 0) { 10276 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 10277 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 10278 "4672 Bad FPIN descriptor TLV length " 10279 "%d: %d %d %s\n", 10280 cnt, len, fpin_length, dtag_nm); 10281 return; 10282 } 10283 10284 current_tlv = tlv; 10285 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 10286 tlv = fc_tlv_next_desc(tlv); 10287 10288 /* Format payload such that the FPIN delivered to the 10289 * upper layer is a single descriptor FPIN. 10290 */ 10291 if (desc_cnt) 10292 memcpy(first_tlv, current_tlv, 10293 (cnt + sizeof(struct fc_els_fpin))); 10294 10295 /* Adjust the length so that it only reflects a 10296 * single descriptor FPIN. 
10297 */ 10298 fpin_length = cnt + sizeof(struct fc_els_fpin); 10299 fpin->desc_len = cpu_to_be32(fpin_length); 10300 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 10301 10302 /* Send every descriptor individually to the upper layer */ 10303 if (deliver) 10304 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 10305 fpin_length, (char *)fpin, 0); 10306 desc_cnt++; 10307 } 10308 } 10309 10310 /** 10311 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 10312 * @phba: pointer to lpfc hba data structure. 10313 * @pring: pointer to a SLI ring. 10314 * @vport: pointer to a host virtual N_Port data structure. 10315 * @elsiocb: pointer to lpfc els command iocb data structure. 10316 * 10317 * This routine is used for processing the IOCB associated with a unsolicited 10318 * event. It first determines whether there is an existing ndlp that matches 10319 * the DID from the unsolicited IOCB. If not, it will create a new one with 10320 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 10321 * IOCB is then used to invoke the proper routine and to set up proper state 10322 * of the discovery state machine. 10323 **/ 10324 static void 10325 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10326 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 10327 { 10328 struct lpfc_nodelist *ndlp; 10329 struct ls_rjt stat; 10330 u32 *payload, payload_len; 10331 u32 cmd = 0, did = 0, newnode, status = 0; 10332 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 10333 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10334 LPFC_MBOXQ_t *mbox; 10335 10336 if (!vport || !elsiocb->cmd_dmabuf) 10337 goto dropit; 10338 10339 newnode = 0; 10340 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10341 payload = elsiocb->cmd_dmabuf->virt; 10342 if (phba->sli_rev == LPFC_SLI_REV4) 10343 payload_len = wcqe_cmpl->total_data_placed; 10344 else 10345 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 10346 status = get_job_ulpstatus(phba, elsiocb); 10347 cmd = *payload; 10348 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 10349 lpfc_sli3_post_buffer(phba, pring, 1); 10350 10351 did = get_job_els_rsp64_did(phba, elsiocb); 10352 if (status) { 10353 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10354 "RCV Unsol ELS: status:x%x/x%x did:x%x", 10355 status, get_job_word4(phba, elsiocb), did); 10356 goto dropit; 10357 } 10358 10359 /* Check to see if link went down during discovery */ 10360 if (lpfc_els_chk_latt(vport)) 10361 goto dropit; 10362 10363 /* Ignore traffic received during vport shutdown. 
*/ 10364 if (test_bit(FC_UNLOADING, &vport->load_flag)) 10365 goto dropit; 10366 10367 /* If NPort discovery is delayed drop incoming ELS */ 10368 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag) && 10369 cmd != ELS_CMD_PLOGI) 10370 goto dropit; 10371 10372 ndlp = lpfc_findnode_did(vport, did); 10373 if (!ndlp) { 10374 /* Cannot find existing Fabric ndlp, so allocate a new one */ 10375 ndlp = lpfc_nlp_init(vport, did); 10376 if (!ndlp) 10377 goto dropit; 10378 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10379 newnode = 1; 10380 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 10381 ndlp->nlp_type |= NLP_FABRIC; 10382 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 10383 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 10384 newnode = 1; 10385 } 10386 10387 phba->fc_stat.elsRcvFrame++; 10388 10389 /* 10390 * Do not process any unsolicited ELS commands 10391 * if the ndlp is in DEV_LOSS 10392 */ 10393 spin_lock_irq(&ndlp->lock); 10394 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 10395 spin_unlock_irq(&ndlp->lock); 10396 if (newnode) 10397 lpfc_nlp_put(ndlp); 10398 goto dropit; 10399 } 10400 spin_unlock_irq(&ndlp->lock); 10401 10402 elsiocb->ndlp = lpfc_nlp_get(ndlp); 10403 if (!elsiocb->ndlp) 10404 goto dropit; 10405 elsiocb->vport = vport; 10406 10407 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 10408 cmd &= ELS_CMD_MASK; 10409 } 10410 /* ELS command <elsCmd> received from NPORT <did> */ 10411 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10412 "0112 ELS command x%x received from NPORT x%x " 10413 "refcnt %d Data: x%x x%lx x%x x%x\n", 10414 cmd, did, kref_read(&ndlp->kref), vport->port_state, 10415 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 10416 10417 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 10418 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 10419 (cmd != ELS_CMD_FLOGI) && 10420 !((cmd == ELS_CMD_PLOGI) && test_bit(FC_PT2PT, &vport->fc_flag))) { 10421 rjt_err = LSRJT_LOGICAL_BSY; 10422 rjt_exp = LSEXP_NOTHING_MORE; 10423 goto lsrjt; 10424 } 10425 10426 switch (cmd) { 10427 case ELS_CMD_PLOGI: 10428 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10429 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 10430 did, vport->port_state, ndlp->nlp_flag); 10431 10432 phba->fc_stat.elsRcvPLOGI++; 10433 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 10434 if (phba->sli_rev == LPFC_SLI_REV4 && 10435 test_bit(FC_PT2PT, &phba->pport->fc_flag)) { 10436 vport->fc_prevDID = vport->fc_myDID; 10437 /* Our DID needs to be updated before registering 10438 * the vfi. This is done in lpfc_rcv_plogi but 10439 * that is called after the reg_vfi. 
10440 */ 10441 vport->fc_myDID = 10442 bf_get(els_rsp64_sid, 10443 &elsiocb->wqe.xmit_els_rsp); 10444 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10445 "3312 Remote port assigned DID x%x " 10446 "%x\n", vport->fc_myDID, 10447 vport->fc_prevDID); 10448 } 10449 10450 lpfc_send_els_event(vport, ndlp, payload); 10451 10452 /* If Nport discovery is delayed, reject PLOGIs */ 10453 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) { 10454 rjt_err = LSRJT_UNABLE_TPC; 10455 rjt_exp = LSEXP_NOTHING_MORE; 10456 break; 10457 } 10458 10459 if (vport->port_state < LPFC_DISC_AUTH) { 10460 if (!test_bit(FC_PT2PT, &phba->pport->fc_flag) || 10461 test_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag)) { 10462 rjt_err = LSRJT_UNABLE_TPC; 10463 rjt_exp = LSEXP_NOTHING_MORE; 10464 break; 10465 } 10466 } 10467 10468 spin_lock_irq(&ndlp->lock); 10469 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 10470 spin_unlock_irq(&ndlp->lock); 10471 10472 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10473 NLP_EVT_RCV_PLOGI); 10474 10475 break; 10476 case ELS_CMD_FLOGI: 10477 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10478 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 10479 did, vport->port_state, ndlp->nlp_flag); 10480 10481 phba->fc_stat.elsRcvFLOGI++; 10482 10483 /* If the driver believes fabric discovery is done and is ready, 10484 * bounce the link. There is some descrepancy. 10485 */ 10486 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 10487 test_bit(FC_PT2PT, &vport->fc_flag) && 10488 vport->rcv_flogi_cnt >= 1) { 10489 rjt_err = LSRJT_LOGICAL_BSY; 10490 rjt_exp = LSEXP_NOTHING_MORE; 10491 init_link++; 10492 goto lsrjt; 10493 } 10494 10495 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 10496 /* retain node if our response is deferred */ 10497 if (phba->defer_flogi_acc_flag) 10498 break; 10499 if (newnode) 10500 lpfc_disc_state_machine(vport, ndlp, NULL, 10501 NLP_EVT_DEVICE_RM); 10502 break; 10503 case ELS_CMD_LOGO: 10504 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10505 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 10506 did, vport->port_state, ndlp->nlp_flag); 10507 10508 phba->fc_stat.elsRcvLOGO++; 10509 lpfc_send_els_event(vport, ndlp, payload); 10510 if (vport->port_state < LPFC_DISC_AUTH) { 10511 rjt_err = LSRJT_UNABLE_TPC; 10512 rjt_exp = LSEXP_NOTHING_MORE; 10513 break; 10514 } 10515 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 10516 if (newnode) 10517 lpfc_disc_state_machine(vport, ndlp, NULL, 10518 NLP_EVT_DEVICE_RM); 10519 break; 10520 case ELS_CMD_PRLO: 10521 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10522 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 10523 did, vport->port_state, ndlp->nlp_flag); 10524 10525 phba->fc_stat.elsRcvPRLO++; 10526 lpfc_send_els_event(vport, ndlp, payload); 10527 if (vport->port_state < LPFC_DISC_AUTH) { 10528 rjt_err = LSRJT_UNABLE_TPC; 10529 rjt_exp = LSEXP_NOTHING_MORE; 10530 break; 10531 } 10532 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 10533 break; 10534 case ELS_CMD_LCB: 10535 phba->fc_stat.elsRcvLCB++; 10536 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 10537 break; 10538 case ELS_CMD_RDP: 10539 phba->fc_stat.elsRcvRDP++; 10540 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 10541 break; 10542 case ELS_CMD_RSCN: 10543 phba->fc_stat.elsRcvRSCN++; 10544 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 10545 if (newnode) 10546 lpfc_disc_state_machine(vport, ndlp, NULL, 10547 NLP_EVT_DEVICE_RM); 10548 break; 10549 case ELS_CMD_ADISC: 10550 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10551 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 10552 did, vport->port_state, 
ndlp->nlp_flag); 10553 10554 lpfc_send_els_event(vport, ndlp, payload); 10555 phba->fc_stat.elsRcvADISC++; 10556 if (vport->port_state < LPFC_DISC_AUTH) { 10557 rjt_err = LSRJT_UNABLE_TPC; 10558 rjt_exp = LSEXP_NOTHING_MORE; 10559 break; 10560 } 10561 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10562 NLP_EVT_RCV_ADISC); 10563 break; 10564 case ELS_CMD_PDISC: 10565 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10566 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 10567 did, vport->port_state, ndlp->nlp_flag); 10568 10569 phba->fc_stat.elsRcvPDISC++; 10570 if (vport->port_state < LPFC_DISC_AUTH) { 10571 rjt_err = LSRJT_UNABLE_TPC; 10572 rjt_exp = LSEXP_NOTHING_MORE; 10573 break; 10574 } 10575 lpfc_disc_state_machine(vport, ndlp, elsiocb, 10576 NLP_EVT_RCV_PDISC); 10577 break; 10578 case ELS_CMD_FARPR: 10579 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10580 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10581 did, vport->port_state, ndlp->nlp_flag); 10582 10583 phba->fc_stat.elsRcvFARPR++; 10584 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10585 break; 10586 case ELS_CMD_FARP: 10587 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10588 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10589 did, vport->port_state, ndlp->nlp_flag); 10590 10591 phba->fc_stat.elsRcvFARP++; 10592 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10593 break; 10594 case ELS_CMD_FAN: 10595 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10596 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10597 did, vport->port_state, ndlp->nlp_flag); 10598 10599 phba->fc_stat.elsRcvFAN++; 10600 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10601 break; 10602 case ELS_CMD_PRLI: 10603 case ELS_CMD_NVMEPRLI: 10604 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10605 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10606 did, vport->port_state, ndlp->nlp_flag); 10607 10608 phba->fc_stat.elsRcvPRLI++; 10609 if ((vport->port_state < LPFC_DISC_AUTH) && 10610 test_bit(FC_FABRIC, &vport->fc_flag)) { 10611 rjt_err = LSRJT_UNABLE_TPC; 10612 rjt_exp = LSEXP_NOTHING_MORE; 10613 break; 10614 } 10615 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10616 break; 10617 case ELS_CMD_LIRR: 10618 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10619 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10620 did, vport->port_state, ndlp->nlp_flag); 10621 10622 phba->fc_stat.elsRcvLIRR++; 10623 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10624 if (newnode) 10625 lpfc_disc_state_machine(vport, ndlp, NULL, 10626 NLP_EVT_DEVICE_RM); 10627 break; 10628 case ELS_CMD_RLS: 10629 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10630 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10631 did, vport->port_state, ndlp->nlp_flag); 10632 10633 phba->fc_stat.elsRcvRLS++; 10634 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10635 if (newnode) 10636 lpfc_disc_state_machine(vport, ndlp, NULL, 10637 NLP_EVT_DEVICE_RM); 10638 break; 10639 case ELS_CMD_RPL: 10640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10641 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10642 did, vport->port_state, ndlp->nlp_flag); 10643 10644 phba->fc_stat.elsRcvRPL++; 10645 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10646 if (newnode) 10647 lpfc_disc_state_machine(vport, ndlp, NULL, 10648 NLP_EVT_DEVICE_RM); 10649 break; 10650 case ELS_CMD_RNID: 10651 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10652 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10653 did, vport->port_state, ndlp->nlp_flag); 10654 10655 phba->fc_stat.elsRcvRNID++; 10656 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10657 if (newnode) 10658 lpfc_disc_state_machine(vport, ndlp, NULL, 
10659 NLP_EVT_DEVICE_RM); 10660 break; 10661 case ELS_CMD_RTV: 10662 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10663 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10664 did, vport->port_state, ndlp->nlp_flag); 10665 phba->fc_stat.elsRcvRTV++; 10666 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10667 if (newnode) 10668 lpfc_disc_state_machine(vport, ndlp, NULL, 10669 NLP_EVT_DEVICE_RM); 10670 break; 10671 case ELS_CMD_RRQ: 10672 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10673 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10674 did, vport->port_state, ndlp->nlp_flag); 10675 10676 phba->fc_stat.elsRcvRRQ++; 10677 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10678 if (newnode) 10679 lpfc_disc_state_machine(vport, ndlp, NULL, 10680 NLP_EVT_DEVICE_RM); 10681 break; 10682 case ELS_CMD_ECHO: 10683 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10684 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10685 did, vport->port_state, ndlp->nlp_flag); 10686 10687 phba->fc_stat.elsRcvECHO++; 10688 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10689 if (newnode) 10690 lpfc_disc_state_machine(vport, ndlp, NULL, 10691 NLP_EVT_DEVICE_RM); 10692 break; 10693 case ELS_CMD_REC: 10694 /* receive this due to exchange closed */ 10695 rjt_err = LSRJT_UNABLE_TPC; 10696 rjt_exp = LSEXP_INVALID_OX_RX; 10697 break; 10698 case ELS_CMD_FPIN: 10699 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10700 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10701 did, vport->port_state, ndlp->nlp_flag); 10702 10703 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10704 payload_len); 10705 10706 /* There are no replies, so no rjt codes */ 10707 break; 10708 case ELS_CMD_EDC: 10709 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10710 break; 10711 case ELS_CMD_RDF: 10712 phba->fc_stat.elsRcvRDF++; 10713 /* Accept RDF only from fabric controller */ 10714 if (did != Fabric_Cntl_DID) { 10715 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10716 "1115 Received RDF from invalid DID " 10717 "x%x\n", did); 10718 rjt_err = LSRJT_PROTOCOL_ERR; 10719 rjt_exp = LSEXP_NOTHING_MORE; 10720 goto lsrjt; 10721 } 10722 10723 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10724 break; 10725 default: 10726 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10727 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10728 cmd, did, vport->port_state); 10729 10730 /* Unsupported ELS command, reject */ 10731 rjt_err = LSRJT_CMD_UNSUPPORTED; 10732 rjt_exp = LSEXP_NOTHING_MORE; 10733 10734 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10735 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10736 "0115 Unknown ELS command x%x " 10737 "received from NPORT x%x\n", cmd, did); 10738 if (newnode) 10739 lpfc_disc_state_machine(vport, ndlp, NULL, 10740 NLP_EVT_DEVICE_RM); 10741 break; 10742 } 10743 10744 lsrjt: 10745 /* check if need to LS_RJT received ELS cmd */ 10746 if (rjt_err) { 10747 memset(&stat, 0, sizeof(stat)); 10748 stat.un.b.lsRjtRsnCode = rjt_err; 10749 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10750 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10751 NULL); 10752 /* Remove the reference from above for new nodes. */ 10753 if (newnode) 10754 lpfc_disc_state_machine(vport, ndlp, NULL, 10755 NLP_EVT_DEVICE_RM); 10756 } 10757 10758 /* Release the reference on this elsiocb, not the ndlp. */ 10759 lpfc_nlp_put(elsiocb->ndlp); 10760 elsiocb->ndlp = NULL; 10761 10762 /* Special case. Driver received an unsolicited command that 10763 * unsupportable given the driver's current state. Reset the 10764 * link and start over. 
10765 */ 10766 if (init_link) { 10767 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10768 if (!mbox) 10769 return; 10770 lpfc_linkdown(phba); 10771 lpfc_init_link(phba, mbox, 10772 phba->cfg_topology, 10773 phba->cfg_link_speed); 10774 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10775 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10776 mbox->vport = vport; 10777 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10778 MBX_NOT_FINISHED) 10779 mempool_free(mbox, phba->mbox_mem_pool); 10780 } 10781 10782 return; 10783 10784 dropit: 10785 if (vport && !test_bit(FC_UNLOADING, &vport->load_flag)) 10786 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10787 "0111 Dropping received ELS cmd " 10788 "Data: x%x x%x x%x x%x\n", 10789 cmd, status, get_job_word4(phba, elsiocb), did); 10790 10791 phba->fc_stat.elsRcvDrop++; 10792 } 10793 10794 /** 10795 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10796 * @phba: pointer to lpfc hba data structure. 10797 * @pring: pointer to a SLI ring. 10798 * @elsiocb: pointer to lpfc els iocb data structure. 10799 * 10800 * This routine is used to process an unsolicited event received from a SLI 10801 * (Service Level Interface) ring. The actual processing of the data buffer 10802 * associated with the unsolicited event is done by invoking the routine 10803 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10804 * SLI ring on which the unsolicited event was received. 10805 **/ 10806 void 10807 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10808 struct lpfc_iocbq *elsiocb) 10809 { 10810 struct lpfc_vport *vport = elsiocb->vport; 10811 u32 ulp_command, status, parameter, bde_count = 0; 10812 IOCB_t *icmd; 10813 struct lpfc_wcqe_complete *wcqe_cmpl = NULL; 10814 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf; 10815 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf; 10816 dma_addr_t paddr; 10817 10818 elsiocb->cmd_dmabuf = NULL; 10819 elsiocb->rsp_dmabuf = NULL; 10820 elsiocb->bpl_dmabuf = NULL; 10821 10822 wcqe_cmpl = &elsiocb->wcqe_cmpl; 10823 ulp_command = get_job_cmnd(phba, elsiocb); 10824 status = get_job_ulpstatus(phba, elsiocb); 10825 parameter = get_job_word4(phba, elsiocb); 10826 if (phba->sli_rev == LPFC_SLI_REV4) 10827 bde_count = wcqe_cmpl->word3; 10828 else 10829 bde_count = elsiocb->iocb.ulpBdeCount; 10830 10831 if (status == IOSTAT_NEED_BUFFER) { 10832 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10833 } else if (status == IOSTAT_LOCAL_REJECT && 10834 (parameter & IOERR_PARAM_MASK) == 10835 IOERR_RCV_BUFFER_WAITING) { 10836 phba->fc_stat.NoRcvBuf++; 10837 /* Not enough posted buffers; Try posting more buffers */ 10838 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10839 lpfc_sli3_post_buffer(phba, pring, 0); 10840 return; 10841 } 10842 10843 if (phba->sli_rev == LPFC_SLI_REV3) { 10844 icmd = &elsiocb->iocb; 10845 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10846 (ulp_command == CMD_IOCB_RCV_ELS64_CX || 10847 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) { 10848 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10849 vport = phba->pport; 10850 else 10851 vport = lpfc_find_vport_by_vpid(phba, 10852 icmd->unsli3.rcvsli3.vpi); 10853 } 10854 } 10855 10856 /* If there are no BDEs associated 10857 * with this IOCB, there is nothing to do. 
10858 */
10859 if (bde_count == 0)
10860 return;
10861
10862 /* Account for SLI2 or SLI3 and later unsolicited buffering */
10863 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
10864 elsiocb->cmd_dmabuf = bdeBuf1;
10865 if (bde_count == 2)
10866 elsiocb->bpl_dmabuf = bdeBuf2;
10867 } else {
10868 icmd = &elsiocb->iocb;
10869 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
10870 icmd->un.cont64[0].addrLow);
10871 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
10872 paddr);
10873 if (bde_count == 2) {
10874 paddr = getPaddr(icmd->un.cont64[1].addrHigh,
10875 icmd->un.cont64[1].addrLow);
10876 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
10877 pring,
10878 paddr);
10879 }
10880 }
10881
10882 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
10883 /*
10884 * The different unsolicited event handlers would tell us
10885 * if they are done with "mp" by setting cmd_dmabuf to NULL.
10886 */
10887 if (elsiocb->cmd_dmabuf) {
10888 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
10889 elsiocb->cmd_dmabuf = NULL;
10890 }
10891
10892 if (elsiocb->bpl_dmabuf) {
10893 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
10894 elsiocb->bpl_dmabuf = NULL;
10895 }
10896
10897 }
10898
10899 static void
10900 lpfc_start_fdmi(struct lpfc_vport *vport)
10901 {
10902 struct lpfc_nodelist *ndlp;
10903
10904 /* If this is the first time, allocate an ndlp and initialize
10905 * it. Otherwise, make sure the node is enabled and then do the
10906 * login.
10907 */
10908 ndlp = lpfc_findnode_did(vport, FDMI_DID);
10909 if (!ndlp) {
10910 ndlp = lpfc_nlp_init(vport, FDMI_DID);
10911 if (ndlp) {
10912 ndlp->nlp_type |= NLP_FABRIC;
10913 } else {
10914 return;
10915 }
10916 }
10917
10918 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10919 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
10920 }
10921
10922 /**
10923 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
10924 * @phba: pointer to lpfc hba data structure.
10925 * @vport: pointer to a virtual N_Port data structure.
10926 *
10927 * This routine issues a Port Login (PLOGI) to the Name Server with
10928 * State Change Request (SCR) for a @vport. This routine will create an
10929 * ndlp for the Name Server associated with the @vport if such a node does
10930 * not already exist. The PLOGI to Name Server is issued by invoking the
10931 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
10932 * (FDMI) is configured for the @vport, an FDMI node will be created and
10933 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
10934 **/
10935 void
10936 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
10937 {
10938 struct lpfc_nodelist *ndlp;
10939
10940 /*
10941 * If the lpfc_delay_discovery parameter is set and the clean address
10942 * bit is cleared and the FC fabric parameters changed, delay FC NPort
10943 * discovery.
10944 */ 10945 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) { 10946 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10947 "3334 Delay fc port discovery for %d secs\n", 10948 phba->fc_ratov); 10949 mod_timer(&vport->delayed_disc_tmo, 10950 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10951 return; 10952 } 10953 10954 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10955 if (!ndlp) { 10956 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10957 if (!ndlp) { 10958 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10959 lpfc_disc_start(vport); 10960 return; 10961 } 10962 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10963 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10964 "0251 NameServer login: no memory\n"); 10965 return; 10966 } 10967 } 10968 10969 ndlp->nlp_type |= NLP_FABRIC; 10970 10971 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10972 10973 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10974 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10975 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10976 "0252 Cannot issue NameServer login\n"); 10977 return; 10978 } 10979 10980 if ((phba->cfg_enable_SmartSAN || 10981 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) && 10982 test_bit(FC_ALLOW_FDMI, &vport->load_flag)) 10983 lpfc_start_fdmi(vport); 10984 } 10985 10986 /** 10987 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10988 * @phba: pointer to lpfc hba data structure. 10989 * @pmb: pointer to the driver internal queue element for mailbox command. 10990 * 10991 * This routine is the completion callback function to register new vport 10992 * mailbox command. If the new vport mailbox command completes successfully, 10993 * the fabric registration login shall be performed on physical port (the 10994 * new vport created is actually a physical port, with VPI 0) or the port 10995 * login to Name Server for State Change Request (SCR) will be performed 10996 * on virtual port (real virtual port, with VPI greater than 0). 
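 *
 * For illustration only, here is a minimal standalone C sketch of the
 * successful-completion branch described above. This is not driver code:
 * the helper names and the simplified arguments are hypothetical stand-ins
 * for the real lpfc routines.
 *
 *	#include <stdio.h>
 *
 *	static void issue_fabric_reglogin(void) { puts("FLOGI-based fabric reg login"); }
 *	static void start_fdiscs(void)          { puts("kick off FDISC for each NPIV vport"); }
 *	static void do_scr_ns_plogi(void)       { puts("PLOGI to the Name Server, then SCR"); }
 *
 *	static void on_reg_vpi_success(int sli_rev, int is_physical_port,
 *				       int instantiated_by_fdisc)
 *	{
 *		if (!is_physical_port) {
 *			do_scr_ns_plogi();		// vports go straight to the NS PLOGI
 *			return;
 *		}
 *		if (sli_rev < 4) {
 *			issue_fabric_reglogin();	// SLI-3 physical port path
 *			return;
 *		}
 *		if (!instantiated_by_fdisc)		// skip when the port came up via FDISC
 *			start_fdiscs();
 *		do_scr_ns_plogi();
 *	}
 *
 *	int main(void) { on_reg_vpi_success(4, 1, 0); return 0; }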
10997 **/ 10998 static void 10999 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 11000 { 11001 struct lpfc_vport *vport = pmb->vport; 11002 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11003 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 11004 MAILBOX_t *mb = &pmb->u.mb; 11005 int rc; 11006 11007 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11008 11009 if (mb->mbxStatus) { 11010 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11011 "0915 Register VPI failed : Status: x%x" 11012 " upd bit: x%x \n", mb->mbxStatus, 11013 mb->un.varRegVpi.upd); 11014 if (phba->sli_rev == LPFC_SLI_REV4 && 11015 mb->un.varRegVpi.upd) 11016 goto mbox_err_exit ; 11017 11018 switch (mb->mbxStatus) { 11019 case 0x11: /* unsupported feature */ 11020 case 0x9603: /* max_vpi exceeded */ 11021 case 0x9602: /* Link event since CLEAR_LA */ 11022 /* giving up on vport registration */ 11023 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11024 clear_bit(FC_FABRIC, &vport->fc_flag); 11025 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 11026 lpfc_can_disctmo(vport); 11027 break; 11028 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 11029 case 0x20: 11030 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11031 lpfc_init_vpi(phba, pmb, vport->vpi); 11032 pmb->vport = vport; 11033 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 11034 rc = lpfc_sli_issue_mbox(phba, pmb, 11035 MBX_NOWAIT); 11036 if (rc == MBX_NOT_FINISHED) { 11037 lpfc_printf_vlog(vport, KERN_ERR, 11038 LOG_TRACE_EVENT, 11039 "2732 Failed to issue INIT_VPI" 11040 " mailbox command\n"); 11041 } else { 11042 lpfc_nlp_put(ndlp); 11043 return; 11044 } 11045 fallthrough; 11046 default: 11047 /* Try to recover from this error */ 11048 if (phba->sli_rev == LPFC_SLI_REV4) 11049 lpfc_sli4_unreg_all_rpis(vport); 11050 lpfc_mbx_unreg_vpi(vport); 11051 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11052 if (mb->mbxStatus == MBX_NOT_FINISHED) 11053 break; 11054 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 11055 !test_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag)) { 11056 if (phba->sli_rev == LPFC_SLI_REV4) 11057 lpfc_issue_init_vfi(vport); 11058 else 11059 lpfc_initial_flogi(vport); 11060 } else { 11061 lpfc_initial_fdisc(vport); 11062 } 11063 break; 11064 } 11065 } else { 11066 spin_lock_irq(shost->host_lock); 11067 vport->vpi_state |= LPFC_VPI_REGISTERED; 11068 spin_unlock_irq(shost->host_lock); 11069 if (vport == phba->pport) { 11070 if (phba->sli_rev < LPFC_SLI_REV4) 11071 lpfc_issue_fabric_reglogin(vport); 11072 else { 11073 /* 11074 * If the physical port is instantiated using 11075 * FDISC, do not start vport discovery. 11076 */ 11077 if (vport->port_state != LPFC_FDISC) 11078 lpfc_start_fdiscs(phba); 11079 lpfc_do_scr_ns_plogi(phba, vport); 11080 } 11081 } else { 11082 lpfc_do_scr_ns_plogi(phba, vport); 11083 } 11084 } 11085 mbox_err_exit: 11086 /* Now, we decrement the ndlp reference count held for this 11087 * callback function 11088 */ 11089 lpfc_nlp_put(ndlp); 11090 11091 mempool_free(pmb, phba->mbox_mem_pool); 11092 11093 /* reinitialize the VMID datastructure before returning. 11094 * this is specifically for vport 11095 */ 11096 if (lpfc_is_vmid_enabled(phba)) 11097 lpfc_reinit_vmid(vport); 11098 vport->vmid_flag = vport->phba->pport->vmid_flag; 11099 11100 return; 11101 } 11102 11103 /** 11104 * lpfc_register_new_vport - Register a new vport with a HBA 11105 * @phba: pointer to lpfc hba data structure. 11106 * @vport: pointer to a host virtual N_Port data structure. 11107 * @ndlp: pointer to a node-list data structure. 
11108 *
11109 * This routine registers the @vport as a new virtual port with a HBA.
11110 * It is done through a registering vpi mailbox command.
11111 **/
11112 void
11113 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
11114 struct lpfc_nodelist *ndlp)
11115 {
11116 LPFC_MBOXQ_t *mbox;
11117
11118 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11119 if (mbox) {
11120 lpfc_reg_vpi(vport, mbox);
11121 mbox->vport = vport;
11122 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
11123 if (!mbox->ctx_ndlp) {
11124 mempool_free(mbox, phba->mbox_mem_pool);
11125 goto mbox_err_exit;
11126 }
11127
11128 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
11129 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
11130 == MBX_NOT_FINISHED) {
11131 /* mailbox command not successful, decrement the ndlp
11132 * reference count for this command
11133 */
11134 lpfc_nlp_put(ndlp);
11135 mempool_free(mbox, phba->mbox_mem_pool);
11136
11137 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11138 "0253 Register VPI: Can't send mbox\n");
11139 goto mbox_err_exit;
11140 }
11141 } else {
11142 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11143 "0254 Register VPI: no memory\n");
11144 goto mbox_err_exit;
11145 }
11146 return;
11147
11148 mbox_err_exit:
11149 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11150 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
11151 return;
11152 }
11153
11154 /**
11155 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
11156 * @phba: pointer to lpfc hba data structure.
11157 *
11158 * This routine cancels the retry delay timers for all the vports.
11159 **/
11160 void
11161 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
11162 {
11163 struct lpfc_vport **vports;
11164 struct lpfc_nodelist *ndlp;
11165 uint32_t link_state;
11166 int i;
11167
11168 /* Treat this failure as linkdown for all vports */
11169 link_state = phba->link_state;
11170 lpfc_linkdown(phba);
11171 phba->link_state = link_state;
11172
11173 vports = lpfc_create_vport_work_array(phba);
11174
11175 if (vports) {
11176 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11177 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
11178 if (ndlp)
11179 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
11180 lpfc_els_flush_cmd(vports[i]);
11181 }
11182 lpfc_destroy_vport_work_array(phba, vports);
11183 }
11184 }
11185
11186 /**
11187 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
11188 * @phba: pointer to lpfc hba data structure.
11189 *
11190 * This routine aborts all pending discovery commands and
11191 * starts a timer to retry FLOGI for the physical port
11192 * discovery.
11193 **/
11194 void
11195 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
11196 {
11197 struct lpfc_nodelist *ndlp;
11198
11199 /* Cancel all the vports' retry delay timers */
11200 lpfc_cancel_all_vport_retry_delay_timer(phba);
11201
11202 /* If the fabric requires FLOGI, then re-instantiate the physical login */
11203 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
11204 if (!ndlp)
11205 return;
11206
11207 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
11208 spin_lock_irq(&ndlp->lock);
11209 ndlp->nlp_flag |= NLP_DELAY_TMO;
11210 spin_unlock_irq(&ndlp->lock);
11211 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
11212 phba->pport->port_state = LPFC_FLOGI;
11213 return;
11214 }
11215
11216 /**
11217 * lpfc_fabric_login_reqd - Check if FLOGI required.
11218 * @phba: pointer to lpfc hba data structure.
11219 * @cmdiocb: pointer to FDISC command iocb.
11220 * @rspiocb: pointer to FDISC response iocb.
11221 *
11222 * This routine checks if a FLOGI is required for FDISC
11223 * to succeed.
11224 **/
11225 static int
11226 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
11227 struct lpfc_iocbq *cmdiocb,
11228 struct lpfc_iocbq *rspiocb)
11229 {
11230 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11231 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11232
11233 if (ulp_status != IOSTAT_FABRIC_RJT ||
11234 ulp_word4 != RJT_LOGIN_REQUIRED)
11235 return 0;
11236 else
11237 return 1;
11238 }
11239
11240 /**
11241 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11242 * @phba: pointer to lpfc hba data structure.
11243 * @cmdiocb: pointer to lpfc command iocb data structure.
11244 * @rspiocb: pointer to lpfc response iocb data structure.
11245 *
11246 * This routine is the completion callback function to a Fabric Discover
11247 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11248 * single threaded, each FDISC completion callback function will reset
11249 * the discovery timer for all vports such that the timers will not
11250 * time out unnecessarily. The function checks the FDISC IOCB status. If an
11251 * error is detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the
11252 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID
11253 * assigned to the vport has been changed with the completion of the FDISC
11254 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
11255 * are unregistered from the HBA, and then the lpfc_register_new_vport()
11256 * routine is invoked to register the new vport with the HBA. Otherwise, the
11257 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
11258 * Server for State Change Request (SCR).
11259 **/
11260 static void
11261 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11262 struct lpfc_iocbq *rspiocb)
11263 {
11264 struct lpfc_vport *vport = cmdiocb->vport;
11265 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11266 struct lpfc_nodelist *np;
11267 struct lpfc_nodelist *next_np;
11268 struct lpfc_iocbq *piocb;
11269 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
11270 struct serv_parm *sp;
11271 uint8_t fabric_param_changed;
11272 u32 ulp_status, ulp_word4;
11273
11274 ulp_status = get_job_ulpstatus(phba, rspiocb);
11275 ulp_word4 = get_job_word4(phba, rspiocb);
11276
11277 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11278 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
11279 ulp_status, ulp_word4,
11280 vport->fc_prevDID);
11281 /* Since all FDISCs are being single threaded, we
11282 * must reset the discovery timer for ALL vports
11283 * waiting to send FDISC when one completes.
11284 */
11285 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
11286 lpfc_set_disctmo(piocb->vport);
11287 }
11288
11289 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11290 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
11291 ulp_status, ulp_word4, vport->fc_prevDID);
11292
11293 if (ulp_status) {
11294
11295 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
11296 lpfc_retry_pport_discovery(phba);
11297 goto out;
11298 }
11299
11300 /* Check for retry */
11301 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
11302 goto out;
11303 /* FDISC failed */
11304 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11305 "0126 FDISC failed.
(x%x/x%x)\n", 11306 ulp_status, ulp_word4); 11307 goto fdisc_failed; 11308 } 11309 11310 lpfc_check_nlp_post_devloss(vport, ndlp); 11311 11312 clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag); 11313 clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag); 11314 set_bit(FC_FABRIC, &vport->fc_flag); 11315 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 11316 set_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 11317 11318 vport->fc_myDID = ulp_word4 & Mask_DID; 11319 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 11320 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 11321 if (!prsp) 11322 goto out; 11323 if (!lpfc_is_els_acc_rsp(prsp)) 11324 goto out; 11325 11326 sp = prsp->virt + sizeof(uint32_t); 11327 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 11328 memcpy(&vport->fabric_portname, &sp->portName, 11329 sizeof(struct lpfc_name)); 11330 memcpy(&vport->fabric_nodename, &sp->nodeName, 11331 sizeof(struct lpfc_name)); 11332 if (fabric_param_changed && 11333 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) { 11334 /* If our NportID changed, we need to ensure all 11335 * remaining NPORTs get unreg_login'ed so we can 11336 * issue unreg_vpi. 11337 */ 11338 list_for_each_entry_safe(np, next_np, 11339 &vport->fc_nodes, nlp_listp) { 11340 if ((np->nlp_state != NLP_STE_NPR_NODE) || 11341 !(np->nlp_flag & NLP_NPR_ADISC)) 11342 continue; 11343 spin_lock_irq(&ndlp->lock); 11344 np->nlp_flag &= ~NLP_NPR_ADISC; 11345 spin_unlock_irq(&ndlp->lock); 11346 lpfc_unreg_rpi(vport, np); 11347 } 11348 lpfc_cleanup_pending_mbox(vport); 11349 11350 if (phba->sli_rev == LPFC_SLI_REV4) 11351 lpfc_sli4_unreg_all_rpis(vport); 11352 11353 lpfc_mbx_unreg_vpi(vport); 11354 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 11355 if (phba->sli_rev == LPFC_SLI_REV4) 11356 set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag); 11357 else 11358 set_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag); 11359 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 11360 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) { 11361 /* 11362 * Driver needs to re-reg VPI in order for f/w 11363 * to update the MAC address. 11364 */ 11365 lpfc_register_new_vport(phba, vport, ndlp); 11366 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11367 goto out; 11368 } 11369 11370 if (test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag)) 11371 lpfc_issue_init_vpi(vport); 11372 else if (test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) 11373 lpfc_register_new_vport(phba, vport, ndlp); 11374 else 11375 lpfc_do_scr_ns_plogi(phba, vport); 11376 11377 /* The FDISC completed successfully. Move the fabric ndlp to 11378 * UNMAPPED state and register with the transport. 11379 */ 11380 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 11381 goto out; 11382 11383 fdisc_failed: 11384 if (vport->fc_vport && 11385 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 11386 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11387 /* Cancel discovery timer */ 11388 lpfc_can_disctmo(vport); 11389 out: 11390 lpfc_els_free_iocb(phba, cmdiocb); 11391 lpfc_nlp_put(ndlp); 11392 } 11393 11394 /** 11395 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 11396 * @vport: pointer to a virtual N_Port data structure. 11397 * @ndlp: pointer to a node-list data structure. 11398 * @retry: number of retries to the command IOCB. 11399 * 11400 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 11401 * a remote node (@ndlp) off a @vport. 
It uses the lpfc_issue_fabric_iocb() 11402 * routine to issue the IOCB, which makes sure only one outstanding fabric 11403 * IOCB will be sent off HBA at any given time. 11404 * 11405 * Note that the ndlp reference count will be incremented by 1 for holding the 11406 * ndlp and the reference to ndlp will be stored into the ndlp field of 11407 * the IOCB for the completion callback function to the FDISC ELS command. 11408 * 11409 * Return code 11410 * 0 - Successfully issued fdisc iocb command 11411 * 1 - Failed to issue fdisc iocb command 11412 **/ 11413 static int 11414 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 11415 uint8_t retry) 11416 { 11417 struct lpfc_hba *phba = vport->phba; 11418 IOCB_t *icmd; 11419 union lpfc_wqe128 *wqe = NULL; 11420 struct lpfc_iocbq *elsiocb; 11421 struct serv_parm *sp; 11422 uint8_t *pcmd; 11423 uint16_t cmdsize; 11424 int did = ndlp->nlp_DID; 11425 int rc; 11426 11427 vport->port_state = LPFC_FDISC; 11428 vport->fc_myDID = 0; 11429 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 11430 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 11431 ELS_CMD_FDISC); 11432 if (!elsiocb) { 11433 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11434 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11435 "0255 Issue FDISC: no IOCB\n"); 11436 return 1; 11437 } 11438 11439 if (phba->sli_rev == LPFC_SLI_REV4) { 11440 wqe = &elsiocb->wqe; 11441 bf_set(els_req64_sid, &wqe->els_req, 0); 11442 bf_set(els_req64_sp, &wqe->els_req, 1); 11443 } else { 11444 icmd = &elsiocb->iocb; 11445 icmd->un.elsreq64.myID = 0; 11446 icmd->un.elsreq64.fl = 1; 11447 icmd->ulpCt_h = 1; 11448 icmd->ulpCt_l = 0; 11449 } 11450 11451 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11452 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 11453 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 11454 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 11455 sp = (struct serv_parm *) pcmd; 11456 /* Setup CSPs accordingly for Fabric */ 11457 sp->cmn.e_d_tov = 0; 11458 sp->cmn.w2.r_a_tov = 0; 11459 sp->cmn.virtual_fabric_support = 0; 11460 sp->cls1.classValid = 0; 11461 sp->cls2.seqDelivery = 1; 11462 sp->cls3.seqDelivery = 1; 11463 11464 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 11465 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 11466 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 11467 pcmd += sizeof(uint32_t); /* Port Name */ 11468 memcpy(pcmd, &vport->fc_portname, 8); 11469 pcmd += sizeof(uint32_t); /* Node Name */ 11470 pcmd += sizeof(uint32_t); /* Node Name */ 11471 memcpy(pcmd, &vport->fc_nodename, 8); 11472 sp->cmn.valid_vendor_ver_level = 0; 11473 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 11474 lpfc_set_disctmo(vport); 11475 11476 phba->fc_stat.elsXmitFDISC++; 11477 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc; 11478 11479 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11480 "Issue FDISC: did:x%x", 11481 did, 0, 0); 11482 11483 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11484 if (!elsiocb->ndlp) 11485 goto err_out; 11486 11487 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 11488 if (rc == IOCB_ERROR) { 11489 lpfc_nlp_put(ndlp); 11490 goto err_out; 11491 } 11492 11493 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 11494 return 0; 11495 11496 err_out: 11497 lpfc_els_free_iocb(phba, elsiocb); 11498 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 11499 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 11500 "0256 Issue FDISC: Cannot send IOCB\n"); 11501 return 1; 11502 } 11503 11504 /** 11505 * lpfc_cmpl_els_npiv_logo - Completion function 
with vport logo
11506 * @phba: pointer to lpfc hba data structure.
11507 * @cmdiocb: pointer to lpfc command iocb data structure.
11508 * @rspiocb: pointer to lpfc response iocb data structure.
11509 *
11510 * This routine is the completion callback function to the issuing of a LOGO
11511 * ELS command off a vport. It frees the command IOCB and then decrements the
11512 * reference count held on ndlp for this completion function, indicating that
11513 * the reference to the ndlp is no longer needed. Note that the
11514 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
11515 * callback function and an additional explicit ndlp reference decrement
11516 * will trigger the actual release of the ndlp.
11517 **/
11518 static void
11519 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11520 struct lpfc_iocbq *rspiocb)
11521 {
11522 struct lpfc_vport *vport = cmdiocb->vport;
11523 IOCB_t *irsp;
11524 struct lpfc_nodelist *ndlp;
11525 u32 ulp_status, ulp_word4, did, tmo;
11526
11527 ndlp = cmdiocb->ndlp;
11528
11529 ulp_status = get_job_ulpstatus(phba, rspiocb);
11530 ulp_word4 = get_job_word4(phba, rspiocb);
11531
11532 if (phba->sli_rev == LPFC_SLI_REV4) {
11533 did = get_job_els_rsp64_did(phba, cmdiocb);
11534 tmo = get_wqe_tmo(cmdiocb);
11535 } else {
11536 irsp = &rspiocb->iocb;
11537 did = get_job_els_rsp64_did(phba, rspiocb);
11538 tmo = irsp->ulpTimeout;
11539 }
11540
11541 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11542 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
11543 ulp_status, ulp_word4, did);
11544
11545 /* NPIV LOGO completes to NPort <nlp_DID> */
11546 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11547 "2928 NPIV LOGO completes to NPort x%x "
11548 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
11549 ndlp->nlp_DID, ulp_status, ulp_word4,
11550 tmo, vport->num_disc_nodes,
11551 kref_read(&ndlp->kref), ndlp->nlp_flag,
11552 ndlp->fc4_xpt_flags);
11553
11554 if (ulp_status == IOSTAT_SUCCESS) {
11555 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
11556 clear_bit(FC_FABRIC, &vport->fc_flag);
11557 lpfc_can_disctmo(vport);
11558 }
11559
11560 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
11561 /* Wake up lpfc_vport_delete if waiting...*/
11562 if (ndlp->logo_waitq)
11563 wake_up(ndlp->logo_waitq);
11564 spin_lock_irq(&ndlp->lock);
11565 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
11566 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
11567 spin_unlock_irq(&ndlp->lock);
11568 }
11569
11570 /* Safe to release resources now. */
11571 lpfc_els_free_iocb(phba, cmdiocb);
11572 lpfc_nlp_put(ndlp);
11573 }
11574
11575 /**
11576 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
11577 * @vport: pointer to a virtual N_Port data structure.
11578 * @ndlp: pointer to a node-list data structure.
11579 *
11580 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
11581 *
11582 * Note that the ndlp reference count will be incremented by 1 for holding the
11583 * ndlp and the reference to ndlp will be stored into the ndlp field of
11584 * the IOCB for the completion callback function to the LOGO ELS command.
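 *
 * For reference, an illustrative standalone sketch of the 16-byte LOGO
 * payload this routine builds (command word, our N_Port ID, our WWPN),
 * matching the cmdsize computed below. This is not driver code: the struct
 * name and parameters are made up for the example, and the real command
 * encoding and byte-order handling come from the driver headers.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	struct logo_payload {			// 2 * 4 bytes + an 8-byte name = 16 bytes
 *		uint32_t els_cmd;		// the LOGO command word
 *		uint32_t nport_id;		// the vport's own N_Port ID (fc_myDID)
 *		uint8_t  port_name[8];		// the vport's WWPN
 *	};
 *
 *	static void build_logo(struct logo_payload *p, uint32_t cmd_logo,
 *			       uint32_t my_did, const uint8_t wwpn[8])
 *	{
 *		p->els_cmd  = cmd_logo;		// pass in the driver's ELS_CMD_LOGO value
 *		p->nport_id = my_did;		// wire byte order is handled by the driver
 *		memcpy(p->port_name, wwpn, sizeof(p->port_name));
 *	}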
11585 * 11586 * Return codes 11587 * 0 - Successfully issued logo off the @vport 11588 * 1 - Failed to issue logo off the @vport 11589 **/ 11590 int 11591 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 11592 { 11593 int rc = 0; 11594 struct lpfc_hba *phba = vport->phba; 11595 struct lpfc_iocbq *elsiocb; 11596 uint8_t *pcmd; 11597 uint16_t cmdsize; 11598 11599 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 11600 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 11601 ELS_CMD_LOGO); 11602 if (!elsiocb) 11603 return 1; 11604 11605 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt; 11606 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 11607 pcmd += sizeof(uint32_t); 11608 11609 /* Fill in LOGO payload */ 11610 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 11611 pcmd += sizeof(uint32_t); 11612 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11613 11614 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11615 "Issue LOGO npiv did:x%x flg:x%x", 11616 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11617 11618 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; 11619 spin_lock_irq(&ndlp->lock); 11620 ndlp->nlp_flag |= NLP_LOGO_SND; 11621 spin_unlock_irq(&ndlp->lock); 11622 elsiocb->ndlp = lpfc_nlp_get(ndlp); 11623 if (!elsiocb->ndlp) { 11624 lpfc_els_free_iocb(phba, elsiocb); 11625 goto err; 11626 } 11627 11628 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11629 if (rc == IOCB_ERROR) { 11630 lpfc_els_free_iocb(phba, elsiocb); 11631 lpfc_nlp_put(ndlp); 11632 goto err; 11633 } 11634 return 0; 11635 11636 err: 11637 spin_lock_irq(&ndlp->lock); 11638 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11639 spin_unlock_irq(&ndlp->lock); 11640 return 1; 11641 } 11642 11643 /** 11644 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11645 * @t: timer context used to obtain the lpfc hba. 11646 * 11647 * This routine is invoked by the fabric iocb block timer after 11648 * timeout. It posts the fabric iocb block timeout event by setting the 11649 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11650 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11651 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11652 * posted event WORKER_FABRIC_BLOCK_TMO. 11653 **/ 11654 void 11655 lpfc_fabric_block_timeout(struct timer_list *t) 11656 { 11657 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11658 unsigned long iflags; 11659 uint32_t tmo_posted; 11660 11661 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11662 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11663 if (!tmo_posted) 11664 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11665 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11666 11667 if (!tmo_posted) 11668 lpfc_worker_wake_up(phba); 11669 return; 11670 } 11671 11672 /** 11673 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11674 * @phba: pointer to lpfc hba data structure. 11675 * 11676 * This routine issues one fabric iocb from the driver internal list to 11677 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11678 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11679 * remove one pending fabric iocb from the driver internal list and invokes 11680 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
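 *
 * A simplified, single-threaded sketch of that resume step (illustrative
 * only, not driver code): the made-up fabric_sched type stands in for the
 * driver's internal list plus atomic counter, and the ring, locking and
 * completion plumbing are omitted.
 *
 *	#include <stdio.h>
 *
 *	#define MAX_PENDING 8
 *
 *	struct fabric_sched {
 *		int pending[MAX_PENDING];	// queued command ids (FIFO)
 *		unsigned int head, tail;	// consume at head, append at tail
 *		int in_flight;			// models the outstanding-iocb count
 *	};
 *
 *	static int hw_issue(int cmd) { printf("issue %d\n", cmd); return 0; }
 *
 *	static void resume_one(struct fabric_sched *s)
 *	{
 *		while (s->in_flight == 0 && s->head != s->tail) {
 *			int cmd = s->pending[s->head++ % MAX_PENDING];
 *
 *			s->in_flight++;		// claim the single slot first
 *			if (hw_issue(cmd) == 0)
 *				break;		// issued: wait for its completion
 *			s->in_flight--;		// failed: release the slot, try next
 *		}
 *	}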
11681 **/ 11682 static void 11683 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11684 { 11685 struct lpfc_iocbq *iocb; 11686 unsigned long iflags; 11687 int ret; 11688 11689 repeat: 11690 iocb = NULL; 11691 spin_lock_irqsave(&phba->hbalock, iflags); 11692 /* Post any pending iocb to the SLI layer */ 11693 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11694 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11695 list); 11696 if (iocb) 11697 /* Increment fabric iocb count to hold the position */ 11698 atomic_inc(&phba->fabric_iocb_count); 11699 } 11700 spin_unlock_irqrestore(&phba->hbalock, iflags); 11701 if (iocb) { 11702 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11703 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11704 iocb->cmd_flag |= LPFC_IO_FABRIC; 11705 11706 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11707 "Fabric sched1: ste:x%x", 11708 iocb->vport->port_state, 0, 0); 11709 11710 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11711 11712 if (ret == IOCB_ERROR) { 11713 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11714 iocb->fabric_cmd_cmpl = NULL; 11715 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11716 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT); 11717 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED; 11718 iocb->cmd_cmpl(phba, iocb, iocb); 11719 11720 atomic_dec(&phba->fabric_iocb_count); 11721 goto repeat; 11722 } 11723 } 11724 } 11725 11726 /** 11727 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11728 * @phba: pointer to lpfc hba data structure. 11729 * 11730 * This routine unblocks the issuing fabric iocb command. The function 11731 * will clear the fabric iocb block bit and then invoke the routine 11732 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11733 * from the driver internal fabric iocb list. 11734 **/ 11735 void 11736 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11737 { 11738 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11739 11740 lpfc_resume_fabric_iocbs(phba); 11741 return; 11742 } 11743 11744 /** 11745 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11746 * @phba: pointer to lpfc hba data structure. 11747 * 11748 * This routine blocks the issuing fabric iocb for a specified amount of 11749 * time (currently 100 ms). This is done by set the fabric iocb block bit 11750 * and set up a timeout timer for 100ms. When the block bit is set, no more 11751 * fabric iocb will be issued out of the HBA. 11752 **/ 11753 static void 11754 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11755 { 11756 int blocked; 11757 11758 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11759 /* Start a timer to unblock fabric iocbs after 100ms */ 11760 if (!blocked) 11761 mod_timer(&phba->fabric_block_timer, 11762 jiffies + msecs_to_jiffies(100)); 11763 11764 return; 11765 } 11766 11767 /** 11768 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11769 * @phba: pointer to lpfc hba data structure. 11770 * @cmdiocb: pointer to lpfc command iocb data structure. 11771 * @rspiocb: pointer to lpfc response iocb data structure. 11772 * 11773 * This routine is the callback function that is put to the fabric iocb's 11774 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback 11775 * function pointer has been stored in iocb->fabric_cmd_cmpl. 
This callback
11776 * function first restores and invokes the original iocb's callback function
11777 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11778 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11779 **/
11780 static void
11781 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11782 struct lpfc_iocbq *rspiocb)
11783 {
11784 struct ls_rjt stat;
11785 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11786 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11787
11788 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11789
11790 switch (ulp_status) {
11791 case IOSTAT_NPORT_RJT:
11792 case IOSTAT_FABRIC_RJT:
11793 if (ulp_word4 & RJT_UNAVAIL_TEMP)
11794 lpfc_block_fabric_iocbs(phba);
11795 break;
11796
11797 case IOSTAT_NPORT_BSY:
11798 case IOSTAT_FABRIC_BSY:
11799 lpfc_block_fabric_iocbs(phba);
11800 break;
11801
11802 case IOSTAT_LS_RJT:
11803 stat.un.ls_rjt_error_be =
11804 cpu_to_be32(ulp_word4);
11805 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11806 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11807 lpfc_block_fabric_iocbs(phba);
11808 break;
11809 }
11810
11811 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11812
11813 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
11814 cmdiocb->fabric_cmd_cmpl = NULL;
11815 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
11816 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
11817
11818 atomic_dec(&phba->fabric_iocb_count);
11819 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11820 /* Post any pending iocbs to HBA */
11821 lpfc_resume_fabric_iocbs(phba);
11822 }
11823 }
11824
11825 /**
11826 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11827 * @phba: pointer to lpfc hba data structure.
11828 * @iocb: pointer to lpfc command iocb data structure.
11829 *
11830 * This routine is used as the top-level API for issuing a fabric iocb command
11831 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11832 * function makes sure that only one fabric bound iocb will be outstanding at
11833 * any given time. As such, this function will first check to see whether there
11834 * is already an outstanding fabric iocb on the wire. If so, it will put the
11835 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11836 * issued later. Otherwise, it will issue the iocb on the wire and update the
11837 * fabric iocb count to indicate that there is one fabric iocb on the wire.
11838 *
11839 * Note, this implementation can potentially send fabric IOCBs out of
11840 * order. The problem is that the construction of the "ready" boolean does
11841 * not include the condition that the internal fabric IOCB list is empty. As
11842 * such, it is possible that a fabric IOCB issued by this routine might "jump"
11843 * ahead of the fabric IOCBs in the internal list.
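 *
 * To make the admission check concrete, here is a tiny standalone model
 * (illustrative only, not driver code; the globals stand in for the atomic
 * counter, the block bit and the internal list):
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	static int  in_flight;	// models the fabric iocb count (0 or 1)
 *	static bool blocked;	// models the fabric-commands-blocked bit
 *	static int  queued;	// models the internal fabric iocb list depth
 *
 *	static int hw_issue(int cmd) { printf("cmd %d on the wire\n", cmd); return 0; }
 *
 *	static void issue_fabric(int cmd)
 *	{
 *		// List emptiness is deliberately not part of "ready"; that is
 *		// the ordering caveat described in the note above.
 *		bool ready = (in_flight == 0) && !blocked;
 *
 *		if (!ready) {
 *			queued++;		// park it for the resume routine
 *			return;
 *		}
 *		in_flight++;			// claim the single outstanding slot
 *		if (hw_issue(cmd) != 0)
 *			in_flight--;		// give the slot back on error
 *	}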
11844 * 11845 * Return code 11846 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11847 * IOCB_ERROR - failed to issue fabric iocb 11848 **/ 11849 static int 11850 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11851 { 11852 unsigned long iflags; 11853 int ready; 11854 int ret; 11855 11856 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11857 11858 spin_lock_irqsave(&phba->hbalock, iflags); 11859 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11860 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11861 11862 if (ready) 11863 /* Increment fabric iocb count to hold the position */ 11864 atomic_inc(&phba->fabric_iocb_count); 11865 spin_unlock_irqrestore(&phba->hbalock, iflags); 11866 if (ready) { 11867 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl; 11868 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb; 11869 iocb->cmd_flag |= LPFC_IO_FABRIC; 11870 11871 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11872 "Fabric sched2: ste:x%x", 11873 iocb->vport->port_state, 0, 0); 11874 11875 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11876 11877 if (ret == IOCB_ERROR) { 11878 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl; 11879 iocb->fabric_cmd_cmpl = NULL; 11880 iocb->cmd_flag &= ~LPFC_IO_FABRIC; 11881 atomic_dec(&phba->fabric_iocb_count); 11882 } 11883 } else { 11884 spin_lock_irqsave(&phba->hbalock, iflags); 11885 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11886 spin_unlock_irqrestore(&phba->hbalock, iflags); 11887 ret = IOCB_SUCCESS; 11888 } 11889 return ret; 11890 } 11891 11892 /** 11893 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11894 * @vport: pointer to a virtual N_Port data structure. 11895 * 11896 * This routine aborts all the IOCBs associated with a @vport from the 11897 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11898 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11899 * list, removes each IOCB associated with the @vport off the list, set the 11900 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11901 * associated with the IOCB. 11902 **/ 11903 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11904 { 11905 LIST_HEAD(completions); 11906 struct lpfc_hba *phba = vport->phba; 11907 struct lpfc_iocbq *tmp_iocb, *piocb; 11908 11909 spin_lock_irq(&phba->hbalock); 11910 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11911 list) { 11912 11913 if (piocb->vport != vport) 11914 continue; 11915 11916 list_move_tail(&piocb->list, &completions); 11917 } 11918 spin_unlock_irq(&phba->hbalock); 11919 11920 /* Cancel all the IOCBs from the completions list */ 11921 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11922 IOERR_SLI_ABORTED); 11923 } 11924 11925 /** 11926 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11927 * @ndlp: pointer to a node-list data structure. 11928 * 11929 * This routine aborts all the IOCBs associated with an @ndlp from the 11930 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11931 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11932 * list, removes each IOCB associated with the @ndlp off the list, set the 11933 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11934 * associated with the IOCB. 
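 *
 * The collect-under-lock, cancel-outside-the-lock pattern shared by these
 * abort helpers, as a small standalone sketch (illustrative only, not
 * driver code; the req type, the plain owner comparison and the
 * commented-out lock calls are assumptions):
 *
 *	#include <stddef.h>
 *
 *	struct req { struct req *next; const void *owner; };
 *
 *	static struct req *collect_for_owner(struct req **pending, const void *owner)
 *	{
 *		struct req *done = NULL, **pp = pending;
 *
 *		// lock()
 *		while (*pp) {
 *			if ((*pp)->owner == owner) {
 *				struct req *victim = *pp;
 *
 *				*pp = victim->next;	// unlink from pending
 *				victim->next = done;	// push onto the local list
 *				done = victim;
 *			} else {
 *				pp = &(*pp)->next;
 *			}
 *		}
 *		// unlock()
 *		return done;	// the caller then completes each entry with an
 *				// aborted status, outside the lock
 *	}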
11935 **/ 11936 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11937 { 11938 LIST_HEAD(completions); 11939 struct lpfc_hba *phba = ndlp->phba; 11940 struct lpfc_iocbq *tmp_iocb, *piocb; 11941 struct lpfc_sli_ring *pring; 11942 11943 pring = lpfc_phba_elsring(phba); 11944 11945 if (unlikely(!pring)) 11946 return; 11947 11948 spin_lock_irq(&phba->hbalock); 11949 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11950 list) { 11951 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11952 11953 list_move_tail(&piocb->list, &completions); 11954 } 11955 } 11956 spin_unlock_irq(&phba->hbalock); 11957 11958 /* Cancel all the IOCBs from the completions list */ 11959 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11960 IOERR_SLI_ABORTED); 11961 } 11962 11963 /** 11964 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11965 * @phba: pointer to lpfc hba data structure. 11966 * 11967 * This routine aborts all the IOCBs currently on the driver internal 11968 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11969 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11970 * list, removes IOCBs off the list, set the status field to 11971 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11972 * the IOCB. 11973 **/ 11974 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11975 { 11976 LIST_HEAD(completions); 11977 11978 spin_lock_irq(&phba->hbalock); 11979 list_splice_init(&phba->fabric_iocb_list, &completions); 11980 spin_unlock_irq(&phba->hbalock); 11981 11982 /* Cancel all the IOCBs from the completions list */ 11983 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11984 IOERR_SLI_ABORTED); 11985 } 11986 11987 /** 11988 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11989 * @vport: pointer to lpfc vport data structure. 11990 * 11991 * This routine is invoked by the vport cleanup for deletions and the cleanup 11992 * for an ndlp on removal. 11993 **/ 11994 void 11995 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11996 { 11997 struct lpfc_hba *phba = vport->phba; 11998 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11999 struct lpfc_nodelist *ndlp = NULL; 12000 unsigned long iflag = 0; 12001 12002 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 12003 list_for_each_entry_safe(sglq_entry, sglq_next, 12004 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 12005 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 12006 lpfc_nlp_put(sglq_entry->ndlp); 12007 ndlp = sglq_entry->ndlp; 12008 sglq_entry->ndlp = NULL; 12009 12010 /* If the xri on the abts_els_sgl list is for the Fport 12011 * node and the vport is unloading, the xri aborted wcqe 12012 * likely isn't coming back. Just release the sgl. 12013 */ 12014 if (test_bit(FC_UNLOADING, &vport->load_flag) && 12015 ndlp->nlp_DID == Fabric_DID) { 12016 list_del(&sglq_entry->list); 12017 sglq_entry->state = SGL_FREED; 12018 list_add_tail(&sglq_entry->list, 12019 &phba->sli4_hba.lpfc_els_sgl_list); 12020 } 12021 } 12022 } 12023 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 12024 return; 12025 } 12026 12027 /** 12028 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 12029 * @phba: pointer to lpfc hba data structure. 12030 * @axri: pointer to the els xri abort wcqe structure. 12031 * 12032 * This routine is invoked by the worker thread to process a SLI4 slow-path 12033 * ELS aborted xri. 
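 *
 * A compact standalone sketch of the bookkeeping this handler performs
 * (illustrative only, not driver code; the sglq model, the linear table
 * walk and the return convention are simplified assumptions):
 *
 *	#include <stdint.h>
 *
 *	enum sgl_state { SGL_ALLOCATED, SGL_XRI_ABORTED, SGL_FREED };
 *
 *	struct sglq_model {
 *		uint16_t xritag;
 *		enum sgl_state state;
 *		int on_abts_list;	// 1 while waiting for the abort completion
 *	};
 *
 *	// Find the entry for the aborted XRI, move it back to the free pool
 *	// and mark it FREED; returns 1 when a matching entry was recycled.
 *	static int recycle_aborted_xri(struct sglq_model *tbl, int n, uint16_t xri)
 *	{
 *		for (int i = 0; i < n; i++) {
 *			if (tbl[i].on_abts_list && tbl[i].xritag == xri) {
 *				tbl[i].on_abts_list = 0;
 *				tbl[i].state = SGL_FREED;
 *				return 1;	// the caller then drops the node
 *						// reference and arms the RRQ window
 *			}
 *		}
 *		return 0;
 *	}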
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;

	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);

	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
					       iflag);

			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
						    sglq_entry->sli4_lxritag,
						    rxid, 1);
				lpfc_nlp_put(ndlp);
			}

			/* Check if TXQ queue needs to be serviced */
			if (pring && !list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI)
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

/**
 * lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
 * @vport: pointer to virtual port object.
 * @ndlp: nodelist pointer for the impacted node.
 *
 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, the
 * driver is required to send a LOGO to the remote node before it
 * attempts to recover its login to the remote node.
 **/
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
			   struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	unsigned long flags = 0;

	shost = lpfc_shost_from_vport(vport);
	phba = vport->phba;
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI, "3093 No rport recovery needed. "
				"rport in state 0x%x\n", ndlp->nlp_state);
		return;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"3094 Start rport recovery on shost id 0x%x "
			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
			"flags 0x%x\n",
			shost->host_no, ndlp->nlp_DID,
			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
			ndlp->nlp_flag);
	/*
	 * The rport is not responding. Remove the FCP-2 flag to prevent
	 * an ADISC in the follow-up recovery code.
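	 * Setting NLP_ISSUE_LOGO requests that the LOGO required for this
	 * recovery be issued as part of the lpfc_unreg_rpi() processing
	 * that follows.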
	 */
	spin_lock_irqsave(&ndlp->lock, flags);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
	spin_unlock_irqrestore(&ndlp->lock, flags);
	lpfc_unreg_rpi(vport, ndlp);
}

static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
{
	bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
}

static void
lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
{
	u32 i;

	if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
		return;

	for (i = min; i <= max; i++)
		set_bit(i, vport->vmid_priority_range);
}

static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
{
	set_bit(ctcl_vmid, vport->vmid_priority_range);
}

u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
{
	u32 i;

	i = find_first_bit(vport->vmid_priority_range,
			   LPFC_VMID_MAX_PRIORITY_RANGE);

	if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
		return 0;

	clear_bit(i, vport->vmid_priority_range);
	return i;
}

#define MAX_PRIORITY_DESC	255

static void
lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct priority_range_desc *desc;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_priority_range *vmid_range = NULL;
	u32 *data;
	struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	u8 *pcmd, max_desc;
	u32 len, i;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "3277 QFPA LS_RJT x%x x%x\n",
				 data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "6529 QFPA failed with status x%x x%x\n",
				 ulp_status, ulp_word4);
		goto out;
	}

	if (!vport->qfpa_res) {
		max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
		vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
					  GFP_KERNEL);
		if (!vport->qfpa_res)
			goto out;
	}

	len = *((u32 *)(pcmd + 4));
	len = be32_to_cpu(len);
	memcpy(vport->qfpa_res, pcmd, len + 8);
	len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;

	desc = (struct priority_range_desc *)(pcmd + 8);
	vmid_range = vport->vmid_priority.vmid_range;
	if (!vmid_range) {
		vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
				     GFP_KERNEL);
		if (!vmid_range) {
			kfree(vport->qfpa_res);
			goto out;
		}
		vport->vmid_priority.vmid_range = vmid_range;
	}
	vport->vmid_priority.num_descriptors = len;

	for (i = 0; i < len; i++, vmid_range++, desc++) {
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
				 "6539 vmid values low=%d, high=%d, qos=%d, "
				 "local ve id=%d\n", desc->lo_range,
				 desc->hi_range, desc->qos_priority,
				 desc->local_ve_id);

		vmid_range->low = desc->lo_range << 1;
		if (desc->local_ve_id == QFPA_ODD_ONLY)
			vmid_range->low++;
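
		/*
		 * The descriptor bounds are expanded into the CS_CTL bitmap
		 * index space: each bound is shifted left by one, and the
		 * low bit selects the odd/even slots indicated by the
		 * descriptor's local_ve_id (QFPA_ODD_ONLY / QFPA_EVEN_ODD).
		 */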
		if (desc->qos_priority)
			vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
		vmid_range->qos = desc->qos_priority;

		vmid_range->high = desc->hi_range << 1;
		if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
		    (desc->local_ve_id == QFPA_EVEN_ODD))
			vmid_range->high++;
	}
	lpfc_init_cs_ctl_bitmap(vport);
	for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
		lpfc_vmid_set_cs_ctl_range(vport,
				vport->vmid_priority.vmid_range[i].low,
				vport->vmid_priority.vmid_range[i].high);
	}

	vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	u8 *pcmd;
	int ret;

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
				     ndlp->nlp_DID, ELS_CMD_QFPA);
	if (!elsiocb)
		return -ENOMEM;

	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	*((u32 *)(pcmd)) = ELS_CMD_QFPA;
	pcmd += 4;

	elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		return -ENXIO;
	}

	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return -EIO;
	}
	vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
	return 0;
}

int
lpfc_vmid_uvem(struct lpfc_vport *vport,
	       struct lpfc_vmid *vmid, bool instantiated)
{
	struct lpfc_vem_id_desc *vem_id_desc;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *elsiocb;
	struct instantiated_ve_desc *inst_desc;
	struct lpfc_vmid_context *vmid_context;
	u8 *pcmd;
	u32 *len;
	int ret = 0;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENXIO;

	vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
	if (!vmid_context)
		return -ENOMEM;
	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
				     ndlp, Fabric_DID, ELS_CMD_UVEM);
	if (!elsiocb)
		goto out;

	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
			 "3427 Host vmid %s %d\n",
			 vmid->host_vmid, instantiated);
	vmid_context->vmp = vmid;
	vmid_context->nlp = ndlp;
	vmid_context->instantiated = instantiated;
	elsiocb->vmid_tag.vmid_context = vmid_context;
	pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;

	if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
			sizeof(vport->lpfc_vmid_host_uuid)))
		memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
		       sizeof(vport->lpfc_vmid_host_uuid));

	*((u32 *)(pcmd)) = ELS_CMD_UVEM;
	len = (u32 *)(pcmd + 4);
	*len = cpu_to_be32(LPFC_UVEM_SIZE - 8);

	vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
	vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
	vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
	memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
	       sizeof(vem_id_desc->vem_id));

	inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
	inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
	memcpy(inst_desc->global_vem_id, vmid->host_vmid,
	       sizeof(inst_desc->global_vem_id));

	bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
	bf_set(lpfc_instantiated_local_id, inst_desc,
	       vmid->un.cs_ctl_vmid);
	if (instantiated) {
		inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
	} else {
		inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
		lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
	}
	inst_desc->word6 = cpu_to_be32(inst_desc->word6);

	elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;

	elsiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!elsiocb->ndlp) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		goto out;
	}

	ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
	if (ret != IOCB_SUCCESS) {
		lpfc_els_free_iocb(vport->phba, elsiocb);
		lpfc_nlp_put(ndlp);
		goto out;
	}

	return 0;
out:
	kfree(vmid_context);
	return -EIO;
}

static void
lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = icmdiocb->vport;
	struct lpfc_dmabuf *prsp = NULL;
	struct lpfc_vmid_context *vmid_context =
	    icmdiocb->vmid_tag.vmid_context;
	struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
	u8 *pcmd;
	u32 *data;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
	struct lpfc_vmid *vmid;

	vmid = vmid_context->vmp;
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		ndlp = NULL;

	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	pcmd = prsp->virt;
	data = (u32 *)pcmd;
	if (data[0] == ELS_CMD_LS_RJT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
		goto out;
	}
	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
				 "4533 UVEM error status %x: %x\n",
				 ulp_status, ulp_word4);
		goto out;
	}
	spin_lock(&phba->hbalock);
	/* Set IN USE flag */
	vport->vmid_flag |= LPFC_VMID_IN_USE;
	phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
	spin_unlock(&phba->hbalock);

	if (vmid_context->instantiated) {
		write_lock(&vport->vmid_lock);
		vmid->flag |= LPFC_VMID_REGISTERED;
		vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
		write_unlock(&vport->vmid_lock);
	}

out:
	kfree(vmid_context);
	lpfc_els_free_iocb(phba, icmdiocb);
	lpfc_nlp_put(ndlp);
}
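
/*
 * Usage sketch (illustrative only, not part of the driver): once the QFPA
 * completion above has populated the vport's CS_CTL priority bitmap, a
 * hypothetical caller that already owns a populated struct lpfc_vmid could
 * obtain a CS_CTL value and register it with the fabric roughly as follows.
 * The variable names and the error handling shown here are assumptions, not
 * the driver's actual VMID handling.
 *
 *	u32 cs_ctl;
 *
 *	cs_ctl = lpfc_vmid_get_cs_ctl(vport);	- reserve a free priority slot
 *	vmid->un.cs_ctl_vmid = cs_ctl;
 *	if (lpfc_vmid_uvem(vport, vmid, true))	- instantiate the VE mapping
 *		lpfc_vmid_put_cs_ctl(vport, cs_ctl);	- return the slot on failure
 */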