1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * * 10 * This program is free software; you can redistribute it and/or * 11 * modify it under the terms of version 2 of the GNU General * 12 * Public License as published by the Free Software Foundation. * 13 * This program is distributed in the hope that it will be useful. * 14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 18 * TO BE LEGALLY INVALID. See the GNU General Public License for * 19 * more details, a copy of which can be found in the file COPYING * 20 * included with this package. 
* 21 *******************************************************************/ 22 23 /* 24 * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS 25 */ 26 27 #include <linux/blkdev.h> 28 #include <linux/pci.h> 29 #include <linux/interrupt.h> 30 #include <linux/slab.h> 31 #include <linux/utsname.h> 32 33 #include <scsi/scsi.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/scsi_host.h> 36 #include <scsi/scsi_transport_fc.h> 37 #include <scsi/fc/fc_fs.h> 38 39 #include "lpfc_hw4.h" 40 #include "lpfc_hw.h" 41 #include "lpfc_sli.h" 42 #include "lpfc_sli4.h" 43 #include "lpfc_nl.h" 44 #include "lpfc_disc.h" 45 #include "lpfc.h" 46 #include "lpfc_scsi.h" 47 #include "lpfc_logmsg.h" 48 #include "lpfc_crtn.h" 49 #include "lpfc_version.h" 50 #include "lpfc_vport.h" 51 #include "lpfc_debugfs.h" 52 53 /* FDMI Port Speed definitions - FC-GS-7 */ 54 #define HBA_PORTSPEED_1GFC 0x00000001 /* 1G FC */ 55 #define HBA_PORTSPEED_2GFC 0x00000002 /* 2G FC */ 56 #define HBA_PORTSPEED_4GFC 0x00000008 /* 4G FC */ 57 #define HBA_PORTSPEED_10GFC 0x00000004 /* 10G FC */ 58 #define HBA_PORTSPEED_8GFC 0x00000010 /* 8G FC */ 59 #define HBA_PORTSPEED_16GFC 0x00000020 /* 16G FC */ 60 #define HBA_PORTSPEED_32GFC 0x00000040 /* 32G FC */ 61 #define HBA_PORTSPEED_20GFC 0x00000080 /* 20G FC */ 62 #define HBA_PORTSPEED_40GFC 0x00000100 /* 40G FC */ 63 #define HBA_PORTSPEED_128GFC 0x00000200 /* 128G FC */ 64 #define HBA_PORTSPEED_64GFC 0x00000400 /* 64G FC */ 65 #define HBA_PORTSPEED_256GFC 0x00000800 /* 256G FC */ 66 #define HBA_PORTSPEED_UNKNOWN 0x00008000 /* Unknown */ 67 #define HBA_PORTSPEED_10GE 0x00010000 /* 10G E */ 68 #define HBA_PORTSPEED_40GE 0x00020000 /* 40G E */ 69 #define HBA_PORTSPEED_100GE 0x00040000 /* 100G E */ 70 #define HBA_PORTSPEED_25GE 0x00080000 /* 25G E */ 71 #define HBA_PORTSPEED_50GE 0x00100000 /* 50G E */ 72 #define HBA_PORTSPEED_400GE 0x00200000 /* 400G E */ 73 74 #define FOURBYTES 4 75 76 77 static char *lpfc_release_version = LPFC_DRIVER_VERSION; 78 
/* Forward declaration: VMID CT command completion handler, defined later
 * in this file.
 */
static void
lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb);

/* Log (and otherwise ignore) an unsolicited CT buffer received via HBQ.
 * Informational only; no driver state is changed here.
 */
static void
lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
			  struct lpfc_dmabuf *mp, uint32_t size)
{
	if (!mp) {
		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
				"0146 Ignoring unsolicited CT No HBQ "
				"status = x%x\n",
				get_job_ulpstatus(phba, piocbq));
	}
	/* NOTE: the 0145 message is logged unconditionally, even when the
	 * "No HBQ" case above already fired (mp == NULL).
	 */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0145 Ignoring unsolicited CT HBQ Size:%d "
			"status = x%x\n",
			size, get_job_ulpstatus(phba, piocbq));
}

/* Handler for unsolicited CT buffers; currently just logs and drops them. */
static void
lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		     struct lpfc_dmabuf *mp, uint32_t size)
{
	lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
}

/**
 * lpfc_ct_unsol_cmpl : Completion callback function for unsol ct commands
 * @phba : pointer to lpfc hba data structure.
 * @cmdiocb : pointer to lpfc command iocb data structure.
 * @rspiocb : pointer to lpfc response iocb data structure.
 *
 * This routine is the callback function for issuing unsol ct reject command.
 * The memory allocated in the reject command path is freed up here.
 **/
static void
lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp, *bmp;

	/* Drop the node reference taken when the reject was issued */
	ndlp = cmdiocb->ndlp;
	if (ndlp)
		lpfc_nlp_put(ndlp);

	/* Free the response payload and the BPL allocated by
	 * lpfc_ct_reject_event().
	 */
	mp = cmdiocb->rsp_dmabuf;
	bmp = cmdiocb->bpl_dmabuf;
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		cmdiocb->rsp_dmabuf = NULL;
	}

	if (bmp) {
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
		kfree(bmp);
		cmdiocb->bpl_dmabuf = NULL;
	}

	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
 * @ndlp: pointer to a node-list data structure.
 * @ct_req: pointer to the CT request data structure.
 * @ulp_context: context of received UNSOL CT command
 * @ox_id: ox_id of the UNSOL CT command
 *
 * This routine is invoked by the lpfc_ct_handle_mibreq routine for sending
 * a reject response. Reject response is sent for the unhandled commands.
 **/
static void
lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
		     struct lpfc_sli_ct_request *ct_req,
		     u16 ulp_context, u16 ox_id)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ct_request *ct_rsp;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_dmabuf *bmp = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct ulp_bde64 *bpl;
	u8 rc = 0;	/* step code logged on failure, 0 means success */
	u32 tmo;

	/* fill in BDEs for command */
	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp) {
		rc = 1;
		goto ct_exit;
	}

	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
	if (!mp->virt) {
		rc = 2;
		goto ct_free_mp;
	}

	/* Allocate buffer for Buffer ptr list */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		rc = 3;
		goto ct_free_mpvirt;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys);
	if (!bmp->virt) {
		rc = 4;
		goto ct_free_bmp;
	}

	INIT_LIST_HEAD(&mp->list);
	INIT_LIST_HEAD(&bmp->list);

	/* One BDE describing the FS_RJT response payload */
	bpl = (struct ulp_bde64 *)bmp->virt;
	memset(bpl, 0, sizeof(struct ulp_bde64));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/* Build the CT FS_RJT response, echoing the request's FS type and
	 * subtype.
	 */
	ct_rsp = (struct lpfc_sli_ct_request *)mp->virt;
	memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request));

	ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION;
	ct_rsp->RevisionId.bits.InId = 0;
	ct_rsp->FsType = ct_req->FsType;
	ct_rsp->FsSubType = ct_req->FsSubType;
	ct_rsp->CommandResponse.bits.Size = 0;
	ct_rsp->CommandResponse.bits.CmdRsp =
		cpu_to_be16(SLI_CT_RESPONSE_FS_RJT);
	ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED;
	ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL;

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = 5;
		goto ct_free_bmpvirt;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
					 ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);
	} else {
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);
	}

	/* Save for completion so we can release these resources */
	cmdiocbq->rsp_dmabuf = mp;
	cmdiocbq->bpl_dmabuf = bmp;
	cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
	tmo = (3 * phba->fc_ratov);

	cmdiocbq->retry = 0;
	cmdiocbq->vport = vport;
	cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;

	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
	if (!cmdiocbq->ndlp)
		goto ct_no_ndlp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (rc) {
		lpfc_nlp_put(ndlp);
		goto ct_no_ndlp;
	}
	/* Success: lpfc_ct_unsol_cmpl() now owns mp/bmp/cmdiocbq */
	return;

	/* Error unwind: each label releases the resources acquired before
	 * its step failed; falls through in reverse allocation order.
	 */
ct_no_ndlp:
	rc = 6;
	lpfc_sli_release_iocbq(phba, cmdiocbq);
ct_free_bmpvirt:
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ct_free_bmp:
	kfree(bmp);
ct_free_mpvirt:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
ct_free_mp:
	kfree(mp);
ct_exit:
	lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
		      "6440 Unsol CT: Rsp err %d Data: x%lx\n",
		      rc, vport->fc_flag);
}

/**
 * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
 * @phba: pointer to lpfc hba data structure.
 * @ctiocbq: pointer to lpfc CT command iocb data structure.
 *
 * This routine is used for processing the IOCB associated with a unsolicited
 * CT MIB request.
It first determines whether there is an existing ndlp that 279 * matches the DID from the unsolicited IOCB. If not, it will return. 280 **/ 281 static void 282 lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) 283 { 284 struct lpfc_sli_ct_request *ct_req; 285 struct lpfc_nodelist *ndlp = NULL; 286 struct lpfc_vport *vport = ctiocbq->vport; 287 u32 ulp_status = get_job_ulpstatus(phba, ctiocbq); 288 u32 ulp_word4 = get_job_word4(phba, ctiocbq); 289 u32 did; 290 u16 mi_cmd; 291 292 did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp); 293 if (ulp_status) { 294 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 295 "6438 Unsol CT: status:x%x/x%x did : x%x\n", 296 ulp_status, ulp_word4, did); 297 return; 298 } 299 300 /* Ignore traffic received during vport shutdown */ 301 if (test_bit(FC_UNLOADING, &vport->load_flag)) 302 return; 303 304 ndlp = lpfc_findnode_did(vport, did); 305 if (!ndlp) { 306 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 307 "6439 Unsol CT: NDLP Not Found for DID : x%x", 308 did); 309 return; 310 } 311 312 ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; 313 314 mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); 315 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, 316 "6442 MI Cmd: x%x Not Supported\n", mi_cmd); 317 lpfc_ct_reject_event(ndlp, ct_req, 318 bf_get(wqe_ctxt_tag, 319 &ctiocbq->wqe.xmit_els_rsp.wqe_com), 320 bf_get(wqe_rcvoxid, 321 &ctiocbq->wqe.xmit_els_rsp.wqe_com)); 322 } 323 324 /** 325 * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring 326 * @phba: pointer to lpfc hba data structure. 327 * @pring: pointer to a SLI ring. 328 * @ctiocbq: pointer to lpfc ct iocb data structure. 329 * 330 * This routine is used to process an unsolicited event received from a SLI 331 * (Service Level Interface) ring. 
 * The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking appropriate
 * routine after properly set up the iocb buffer from the SLI ring on which
 * the unsolicited event was received.
 **/
void
lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *ctiocbq)
{
	struct lpfc_dmabuf *mp = NULL;
	IOCB_t *icmd = &ctiocbq->iocb;
	int i;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocb;
	dma_addr_t dma_addr;
	uint32_t size;
	struct list_head head;
	struct lpfc_sli_ct_request *ct_req;
	struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
	struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
	u32 status, parameter, bde_count = 0;
	struct lpfc_wcqe_complete *wcqe_cmpl = NULL;

	/* Detach the buffers from the iocb; they are re-attached below only
	 * if the completion actually carries BDEs.
	 */
	ctiocbq->cmd_dmabuf = NULL;
	ctiocbq->rsp_dmabuf = NULL;
	ctiocbq->bpl_dmabuf = NULL;

	wcqe_cmpl = &ctiocbq->wcqe_cmpl;
	status = get_job_ulpstatus(phba, ctiocbq);
	parameter = get_job_word4(phba, ctiocbq);
	/* SLI4 reports the BDE count in the WCQE; SLI3 in the IOCB */
	if (phba->sli_rev == LPFC_SLI_REV4)
		bde_count = wcqe_cmpl->word3;
	else
		bde_count = icmd->ulpBdeCount;

	if (unlikely(status == IOSTAT_NEED_BUFFER)) {
		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
	} else if ((status == IOSTAT_LOCAL_REJECT) &&
		   ((parameter & IOERR_PARAM_MASK) ==
		   IOERR_RCV_BUFFER_WAITING)) {
		/* Not enough posted buffers; Try posting more buffers */
		phba->fc_stat.NoRcvBuf++;
		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
			lpfc_sli3_post_buffer(phba, pring, 2);
		return;
	}

	/* If there are no BDEs associated
	 * with this IOCB, there is nothing to do.
	 */
	if (bde_count == 0)
		return;

	ctiocbq->cmd_dmabuf = bdeBuf1;
	if (bde_count == 2)
		ctiocbq->bpl_dmabuf = bdeBuf2;

	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;

	/* MIB requests are handled in-driver; everything else is offered
	 * to the BSG (userspace CT passthrough) layer first.
	 */
	if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
	    ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
		lpfc_ct_handle_mibreq(phba, ctiocbq);
	} else {
		if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
			return;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		/* HBQ path: walk the iocb chain and free each attached
		 * buffer after logging it.
		 */
		INIT_LIST_HEAD(&head);
		list_add_tail(&head, &ctiocbq->list);
		list_for_each_entry(iocb, &head, list) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				bde_count = iocb->wcqe_cmpl.word3;
			else
				bde_count = iocb->iocb.ulpBdeCount;

			if (!bde_count)
				continue;
			bdeBuf1 = iocb->cmd_dmabuf;
			iocb->cmd_dmabuf = NULL;
			if (phba->sli_rev == LPFC_SLI_REV4)
				size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
			else
				size = iocb->iocb.un.cont64[0].tus.f.bdeSize;
			lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
			lpfc_in_buf_free(phba, bdeBuf1);
			if (bde_count == 2) {
				bdeBuf2 = iocb->bpl_dmabuf;
				iocb->bpl_dmabuf = NULL;
				if (phba->sli_rev == LPFC_SLI_REV4)
					size = iocb->unsol_rcv_len;
				else
					size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
				lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
						     size);
				lpfc_in_buf_free(phba, bdeBuf2);
			}
		}
		list_del(&head);
	} else {
		/* Non-HBQ (ring-posted buffer) path: reclaim each posted
		 * buffer by its DMA address, then repost replacements.
		 */
		INIT_LIST_HEAD(&head);
		list_add_tail(&head, &ctiocbq->list);
		list_for_each_entry(iocbq, &head, list) {
			icmd = &iocbq->iocb;
			if (icmd->ulpBdeCount == 0)
				lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
			for (i = 0; i < icmd->ulpBdeCount; i++) {
				dma_addr = getPaddr(icmd->un.cont64[i].addrHigh,
						    icmd->un.cont64[i].addrLow);
				mp = lpfc_sli_ringpostbuf_get(phba, pring,
							      dma_addr);
				size = icmd->un.cont64[i].tus.f.bdeSize;
				lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
				lpfc_in_buf_free(phba, mp);
			}
			lpfc_sli3_post_buffer(phba, pring, i);
		}
		list_del(&head);
	}
}

/**
 * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function serves as the upper level protocol abort handler for CT
 * protocol.
 *
 * Return 1 if abort has been handled, 0 otherwise.
 **/
int
lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	int handled;

	/* CT upper level goes through BSG */
	handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);

	return handled;
}

/* Free a CT response buffer chain built by lpfc_alloc_ct_rsp().  The list
 * head @mlist itself is freed last.
 */
static void
lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
		list_del(&mlast->list);
		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
		kfree(mlast);
	}
	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
	kfree(mlist);
	return;
}

/* Allocate a chain of DMA buffers to receive a CT response of @size bytes
 * and fill @bpl with one BDE per buffer.  On success returns the list head
 * and stores the number of BDEs written in *@entries; returns NULL (with
 * everything freed) on allocation failure.
 *
 * NOTE(review): cnt is computed once before the loop, so when @size exceeds
 * FCELSSIZE it appears it must be a multiple of FCELSSIZE or the final
 * "size -= cnt" would underflow the unsigned size — presumably guaranteed
 * by the callers' rsp_size values; confirm against lpfc_ns_cmd().
 */
static struct lpfc_dmabuf *
lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl,
		  uint32_t size, int *entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	int cnt, i = 0;

	/* We get chunks of FCELSSIZE */
	cnt = size > FCELSSIZE ? FCELSSIZE : size;

	while (size) {
		/* Allocate buffer for rsp payload */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);

		/* GID_FT/GFF_ID responses are allocated at high priority */
		if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT ||
		    be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID)
			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
		else
			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		size -= cnt;
	}

	*entries = i;
	return mlist;
}

/* Release every DMA buffer attached to a CT iocb, then return the iocb to
 * the pool.  Always returns 0.
 */
int
lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
	struct lpfc_dmabuf *buf_ptr;

	/* IOCBQ job structure gets cleaned during release.  Just release
	 * the dma buffers here.
	 */
	if (ctiocb->cmd_dmabuf) {
		buf_ptr = ctiocb->cmd_dmabuf;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		ctiocb->cmd_dmabuf = NULL;
	}
	/* rsp_dmabuf is a chain from lpfc_alloc_ct_rsp() */
	if (ctiocb->rsp_dmabuf) {
		lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
		ctiocb->rsp_dmabuf = NULL;
	}

	if (ctiocb->bpl_dmabuf) {
		buf_ptr = ctiocb->bpl_dmabuf;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		ctiocb->bpl_dmabuf = NULL;
	}
	lpfc_sli_release_iocbq(phba, ctiocb);
	return 0;
}

/*
 * lpfc_gen_req - Build and issue a GEN_REQUEST command to the SLI Layer
 * @vport: pointer to a host virtual N_Port data structure.
 * @bmp: Pointer to BPL for SLI command
 * @inp: Pointer to data buffer for response data.
 * @outp: Pointer to data buffer that hold the CT command.
 * @cmpl: completion routine to call when command completes
 * @ndlp: Destination NPort nodelist entry
 *
 * This function as the final part for issuing a CT command.
584 */ 585 static int 586 lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, 587 struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp, 588 void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, 589 struct lpfc_iocbq *), 590 struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry, 591 uint32_t tmo, uint8_t retry) 592 { 593 struct lpfc_hba *phba = vport->phba; 594 struct lpfc_iocbq *geniocb; 595 int rc; 596 u16 ulp_context; 597 598 /* Allocate buffer for command iocb */ 599 geniocb = lpfc_sli_get_iocbq(phba); 600 601 if (geniocb == NULL) 602 return 1; 603 604 /* Update the num_entry bde count */ 605 geniocb->num_bdes = num_entry; 606 607 geniocb->bpl_dmabuf = bmp; 608 609 /* Save for completion so we can release these resources */ 610 geniocb->cmd_dmabuf = inp; 611 geniocb->rsp_dmabuf = outp; 612 613 geniocb->event_tag = event_tag; 614 615 if (!tmo) { 616 /* FC spec states we need 3 * ratov for CT requests */ 617 tmo = (3 * phba->fc_ratov); 618 } 619 620 if (phba->sli_rev == LPFC_SLI_REV4) 621 ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 622 else 623 ulp_context = ndlp->nlp_rpi; 624 625 lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo); 626 627 /* Issue GEN REQ IOCB for NPORT <did> */ 628 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 629 "0119 Issue GEN REQ IOCB to NPORT x%x " 630 "Data: x%x x%x\n", 631 ndlp->nlp_DID, geniocb->iotag, 632 vport->port_state); 633 geniocb->cmd_cmpl = cmpl; 634 geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; 635 geniocb->vport = vport; 636 geniocb->retry = retry; 637 geniocb->ndlp = lpfc_nlp_get(ndlp); 638 if (!geniocb->ndlp) 639 goto out; 640 641 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0); 642 if (rc == IOCB_ERROR) { 643 lpfc_nlp_put(ndlp); 644 goto out; 645 } 646 647 return 0; 648 out: 649 lpfc_sli_release_iocbq(phba, geniocb); 650 return 1; 651 } 652 653 /* 654 * lpfc_ct_cmd - Build and issue a CT command 655 * @vport: pointer to a host virtual N_Port data structure. 
 * @inmp: Pointer to data buffer for response data.
 * @bmp: Pointer to BPL for SLI command
 * @ndlp: Destination NPort nodelist entry
 * @cmpl: completion routine to call when command completes
 *
 * This function is called for issuing a CT command.
 */
static int
lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
	    struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
	    void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *),
	    uint32_t rsp_size, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
	struct lpfc_dmabuf *outmp;
	int cnt = 0, status;
	__be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)->
		CommandResponse.bits.CmdRsp;

	bpl++;			/* Skip past ct request */

	/* Put buffer(s) for ct rsp in bpl */
	outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
	if (!outmp)
		return -ENOMEM;
	/*
	 * Form the CT IOCB.  The total number of BDEs in this IOCB
	 * is the single command plus response count from
	 * lpfc_alloc_ct_rsp.
	 */
	cnt += 1;
	status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp,
			phba->fc_eventTag, cnt, 0, retry);
	if (status) {
		/* lpfc_gen_req() did not take ownership of outmp */
		lpfc_free_ct_rsp(phba, outmp);
		return -ENOMEM;
	}
	return 0;
}

/* Return the vport on @phba whose assigned (non-zero) DID equals @did, or
 * NULL if no such vport exists.  Walks port_list under port_list_lock.
 */
struct lpfc_vport *
lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
	struct lpfc_vport *vport_curr;
	unsigned long flags;

	spin_lock_irqsave(&phba->port_list_lock, flags);
	list_for_each_entry(vport_curr, &phba->port_list, listentry) {
		if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
			spin_unlock_irqrestore(&phba->port_list_lock, flags);
			return vport_curr;
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
	return NULL;
}

/* Set up (or skip) discovery for one DID reported in a NameServer
 * response, tagging the node with the reported FC4 type.
 */
static void
lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
	struct lpfc_nodelist *ndlp;

	if ((vport->port_type != LPFC_NPIV_PORT) ||
	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {

		ndlp = lpfc_setup_disc_node(vport, Did);

		if (ndlp) {
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"Parse GID_FTrsp: did:x%x flg:x%lx x%x",
				Did, ndlp->nlp_flag, vport->fc_flag);

			/* By default, the driver expects to support FCP FC4 */
			if (fc4_type == FC_TYPE_FCP)
				ndlp->nlp_fc4_type |= NLP_FC4_FCP;

			if (fc4_type == FC_TYPE_NVME)
				ndlp->nlp_fc4_type |= NLP_FC4_NVME;

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0238 Process x%06x NameServer Rsp "
					 "Data: x%lx x%x x%x x%lx x%x\n", Did,
					 ndlp->nlp_flag, ndlp->nlp_fc4_type,
					 ndlp->nlp_state, vport->fc_flag,
					 vport->fc_rscn_id_cnt);

			/* if ndlp needs to be discovered and prior
			 * state of ndlp hit devloss, change state to
			 * allow rediscovery.
			 */
			if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) &&
			    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_NPR_NODE);
			}
		} else {
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"Skip1 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
				Did, vport->fc_flag, vport->fc_rscn_id_cnt);

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0239 Skip x%06x NameServer Rsp "
					 "Data: x%lx x%x x%px\n",
					 Did, vport->fc_flag,
					 vport->fc_rscn_id_cnt, ndlp);
		}
	} else {
		/* Restricted-login NPIV port: only query DIDs that are in
		 * the current RSCN payload (or all, when not in RSCN mode).
		 */
		if (!test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
		    lpfc_rscn_payload_check(vport, Did)) {
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"Query GID_FTrsp: did:x%x flg:x%lx cnt:%d",
				Did, vport->fc_flag, vport->fc_rscn_id_cnt);

			/*
			 * This NPortID was previously a FCP/NVMe target,
			 * Don't even bother to send GFF_ID.
			 */
			ndlp = lpfc_findnode_did(vport, Did);
			if (ndlp &&
			    (ndlp->nlp_type &
			    (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
				if (fc4_type == FC_TYPE_FCP)
					ndlp->nlp_fc4_type |= NLP_FC4_FCP;
				if (fc4_type == FC_TYPE_NVME)
					ndlp->nlp_fc4_type |= NLP_FC4_NVME;
				lpfc_setup_disc_node(vport, Did);
			} else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
				   0, Did) == 0)
				vport->num_disc_nodes++;
			else
				lpfc_setup_disc_node(vport, Did);
		} else {
			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"Skip2 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
				Did, vport->fc_flag, vport->fc_rscn_id_cnt);

			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0245 Skip x%06x NameServer Rsp "
					 "Data: x%lx x%x\n", Did,
					 vport->fc_flag,
					 vport->fc_rscn_id_cnt);
		}
	}
}

/* Audit one DID from a GID_FT/GID_PT response.  Initiator paths hand the
 * DID to lpfc_prep_node_fc4type(); the NVMET path instead marks/clears the
 * NLP_NVMET_RECOV bit on known NVME initiator nodes for later recovery.
 */
static void
lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	char *str;

	if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
		str = "GID_FT";
	else
		str = "GID_PT";
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "6430 Process %s rsp for %08x type %x %s %s\n",
			 str, Did, fc4_type,
			 (fc4_type == FC_TYPE_FCP) ? "FCP" : " ",
			 (fc4_type == FC_TYPE_NVME) ? "NVME" : " ");
	/*
	 * To conserve rpi's, filter out addresses for other
	 * vports on the same physical HBAs.
	 */
	if (Did != vport->fc_myDID &&
	    (!lpfc_find_vport_by_did(phba, Did) ||
	     vport->cfg_peer_port_login)) {
		if (!phba->nvmet_support) {
			/* FCPI/NVMEI path. Process Did */
			lpfc_prep_node_fc4type(vport, Did, fc4_type);
			return;
		}
		/* NVMET path. NVMET only cares about NVMEI nodes. */
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
			    ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
				continue;
			if (ndlp->nlp_DID == Did)
				clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
			else
				set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
		}
	}
}

/* Walk a (possibly chained) NameServer response of @Size bytes and audit
 * every DID entry.  Always returns 0.
 */
static int
lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
	    uint32_t Size)
{
	struct lpfc_sli_ct_request *Response =
		(struct lpfc_sli_ct_request *) mp->virt;
	struct lpfc_dmabuf *mlast, *next_mp;
	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
	uint32_t Did, CTentry;
	int Cnt;
	struct list_head head;
	struct lpfc_nodelist *ndlp = NULL;

	lpfc_set_disctmo(vport);
	vport->num_disc_nodes = 0;
	vport->fc_ns_retry = 0;


	list_add_tail(&head, &mp->list);
	list_for_each_entry_safe(mp, next_mp, &head, list) {
		mlast = mp;

		Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;

		Size -= Cnt;

		/* First chunk starts at the CT payload; subsequent chunks
		 * start at the buffer head, so only the first subtracts
		 * the CT header length.
		 */
		if (!ctptr) {
			ctptr = (uint32_t *) mlast->virt;
		} else
			Cnt -= 16;	/* subtract length of CT header */

		/* Loop through entire NameServer list of DIDs */
		while (Cnt >= sizeof(uint32_t)) {
			/* Get next DID from NameServer List */
			CTentry = *ctptr++;
			Did = ((be32_to_cpu(CTentry)) & Mask_DID);
			lpfc_ns_rsp_audit_did(vport, Did, fc4_type);
			if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
				goto nsout1;

			Cnt -= sizeof(uint32_t);
		}
		ctptr = NULL;

	}

	/* All GID_FT entries processed.  If the driver is running in
	 * in target mode, put impacted nodes into recovery and drop
	 * the RPI to flush outstanding IO.
	 */
	if (vport->phba->nvmet_support) {
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag))
				continue;
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);
			clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag);
		}
	}

nsout1:
	list_del(&head);
	return 0;
}

/* Completion handler for a GID_FT NameServer query: validates the
 * completion against link/unload/RSCN state, retries on error, parses a
 * good response via lpfc_ns_rsp(), and kicks discovery when all
 * outstanding GID_FTs have completed.
 */
static void
lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *outp;
	struct lpfc_dmabuf *inp;
	struct lpfc_sli_ct_request *CTrsp;
	struct lpfc_sli_ct_request *CTreq;
	struct lpfc_nodelist *ndlp;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	int rc, type;

	/* First save ndlp, before we overwrite it */
	ndlp = cmdiocb->ndlp;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->rsp_iocb = rspiocb;
	inp = cmdiocb->cmd_dmabuf;
	outp = cmdiocb->rsp_dmabuf;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "GID_FT cmpl: status:x%x/x%x rtry:%d",
			      ulp_status, ulp_word4, vport->fc_ns_retry);

	/* Ignore response if link flipped after this request was made */
	if (cmdiocb->event_tag != phba->fc_eventTag) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "9043 Event tag mismatch. Ignoring NS rsp\n");
		goto out;
	}

	/* Skip processing response on pport if unloading */
	if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		goto out;
	}

	if (lpfc_els_chk_latt(vport)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0216 Link event during NS query\n");
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out;
	}
	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0226 NS query failed due to link event: "
				 "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
				 "port_state x%x gidft_inp x%x\n",
				 ulp_status, ulp_word4, vport->fc_flag,
				 vport->port_state, vport->gidft_inp);
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		if (vport->gidft_inp)
			vport->gidft_inp--;
		goto out;
	}

	if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
		/* This is a GID_FT completing so the gidft_inp counter was
		 * incremented before the GID_FT was issued to the wire.
		 */
		if (vport->gidft_inp)
			vport->gidft_inp--;

		/*
		 * Skip processing the NS response
		 * Re-issue the NS cmd
		 */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0151 Process Deferred RSCN Data: x%lx x%x\n",
				 vport->fc_flag, vport->fc_rscn_id_cnt);
		lpfc_els_handle_rscn(vport);

		goto out;
	}

	if (ulp_status) {
		/* Check for retry */
		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* "out of resources" rejections do not consume a
			 * retry slot.
			 */
			if (ulp_status != IOSTAT_LOCAL_REJECT ||
			    (ulp_word4 & IOERR_PARAM_MASK) !=
			    IOERR_NO_RESOURCES)
				vport->fc_ns_retry++;

			type = lpfc_get_gidft_type(vport, cmdiocb);
			if (type == 0)
				goto out;

			/* CT command is being retried */
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, type);
			if (rc == 0)
				goto out;
			else { /* Unable to send NS cmd */
				if (vport->gidft_inp)
					vport->gidft_inp--;
			}
		}
		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
			lpfc_els_flush_rscn(vport);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0257 GID_FT Query error: 0x%x 0x%x\n",
				 ulp_status, vport->fc_ns_retry);
	} else {
		/* Good status, continue checking */
		CTreq = (struct lpfc_sli_ct_request *) inp->virt;
		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
		if (CTrsp->CommandResponse.bits.CmdRsp ==
		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
					 "0208 NameServer Rsp Data: x%lx x%x "
					 "x%x x%x sz x%x\n",
					 vport->fc_flag,
					 CTreq->un.gid.Fc4Type,
					 vport->num_disc_nodes,
					 vport->gidft_inp,
					 get_job_data_placed(phba, rspiocb));

			lpfc_ns_rsp(vport,
				    outp,
				    CTreq->un.gid.Fc4Type,
				    get_job_data_placed(phba, rspiocb));
		} else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
			   SLI_CT_RESPONSE_FS_RJT) {
			/* NameServer Rsp Error */
			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
				lpfc_printf_vlog(vport, KERN_INFO,
					LOG_DISCOVERY,
					"0269 No NameServer Entries "
					"Data: x%x x%x x%x x%lx\n",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation,
					vport->fc_flag);

				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
					"GID_FT no entry cmd:x%x rsn:x%x exp:x%x",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation);
			} else {
				lpfc_printf_vlog(vport, KERN_INFO,
					LOG_DISCOVERY,
					"0240 NameServer Rsp Error "
					"Data: x%x x%x x%x x%lx\n",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation,
					vport->fc_flag);

				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
					"GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x",
					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					(uint32_t) CTrsp->ReasonCode,
					(uint32_t) CTrsp->Explanation);
			}


		} else {
			/* NameServer Rsp Error */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0241 NameServer Rsp Error "
					 "Data: x%x x%x x%x x%lx\n",
					 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
					 (uint32_t) CTrsp->ReasonCode,
					 (uint32_t) CTrsp->Explanation,
					 vport->fc_flag);

			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				"GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x",
				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
				(uint32_t) CTrsp->ReasonCode,
				(uint32_t) CTrsp->Explanation);
		}
		if (vport->gidft_inp)
			vport->gidft_inp--;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "4216 GID_FT cmpl inp %d disc %d\n",
			 vport->gidft_inp, vport->num_disc_nodes);

	/* Link up / RSCN discovery */
	if ((vport->num_disc_nodes == 0) &&
	    (vport->gidft_inp == 0)) {
		/*
		 * The driver has cycled through all Nports in the RSCN payload.
		 * Complete the handling by cleaning up and marking the
		 * current driver state.
		 */
		if (vport->port_state >= LPFC_DISC_AUTH) {
			if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
				lpfc_els_flush_rscn(vport);
				/* RSCN still */
				set_bit(FC_RSCN_MODE, &vport->fc_flag);
			} else {
				lpfc_els_flush_rscn(vport);
			}
		}

		lpfc_disc_start(vport);
	}
out:
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
	return;
}

/* Completion handler for a GID_PT NameServer query (continues past the
 * end of this view).
 */
static void
lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *outp;
	struct lpfc_dmabuf *inp;
	struct lpfc_sli_ct_request *CTrsp;
	struct lpfc_sli_ct_request *CTreq;
	struct lpfc_nodelist *ndlp;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);
	int rc;

	/* First save ndlp, before we overwrite it */
	ndlp = cmdiocb->ndlp;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->rsp_iocb = rspiocb;
	inp = cmdiocb->cmd_dmabuf;
	outp = cmdiocb->rsp_dmabuf;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "GID_PT cmpl: status:x%x/x%x rtry:%d",
			      ulp_status, ulp_word4,
			      vport->fc_ns_retry);

	/* Ignore response if link flipped after this request was made */
	if (cmdiocb->event_tag != phba->fc_eventTag) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "9044 Event tag mismatch.
Ignoring NS rsp\n"); 1154 goto out; 1155 } 1156 1157 /* Skip processing response on pport if unloading */ 1158 if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) { 1159 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) 1160 lpfc_els_flush_rscn(vport); 1161 goto out; 1162 } 1163 1164 if (lpfc_els_chk_latt(vport)) { 1165 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1166 "4108 Link event during NS query\n"); 1167 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) 1168 lpfc_els_flush_rscn(vport); 1169 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1170 goto out; 1171 } 1172 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { 1173 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1174 "4166 NS query failed due to link event: " 1175 "ulp_status x%x ulp_word4 x%x fc_flag x%lx " 1176 "port_state x%x gidft_inp x%x\n", 1177 ulp_status, ulp_word4, vport->fc_flag, 1178 vport->port_state, vport->gidft_inp); 1179 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) 1180 lpfc_els_flush_rscn(vport); 1181 if (vport->gidft_inp) 1182 vport->gidft_inp--; 1183 goto out; 1184 } 1185 1186 if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) { 1187 /* This is a GID_PT completing so the gidft_inp counter was 1188 * incremented before the GID_PT was issued to the wire. 
1189 */ 1190 if (vport->gidft_inp) 1191 vport->gidft_inp--; 1192 1193 /* 1194 * Skip processing the NS response 1195 * Re-issue the NS cmd 1196 */ 1197 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1198 "4167 Process Deferred RSCN Data: x%lx x%x\n", 1199 vport->fc_flag, vport->fc_rscn_id_cnt); 1200 lpfc_els_handle_rscn(vport); 1201 1202 goto out; 1203 } 1204 1205 if (ulp_status) { 1206 /* Check for retry */ 1207 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 1208 if (ulp_status != IOSTAT_LOCAL_REJECT || 1209 (ulp_word4 & IOERR_PARAM_MASK) != 1210 IOERR_NO_RESOURCES) 1211 vport->fc_ns_retry++; 1212 1213 /* CT command is being retried */ 1214 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 1215 vport->fc_ns_retry, GID_PT_N_PORT); 1216 if (rc == 0) 1217 goto out; 1218 else { /* Unable to send NS cmd */ 1219 if (vport->gidft_inp) 1220 vport->gidft_inp--; 1221 } 1222 } 1223 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) 1224 lpfc_els_flush_rscn(vport); 1225 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1226 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1227 "4103 GID_FT Query error: 0x%x 0x%x\n", 1228 ulp_status, vport->fc_ns_retry); 1229 } else { 1230 /* Good status, continue checking */ 1231 CTreq = (struct lpfc_sli_ct_request *)inp->virt; 1232 CTrsp = (struct lpfc_sli_ct_request *)outp->virt; 1233 if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == 1234 SLI_CT_RESPONSE_FS_ACC) { 1235 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1236 "4105 NameServer Rsp Data: x%lx x%x " 1237 "x%x x%x sz x%x\n", 1238 vport->fc_flag, 1239 CTreq->un.gid.Fc4Type, 1240 vport->num_disc_nodes, 1241 vport->gidft_inp, 1242 get_job_data_placed(phba, rspiocb)); 1243 1244 lpfc_ns_rsp(vport, 1245 outp, 1246 CTreq->un.gid.Fc4Type, 1247 get_job_data_placed(phba, rspiocb)); 1248 } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == 1249 SLI_CT_RESPONSE_FS_RJT) { 1250 /* NameServer Rsp Error */ 1251 if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) 1252 && (CTrsp->Explanation == 
SLI_CT_NO_FC4_TYPES)) { 1253 lpfc_printf_vlog( 1254 vport, KERN_INFO, LOG_DISCOVERY, 1255 "4106 No NameServer Entries " 1256 "Data: x%x x%x x%x x%lx\n", 1257 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), 1258 (uint32_t)CTrsp->ReasonCode, 1259 (uint32_t)CTrsp->Explanation, 1260 vport->fc_flag); 1261 1262 lpfc_debugfs_disc_trc( 1263 vport, LPFC_DISC_TRC_CT, 1264 "GID_PT no entry cmd:x%x rsn:x%x exp:x%x", 1265 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), 1266 (uint32_t)CTrsp->ReasonCode, 1267 (uint32_t)CTrsp->Explanation); 1268 } else { 1269 lpfc_printf_vlog( 1270 vport, KERN_INFO, LOG_DISCOVERY, 1271 "4107 NameServer Rsp Error " 1272 "Data: x%x x%x x%x x%lx\n", 1273 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), 1274 (uint32_t)CTrsp->ReasonCode, 1275 (uint32_t)CTrsp->Explanation, 1276 vport->fc_flag); 1277 1278 lpfc_debugfs_disc_trc( 1279 vport, LPFC_DISC_TRC_CT, 1280 "GID_PT rsp err1 cmd:x%x rsn:x%x exp:x%x", 1281 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), 1282 (uint32_t)CTrsp->ReasonCode, 1283 (uint32_t)CTrsp->Explanation); 1284 } 1285 } else { 1286 /* NameServer Rsp Error */ 1287 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1288 "4109 NameServer Rsp Error " 1289 "Data: x%x x%x x%x x%lx\n", 1290 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), 1291 (uint32_t)CTrsp->ReasonCode, 1292 (uint32_t)CTrsp->Explanation, 1293 vport->fc_flag); 1294 1295 lpfc_debugfs_disc_trc( 1296 vport, LPFC_DISC_TRC_CT, 1297 "GID_PT rsp err2 cmd:x%x rsn:x%x exp:x%x", 1298 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), 1299 (uint32_t)CTrsp->ReasonCode, 1300 (uint32_t)CTrsp->Explanation); 1301 } 1302 if (vport->gidft_inp) 1303 vport->gidft_inp--; 1304 } 1305 1306 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1307 "6450 GID_PT cmpl inp %d disc %d\n", 1308 vport->gidft_inp, vport->num_disc_nodes); 1309 1310 /* Link up / RSCN discovery */ 1311 if ((vport->num_disc_nodes == 0) && 1312 (vport->gidft_inp == 0)) { 1313 /* 1314 * The driver has cycled through all Nports in 
the RSCN payload. 1315 * Complete the handling by cleaning up and marking the 1316 * current driver state. 1317 */ 1318 if (vport->port_state >= LPFC_DISC_AUTH) { 1319 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { 1320 lpfc_els_flush_rscn(vport); 1321 /* RSCN still */ 1322 set_bit(FC_RSCN_MODE, &vport->fc_flag); 1323 } else { 1324 lpfc_els_flush_rscn(vport); 1325 } 1326 } 1327 1328 lpfc_disc_start(vport); 1329 } 1330 out: 1331 lpfc_ct_free_iocb(phba, cmdiocb); 1332 lpfc_nlp_put(ndlp); 1333 } 1334 1335 static void 1336 lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1337 struct lpfc_iocbq *rspiocb) 1338 { 1339 struct lpfc_vport *vport = cmdiocb->vport; 1340 struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; 1341 struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; 1342 struct lpfc_sli_ct_request *CTrsp; 1343 int did, rc, retry; 1344 uint8_t fbits; 1345 struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL; 1346 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1347 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1348 1349 did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId; 1350 did = be32_to_cpu(did); 1351 1352 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, 1353 "GFF_ID cmpl: status:x%x/x%x did:x%x", 1354 ulp_status, ulp_word4, did); 1355 1356 /* Ignore response if link flipped after this request was made */ 1357 if (cmdiocb->event_tag != phba->fc_eventTag) { 1358 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1359 "9045 Event tag mismatch. Ignoring NS rsp\n"); 1360 goto iocb_free; 1361 } 1362 1363 if (ulp_status == IOSTAT_SUCCESS) { 1364 /* Good status, continue checking */ 1365 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 1366 fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET]; 1367 1368 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1369 "6431 Process GFF_ID rsp for %08x " 1370 "fbits %02x %s %s\n", 1371 did, fbits, 1372 (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ", 1373 (fbits & FC4_FEATURE_TARGET) ? 
"Target" : " "); 1374 1375 if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == 1376 SLI_CT_RESPONSE_FS_ACC) { 1377 if ((fbits & FC4_FEATURE_INIT) && 1378 !(fbits & FC4_FEATURE_TARGET)) { 1379 lpfc_printf_vlog(vport, KERN_INFO, 1380 LOG_DISCOVERY, 1381 "0270 Skip x%x GFF " 1382 "NameServer Rsp Data: (init) " 1383 "x%x x%x\n", did, fbits, 1384 vport->fc_rscn_id_cnt); 1385 goto out; 1386 } 1387 } 1388 } 1389 else { 1390 /* Check for retry */ 1391 if (cmdiocb->retry < LPFC_MAX_NS_RETRY) { 1392 retry = 1; 1393 if (ulp_status == IOSTAT_LOCAL_REJECT) { 1394 switch ((ulp_word4 & 1395 IOERR_PARAM_MASK)) { 1396 1397 case IOERR_NO_RESOURCES: 1398 /* We don't increment the retry 1399 * count for this case. 1400 */ 1401 break; 1402 case IOERR_LINK_DOWN: 1403 case IOERR_SLI_ABORTED: 1404 case IOERR_SLI_DOWN: 1405 retry = 0; 1406 break; 1407 default: 1408 cmdiocb->retry++; 1409 } 1410 } 1411 else 1412 cmdiocb->retry++; 1413 1414 if (retry) { 1415 /* CT command is being retried */ 1416 rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, 1417 cmdiocb->retry, did); 1418 if (rc == 0) { 1419 /* success */ 1420 free_ndlp = cmdiocb->ndlp; 1421 lpfc_ct_free_iocb(phba, cmdiocb); 1422 lpfc_nlp_put(free_ndlp); 1423 return; 1424 } 1425 } 1426 } 1427 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1428 "0267 NameServer GFF Rsp " 1429 "x%x Error (%d %d) Data: x%lx x%x\n", 1430 did, ulp_status, ulp_word4, 1431 vport->fc_flag, vport->fc_rscn_id_cnt); 1432 } 1433 1434 /* This is a target port, unregistered port, or the GFF_ID failed */ 1435 ndlp = lpfc_setup_disc_node(vport, did); 1436 if (ndlp) { 1437 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1438 "0242 Process x%x GFF " 1439 "NameServer Rsp Data: x%lx x%lx x%x\n", 1440 did, ndlp->nlp_flag, vport->fc_flag, 1441 vport->fc_rscn_id_cnt); 1442 } else { 1443 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1444 "0243 Skip x%x GFF " 1445 "NameServer Rsp Data: x%lx x%x\n", did, 1446 vport->fc_flag, vport->fc_rscn_id_cnt); 1447 } 1448 out: 1449 /* 
Link up / RSCN discovery */ 1450 if (vport->num_disc_nodes) 1451 vport->num_disc_nodes--; 1452 1453 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1454 "6451 GFF_ID cmpl inp %d disc %d\n", 1455 vport->gidft_inp, vport->num_disc_nodes); 1456 1457 if (vport->num_disc_nodes == 0) { 1458 /* 1459 * The driver has cycled through all Nports in the RSCN payload. 1460 * Complete the handling by cleaning up and marking the 1461 * current driver state. 1462 */ 1463 if (vport->port_state >= LPFC_DISC_AUTH) { 1464 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { 1465 lpfc_els_flush_rscn(vport); 1466 /* RSCN still */ 1467 set_bit(FC_RSCN_MODE, &vport->fc_flag); 1468 } else { 1469 lpfc_els_flush_rscn(vport); 1470 } 1471 } 1472 lpfc_disc_start(vport); 1473 } 1474 1475 iocb_free: 1476 free_ndlp = cmdiocb->ndlp; 1477 lpfc_ct_free_iocb(phba, cmdiocb); 1478 lpfc_nlp_put(free_ndlp); 1479 return; 1480 } 1481 1482 static void 1483 lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1484 struct lpfc_iocbq *rspiocb) 1485 { 1486 struct lpfc_vport *vport = cmdiocb->vport; 1487 struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; 1488 struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; 1489 struct lpfc_sli_ct_request *CTrsp; 1490 int did; 1491 struct lpfc_nodelist *ndlp = NULL; 1492 struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp; 1493 uint32_t fc4_data_0, fc4_data_1; 1494 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1495 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1496 1497 did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId; 1498 did = be32_to_cpu(did); 1499 1500 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, 1501 "GFT_ID cmpl: status:x%x/x%x did:x%x", 1502 ulp_status, ulp_word4, did); 1503 1504 /* Ignore response if link flipped after this request was made */ 1505 if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) { 1506 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1507 "9046 Event tag mismatch. 
Ignoring NS rsp\n"); 1508 goto out; 1509 } 1510 1511 if (ulp_status == IOSTAT_SUCCESS) { 1512 /* Good status, continue checking */ 1513 CTrsp = (struct lpfc_sli_ct_request *)outp->virt; 1514 fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]); 1515 fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]); 1516 1517 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1518 "6432 Process GFT_ID rsp for %08x " 1519 "Data %08x %08x %s %s\n", 1520 did, fc4_data_0, fc4_data_1, 1521 (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ? 1522 "FCP" : " ", 1523 (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ? 1524 "NVME" : " "); 1525 1526 /* Lookup the NPort_ID queried in the GFT_ID and find the 1527 * driver's local node. It's an error if the driver 1528 * doesn't have one. 1529 */ 1530 ndlp = lpfc_findnode_did(vport, did); 1531 if (ndlp) { 1532 /* The bitmask value for FCP and NVME FCP types is 1533 * the same because they are 32 bits distant from 1534 * each other in word0 and word0. 1535 */ 1536 if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) 1537 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 1538 if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) 1539 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 1540 lpfc_printf_vlog(vport, KERN_INFO, 1541 LOG_DISCOVERY | LOG_NODE, 1542 "3064 Setting ndlp x%px, DID x%06x " 1543 "with FC4 x%08x, Data: x%08x x%08x " 1544 "%d\n", 1545 ndlp, did, ndlp->nlp_fc4_type, 1546 FC_TYPE_FCP, FC_TYPE_NVME, 1547 ndlp->nlp_state); 1548 1549 if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE && 1550 ndlp->nlp_fc4_type) { 1551 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1552 lpfc_nlp_set_state(vport, ndlp, 1553 NLP_STE_PRLI_ISSUE); 1554 lpfc_issue_els_prli(vport, ndlp, 0); 1555 } else if (!ndlp->nlp_fc4_type) { 1556 /* If fc4 type is still unknown, then LOGO */ 1557 lpfc_printf_vlog(vport, KERN_INFO, 1558 LOG_DISCOVERY | LOG_NODE, 1559 "6443 Sending LOGO ndlp x%px, " 1560 "DID x%06x with fc4_type: " 1561 "x%08x, state: %d\n", 1562 ndlp, did, ndlp->nlp_fc4_type, 1563 ndlp->nlp_state); 1564 
lpfc_issue_els_logo(vport, ndlp, 0); 1565 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1566 lpfc_nlp_set_state(vport, ndlp, 1567 NLP_STE_NPR_NODE); 1568 } 1569 } 1570 } else 1571 lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY, 1572 "3065 GFT_ID status x%08x\n", ulp_status); 1573 1574 out: 1575 lpfc_ct_free_iocb(phba, cmdiocb); 1576 lpfc_nlp_put(ns_ndlp); 1577 } 1578 1579 static void 1580 lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1581 struct lpfc_iocbq *rspiocb) 1582 { 1583 struct lpfc_vport *vport = cmdiocb->vport; 1584 struct lpfc_dmabuf *inp; 1585 struct lpfc_dmabuf *outp; 1586 struct lpfc_sli_ct_request *CTrsp; 1587 struct lpfc_nodelist *ndlp; 1588 int cmdcode, rc; 1589 uint8_t retry; 1590 uint32_t latt; 1591 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1592 u32 ulp_word4 = get_job_word4(phba, rspiocb); 1593 1594 /* First save ndlp, before we overwrite it */ 1595 ndlp = cmdiocb->ndlp; 1596 1597 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1598 cmdiocb->rsp_iocb = rspiocb; 1599 1600 inp = cmdiocb->cmd_dmabuf; 1601 outp = cmdiocb->rsp_dmabuf; 1602 1603 cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)-> 1604 CommandResponse.bits.CmdRsp); 1605 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 1606 1607 latt = lpfc_els_chk_latt(vport); 1608 1609 /* RFT request completes status <ulp_status> CmdRsp <CmdRsp> */ 1610 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1611 "0209 CT Request completes, latt %d, " 1612 "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n", 1613 latt, ulp_status, 1614 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), 1615 get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag); 1616 1617 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, 1618 "CT cmd cmpl: status:x%x/x%x cmd:x%x", 1619 ulp_status, ulp_word4, cmdcode); 1620 1621 if (ulp_status) { 1622 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1623 "0268 NS cmd x%x Error (x%x x%x)\n", 1624 cmdcode, ulp_status, ulp_word4); 1625 
1626 if (ulp_status == IOSTAT_LOCAL_REJECT && 1627 (((ulp_word4 & IOERR_PARAM_MASK) == 1628 IOERR_SLI_DOWN) || 1629 ((ulp_word4 & IOERR_PARAM_MASK) == 1630 IOERR_SLI_ABORTED))) 1631 goto out; 1632 1633 retry = cmdiocb->retry; 1634 if (retry >= LPFC_MAX_NS_RETRY) 1635 goto out; 1636 1637 retry++; 1638 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1639 "0250 Retrying NS cmd %x\n", cmdcode); 1640 rc = lpfc_ns_cmd(vport, cmdcode, retry, 0); 1641 if (rc == 0) 1642 goto out; 1643 } 1644 1645 out: 1646 /* If the caller wanted a synchronous DA_ID completion, signal the 1647 * wait obj and clear flag to reset the vport. 1648 */ 1649 if (test_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags)) { 1650 if (ndlp->da_id_waitq) 1651 wake_up(ndlp->da_id_waitq); 1652 } 1653 1654 clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags); 1655 1656 lpfc_ct_free_iocb(phba, cmdiocb); 1657 lpfc_nlp_put(ndlp); 1658 return; 1659 } 1660 1661 static void 1662 lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1663 struct lpfc_iocbq *rspiocb) 1664 { 1665 struct lpfc_vport *vport = cmdiocb->vport; 1666 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1667 1668 if (ulp_status == IOSTAT_SUCCESS) { 1669 struct lpfc_dmabuf *outp; 1670 struct lpfc_sli_ct_request *CTrsp; 1671 1672 outp = cmdiocb->rsp_dmabuf; 1673 CTrsp = (struct lpfc_sli_ct_request *)outp->virt; 1674 if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == 1675 SLI_CT_RESPONSE_FS_ACC) 1676 vport->ct_flags |= FC_CT_RFT_ID; 1677 } 1678 lpfc_cmpl_ct(phba, cmdiocb, rspiocb); 1679 return; 1680 } 1681 1682 static void 1683 lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1684 struct lpfc_iocbq *rspiocb) 1685 { 1686 struct lpfc_vport *vport = cmdiocb->vport; 1687 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1688 1689 if (ulp_status == IOSTAT_SUCCESS) { 1690 struct lpfc_dmabuf *outp; 1691 struct lpfc_sli_ct_request *CTrsp; 1692 1693 outp = cmdiocb->rsp_dmabuf; 1694 CTrsp = (struct 
lpfc_sli_ct_request *) outp->virt; 1695 if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == 1696 SLI_CT_RESPONSE_FS_ACC) 1697 vport->ct_flags |= FC_CT_RNN_ID; 1698 } 1699 lpfc_cmpl_ct(phba, cmdiocb, rspiocb); 1700 return; 1701 } 1702 1703 static void 1704 lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1705 struct lpfc_iocbq *rspiocb) 1706 { 1707 struct lpfc_vport *vport = cmdiocb->vport; 1708 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1709 1710 if (ulp_status == IOSTAT_SUCCESS) { 1711 struct lpfc_dmabuf *outp; 1712 struct lpfc_sli_ct_request *CTrsp; 1713 1714 outp = cmdiocb->rsp_dmabuf; 1715 CTrsp = (struct lpfc_sli_ct_request *)outp->virt; 1716 if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == 1717 SLI_CT_RESPONSE_FS_ACC) 1718 vport->ct_flags |= FC_CT_RSPN_ID; 1719 } 1720 lpfc_cmpl_ct(phba, cmdiocb, rspiocb); 1721 return; 1722 } 1723 1724 static void 1725 lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1726 struct lpfc_iocbq *rspiocb) 1727 { 1728 struct lpfc_vport *vport = cmdiocb->vport; 1729 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1730 1731 if (ulp_status == IOSTAT_SUCCESS) { 1732 struct lpfc_dmabuf *outp; 1733 struct lpfc_sli_ct_request *CTrsp; 1734 1735 outp = cmdiocb->rsp_dmabuf; 1736 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 1737 if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == 1738 SLI_CT_RESPONSE_FS_ACC) 1739 vport->ct_flags |= FC_CT_RSNN_NN; 1740 } 1741 lpfc_cmpl_ct(phba, cmdiocb, rspiocb); 1742 return; 1743 } 1744 1745 static void 1746 lpfc_cmpl_ct_cmd_rspni_pni(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1747 struct lpfc_iocbq *rspiocb) 1748 { 1749 struct lpfc_vport *vport; 1750 struct lpfc_dmabuf *outp; 1751 struct lpfc_sli_ct_request *ctrsp; 1752 u32 ulp_status; 1753 1754 vport = cmdiocb->vport; 1755 ulp_status = get_job_ulpstatus(phba, rspiocb); 1756 1757 if (ulp_status == IOSTAT_SUCCESS) { 1758 outp = cmdiocb->rsp_dmabuf; 1759 ctrsp = (struct 
lpfc_sli_ct_request *)outp->virt; 1760 if (be16_to_cpu(ctrsp->CommandResponse.bits.CmdRsp) == 1761 SLI_CT_RESPONSE_FS_ACC) 1762 vport->ct_flags |= FC_CT_RSPNI_PNI; 1763 } 1764 lpfc_cmpl_ct(phba, cmdiocb, rspiocb); 1765 } 1766 1767 static void 1768 lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1769 struct lpfc_iocbq *rspiocb) 1770 { 1771 struct lpfc_vport *vport = cmdiocb->vport; 1772 1773 /* even if it fails we will act as though it succeeded. */ 1774 vport->ct_flags = 0; 1775 lpfc_cmpl_ct(phba, cmdiocb, rspiocb); 1776 return; 1777 } 1778 1779 static void 1780 lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1781 struct lpfc_iocbq *rspiocb) 1782 { 1783 struct lpfc_vport *vport = cmdiocb->vport; 1784 u32 ulp_status = get_job_ulpstatus(phba, rspiocb); 1785 1786 if (ulp_status == IOSTAT_SUCCESS) { 1787 struct lpfc_dmabuf *outp; 1788 struct lpfc_sli_ct_request *CTrsp; 1789 1790 outp = cmdiocb->rsp_dmabuf; 1791 CTrsp = (struct lpfc_sli_ct_request *)outp->virt; 1792 if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == 1793 SLI_CT_RESPONSE_FS_ACC) 1794 vport->ct_flags |= FC_CT_RFF_ID; 1795 } 1796 lpfc_cmpl_ct(phba, cmdiocb, rspiocb); 1797 return; 1798 } 1799 1800 /* 1801 * Although the symbolic port name is thought to be an integer 1802 * as of January 18, 2016, leave it as a string until more of 1803 * the record state becomes defined. 1804 */ 1805 int 1806 lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, 1807 size_t size) 1808 { 1809 int n; 1810 1811 /* 1812 * Use the lpfc board number as the Symbolic Port 1813 * Name object. NPIV is not in play so this integer 1814 * value is sufficient and unique per FC-ID. 
1815 */ 1816 n = scnprintf(symbol, size, "%d", vport->phba->brd_no); 1817 return n; 1818 } 1819 1820 1821 int 1822 lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, 1823 size_t size) 1824 { 1825 char fwrev[FW_REV_STR_SIZE] = {0}; 1826 char tmp[MAXHOSTNAMELEN] = {0}; 1827 1828 memset(symbol, 0, size); 1829 1830 scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName); 1831 if (strlcat(symbol, tmp, size) >= size) 1832 goto buffer_done; 1833 1834 lpfc_decode_firmware_rev(vport->phba, fwrev, 0); 1835 scnprintf(tmp, sizeof(tmp), " FV%s", fwrev); 1836 if (strlcat(symbol, tmp, size) >= size) 1837 goto buffer_done; 1838 1839 scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version); 1840 if (strlcat(symbol, tmp, size) >= size) 1841 goto buffer_done; 1842 1843 scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name); 1844 if (strlcat(symbol, tmp, size) >= size) 1845 goto buffer_done; 1846 1847 /* Note :- OS name is "Linux" */ 1848 scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname); 1849 strlcat(symbol, tmp, size); 1850 1851 buffer_done: 1852 return strnlen(symbol, size); 1853 1854 } 1855 1856 static uint32_t 1857 lpfc_find_map_node(struct lpfc_vport *vport) 1858 { 1859 struct lpfc_nodelist *ndlp, *next_ndlp; 1860 unsigned long iflags; 1861 uint32_t cnt = 0; 1862 1863 spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags); 1864 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1865 if (ndlp->nlp_type & NLP_FABRIC) 1866 continue; 1867 if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) || 1868 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)) 1869 cnt++; 1870 } 1871 spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags); 1872 return cnt; 1873 } 1874 1875 /* 1876 * This routine will return the FC4 Type associated with the CT 1877 * GID_FT command. 
1878 */ 1879 int 1880 lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb) 1881 { 1882 struct lpfc_sli_ct_request *CtReq; 1883 struct lpfc_dmabuf *mp; 1884 uint32_t type; 1885 1886 mp = cmdiocb->cmd_dmabuf; 1887 if (mp == NULL) 1888 return 0; 1889 CtReq = (struct lpfc_sli_ct_request *)mp->virt; 1890 type = (uint32_t)CtReq->un.gid.Fc4Type; 1891 if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME)) 1892 return 0; 1893 return type; 1894 } 1895 1896 /* 1897 * lpfc_ns_cmd 1898 * Description: 1899 * Issue Cmd to NameServer 1900 * SLI_CTNS_GID_FT 1901 * LI_CTNS_RFT_ID 1902 */ 1903 int 1904 lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, 1905 uint8_t retry, uint32_t context) 1906 { 1907 struct lpfc_nodelist * ndlp; 1908 struct lpfc_hba *phba = vport->phba; 1909 struct lpfc_dmabuf *mp, *bmp; 1910 struct lpfc_sli_ct_request *CtReq; 1911 struct ulp_bde64 *bpl; 1912 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 1913 struct lpfc_iocbq *) = NULL; 1914 uint32_t *ptr; 1915 uint32_t rsp_size = 1024; 1916 size_t size; 1917 int rc = 0; 1918 1919 ndlp = lpfc_findnode_did(vport, NameServer_DID); 1920 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { 1921 rc=1; 1922 goto ns_cmd_exit; 1923 } 1924 1925 /* fill in BDEs for command */ 1926 /* Allocate buffer for command payload */ 1927 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1928 if (!mp) { 1929 rc=2; 1930 goto ns_cmd_exit; 1931 } 1932 1933 INIT_LIST_HEAD(&mp->list); 1934 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); 1935 if (!mp->virt) { 1936 rc=3; 1937 goto ns_cmd_free_mp; 1938 } 1939 1940 /* Allocate buffer for Buffer ptr list */ 1941 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1942 if (!bmp) { 1943 rc=4; 1944 goto ns_cmd_free_mpvirt; 1945 } 1946 1947 INIT_LIST_HEAD(&bmp->list); 1948 bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys)); 1949 if (!bmp->virt) { 1950 rc=5; 1951 goto ns_cmd_free_bmp; 1952 } 1953 1954 /* NameServer Req */ 1955 
lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY, 1956 "0236 NameServer Req Data: x%x x%lx x%x x%x\n", 1957 cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt, 1958 context); 1959 1960 bpl = (struct ulp_bde64 *) bmp->virt; 1961 memset(bpl, 0, sizeof(struct ulp_bde64)); 1962 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) ); 1963 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) ); 1964 bpl->tus.f.bdeFlags = 0; 1965 if (cmdcode == SLI_CTNS_GID_FT) 1966 bpl->tus.f.bdeSize = GID_REQUEST_SZ; 1967 else if (cmdcode == SLI_CTNS_GID_PT) 1968 bpl->tus.f.bdeSize = GID_REQUEST_SZ; 1969 else if (cmdcode == SLI_CTNS_GFF_ID) 1970 bpl->tus.f.bdeSize = GFF_REQUEST_SZ; 1971 else if (cmdcode == SLI_CTNS_GFT_ID) 1972 bpl->tus.f.bdeSize = GFT_REQUEST_SZ; 1973 else if (cmdcode == SLI_CTNS_RFT_ID) 1974 bpl->tus.f.bdeSize = RFT_REQUEST_SZ; 1975 else if (cmdcode == SLI_CTNS_RNN_ID) 1976 bpl->tus.f.bdeSize = RNN_REQUEST_SZ; 1977 else if (cmdcode == SLI_CTNS_RSPN_ID) 1978 bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; 1979 else if (cmdcode == SLI_CTNS_RSNN_NN) 1980 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; 1981 else if (cmdcode == SLI_CTNS_RSPNI_PNI) 1982 bpl->tus.f.bdeSize = RSPNI_REQUEST_SZ; 1983 else if (cmdcode == SLI_CTNS_DA_ID) 1984 bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ; 1985 else if (cmdcode == SLI_CTNS_RFF_ID) 1986 bpl->tus.f.bdeSize = RFF_REQUEST_SZ; 1987 else 1988 bpl->tus.f.bdeSize = 0; 1989 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1990 1991 CtReq = (struct lpfc_sli_ct_request *) mp->virt; 1992 memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request)); 1993 CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; 1994 CtReq->RevisionId.bits.InId = 0; 1995 CtReq->FsType = SLI_CT_DIRECTORY_SERVICE; 1996 CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER; 1997 CtReq->CommandResponse.bits.Size = 0; 1998 switch (cmdcode) { 1999 case SLI_CTNS_GID_FT: 2000 CtReq->CommandResponse.bits.CmdRsp = 2001 cpu_to_be16(SLI_CTNS_GID_FT); 2002 CtReq->un.gid.Fc4Type = context; 2003 2004 if (vport->port_state < LPFC_NS_QRY) 2005 
vport->port_state = LPFC_NS_QRY; 2006 lpfc_set_disctmo(vport); 2007 cmpl = lpfc_cmpl_ct_cmd_gid_ft; 2008 rsp_size = FC_MAX_NS_RSP; 2009 break; 2010 2011 case SLI_CTNS_GID_PT: 2012 CtReq->CommandResponse.bits.CmdRsp = 2013 cpu_to_be16(SLI_CTNS_GID_PT); 2014 CtReq->un.gid.PortType = context; 2015 2016 if (vport->port_state < LPFC_NS_QRY) 2017 vport->port_state = LPFC_NS_QRY; 2018 lpfc_set_disctmo(vport); 2019 cmpl = lpfc_cmpl_ct_cmd_gid_pt; 2020 rsp_size = FC_MAX_NS_RSP; 2021 break; 2022 2023 case SLI_CTNS_GFF_ID: 2024 CtReq->CommandResponse.bits.CmdRsp = 2025 cpu_to_be16(SLI_CTNS_GFF_ID); 2026 CtReq->un.gff.PortId = cpu_to_be32(context); 2027 cmpl = lpfc_cmpl_ct_cmd_gff_id; 2028 break; 2029 2030 case SLI_CTNS_GFT_ID: 2031 CtReq->CommandResponse.bits.CmdRsp = 2032 cpu_to_be16(SLI_CTNS_GFT_ID); 2033 CtReq->un.gft.PortId = cpu_to_be32(context); 2034 cmpl = lpfc_cmpl_ct_cmd_gft_id; 2035 break; 2036 2037 case SLI_CTNS_RFT_ID: 2038 vport->ct_flags &= ~FC_CT_RFT_ID; 2039 CtReq->CommandResponse.bits.CmdRsp = 2040 cpu_to_be16(SLI_CTNS_RFT_ID); 2041 CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID); 2042 2043 /* Register Application Services type if vmid enabled. */ 2044 if (phba->cfg_vmid_app_header) 2045 CtReq->un.rft.app_serv_reg = 2046 cpu_to_be32(RFT_APP_SERV_REG); 2047 2048 /* Register FC4 FCP type if enabled. */ 2049 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || 2050 vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP) 2051 CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG); 2052 2053 /* Register NVME type if enabled. */ 2054 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || 2055 vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) 2056 CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG); 2057 2058 ptr = (uint32_t *)CtReq; 2059 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2060 "6433 Issue RFT (%s %s %s): %08x %08x %08x " 2061 "%08x %08x %08x %08x %08x\n", 2062 CtReq->un.rft.fcp_reg ? "FCP" : " ", 2063 CtReq->un.rft.nvme_reg ? 
"NVME" : " ", 2064 CtReq->un.rft.app_serv_reg ? "APPS" : " ", 2065 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), 2066 *(ptr + 4), *(ptr + 5), 2067 *(ptr + 6), *(ptr + 7)); 2068 cmpl = lpfc_cmpl_ct_cmd_rft_id; 2069 break; 2070 2071 case SLI_CTNS_RNN_ID: 2072 vport->ct_flags &= ~FC_CT_RNN_ID; 2073 CtReq->CommandResponse.bits.CmdRsp = 2074 cpu_to_be16(SLI_CTNS_RNN_ID); 2075 CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID); 2076 memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, 2077 sizeof(struct lpfc_name)); 2078 cmpl = lpfc_cmpl_ct_cmd_rnn_id; 2079 break; 2080 2081 case SLI_CTNS_RSPN_ID: 2082 vport->ct_flags &= ~FC_CT_RSPN_ID; 2083 CtReq->CommandResponse.bits.CmdRsp = 2084 cpu_to_be16(SLI_CTNS_RSPN_ID); 2085 CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID); 2086 size = sizeof(CtReq->un.rspn.symbname); 2087 CtReq->un.rspn.len = 2088 lpfc_vport_symbolic_port_name(vport, 2089 CtReq->un.rspn.symbname, size); 2090 cmpl = lpfc_cmpl_ct_cmd_rspn_id; 2091 break; 2092 case SLI_CTNS_RSNN_NN: 2093 vport->ct_flags &= ~FC_CT_RSNN_NN; 2094 CtReq->CommandResponse.bits.CmdRsp = 2095 cpu_to_be16(SLI_CTNS_RSNN_NN); 2096 memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, 2097 sizeof(struct lpfc_name)); 2098 size = sizeof(CtReq->un.rsnn.symbname); 2099 CtReq->un.rsnn.len = 2100 lpfc_vport_symbolic_node_name(vport, 2101 CtReq->un.rsnn.symbname, size); 2102 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; 2103 break; 2104 case SLI_CTNS_RSPNI_PNI: 2105 vport->ct_flags &= ~FC_CT_RSPNI_PNI; 2106 CtReq->CommandResponse.bits.CmdRsp = 2107 cpu_to_be16(SLI_CTNS_RSPNI_PNI); 2108 CtReq->un.rspni.pni = cpu_to_be64(phba->pni); 2109 scnprintf(CtReq->un.rspni.symbname, 2110 sizeof(CtReq->un.rspni.symbname), "OS Host Name::%s", 2111 phba->os_host_name); 2112 CtReq->un.rspni.len = strnlen(CtReq->un.rspni.symbname, 2113 sizeof(CtReq->un.rspni.symbname)); 2114 cmpl = lpfc_cmpl_ct_cmd_rspni_pni; 2115 break; 2116 case SLI_CTNS_DA_ID: 2117 /* Implement DA_ID Nameserver request */ 2118 CtReq->CommandResponse.bits.CmdRsp = 2119 
cpu_to_be16(SLI_CTNS_DA_ID); 2120 CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID); 2121 cmpl = lpfc_cmpl_ct_cmd_da_id; 2122 break; 2123 case SLI_CTNS_RFF_ID: 2124 vport->ct_flags &= ~FC_CT_RFF_ID; 2125 CtReq->CommandResponse.bits.CmdRsp = 2126 cpu_to_be16(SLI_CTNS_RFF_ID); 2127 CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); 2128 CtReq->un.rff.fbits = FC4_FEATURE_INIT; 2129 2130 /* The driver always supports FC_TYPE_FCP. However, the 2131 * caller can specify NVME (type x28) as well. But only 2132 * these that FC4 type is supported. 2133 */ 2134 if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 2135 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && 2136 (context == FC_TYPE_NVME)) { 2137 if ((vport == phba->pport) && phba->nvmet_support) { 2138 CtReq->un.rff.fbits = (FC4_FEATURE_TARGET | 2139 FC4_FEATURE_NVME_DISC); 2140 lpfc_nvmet_update_targetport(phba); 2141 } else { 2142 lpfc_nvme_update_localport(vport); 2143 } 2144 CtReq->un.rff.type_code = context; 2145 2146 } else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 2147 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) && 2148 (context == FC_TYPE_FCP)) 2149 CtReq->un.rff.type_code = context; 2150 2151 else 2152 goto ns_cmd_free_bmpvirt; 2153 2154 ptr = (uint32_t *)CtReq; 2155 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2156 "6434 Issue RFF (%s): %08x %08x %08x %08x " 2157 "%08x %08x %08x %08x\n", 2158 (context == FC_TYPE_NVME) ? "NVME" : "FCP", 2159 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), 2160 *(ptr + 4), *(ptr + 5), 2161 *(ptr + 6), *(ptr + 7)); 2162 cmpl = lpfc_cmpl_ct_cmd_rff_id; 2163 break; 2164 } 2165 /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count 2166 * to hold ndlp reference for the corresponding callback function. 
	 */
	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
		/* On success, The cmpl function will free the buffers */
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
				      "Issue CT cmd: cmd:x%x did:x%x",
				      cmdcode, ndlp->nlp_DID, 0);
		return 0;
	}
	rc = 6;

ns_cmd_free_bmpvirt:
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
	kfree(bmp);
ns_cmd_free_mpvirt:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
ns_cmd_free_mp:
	kfree(mp);
ns_cmd_exit:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0266 Issue NameServer Req x%x err %d Data: x%lx "
			 "x%x\n",
			 cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
	return 1;
}

/**
 * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
 * @phba: Pointer to HBA context object.
 * @mask: Initial port attributes mask
 *
 * This function checks to see if any vports have deferred their FDMI RPRT.
 * A vports RPRT may be deferred if it is issued before the primary ports
 * RHBA completes.
 */
static void
lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int i;

	/* The physical port's RHBA has completed; deferred RPRTs may go now */
	set_bit(HBA_RHBA_CMPL, &phba->hba_flag);
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			vport = vports[i];
			/* FDMI node is always looked up on the physical port */
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				continue;
			if (vport->ct_flags & FC_CT_RPRT_DEFER) {
				vport->ct_flags &= ~FC_CT_RPRT_DEFER;
				vport->fdmi_port_mask = mask;
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * This function to handle the completion of a driver initiated FDMI
 * CT command issued during discovery.
 */
static void
lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
	struct lpfc_sli_ct_request *CTcmd = inp->virt;
	struct lpfc_sli_ct_request *CTrsp = outp->virt;
	__be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
	__be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
	struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
	uint32_t latt, cmd, err;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);

	latt = lpfc_els_chk_latt(vport);
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			      "FDMI cmpl: status:x%x/x%x latt:%d",
			      ulp_status, ulp_word4, latt);

	if (latt || ulp_status) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0229 FDMI cmd %04x failed, latt = %d "
				 "ulp_status: (x%x/x%x), sli_flag x%x\n",
				 be16_to_cpu(fdmi_cmd), latt, ulp_status,
				 ulp_word4, phba->sli.sli_flag);

		/* Look for a retryable error */
		if (ulp_status == IOSTAT_LOCAL_REJECT) {
			switch ((ulp_word4 & IOERR_PARAM_MASK)) {
			case IOERR_ABORT_IN_PROGRESS:
			case IOERR_SEQUENCE_TIMEOUT:
			case IOERR_ILLEGAL_FRAME:
			case IOERR_NO_RESOURCES:
			case IOERR_ILLEGAL_COMMAND:
				cmdiocb->retry++;
				if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY)
					break;

				/* Retry the same FDMI command */
				err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING,
							  cmdiocb, 0);
				if (err == IOCB_ERROR)
					break;
				/* iocb re-issued; keep buffers and ndlp ref */
				return;
			default:
				break;
			}
		}
	}

	free_ndlp = cmdiocb->ndlp;
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(free_ndlp);

	if (ulp_status != IOSTAT_SUCCESS)
		return;

	ndlp = lpfc_findnode_did(vport, FDMI_DID);
	if (!ndlp)
		return;

	/* Check for a CT LS_RJT response */
	cmd = be16_to_cpu(fdmi_cmd);
	if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) {
		/* Log FDMI reject */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
				 "0220 FDMI cmd FS_RJT Data: x%x", cmd);

		/* Should we fallback to FDMI-2 / FDMI-1 ? */
		switch (cmd) {
		case SLI_MGMT_RHBA:
			if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
				/* Fallback to FDMI-1 for HBA attributes */
				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;

				/* If HBA attributes are FDMI1, so should
				 * port attributes be for consistency.
				 */
				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
				/* Start over */
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
			}
			return;

		case SLI_MGMT_RPRT:
			if (vport->port_type != LPFC_PHYSICAL_PORT) {
				ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
				if (!ndlp)
					return;
			}
			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
				/* Fallback to FDMI-1 */
				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
				/* Start over */
				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
				return;
			}
			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
				/* Retry the same command */
				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
			}
			return;

		case SLI_MGMT_RPA:
			/* No retry on Vendor, RPA only done on physical port */
			if (phba->link_flag & LS_CT_VEN_RPA) {
				phba->link_flag &= ~LS_CT_VEN_RPA;
				if (phba->cmf_active_mode == LPFC_CFG_OFF)
					return;
				lpfc_printf_log(phba, KERN_WARNING,
						LOG_DISCOVERY | LOG_ELS,
						"6460 VEN FDMI RPA RJT\n");
				return;
			}
			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
				/* Fallback to FDMI-1 */
				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
				/* Start over */
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
				return;
			}
			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
				/* Retry the same command */
				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
			}
			return;
		}
	}

	/*
	 * On success, need to cycle thru FDMI registration for discovery
	 * DHBA -> DPRT -> RHBA -> RPA (physical port)
	 * DPRT -> RPRT (vports)
	 */
	switch (cmd) {
	case SLI_MGMT_RHBA:
		/* Check for any RPRTs deferred till after RHBA completes */
		lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);

		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
		break;

	case SLI_MGMT_DHBA:
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
		break;

	case SLI_MGMT_DPRT:
		if (vport->port_type == LPFC_PHYSICAL_PORT) {
			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
		} else {
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				return;

			/* Only issue a RPRT for the vport if the RHBA
			 * for the physical port completes successfully.
			 * We may have to defer the RPRT accordingly.
			 */
			if (test_bit(HBA_RHBA_CMPL, &phba->hba_flag)) {
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
			} else {
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_DISCOVERY,
						 "6078 RPRT deferred\n");
				vport->ct_flags |= FC_CT_RPRT_DEFER;
			}
		}
		break;
	case SLI_MGMT_RPA:
		if (vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.pc_sli4_params.mi_ver) {
			/* mi is only for the phyical port, no vports */
			if (phba->link_flag & LS_CT_VEN_RPA) {
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_DISCOVERY | LOG_ELS |
						 LOG_CGN_MGMT,
						 "6449 VEN RPA FDMI Success\n");
				phba->link_flag &= ~LS_CT_VEN_RPA;
				break;
			}

			lpfc_printf_log(phba, KERN_INFO,
					LOG_DISCOVERY | LOG_CGN_MGMT,
					"6210 Issue Vendor MI FDMI %x\n",
					phba->sli4_hba.pc_sli4_params.mi_ver);

			/* CGN is only for the physical port, no vports */
			if (lpfc_fdmi_cmd(vport, ndlp, cmd,
					  LPFC_FDMI_VENDOR_ATTR_mi) == 0)
				phba->link_flag |= LS_CT_VEN_RPA;
			lpfc_printf_log(phba, KERN_INFO,
					LOG_DISCOVERY | LOG_ELS,
					"6458 Send MI FDMI:%x Flag x%x\n",
					phba->sli4_hba.pc_sli4_params.mi_ver,
					phba->link_flag);
		} else {
			lpfc_printf_log(phba, KERN_INFO,
					LOG_DISCOVERY | LOG_ELS,
					"6459 No FDMI VEN MI support - "
					"RPA Success\n");
		}
		break;
	}
	return;
}


/**
 * lpfc_fdmi_change_check - Check for changed FDMI parameters
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Check how many mapped NPorts we are connected to
 * Check if our hostname changed
 * Called from hbeat timeout routine to check if any FDMI parameters
 * changed. If so, re-register those Attributes.
 */
void
lpfc_fdmi_change_check(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	uint16_t cnt;

	if (!lpfc_is_link_up(phba))
		return;

	/* Must be connected to a Fabric */
	if (!test_bit(FC_FABRIC, &vport->fc_flag))
		return;

	ndlp = lpfc_findnode_did(vport, FDMI_DID);
	if (!ndlp)
		return;

	/* Check if system hostname changed */
	if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
		memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
		scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
			  init_utsname()->nodename);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);

		/* Since this effects multiple HBA and PORT attributes, we need
		 * de-register and go thru the whole FDMI registration cycle.
		 * DHBA -> DPRT -> RHBA -> RPA (physical port)
		 * DPRT -> RPRT (vports)
		 */
		if (vport->port_type == LPFC_PHYSICAL_PORT) {
			/* For extra Vendor RPA */
			phba->link_flag &= ~LS_CT_VEN_RPA;
			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
		} else {
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				return;
			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
		}

		/* Since this code path registers all the port attributes
		 * we can just return without further checking.
		 */
		return;
	}

	if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
		return;

	/* Check if the number of mapped NPorts changed */
	cnt = lpfc_find_map_node(vport);
	if (cnt == vport->fdmi_num_disc)
		return;

	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
			      LPFC_FDMI_PORT_ATTR_num_disc);
	} else {
		ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
		if (!ndlp)
			return;
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
			      LPFC_FDMI_PORT_ATTR_num_disc);
	}
}

/* Emit a Type/Len/Value FDMI attribute holding a single 32-bit value.
 * Returns the number of bytes written into @attr.
 */
static inline int
lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
{
	struct lpfc_fdmi_attr_u32 *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	ae->value_u32 = cpu_to_be32(attrval);

	return size;
}

/* Emit a Type/Len/Value FDMI attribute holding a single WWN.
 * Returns the number of bytes written into @attr.
 */
static inline int
lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
{
	struct lpfc_fdmi_attr_wwn *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	/* WWN's assumed to be bytestreams - Big Endian presentation */
	memcpy(ae->name, wwn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));

	return size;
}

/* Emit a Type/Len/Value FDMI attribute holding a WWNN/WWPN pair.
 * Returns the number of bytes written into @attr.
 */
static inline int
lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
			   struct lpfc_name *wwnn, struct lpfc_name *wwpn)
{
	struct lpfc_fdmi_attr_fullwwn *ae = attr;
	u8 *nname = ae->nname;
	u8 *pname = ae->pname;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	/* WWN's assumed to be bytestreams - Big Endian presentation */
	memcpy(nname, wwnn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
	memcpy(pname, wwpn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));

	return size;
}

/* Emit a Type/Len/Value FDMI attribute holding a string, padded to a
 * 32-bit boundary. Returns the number of bytes written into @attr.
 */
static inline int
lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
{
	struct lpfc_fdmi_attr_string *ae = attr;
	int len, size;

	/*
	 * We are trusting the caller that if a fdmi string field
	 * is capped at 64 bytes, the caller passes in a string of
	 * 64 bytes or less.
	 */

	strscpy(ae->value_string, attrstring, sizeof(ae->value_string));
	len = strnlen(ae->value_string, sizeof(ae->value_string));
	/* round string length to a 32bit boundary */
	len += (len & 3) ? (4 - (len & 3)) : 4;
	/* size is Type/Len (4 bytes) plus string length */
	size = FOURBYTES + len;

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);

	return size;
}

/* Bitfields for FC4 Types that can be reported */
#define ATTR_FC4_CT	0x00000001
#define ATTR_FC4_FCP	0x00000002
#define ATTR_FC4_NVME	0x00000004

/* Emit a Type/Len/Value FDMI attribute describing supported/active FC4
 * types. Each FC4 type is one bit in a 256-bit map; the bytes below
 * correspond to FC type values 0x08 (FCP), 0x20 (CT) and 0x28 (NVME).
 * Returns the number of bytes written into @attr.
 */
static inline int
lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
{
	struct lpfc_fdmi_attr_fc4types *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);

	if (typemask & ATTR_FC4_FCP)
		ae->value_types[2] = 0x01; /* Type 0x8 - FCP */

	if (typemask & ATTR_FC4_CT)
		ae->value_types[7] = 0x01; /* Type 0x20 - CT */

	if (typemask & ATTR_FC4_NVME)
		ae->value_types[6] = 0x01; /* Type 0x28 - NVME */

	return size;
}

/* Routines for all individual HBA attributes */
static int
lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
				      &vport->fc_sparam.nodeName);
}

static int
lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
{
	/* This string MUST be consistent with other FC platforms
	 * supported by
	 * Broadcom.
	 */
	return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
					 "Emulex Corporation");
}

static int
lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
					 phba->SerialNumber);
}

static int
lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
					 phba->ModelName);
}

static int
lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
					 phba->ModelDesc);
}

static int
lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	lpfc_vpd_t *vp = &phba->vpd;
	char buf[16] = { 0 };

	/* Hardware version is reported as the BIU revision in hex */
	snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);

	return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
}

static int
lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
					 lpfc_release_version);
}

static int
lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	char buf[64] = { 0 };

	/* On SLI4 the option ROM version is the decoded firmware revision */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_decode_firmware_rev(phba, buf, 1);

		return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
						 buf);
	}

	return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
					 phba->OptionROMVersion);
}

static int
lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	char buf[64] = { 0 };

	lpfc_decode_firmware_rev(phba, buf, 1);

	return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
}

static int
lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	snprintf(buf, sizeof(buf), "%s %s %s",
		 init_utsname()->sysname,
		 init_utsname()->release,
		 init_utsname()->version);

	return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
}

static int
lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
				      LPFC_MAX_CT_SIZE);
}

static int
lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));

	return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
}

static int
lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
}

static int
lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
{
	/* Each driver instance corresponds to a single port */
	return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
}

static int
lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
				      &vport->fabric_nodename);
}

static int
lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
					 phba->BIOSVersion);
}

static int
lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
{
	/* Driver doesn't have access to this information */
	return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
}

static int
lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
}

/*
 * Routines for all individual PORT attributes
 */

static int
lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 fc4types;

	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);

	/* Check to see if Firmware supports NVME and on physical port */
	if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
	    phba->sli4_hba.pc_sli4_params.nvme)
		fc4types |= ATTR_FC4_NVME;

	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
					   fc4types);
}

static int
lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 speeds = 0;
	u32 tcfg;
	u8 i, cnt;

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		/* Count configured trunk lanes; trunking multiplies the
		 * per-lane link max transfer (lmt) speeds reported below.
		 */
		cnt = 0;
		if (phba->sli_rev == LPFC_SLI_REV4) {
			tcfg = phba->sli4_hba.conf_trunk;
			for (i = 0; i < 4; i++, tcfg >>= 1)
				if (tcfg & 1)
					cnt++;
		}

		if (cnt > 2) { /* 4 lane trunk group */
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_64GFC;
		} else if (cnt) { /* 2 lane trunk group */
			if (phba->lmt & LMT_128Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_64GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_32GFC;
		} else {
			if (phba->lmt & LMT_256Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_128Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_64GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_32GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_16GFC;
			if (phba->lmt & LMT_10Gb)
				speeds |= HBA_PORTSPEED_10GFC;
			if (phba->lmt & LMT_8Gb)
				speeds |= HBA_PORTSPEED_8GFC;
			if (phba->lmt & LMT_4Gb)
				speeds |= HBA_PORTSPEED_4GFC;
			if (phba->lmt & LMT_2Gb)
				speeds |= HBA_PORTSPEED_2GFC;
			if (phba->lmt & LMT_1Gb)
				speeds |= HBA_PORTSPEED_1GFC;
		}
	} else {
		/* FCoE links support only one speed */
		switch (phba->fc_linkspeed) {
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			speeds = HBA_PORTSPEED_10GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			speeds = HBA_PORTSPEED_25GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			speeds = HBA_PORTSPEED_40GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			speeds = HBA_PORTSPEED_100GE;
			break;
		}
	}

	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
}

static int
lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 speeds = 0;

	/* Translate the current negotiated link speed into the FC-GS
	 * port speed bit for either native FC or FCoE links.
	 */
	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			speeds = HBA_PORTSPEED_1GFC;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			speeds = HBA_PORTSPEED_2GFC;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			speeds = HBA_PORTSPEED_4GFC;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			speeds = HBA_PORTSPEED_8GFC;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			speeds = HBA_PORTSPEED_10GFC;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			speeds = HBA_PORTSPEED_16GFC;
			break;
		case LPFC_LINK_SPEED_32GHZ:
			speeds = HBA_PORTSPEED_32GFC;
			break;
		case LPFC_LINK_SPEED_64GHZ:
			speeds = HBA_PORTSPEED_64GFC;
			break;
		case LPFC_LINK_SPEED_128GHZ:
			speeds = HBA_PORTSPEED_128GFC;
			break;
		case LPFC_LINK_SPEED_256GHZ:
			speeds = HBA_PORTSPEED_256GFC;
			break;
		default:
			speeds = HBA_PORTSPEED_UNKNOWN;
			break;
		}
	} else {
		switch (phba->fc_linkspeed) {
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			speeds = HBA_PORTSPEED_10GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			speeds = HBA_PORTSPEED_25GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			speeds = HBA_PORTSPEED_40GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			speeds = HBA_PORTSPEED_100GE;
			break;
		default:
			speeds = HBA_PORTSPEED_UNKNOWN;
			break;
		}
	}

	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
}

static int
lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
{
	struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;

	/* BB receive size is split across two service-parameter bytes */
	return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
				      (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				      (uint32_t)hsp->cmn.bbRcvSizeLsb);
}

static int
lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	char buf[64] = { 0 };

	snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
		 shost->host_no);

	return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
}

static int
lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
{
	char buf[64] = { 0 };

	scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);

	return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
}

static int
lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
				      &vport->fc_sparam.nodeName);
}

static int
lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
				      &vport->fc_sparam.portName);
}

static int
lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));

	return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
}

static int
lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
				      (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
				      LPFC_FDMI_PORTTYPE_NLPORT :
				      LPFC_FDMI_PORTTYPE_NPORT);
}

static int
lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
				      FC_COS_CLASS2 | FC_COS_CLASS3);
}

static int
lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
				      &vport->fabric_portname);
}

static int
lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 fc4types;

	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);

	/* Check to see if NVME is configured or not */
	if (vport == phba->pport &&
	    phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		fc4types |= ATTR_FC4_NVME;

	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
					   fc4types);
}

static int
lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
				      LPFC_FDMI_PORTSTATE_ONLINE);
}

static int
lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
{
	/* Cache the current mapped-node count for change detection in
	 * lpfc_fdmi_change_check().
	 */
	vport->fdmi_num_disc = lpfc_find_map_node(vport);

	return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
				      vport->fdmi_num_disc);
}

static int
lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
}

static int
lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
					 "Smart SAN Initiator");
}

static int
lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
					  &vport->fc_sparam.nodeName,
					  &vport->fc_sparam.portName);
}

static int
lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
					 "Smart SAN Version 2.0");
}

static int
lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
					 phba->ModelName);
}

static int
lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
{
	/* SRIOV (type 3) is not supported */

	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
				      (vport->vpi) ?
2 /* NPIV */ : 1 /* Physical */); 3108 } 3109 3110 static int 3111 lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr) 3112 { 3113 return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0); 3114 } 3115 3116 static int 3117 lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr) 3118 { 3119 return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1); 3120 } 3121 3122 static int 3123 lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr) 3124 { 3125 struct lpfc_hba *phba = vport->phba; 3126 char buf[32] = { 0 }; 3127 3128 sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver); 3129 3130 return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf); 3131 } 3132 3133 /* RHBA attribute jump table */ 3134 static int (*lpfc_fdmi_hba_action[]) 3135 (struct lpfc_vport *vport, void *attrbuf) = { 3136 /* Action routine Mask bit Attribute type */ 3137 lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */ 3138 lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */ 3139 lpfc_fdmi_hba_attr_sn, /* bit2 RHBA_SERIAL_NUMBER */ 3140 lpfc_fdmi_hba_attr_model, /* bit3 RHBA_MODEL */ 3141 lpfc_fdmi_hba_attr_description, /* bit4 RHBA_MODEL_DESCRIPTION */ 3142 lpfc_fdmi_hba_attr_hdw_ver, /* bit5 RHBA_HARDWARE_VERSION */ 3143 lpfc_fdmi_hba_attr_drvr_ver, /* bit6 RHBA_DRIVER_VERSION */ 3144 lpfc_fdmi_hba_attr_rom_ver, /* bit7 RHBA_OPTION_ROM_VERSION */ 3145 lpfc_fdmi_hba_attr_fmw_ver, /* bit8 RHBA_FIRMWARE_VERSION */ 3146 lpfc_fdmi_hba_attr_os_ver, /* bit9 RHBA_OS_NAME_VERSION */ 3147 lpfc_fdmi_hba_attr_ct_len, /* bit10 RHBA_MAX_CT_PAYLOAD_LEN */ 3148 lpfc_fdmi_hba_attr_symbolic_name, /* bit11 RHBA_SYM_NODENAME */ 3149 lpfc_fdmi_hba_attr_vendor_info, /* bit12 RHBA_VENDOR_INFO */ 3150 lpfc_fdmi_hba_attr_num_ports, /* bit13 RHBA_NUM_PORTS */ 3151 lpfc_fdmi_hba_attr_fabric_wwnn, /* bit14 RHBA_FABRIC_WWNN */ 3152 lpfc_fdmi_hba_attr_bios_ver, /* bit15 RHBA_BIOS_VERSION */ 3153 lpfc_fdmi_hba_attr_bios_state, /* bit16 RHBA_BIOS_STATE */ 3154 
lpfc_fdmi_hba_attr_vendor_id, /* bit17 RHBA_VENDOR_ID */ 3155 }; 3156 3157 /* RPA / RPRT attribute jump table */ 3158 static int (*lpfc_fdmi_port_action[]) 3159 (struct lpfc_vport *vport, void *attrbuf) = { 3160 /* Action routine Mask bit Attribute type */ 3161 lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */ 3162 lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */ 3163 lpfc_fdmi_port_attr_speed, /* bit2 RPRT_PORT_SPEED */ 3164 lpfc_fdmi_port_attr_max_frame, /* bit3 RPRT_MAX_FRAME_SIZE */ 3165 lpfc_fdmi_port_attr_os_devname, /* bit4 RPRT_OS_DEVICE_NAME */ 3166 lpfc_fdmi_port_attr_host_name, /* bit5 RPRT_HOST_NAME */ 3167 lpfc_fdmi_port_attr_wwnn, /* bit6 RPRT_NODENAME */ 3168 lpfc_fdmi_port_attr_wwpn, /* bit7 RPRT_PORTNAME */ 3169 lpfc_fdmi_port_attr_symbolic_name, /* bit8 RPRT_SYM_PORTNAME */ 3170 lpfc_fdmi_port_attr_port_type, /* bit9 RPRT_PORT_TYPE */ 3171 lpfc_fdmi_port_attr_class, /* bit10 RPRT_SUPPORTED_CLASS */ 3172 lpfc_fdmi_port_attr_fabric_wwpn, /* bit11 RPRT_FABRICNAME */ 3173 lpfc_fdmi_port_attr_active_fc4type, /* bit12 RPRT_ACTIVE_FC4_TYPES */ 3174 lpfc_fdmi_port_attr_port_state, /* bit13 RPRT_PORT_STATE */ 3175 lpfc_fdmi_port_attr_num_disc, /* bit14 RPRT_DISC_PORT */ 3176 lpfc_fdmi_port_attr_nportid, /* bit15 RPRT_PORT_ID */ 3177 lpfc_fdmi_smart_attr_service, /* bit16 RPRT_SMART_SERVICE */ 3178 lpfc_fdmi_smart_attr_guid, /* bit17 RPRT_SMART_GUID */ 3179 lpfc_fdmi_smart_attr_version, /* bit18 RPRT_SMART_VERSION */ 3180 lpfc_fdmi_smart_attr_model, /* bit19 RPRT_SMART_MODEL */ 3181 lpfc_fdmi_smart_attr_port_info, /* bit20 RPRT_SMART_PORT_INFO */ 3182 lpfc_fdmi_smart_attr_qos, /* bit21 RPRT_SMART_QOS */ 3183 lpfc_fdmi_smart_attr_security, /* bit22 RPRT_SMART_SECURITY */ 3184 lpfc_fdmi_vendor_attr_mi, /* bit23 RPRT_VENDOR_MI */ 3185 }; 3186 3187 /** 3188 * lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort 3189 * @vport: pointer to a host virtual N_Port data structure. 
3190 * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID) 3191 * @cmdcode: FDMI command to send 3192 * @new_mask: Mask of HBA or PORT Attributes to send 3193 * 3194 * Builds and sends a FDMI command using the CT subsystem. 3195 */ 3196 int 3197 lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3198 int cmdcode, uint32_t new_mask) 3199 { 3200 struct lpfc_hba *phba = vport->phba; 3201 struct lpfc_dmabuf *rq, *rsp; 3202 struct lpfc_sli_ct_request *CtReq; 3203 struct ulp_bde64_le *bde; 3204 uint32_t bit_pos; 3205 uint32_t size, addsz; 3206 uint32_t rsp_size; 3207 uint32_t mask; 3208 struct lpfc_fdmi_reg_hba *rh; 3209 struct lpfc_fdmi_port_entry *pe; 3210 struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL; 3211 struct lpfc_fdmi_attr_block *ab = NULL; 3212 int (*func)(struct lpfc_vport *vport, void *attrbuf); 3213 void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3214 struct lpfc_iocbq *rspiocb); 3215 3216 if (!ndlp) 3217 return 0; 3218 3219 cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */ 3220 3221 /* fill in BDEs for command */ 3222 /* Allocate buffer for command payload */ 3223 rq = kmalloc(sizeof(*rq), GFP_KERNEL); 3224 if (!rq) 3225 goto fdmi_cmd_exit; 3226 3227 rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys); 3228 if (!rq->virt) 3229 goto fdmi_cmd_free_rq; 3230 3231 /* Allocate buffer for Buffer ptr list */ 3232 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); 3233 if (!rsp) 3234 goto fdmi_cmd_free_rqvirt; 3235 3236 rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys); 3237 if (!rsp->virt) 3238 goto fdmi_cmd_free_rsp; 3239 3240 INIT_LIST_HEAD(&rq->list); 3241 INIT_LIST_HEAD(&rsp->list); 3242 3243 /* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */ 3244 memset(rq->virt, 0, LPFC_BPL_SIZE); 3245 rsp_size = LPFC_BPL_SIZE; 3246 3247 /* FDMI request */ 3248 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3249 "0218 FDMI Request x%x mask x%x Data: x%x x%lx x%x\n", 3250 cmdcode, new_mask, vport->fdmi_port_mask, 3251 
vport->fc_flag, vport->port_state); 3252 3253 CtReq = (struct lpfc_sli_ct_request *)rq->virt; 3254 3255 /* First populate the CT_IU preamble */ 3256 CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; 3257 CtReq->RevisionId.bits.InId = 0; 3258 3259 CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE; 3260 CtReq->FsSubType = SLI_CT_FDMI_Subtypes; 3261 3262 CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode); 3263 3264 size = 0; 3265 3266 /* Next fill in the specific FDMI cmd information */ 3267 switch (cmdcode) { 3268 case SLI_MGMT_RHAT: 3269 case SLI_MGMT_RHBA: 3270 rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un; 3271 /* HBA Identifier */ 3272 memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName, 3273 sizeof(struct lpfc_name)); 3274 size += sizeof(struct lpfc_fdmi_hba_ident); 3275 3276 if (cmdcode == SLI_MGMT_RHBA) { 3277 /* Registered Port List */ 3278 /* One entry (port) per adapter */ 3279 rh->rpl.EntryCnt = cpu_to_be32(1); 3280 memcpy(&rh->rpl.pe.PortName, 3281 &phba->pport->fc_sparam.portName, 3282 sizeof(struct lpfc_name)); 3283 size += sizeof(struct lpfc_fdmi_reg_port_list); 3284 } 3285 3286 ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size); 3287 ab->EntryCnt = 0; 3288 size += FOURBYTES; /* add length of EntryCnt field */ 3289 3290 bit_pos = 0; 3291 if (new_mask) 3292 mask = new_mask; 3293 else 3294 mask = vport->fdmi_hba_mask; 3295 3296 /* Mask will dictate what attributes to build in the request */ 3297 while (mask) { 3298 if (mask & 0x1) { 3299 func = lpfc_fdmi_hba_action[bit_pos]; 3300 addsz = func(vport, ((uint8_t *)rh + size)); 3301 if (addsz) { 3302 ab->EntryCnt++; 3303 size += addsz; 3304 } 3305 /* check if another attribute fits */ 3306 if ((size + FDMI_MAX_ATTRLEN) > 3307 (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) 3308 goto hba_out; 3309 } 3310 mask = mask >> 1; 3311 bit_pos++; 3312 } 3313 hba_out: 3314 ab->EntryCnt = cpu_to_be32(ab->EntryCnt); 3315 /* Total size */ 3316 size += GID_REQUEST_SZ - 4; 3317 break; 3318 3319 case SLI_MGMT_RPRT: 3320 if 
(vport->port_type != LPFC_PHYSICAL_PORT) { 3321 ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); 3322 if (!ndlp) 3323 return 0; 3324 } 3325 fallthrough; 3326 case SLI_MGMT_RPA: 3327 /* Store base ptr right after preamble */ 3328 base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un; 3329 3330 if (cmdcode == SLI_MGMT_RPRT) { 3331 rh = (struct lpfc_fdmi_reg_hba *)base; 3332 /* HBA Identifier */ 3333 memcpy(&rh->hi.PortName, 3334 &phba->pport->fc_sparam.portName, 3335 sizeof(struct lpfc_name)); 3336 pab = (struct lpfc_fdmi_reg_portattr *) 3337 ((uint8_t *)base + sizeof(struct lpfc_name)); 3338 size += sizeof(struct lpfc_name); 3339 } else { 3340 pab = base; 3341 } 3342 3343 memcpy((uint8_t *)&pab->PortName, 3344 (uint8_t *)&vport->fc_sparam.portName, 3345 sizeof(struct lpfc_name)); 3346 pab->ab.EntryCnt = 0; 3347 /* add length of name and EntryCnt field */ 3348 size += sizeof(struct lpfc_name) + FOURBYTES; 3349 3350 bit_pos = 0; 3351 if (new_mask) 3352 mask = new_mask; 3353 else 3354 mask = vport->fdmi_port_mask; 3355 3356 /* Mask will dictate what attributes to build in the request */ 3357 while (mask) { 3358 if (mask & 0x1) { 3359 func = lpfc_fdmi_port_action[bit_pos]; 3360 addsz = func(vport, ((uint8_t *)base + size)); 3361 if (addsz) { 3362 pab->ab.EntryCnt++; 3363 size += addsz; 3364 } 3365 /* check if another attribute fits */ 3366 if ((size + FDMI_MAX_ATTRLEN) > 3367 (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE)) 3368 goto port_out; 3369 } 3370 mask = mask >> 1; 3371 bit_pos++; 3372 } 3373 port_out: 3374 pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt); 3375 size += GID_REQUEST_SZ - 4; 3376 break; 3377 3378 case SLI_MGMT_GHAT: 3379 case SLI_MGMT_GRPL: 3380 rsp_size = FC_MAX_NS_RSP; 3381 fallthrough; 3382 case SLI_MGMT_DHBA: 3383 case SLI_MGMT_DHAT: 3384 pe = (struct lpfc_fdmi_port_entry *)&CtReq->un; 3385 memcpy((uint8_t *)&pe->PortName, 3386 (uint8_t *)&vport->fc_sparam.portName, 3387 sizeof(struct lpfc_name)); 3388 size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name); 3389 
break; 3390 3391 case SLI_MGMT_GPAT: 3392 case SLI_MGMT_GPAS: 3393 rsp_size = FC_MAX_NS_RSP; 3394 fallthrough; 3395 case SLI_MGMT_DPRT: 3396 if (vport->port_type != LPFC_PHYSICAL_PORT) { 3397 ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); 3398 if (!ndlp) 3399 return 0; 3400 } 3401 fallthrough; 3402 case SLI_MGMT_DPA: 3403 pe = (struct lpfc_fdmi_port_entry *)&CtReq->un; 3404 memcpy((uint8_t *)&pe->PortName, 3405 (uint8_t *)&vport->fc_sparam.portName, 3406 sizeof(struct lpfc_name)); 3407 size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name); 3408 break; 3409 case SLI_MGMT_GRHL: 3410 size = GID_REQUEST_SZ - 4; 3411 break; 3412 default: 3413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, 3414 "0298 FDMI cmdcode x%x not supported\n", 3415 cmdcode); 3416 goto fdmi_cmd_free_rspvirt; 3417 } 3418 CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size); 3419 3420 bde = (struct ulp_bde64_le *)rsp->virt; 3421 bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys)); 3422 bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys)); 3423 bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 << 3424 ULP_BDE64_TYPE_SHIFT); 3425 bde->type_size |= cpu_to_le32(size); 3426 3427 /* 3428 * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count 3429 * to hold ndlp reference for the corresponding callback function. 3430 */ 3431 if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0)) 3432 return 0; 3433 3434 fdmi_cmd_free_rspvirt: 3435 lpfc_mbuf_free(phba, rsp->virt, rsp->phys); 3436 fdmi_cmd_free_rsp: 3437 kfree(rsp); 3438 fdmi_cmd_free_rqvirt: 3439 lpfc_mbuf_free(phba, rq->virt, rq->phys); 3440 fdmi_cmd_free_rq: 3441 kfree(rq); 3442 fdmi_cmd_exit: 3443 /* Issue FDMI request failed */ 3444 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3445 "0244 Issue FDMI request failed Data: x%x\n", 3446 cmdcode); 3447 return 1; 3448 } 3449 3450 /** 3451 * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer. 3452 * @t: Context object of the timer. 
 *
 * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
 * the worker thread.
 **/
void
lpfc_delayed_disc_tmo(struct timer_list *t)
{
	struct lpfc_vport *vport = timer_container_of(vport, t,
						      delayed_disc_tmo);
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	spin_lock_irqsave(&vport->work_port_lock, iflag);
	tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

	/* Only the caller that actually posted the event wakes the worker,
	 * so a pending event is not signalled twice.
	 */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
 * handle delayed discovery.
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This function starts nport discovery of the vport if (and only if) the
 * FC_DISC_DELAYED flag was set; the flag is cleared atomically.
 **/
void
lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
{
	if (!test_and_clear_bit(FC_DISC_DELAYED, &vport->fc_flag))
		return;

	lpfc_do_scr_ns_plogi(vport->phba, vport);
}

/*
 * Decode the adapter firmware revision words into a printable string.
 * @fwrevision: output buffer (FW_REV_STR_SIZE bytes for the SLI4 path;
 *              callers must size it for the sprintf paths as well)
 * @flag: non-zero appends the firmware name to the revision string
 */
void
lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
{
	struct lpfc_sli *psli = &phba->sli;
	lpfc_vpd_t *vp = &phba->vpd;
	uint32_t b1, b2, b3, b4, i, rev;
	char c;
	uint32_t *ptr, str[4];
	uint8_t *fwname;

	if (phba->sli_rev == LPFC_SLI_REV4)
		/* SLI4 exposes the revision directly as a string */
		snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
	else if (vp->rev.rBit) {
		/* Pick the revision word for the active SLI mode */
		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			rev = vp->rev.sli2FwRev;
		else
			rev = vp->rev.sli1FwRev;

		/* Unpack major/minor/sub fields from the revision word */
		b1 = (rev & 0x0000f000) >> 12;
		b2 = (rev & 0x00000f00) >> 8;
		b3 = (rev & 0x000000c0) >> 6;
		b4 = (rev & 0x00000030) >> 4;

		/* Release-type nibble maps to a letter suffix */
		switch (b4) {
		case 0:
			c = 'N';
			break;
		case 1:
			c = 'A';
			break;
		case 2:
			c = 'B';
			break;
		case 3:
			c = 'X';
			break;
		default:
			c = 0;
			break;
		}
		b4 = (rev & 0x0000000f);

		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			fwname = vp->rev.sli2FwName;
		else
			fwname = vp->rev.sli1FwName;

		/* Trim space padding so the name prints cleanly */
		for (i = 0; i < 16; i++)
			if (fwname[i] == 0x20)
				fwname[i] = 0;

		ptr = (uint32_t *)fwname;

		/* Byte-swap the first 12 bytes of the name into str.
		 * NOTE(review): str[3] is never written — printing
		 * (char *)str relies on a NUL within the first 12 bytes;
		 * confirm fwname is always terminated by the trim above.
		 */
		for (i = 0; i < 3; i++)
			str[i] = be32_to_cpu(*ptr++);

		if (c == 0) {
			if (flag)
				sprintf(fwrevision, "%d.%d%d (%s)",
					b1, b2, b3, (char *)str);
			else
				sprintf(fwrevision, "%d.%d%d", b1,
					b2, b3);
		} else {
			if (flag)
				sprintf(fwrevision, "%d.%d%d%c%d (%s)",
					b1, b2, b3, c,
					b4, (char *)str);
			else
				sprintf(fwrevision, "%d.%d%d%c%d",
					b1, b2, b3, c, b4);
		}
	} else {
		/* Legacy (non-rBit) encoding packs all fields in smFwRev */
		rev = vp->rev.smFwRev;

		b1 = (rev & 0xff000000) >> 24;
		b2 = (rev & 0x00f00000) >> 20;
		b3 = (rev & 0x000f0000) >> 16;
		c  = (rev & 0x0000ff00) >> 8;
		b4 = (rev & 0x000000ff);

		sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
	}
	return;
}

/*
 * Completion handler for VMID application-server CT commands
 * (RAPP_IDENT / DAPP_IDENT / DALLAPP_ID).  Updates the vport VMID table
 * and hash according to the response, then releases the iocb and the
 * ndlp reference taken when the command was issued.
 */
static void
lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
	struct lpfc_sli_ct_request *ctcmd = inp->virt;
	struct lpfc_sli_ct_request *ctrsp = outp->virt;
	__be16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
	struct app_id_object *app;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	u32 cmd, hash, bucket;
	struct lpfc_vmid *vmp, *cur;
	u8 *data = outp->virt;
	int i;

	cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp);
	/* NOTE(review): for DALLAPP_ID the iocb (and its DMA buffers) is
	 * freed here, yet ctrsp may still be dereferenced below and
	 * free_res calls lpfc_ct_free_iocb() again on this path — looks
	 * like a use-after-free/double free; confirm against
	 * lpfc_ct_free_iocb() semantics.
	 */
	if (cmd == SLI_CTAS_DALLAPP_ID)
		lpfc_ct_free_iocb(phba, cmdiocb);

	if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
		if (cmd != SLI_CTAS_DALLAPP_ID)
			goto free_res;
	}
	/* Check for a CT LS_RJT response */
	if (be16_to_cpu(rsp) == SLI_CT_RESPONSE_FS_RJT) {
		if (cmd != SLI_CTAS_DALLAPP_ID)
			lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
					 "3306 VMID FS_RJT Data: x%x x%x x%x\n",
					 cmd, ctrsp->ReasonCode,
					 ctrsp->Explanation);
		if ((cmd != SLI_CTAS_DALLAPP_ID) ||
		    (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) ||
		    (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
			/* If DALLAPP_ID failed retry later */
			if (cmd == SLI_CTAS_DALLAPP_ID)
				set_bit(FC_DEREGISTER_ALL_APP_ID,
					&vport->load_flag);
			goto free_res;
		}
	}

	switch (cmd) {
	case SLI_CTAS_RAPP_IDENT:
		/* Registration succeeded: record the fabric-assigned app id
		 * against the matching entity in the VMID hash table.
		 */
		app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data);
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "6712 RAPP_IDENT app id %d port id x%x id "
				 "len %d\n", be32_to_cpu(app->app_id),
				 be32_to_cpu(app->port_id),
				 app->obj.entity_id_len);

		if (app->obj.entity_id_len == 0 || app->port_id == 0)
			goto free_res;

		hash = lpfc_vmid_hash_fn(app->obj.entity_id,
					 app->obj.entity_id_len);
		vmp = lpfc_get_vmid_from_hashtable(vport, hash,
						   app->obj.entity_id);
		if (vmp) {
			write_lock(&vport->vmid_lock);
			vmp->un.app_id = be32_to_cpu(app->app_id);
			vmp->flag |= LPFC_VMID_REGISTERED;
			vmp->flag &= ~LPFC_VMID_REQ_REGISTER;
			write_unlock(&vport->vmid_lock);
			/* Set IN USE flag */
			vport->vmid_flag |= LPFC_VMID_IN_USE;
		} else {
			lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
					 "6901 No entry found %s hash %d\n",
					 app->obj.entity_id, hash);
		}
		break;
	case SLI_CTAS_DAPP_IDENT:
		/* Deregistration of one app id: nothing to update locally */
		app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data);
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "6713 DAPP_IDENT app id %d port id x%x\n",
				 be32_to_cpu(app->app_id),
				 be32_to_cpu(app->port_id));
		break;
	case SLI_CTAS_DALLAPP_ID:
		/* All app ids deregistered: wipe the whole VMID table */
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "8856 Deregistered all app ids\n");
		/* NOTE(review): entries are cleared while holding only the
		 * read lock — a write lock would normally be expected here;
		 * confirm intended.
		 */
		read_lock(&vport->vmid_lock);
		for (i = 0; i < phba->cfg_max_vmid; i++) {
			vmp = &vport->vmid[i];
			if (vmp->flag != LPFC_VMID_SLOT_FREE)
				memset(vmp, 0, sizeof(struct lpfc_vmid));
		}
		read_unlock(&vport->vmid_lock);
		/* for all elements in the hash table */
		if (!hash_empty(vport->hash_table))
			hash_for_each(vport->hash_table, bucket, cur, hnode)
				hash_del(&cur->hnode);
		set_bit(FC_ALLOW_VMID, &vport->load_flag);
		break;
	default:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "8857 Invalid command code\n");
	}
free_res:
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_vmid_cmd - Build and send a VMID application-server CT command
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdcode: application server command code to send
 * @vmid: pointer to vmid info structure
 *
 * Builds and sends an application-server (VMID) command using the CT
 * subsystem.
3691 */ 3692 int 3693 lpfc_vmid_cmd(struct lpfc_vport *vport, 3694 int cmdcode, struct lpfc_vmid *vmid) 3695 { 3696 struct lpfc_hba *phba = vport->phba; 3697 struct lpfc_dmabuf *mp, *bmp; 3698 struct lpfc_sli_ct_request *ctreq; 3699 struct ulp_bde64 *bpl; 3700 u32 size; 3701 u32 rsp_size; 3702 u8 *data; 3703 struct lpfc_vmid_rapp_ident_list *rap; 3704 struct lpfc_vmid_dapp_ident_list *dap; 3705 u8 retry = 0; 3706 struct lpfc_nodelist *ndlp; 3707 3708 void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3709 struct lpfc_iocbq *rspiocb); 3710 3711 ndlp = lpfc_findnode_did(vport, FDMI_DID); 3712 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3713 return 0; 3714 3715 cmpl = lpfc_cmpl_ct_cmd_vmid; 3716 3717 /* fill in BDEs for command */ 3718 /* Allocate buffer for command payload */ 3719 mp = kmalloc(sizeof(*mp), GFP_KERNEL); 3720 if (!mp) 3721 goto vmid_free_mp_exit; 3722 3723 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3724 if (!mp->virt) 3725 goto vmid_free_mp_virt_exit; 3726 3727 /* Allocate buffer for Buffer ptr list */ 3728 bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); 3729 if (!bmp) 3730 goto vmid_free_bmp_exit; 3731 3732 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 3733 if (!bmp->virt) 3734 goto vmid_free_bmp_virt_exit; 3735 3736 INIT_LIST_HEAD(&mp->list); 3737 INIT_LIST_HEAD(&bmp->list); 3738 3739 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3740 "3275 VMID Request Data: x%lx x%x x%x\n", 3741 vport->fc_flag, vport->port_state, cmdcode); 3742 ctreq = (struct lpfc_sli_ct_request *)mp->virt; 3743 data = mp->virt; 3744 /* First populate the CT_IU preamble */ 3745 memset(data, 0, LPFC_BPL_SIZE); 3746 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 3747 ctreq->RevisionId.bits.InId = 0; 3748 3749 ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE; 3750 ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes; 3751 3752 ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode); 3753 rsp_size = LPFC_BPL_SIZE; 3754 size = 0; 3755 3756 switch (cmdcode) { 3757 
case SLI_CTAS_RAPP_IDENT: 3758 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, 3759 "1329 RAPP_IDENT for %s\n", vmid->host_vmid); 3760 ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); 3761 rap = (struct lpfc_vmid_rapp_ident_list *) 3762 (DAPP_IDENT_OFFSET + data); 3763 rap->no_of_objects = cpu_to_be32(1); 3764 rap->obj[0].entity_id_len = vmid->vmid_len; 3765 memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); 3766 size = RAPP_IDENT_OFFSET + 3767 struct_size(rap, obj, be32_to_cpu(rap->no_of_objects)); 3768 retry = 1; 3769 break; 3770 3771 case SLI_CTAS_GALLAPPIA_ID: 3772 ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); 3773 size = GALLAPPIA_ID_SIZE; 3774 break; 3775 3776 case SLI_CTAS_DAPP_IDENT: 3777 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, 3778 "1469 DAPP_IDENT for %s\n", vmid->host_vmid); 3779 ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); 3780 dap = (struct lpfc_vmid_dapp_ident_list *) 3781 (DAPP_IDENT_OFFSET + data); 3782 dap->no_of_objects = cpu_to_be32(1); 3783 dap->obj[0].entity_id_len = vmid->vmid_len; 3784 memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); 3785 size = DAPP_IDENT_OFFSET + 3786 struct_size(dap, obj, be32_to_cpu(dap->no_of_objects)); 3787 write_lock(&vport->vmid_lock); 3788 vmid->flag &= ~LPFC_VMID_REGISTERED; 3789 write_unlock(&vport->vmid_lock); 3790 retry = 1; 3791 break; 3792 3793 case SLI_CTAS_DALLAPP_ID: 3794 ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); 3795 size = DALLAPP_ID_SIZE; 3796 break; 3797 3798 default: 3799 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, 3800 "7062 VMID cmdcode x%x not supported\n", 3801 cmdcode); 3802 goto vmid_free_all_mem; 3803 } 3804 3805 ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size); 3806 3807 bpl = (struct ulp_bde64 *)bmp->virt; 3808 bpl->addrHigh = putPaddrHigh(mp->phys); 3809 bpl->addrLow = putPaddrLow(mp->phys); 3810 bpl->tus.f.bdeFlags = 0; 3811 bpl->tus.f.bdeSize = size; 3812 3813 /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference 
count 3814 * to hold ndlp reference for the corresponding callback function. 3815 */ 3816 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) 3817 return 0; 3818 3819 vmid_free_all_mem: 3820 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 3821 vmid_free_bmp_virt_exit: 3822 kfree(bmp); 3823 vmid_free_bmp_exit: 3824 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3825 vmid_free_mp_virt_exit: 3826 kfree(mp); 3827 vmid_free_mp_exit: 3828 3829 /* Issue CT request failed */ 3830 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, 3831 "3276 VMID CT request failed Data: x%x\n", cmdcode); 3832 return -EIO; 3833 } 3834