1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 /* See Fibre Channel protocol T11 FC-LS for details */
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_transport_fc.h>
34 #include <uapi/scsi/fc/fc_fs.h>
35 #include <uapi/scsi/fc/fc_els.h>
36
37 #include "lpfc_hw4.h"
38 #include "lpfc_hw.h"
39 #include "lpfc_sli.h"
40 #include "lpfc_sli4.h"
41 #include "lpfc_nl.h"
42 #include "lpfc_disc.h"
43 #include "lpfc_scsi.h"
44 #include "lpfc.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_crtn.h"
47 #include "lpfc_vport.h"
48 #include "lpfc_debugfs.h"
49
50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
51 struct lpfc_iocbq *);
52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
53 struct lpfc_iocbq *);
54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
56 struct lpfc_nodelist *ndlp, uint8_t retry);
57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
58 struct lpfc_iocbq *iocb);
59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
60 struct lpfc_iocbq *cmdiocb,
61 struct lpfc_iocbq *rspiocb);
62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
63 struct lpfc_iocbq *);
64
65 static int lpfc_max_els_tries = 3;
66
67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
70
71 /**
72 * lpfc_els_chk_latt - Check host link attention event for a vport
73 * @vport: pointer to a host virtual N_Port data structure.
74 *
75 * This routine checks whether there is an outstanding host link
76 * attention event during the discovery process with the @vport. It is done
77 * by reading the HBA's Host Attention (HA) register. If any host link
78 * attention event occurred during this @vport's discovery process, the
79 * @vport is marked FC_ABORT_DISCOVERY, a host link attention clear
80 * (CLEAR_LA) is issued if the link state is not already cleared, and
81 * the return code indicates whether a host link attention event
82 * occurred.
83 *
84 * Note that if the host link is in state LPFC_LINK_DOWN, the @vport
85 * state is at or beyond LPFC_VPORT_READY, or the port is an SLI-4 port,
86 * the check is skipped and the return code indicates that no host link
87 * attention event occurred.
88 *
89 * Return codes
90 * 0 - no host link attention event happened
91 * 1 - host link attention event happened
92 **/
93 int
94 lpfc_els_chk_latt(struct lpfc_vport *vport)
95 {
96 struct lpfc_hba *phba = vport->phba;
97 uint32_t ha_copy;
98
99 if (vport->port_state >= LPFC_VPORT_READY ||
100 phba->link_state == LPFC_LINK_DOWN ||
101 phba->sli_rev > LPFC_SLI_REV3)
102 return 0;
103
104 /* Read the HBA Host Attention Register */
105 if (lpfc_readl(phba->HAregaddr, &ha_copy))
106 return 1;
107
108 if (!(ha_copy & HA_LATT))
109 return 0;
110
111 /* Pending Link Event during Discovery */
112 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
113 "0237 Pending Link Event during "
114 "Discovery: State x%x\n",
115 phba->pport->port_state);
116
117 /* CLEAR_LA should re-enable link attention events and
118 * we should then immediately take a LATT event. The
119 * LATT processing should call lpfc_linkdown() which
120 * will cleanup any left over in-progress discovery
121 * events.
122 */
123 set_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
124
125 if (phba->link_state != LPFC_CLEAR_LA)
126 lpfc_issue_clear_la(phba, vport);
127
128 return 1;
129 }
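/*
 * Illustrative usage (editorial sketch, not part of the upstream driver
 * logic): ELS completion handlers in this file typically bail out early
 * when a link attention is pending, dropping any extra node reference
 * they hold, e.g.:
 *
 *	if (lpfc_els_chk_latt(vport)) {
 *		if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
 *			lpfc_nlp_put(ndlp);
 *		goto out;
 *	}
 *
 * See lpfc_cmpl_els_flogi() below for the actual instance of this pattern.
 */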
130
131 static bool lpfc_is_els_acc_rsp(struct lpfc_dmabuf *buf)
132 {
133 struct fc_els_ls_acc *rsp = buf->virt;
134
135 if (rsp && rsp->la_cmd == ELS_LS_ACC)
136 return true;
137 return false;
138 }
139
140 /**
141 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
142 * @vport: pointer to a host virtual N_Port data structure.
143 * @expect_rsp: flag indicating whether response is expected.
144 * @cmd_size: size of the ELS command.
145 * @retry: number of retries to the command when it fails.
146 * @ndlp: pointer to a node-list data structure.
147 * @did: destination identifier.
148 * @elscmd: the ELS command code.
149 *
150 * This routine allocates an lpfc IOCB data structure from the driver's
151 * IOCB free-list and prepares it with the parameters passed in so that
152 * the discovery state machine can issue an Extended Link Service (ELS)
153 * command. It is a generic allocation and preparation routine used by
154 * all the discovery state machine routines; the ELS command-specific
155 * fields are set up later by the individual discovery routines after
156 * this routine returns the prepared IOCB. It fills in the Buffer
157 * Descriptor Entries (BDEs) and allocates buffers for the command
158 * payload and, if a response is expected, the response payload. The
159 * caller is expected to take a reference on the ndlp (lpfc_nlp_get())
160 * and store it in the IOCB's ndlp field so that the command's
161 * completion callback can access the node later (see, for example,
162 * lpfc_issue_els_flogi()).
163 *
164 * Return code
165 * Pointer to the newly allocated/prepared els iocb data structure
166 * NULL - when els iocb data structure allocation/preparation failed
167 **/
168 struct lpfc_iocbq *
169 lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
170 u16 cmd_size, u8 retry,
171 struct lpfc_nodelist *ndlp, u32 did,
172 u32 elscmd)
173 {
174 struct lpfc_hba *phba = vport->phba;
175 struct lpfc_iocbq *elsiocb;
176 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
177 struct ulp_bde64_le *bpl;
178 u32 timeout = 0;
179
180 if (!lpfc_is_link_up(phba))
181 return NULL;
182
183 /* Allocate buffer for command iocb */
184 elsiocb = lpfc_sli_get_iocbq(phba);
185 if (!elsiocb)
186 return NULL;
187
188 /*
189 * If this command is for fabric controller and HBA running
190 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
191 */
192 if (did == Fabric_DID &&
193 test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
194 (elscmd == ELS_CMD_FLOGI ||
195 elscmd == ELS_CMD_FDISC ||
196 elscmd == ELS_CMD_LOGO))
197 switch (elscmd) {
198 case ELS_CMD_FLOGI:
199 elsiocb->cmd_flag |=
200 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
201 & LPFC_FIP_ELS_ID_MASK);
202 break;
203 case ELS_CMD_FDISC:
204 elsiocb->cmd_flag |=
205 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
206 & LPFC_FIP_ELS_ID_MASK);
207 break;
208 case ELS_CMD_LOGO:
209 elsiocb->cmd_flag |=
210 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
211 & LPFC_FIP_ELS_ID_MASK);
212 break;
213 }
214 else
215 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
216
217 /* fill in BDEs for command */
218 /* Allocate buffer for command payload */
219 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
220 if (pcmd)
221 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
222 if (!pcmd || !pcmd->virt)
223 goto els_iocb_free_pcmb_exit;
224
225 INIT_LIST_HEAD(&pcmd->list);
226
227 /* Allocate buffer for response payload */
228 if (expect_rsp) {
229 prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
230 if (prsp)
231 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
232 &prsp->phys);
233 if (!prsp || !prsp->virt)
234 goto els_iocb_free_prsp_exit;
235 INIT_LIST_HEAD(&prsp->list);
236 } else {
237 prsp = NULL;
238 }
239
240 /* Allocate buffer for Buffer ptr list */
241 pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
242 if (pbuflist)
243 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
244 &pbuflist->phys);
245 if (!pbuflist || !pbuflist->virt)
246 goto els_iocb_free_pbuf_exit;
247
248 INIT_LIST_HEAD(&pbuflist->list);
249
250 if (expect_rsp) {
251 switch (elscmd) {
252 case ELS_CMD_FLOGI:
253 timeout = FF_DEF_RATOV * 2;
254 break;
255 case ELS_CMD_LOGO:
256 timeout = phba->fc_ratov;
257 break;
258 default:
259 timeout = phba->fc_ratov * 2;
260 }
261
262 /* Fill SGE for the num bde count */
263 elsiocb->num_bdes = 2;
264 }
265
266 if (phba->sli_rev == LPFC_SLI_REV4)
267 bmp = pcmd;
268 else
269 bmp = pbuflist;
270
271 lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
272 elscmd, timeout, expect_rsp);
273
274 bpl = (struct ulp_bde64_le *)pbuflist->virt;
275 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
276 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
277 bpl->type_size = cpu_to_le32(cmd_size);
278 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
279
280 if (expect_rsp) {
281 bpl++;
282 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
283 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
284 bpl->type_size = cpu_to_le32(FCELSSIZE);
285 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
286 }
287
288 elsiocb->cmd_dmabuf = pcmd;
289 elsiocb->bpl_dmabuf = pbuflist;
290 elsiocb->retry = retry;
291 elsiocb->vport = vport;
292 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
293
294 if (prsp)
295 list_add(&prsp->list, &pcmd->list);
296 if (expect_rsp) {
297 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
298 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
299 "0116 Xmit ELS command x%x to remote "
300 "NPORT x%x I/O tag: x%x, port state:x%x "
301 "rpi x%x fc_flag:x%lx\n",
302 elscmd, did, elsiocb->iotag,
303 vport->port_state, ndlp->nlp_rpi,
304 vport->fc_flag);
305 } else {
306 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
307 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
308 "0117 Xmit ELS response x%x to remote "
309 "NPORT x%x I/O tag: x%x, size: x%x "
310 "port_state x%x rpi x%x fc_flag x%lx\n",
311 elscmd, ndlp->nlp_DID, elsiocb->iotag,
312 cmd_size, vport->port_state,
313 ndlp->nlp_rpi, vport->fc_flag);
314 }
315
316 return elsiocb;
317
318 els_iocb_free_pbuf_exit:
319 if (expect_rsp)
320 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
321 kfree(pbuflist);
322
323 els_iocb_free_prsp_exit:
324 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
325 kfree(prsp);
326
327 els_iocb_free_pcmb_exit:
328 kfree(pcmd);
329 lpfc_sli_release_iocbq(phba, elsiocb);
330 return NULL;
331 }
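/*
 * Illustrative caller pattern (editorial sketch): a discovery routine built
 * on lpfc_prep_els_iocb() typically prepares the IOCB, fills in the ELS
 * payload, takes a node reference for the completion handler, and then
 * issues the request. ELS_CMD_XYZ and lpfc_cmpl_els_xyz are placeholders;
 * lpfc_issue_els_flogi() below is a concrete example.
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_XYZ);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 *	... build the command-specific payload at pcmd ...
 *	elsiocb->cmd_cmpl = lpfc_cmpl_els_xyz;
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	if (!elsiocb->ndlp) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	if (lpfc_issue_fabric_iocb(phba, elsiocb) == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		lpfc_nlp_put(ndlp);
 *		return 1;
 *	}
 */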
332
333 /**
334 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
335 * @vport: pointer to a host virtual N_Port data structure.
336 *
337 * This routine issues a fabric registration login for a @vport. An
338 * active ndlp node with Fabric_DID must already exist for this @vport.
339 * The routine invokes two mailbox commands to carry out fabric registration
340 * login through the HBA firmware: the first mailbox command requests the
341 * HBA to perform link configuration for the @vport; and the second mailbox
342 * command requests the HBA to perform the actual fabric registration login
343 * with the @vport.
344 *
345 * Return code
346 * 0 - successfully issued fabric registration login for @vport
347 * -ENXIO - failed to issue fabric registration login for @vport
348 **/
349 int
350 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
351 {
352 struct lpfc_hba *phba = vport->phba;
353 LPFC_MBOXQ_t *mbox;
354 struct lpfc_nodelist *ndlp;
355 struct serv_parm *sp;
356 int rc;
357 int err = 0;
358
359 sp = &phba->fc_fabparam;
360 ndlp = lpfc_findnode_did(vport, Fabric_DID);
361 if (!ndlp) {
362 err = 1;
363 goto fail;
364 }
365
366 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
367 if (!mbox) {
368 err = 2;
369 goto fail;
370 }
371
372 vport->port_state = LPFC_FABRIC_CFG_LINK;
373 lpfc_config_link(phba, mbox);
374 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
375 mbox->vport = vport;
376
377 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
378 if (rc == MBX_NOT_FINISHED) {
379 err = 3;
380 goto fail_free_mbox;
381 }
382
383 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
384 if (!mbox) {
385 err = 4;
386 goto fail;
387 }
388 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
389 ndlp->nlp_rpi);
390 if (rc) {
391 err = 5;
392 goto fail_free_mbox;
393 }
394
395 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
396 mbox->vport = vport;
397 /* increment the reference count on ndlp to hold reference
398 * for the callback routine.
399 */
400 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
401 if (!mbox->ctx_ndlp) {
402 err = 6;
403 goto fail_free_mbox;
404 }
405
406 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
407 if (rc == MBX_NOT_FINISHED) {
408 err = 7;
409 goto fail_issue_reg_login;
410 }
411
412 return 0;
413
414 fail_issue_reg_login:
415 /* decrement the reference count on ndlp just incremented
416 * for the failed mbox command.
417 */
418 lpfc_nlp_put(ndlp);
419 fail_free_mbox:
420 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
421 fail:
422 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
423 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
424 "0249 Cannot issue Register Fabric login: Err %d\n",
425 err);
426 return -ENXIO;
427 }
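/*
 * Editorial note on the reference counting in lpfc_issue_fabric_reglogin()
 * above: the ndlp reference taken with lpfc_nlp_get() before issuing the
 * REG_LOGIN mailbox is owned by the mailbox completion handler
 * (lpfc_mbx_cmpl_fabric_reg_login). It is dropped here with lpfc_nlp_put()
 * only when the mailbox could not be issued, because in that case the
 * completion handler will never run to release it:
 *
 *	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 *	...
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		lpfc_nlp_put(ndlp);	(undo the reference taken above)
 */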
428
429 /**
430 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
431 * @vport: pointer to a host virtual N_Port data structure.
432 *
433 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
434 * the @vport. This mailbox command is needed only for SLI4 ports.
435 *
436 * Return code
437 * 0 - successfully issued REG_VFI for @vport
438 * A failure code otherwise.
439 **/
440 int
441 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
442 {
443 struct lpfc_hba *phba = vport->phba;
444 LPFC_MBOXQ_t *mboxq = NULL;
445 struct lpfc_nodelist *ndlp;
446 struct lpfc_dmabuf *dmabuf = NULL;
447 int rc = 0;
448
449 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
450 if ((phba->sli_rev == LPFC_SLI_REV4) &&
451 !(phba->link_flag & LS_LOOPBACK_MODE) &&
452 !test_bit(FC_PT2PT, &vport->fc_flag)) {
453 ndlp = lpfc_findnode_did(vport, Fabric_DID);
454 if (!ndlp) {
455 rc = -ENODEV;
456 goto fail;
457 }
458 }
459
460 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
461 if (!mboxq) {
462 rc = -ENOMEM;
463 goto fail;
464 }
465
466 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
467 if (test_bit(FC_FABRIC, &vport->fc_flag) ||
468 test_bit(FC_PT2PT, &vport->fc_flag)) {
469 rc = lpfc_mbox_rsrc_prep(phba, mboxq);
470 if (rc) {
471 rc = -ENOMEM;
472 goto fail_mbox;
473 }
474 dmabuf = mboxq->ctx_buf;
475 memcpy(dmabuf->virt, &phba->fc_fabparam,
476 sizeof(struct serv_parm));
477 }
478
479 vport->port_state = LPFC_FABRIC_CFG_LINK;
480 if (dmabuf) {
481 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
482 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
483 mboxq->ctx_buf = dmabuf;
484 } else {
485 lpfc_reg_vfi(mboxq, vport, 0);
486 }
487
488 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
489 mboxq->vport = vport;
490 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
491 if (rc == MBX_NOT_FINISHED) {
492 rc = -ENXIO;
493 goto fail_mbox;
494 }
495 return 0;
496
497 fail_mbox:
498 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
499 fail:
500 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
501 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
502 "0289 Issue Register VFI failed: Err %d\n", rc);
503 return rc;
504 }
505
506 /**
507 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
508 * @vport: pointer to a host virtual N_Port data structure.
509 *
510 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
511 * the @vport. This mailbox command is needed only for SLI4 ports.
512 *
513 * Return code
514 * 0 - successfully issued UNREG_VFI for @vport
515 * A failure code otherwise.
516 **/
517 int
518 lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
519 {
520 struct lpfc_hba *phba = vport->phba;
521 LPFC_MBOXQ_t *mboxq;
522 int rc;
523
524 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
525 if (!mboxq) {
526 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
527 "2556 UNREG_VFI mbox allocation failed"
528 "HBA state x%x\n", phba->pport->port_state);
529 return -ENOMEM;
530 }
531
532 lpfc_unreg_vfi(mboxq, vport);
533 mboxq->vport = vport;
534 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
535
536 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
537 if (rc == MBX_NOT_FINISHED) {
538 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
539 "2557 UNREG_VFI issue mbox failed rc x%x "
540 "HBA state x%x\n",
541 rc, phba->pport->port_state);
542 mempool_free(mboxq, phba->mbox_mem_pool);
543 return -EIO;
544 }
545
546 clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
547 return 0;
548 }
549
550 /**
551 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
552 * @vport: pointer to a host virtual N_Port data structure.
553 * @sp: pointer to service parameter data structure.
554 *
555 * This routine is called from the FLOGI/FDISC completion handler functions.
556 * It returns 1 when the FCID, Fabric port name, or Fabric node name has
557 * changed in the completion service parameters; otherwise it returns 0.
558 * It also sets a flag in the vport data structure to delay N_Port
559 * discovery after the FLOGI/FDISC completion if the Clean Address bit in
560 * the FLOGI/FDISC response is cleared and the FCID, Fabric port name, or
561 * Fabric node name has changed in the completion service parameters.
562 *
563 * Return code
564 * 0 - FCID, Fabric node name, and Fabric port name are unchanged.
565 * 1 - FCID, Fabric node name, or Fabric port name has changed.
566 *
567 **/
568 static uint8_t
569 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
570 struct serv_parm *sp)
571 {
572 struct lpfc_hba *phba = vport->phba;
573 uint8_t fabric_param_changed = 0;
574
575 if ((vport->fc_prevDID != vport->fc_myDID) ||
576 memcmp(&vport->fabric_portname, &sp->portName,
577 sizeof(struct lpfc_name)) ||
578 memcmp(&vport->fabric_nodename, &sp->nodeName,
579 sizeof(struct lpfc_name)) ||
580 (vport->vport_flag & FAWWPN_PARAM_CHG)) {
581 fabric_param_changed = 1;
582 vport->vport_flag &= ~FAWWPN_PARAM_CHG;
583 }
584 /*
585 * Word 1 Bit 31 in common service parameter is overloaded.
586 * Word 1 Bit 31 in FLOGI request is multiple NPort request
587 * Word 1 Bit 31 in FLOGI response is clean address bit
588 *
589 * If fabric parameter is changed and clean address bit is
590 * cleared delay nport discovery if
591 * - vport->fc_prevDID != 0 (not initial discovery) OR
592 * - lpfc_delay_discovery module parameter is set.
593 */
594 if (fabric_param_changed && !sp->cmn.clean_address_bit &&
595 (vport->fc_prevDID || phba->cfg_delay_discovery))
596 set_bit(FC_DISC_DELAYED, &vport->fc_flag);
597
598 return fabric_param_changed;
599 }
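/*
 * Worked example (editorial): suppose a port re-FLOGIs after a fabric
 * rebuild. If the Fabric port name in the new FLOGI ACC differs from
 * vport->fabric_portname, lpfc_check_clean_addr_bit() returns 1. If, in
 * the same ACC, the Clean Address bit (word 1 bit 31 of the common
 * service parameters) is 0 and either this is not the initial discovery
 * (vport->fc_prevDID != 0) or lpfc_delay_discovery is configured,
 * FC_DISC_DELAYED is also set so that N_Port discovery is deferred after
 * the FLOGI completes.
 */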
600
601
602 /**
603 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
604 * @vport: pointer to a host virtual N_Port data structure.
605 * @ndlp: pointer to a node-list data structure.
606 * @sp: pointer to service parameter data structure.
607 * @ulp_word4: command response value
608 *
609 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
610 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
611 * port in a fabric topology. It properly sets up the parameters to the @ndlp
612 * from the IOCB response. It also checks the newly assigned N_Port ID of the
613 * @vport against the previously assigned N_Port ID. If it is different from
614 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
615 * is invoked on all the remaining nodes with the @vport to unregister the
616 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
617 * is invoked to register login to the fabric.
618 *
619 * Return code
620 * 0 - Success (currently, always return 0)
621 **/
622 static int
623 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
624 struct serv_parm *sp, uint32_t ulp_word4)
625 {
626 struct lpfc_hba *phba = vport->phba;
627 struct lpfc_nodelist *np;
628 struct lpfc_nodelist *next_np;
629 uint8_t fabric_param_changed;
630
631 set_bit(FC_FABRIC, &vport->fc_flag);
632
633 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
634 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
635 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
636
637 phba->fc_edtovResol = sp->cmn.edtovResolution;
638 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
639
640 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
641 set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
642
643 vport->fc_myDID = ulp_word4 & Mask_DID;
644 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
645 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
646 ndlp->nlp_class_sup = 0;
647 if (sp->cls1.classValid)
648 ndlp->nlp_class_sup |= FC_COS_CLASS1;
649 if (sp->cls2.classValid)
650 ndlp->nlp_class_sup |= FC_COS_CLASS2;
651 if (sp->cls3.classValid)
652 ndlp->nlp_class_sup |= FC_COS_CLASS3;
653 if (sp->cls4.classValid)
654 ndlp->nlp_class_sup |= FC_COS_CLASS4;
655 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
656 sp->cmn.bbRcvSizeLsb;
657
658 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
659 if (fabric_param_changed) {
660 /* Reset FDMI attribute masks based on config parameter */
661 if (phba->cfg_enable_SmartSAN ||
662 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
663 /* Setup appropriate attribute masks */
664 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
665 if (phba->cfg_enable_SmartSAN)
666 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
667 else
668 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
669 } else {
670 vport->fdmi_hba_mask = 0;
671 vport->fdmi_port_mask = 0;
672 }
673
674 }
675 memcpy(&vport->fabric_portname, &sp->portName,
676 sizeof(struct lpfc_name));
677 memcpy(&vport->fabric_nodename, &sp->nodeName,
678 sizeof(struct lpfc_name));
679 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
680
681 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
682 if (sp->cmn.response_multiple_NPort) {
683 lpfc_printf_vlog(vport, KERN_WARNING,
684 LOG_ELS | LOG_VPORT,
685 "1816 FLOGI NPIV supported, "
686 "response data 0x%x\n",
687 sp->cmn.response_multiple_NPort);
688 spin_lock_irq(&phba->hbalock);
689 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
690 spin_unlock_irq(&phba->hbalock);
691 } else {
692 /* Because we asked f/w for NPIV it still expects us
693 to call reg_vnpid at least for the physical host */
694 lpfc_printf_vlog(vport, KERN_WARNING,
695 LOG_ELS | LOG_VPORT,
696 "1817 Fabric does not support NPIV "
697 "- configuring single port mode.\n");
698 spin_lock_irq(&phba->hbalock);
699 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
700 spin_unlock_irq(&phba->hbalock);
701 }
702 }
703
704 /*
705 * For FC we need to do some special processing because of the SLI
706 * Port's default settings of the Common Service Parameters.
707 */
708 if ((phba->sli_rev == LPFC_SLI_REV4) &&
709 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
710 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
711 if (fabric_param_changed)
712 lpfc_unregister_fcf_prep(phba);
713
714 /* This should just update the VFI CSPs*/
715 if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
716 lpfc_issue_reg_vfi(vport);
717 }
718
719 if (fabric_param_changed &&
720 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
721
722 /* If our NportID changed, we need to ensure all
723 * remaining NPORTs get unreg_login'ed.
724 */
725 list_for_each_entry_safe(np, next_np,
726 &vport->fc_nodes, nlp_listp) {
727 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
728 !(np->nlp_flag & NLP_NPR_ADISC))
729 continue;
730 spin_lock_irq(&np->lock);
731 np->nlp_flag &= ~NLP_NPR_ADISC;
732 spin_unlock_irq(&np->lock);
733 lpfc_unreg_rpi(vport, np);
734 }
735 lpfc_cleanup_pending_mbox(vport);
736
737 if (phba->sli_rev == LPFC_SLI_REV4) {
738 lpfc_sli4_unreg_all_rpis(vport);
739 lpfc_mbx_unreg_vpi(vport);
740 set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
741 }
742
743 /*
744 * For SLI3 and SLI4, the VPI needs to be reregistered in
745 * response to this fabric parameter change event.
746 */
747 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
748 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
749 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
750 /*
751 * Driver needs to re-reg VPI in order for f/w
752 * to update the MAC address.
753 */
754 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
755 lpfc_register_new_vport(phba, vport, ndlp);
756 return 0;
757 }
758
759 if (phba->sli_rev < LPFC_SLI_REV4) {
760 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
761 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
762 test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
763 lpfc_register_new_vport(phba, vport, ndlp);
764 else
765 lpfc_issue_fabric_reglogin(vport);
766 } else {
767 ndlp->nlp_type |= NLP_FABRIC;
768 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
769 if ((!test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) &&
770 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
771 lpfc_start_fdiscs(phba);
772 lpfc_do_scr_ns_plogi(phba, vport);
773 } else if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
774 lpfc_issue_init_vpi(vport);
775 else {
776 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
777 "3135 Need register VFI: (x%x/%x)\n",
778 vport->fc_prevDID, vport->fc_myDID);
779 lpfc_issue_reg_vfi(vport);
780 }
781 }
782 return 0;
783 }
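/*
 * Editorial note with worked numbers for the conversions in
 * lpfc_cmpl_els_flogi_fabric() above: when the fabric reports E_D_TOV in
 * nanosecond resolution, a value of 2,000,000 ns becomes
 * (2000000 + 999999) / 1000000 = 2 ms; an R_A_TOV of 10,000 ms becomes
 * (10000 + 999) / 1000 = 10 s. A buffer-to-buffer receive size of
 * bbRcvSizeMsb = 0x08, bbRcvSizeLsb = 0x00 yields a maximum frame size of
 * ((0x08 & 0x0F) << 8) | 0x00 = 2048 bytes.
 */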
784
785 /**
786 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
787 * @vport: pointer to a host virtual N_Port data structure.
788 * @ndlp: pointer to a node-list data structure.
789 * @sp: pointer to service parameter data structure.
790 *
791 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
792 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
793 * in a point-to-point topology. First, the @vport's N_Port Name is compared
794 * with the received N_Port Name: if the @vport's N_Port Name is greater than
795 * the received N_Port Name lexicographically, this node shall assign local
796 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
797 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
798 * this node shall just wait for the remote node to issue PLOGI and assign
799 * N_Port IDs.
800 *
801 * Return code
802 * 0 - Success
803 * -ENXIO - Fail
804 **/
805 static int
806 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
807 struct serv_parm *sp)
808 {
809 struct lpfc_hba *phba = vport->phba;
810 LPFC_MBOXQ_t *mbox;
811 int rc;
812
813 clear_bit(FC_FABRIC, &vport->fc_flag);
814 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
815 set_bit(FC_PT2PT, &vport->fc_flag);
816
817 /* If we are pt2pt with another NPort, force NPIV off! */
818 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
819
820 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
821 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
822 lpfc_unregister_fcf_prep(phba);
823 clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
824 phba->fc_topology_changed = 0;
825 }
826
827 rc = memcmp(&vport->fc_portname, &sp->portName,
828 sizeof(vport->fc_portname));
829
830 if (rc >= 0) {
831 /* This side will initiate the PLOGI */
832 set_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
833
834 /*
835 * N_Port ID cannot be 0, set our Id to LocalID
836 * the other side will be RemoteID.
837 */
838
839 /* not equal */
840 if (rc)
841 vport->fc_myDID = PT2PT_LocalID;
842
843 /* If not registered with a transport, decrement ndlp reference
844 * count indicating that ndlp can be safely released when other
845 * references are removed.
846 */
847 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
848 lpfc_nlp_put(ndlp);
849
850 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
851 if (!ndlp) {
852 /*
853 * Cannot find existing Fabric ndlp, so allocate a
854 * new one
855 */
856 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
857 if (!ndlp)
858 goto fail;
859 }
860
861 memcpy(&ndlp->nlp_portname, &sp->portName,
862 sizeof(struct lpfc_name));
863 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
864 sizeof(struct lpfc_name));
865 /* Set state will put ndlp onto node list if not already done */
866 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
867 spin_lock_irq(&ndlp->lock);
868 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
869 spin_unlock_irq(&ndlp->lock);
870
871 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
872 if (!mbox)
873 goto fail;
874
875 lpfc_config_link(phba, mbox);
876
877 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
878 mbox->vport = vport;
879 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
880 if (rc == MBX_NOT_FINISHED) {
881 mempool_free(mbox, phba->mbox_mem_pool);
882 goto fail;
883 }
884 } else {
885 /* This side will wait for the PLOGI. If not registered with
886 * a transport, decrement node reference count indicating that
887 * ndlp can be released when other references are removed.
888 */
889 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
890 lpfc_nlp_put(ndlp);
891
892 /* Start discovery - this should just do CLEAR_LA */
893 lpfc_disc_start(vport);
894 }
895
896 return 0;
897 fail:
898 return -ENXIO;
899 }
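/*
 * Editorial example of the point-to-point role decision above: the local
 * and remote WWPNs are compared byte-wise with memcmp(). The side whose
 * port name compares greater or equal (rc >= 0 here) claims
 * PT2PT_LocalID, assigns PT2PT_RemoteID to its peer, and initiates the
 * PLOGI. For instance, a local WWPN of 20:00:00:11:22:33:44:66 against a
 * remote WWPN of 20:00:00:11:22:33:44:55 compares greater, so this side
 * sends the PLOGI; with the WWPNs swapped, this side would instead wait
 * for the peer's PLOGI and simply start discovery via lpfc_disc_start().
 */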
900
901 /**
902 * lpfc_cmpl_els_flogi - Completion callback function for flogi
903 * @phba: pointer to lpfc hba data structure.
904 * @cmdiocb: pointer to lpfc command iocb data structure.
905 * @rspiocb: pointer to lpfc response iocb data structure.
906 *
907 * This routine is the top-level completion callback function for issuing
908 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
909 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
910 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
911 * returning 1), the command IOCB is released and the function returns.
912 * If the retry attempt has been given up (for example, after reaching
913 * the maximum number of retries), one additional decrement of the ndlp
914 * reference count is made after releasing the command IOCB. This will
915 * actually release the remote node (Note, lpfc_els_free_iocb() will also
916 * invoke one decrement of ndlp reference count). If no error reported in
917 * the IOCB status, the command Port ID field is used to determine whether
918 * this is a point-to-point topology or a fabric topology: if the Port ID
919 * field is assigned, it is a fabric topology; otherwise, it is a
920 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
921 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
922 * specific topology completion conditions.
923 **/
924 static void
925 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
926 struct lpfc_iocbq *rspiocb)
927 {
928 struct lpfc_vport *vport = cmdiocb->vport;
929 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
930 IOCB_t *irsp;
931 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
932 struct serv_parm *sp;
933 uint16_t fcf_index;
934 int rc;
935 u32 ulp_status, ulp_word4, tmo;
936 bool flogi_in_retry = false;
937
938 /* Check to see if link went down during discovery */
939 if (lpfc_els_chk_latt(vport)) {
940 /* One additional decrement on node reference count to
941 * trigger the release of the node
942 */
943 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
944 lpfc_nlp_put(ndlp);
945 goto out;
946 }
947
948 ulp_status = get_job_ulpstatus(phba, rspiocb);
949 ulp_word4 = get_job_word4(phba, rspiocb);
950
951 if (phba->sli_rev == LPFC_SLI_REV4) {
952 tmo = get_wqe_tmo(cmdiocb);
953 } else {
954 irsp = &rspiocb->iocb;
955 tmo = irsp->ulpTimeout;
956 }
957
958 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
959 "FLOGI cmpl: status:x%x/x%x state:x%x",
960 ulp_status, ulp_word4,
961 vport->port_state);
962
963 if (ulp_status) {
964 /*
965 * In case of FIP mode, perform roundrobin FCF failover
966 * due to new FCF discovery
967 */
968 if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
969 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
970 if (phba->link_state < LPFC_LINK_UP)
971 goto stop_rr_fcf_flogi;
972 if ((phba->fcoe_cvl_eventtag_attn ==
973 phba->fcoe_cvl_eventtag) &&
974 (ulp_status == IOSTAT_LOCAL_REJECT) &&
975 ((ulp_word4 & IOERR_PARAM_MASK) ==
976 IOERR_SLI_ABORTED))
977 goto stop_rr_fcf_flogi;
978 else
979 phba->fcoe_cvl_eventtag_attn =
980 phba->fcoe_cvl_eventtag;
981 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
982 "2611 FLOGI FCF (x%x), "
983 "status:x%x/x%x, tmo:x%x, perform "
984 "roundrobin FCF failover\n",
985 phba->fcf.current_rec.fcf_indx,
986 ulp_status, ulp_word4, tmo);
987 lpfc_sli4_set_fcf_flogi_fail(phba,
988 phba->fcf.current_rec.fcf_indx);
989 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
990 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
991 if (rc)
992 goto out;
993 }
994
995 stop_rr_fcf_flogi:
996 /* FLOGI failure */
997 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
998 ((ulp_word4 & IOERR_PARAM_MASK) ==
999 IOERR_LOOP_OPEN_FAILURE)))
1000 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
1001 "2858 FLOGI Status:x%x/x%x TMO"
1002 ":x%x Data x%lx x%x\n",
1003 ulp_status, ulp_word4, tmo,
1004 phba->hba_flag, phba->fcf.fcf_flag);
1005
1006 /* Check for retry */
1007 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1008 /* Address a timing race with dev_loss. If dev_loss
1009 * is active on this FPort node, put the initial ref
1010 * count back to stop premature node release actions.
1011 */
1012 lpfc_check_nlp_post_devloss(vport, ndlp);
1013 flogi_in_retry = true;
1014 goto out;
1015 }
1016
1017 /* The FLOGI will not be retried. If the FPort node is not
1018 * registered with the SCSI transport, remove the initial
1019 * reference to trigger node release.
1020 */
1021 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) &&
1022 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
1023 lpfc_nlp_put(ndlp);
1024
1025 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1026 "0150 FLOGI Status:x%x/x%x "
1027 "xri x%x TMO:x%x refcnt %d\n",
1028 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
1029 tmo, kref_read(&ndlp->kref));
1030
1031 /* If this is not a loop open failure, bail out */
1032 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
1033 ((ulp_word4 & IOERR_PARAM_MASK) ==
1034 IOERR_LOOP_OPEN_FAILURE))) {
1035 /* Warn FLOGI status */
1036 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
1037 "0100 FLOGI Status:x%x/x%x "
1038 "TMO:x%x\n",
1039 ulp_status, ulp_word4, tmo);
1040 goto flogifail;
1041 }
1042
1043 /* FLOGI failed, so there is no fabric */
1044 clear_bit(FC_FABRIC, &vport->fc_flag);
1045 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
1046 clear_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);
1047
1048 /* If private loop, then allow max outstanding els to be
1049 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1050 * alpa map would take too long otherwise.
1051 */
1052 if (phba->alpa_map[0] == 0)
1053 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1054 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1055 (!test_bit(FC_VFI_REGISTERED, &vport->fc_flag) ||
1056 (vport->fc_prevDID != vport->fc_myDID) ||
1057 phba->fc_topology_changed)) {
1058 if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) {
1059 if (phba->fc_topology_changed) {
1060 lpfc_unregister_fcf_prep(phba);
1061 clear_bit(FC_VFI_REGISTERED,
1062 &vport->fc_flag);
1063 phba->fc_topology_changed = 0;
1064 } else {
1065 lpfc_sli4_unreg_all_rpis(vport);
1066 }
1067 }
1068
1069 /* Do not register VFI if the driver aborted FLOGI */
1070 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
1071 lpfc_issue_reg_vfi(vport);
1072
1073 goto out;
1074 }
1075 goto flogifail;
1076 }
1077 clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
1078 clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
1079
1080 /*
1081 * The FLOGI succeeded. Sync the data for the CPU before
1082 * accessing it.
1083 */
1084 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1085 if (!prsp)
1086 goto out;
1087 if (!lpfc_is_els_acc_rsp(prsp))
1088 goto out;
1089 sp = prsp->virt + sizeof(uint32_t);
1090
1091 /* FLOGI completes successfully */
1092 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1093 "0101 FLOGI completes successfully, I/O tag:x%x "
1094 "xri x%x Data: x%x x%x x%x x%x x%x x%lx x%x %d\n",
1095 cmdiocb->iotag, cmdiocb->sli4_xritag,
1096 ulp_word4, sp->cmn.e_d_tov,
1097 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1098 vport->port_state, vport->fc_flag,
1099 sp->cmn.priority_tagging, kref_read(&ndlp->kref));
1100
1101 /* reinitialize the VMID datastructure before returning */
1102 if (lpfc_is_vmid_enabled(phba)) {
1103 lpfc_reinit_vmid(vport);
1104 vport->vmid_flag = 0;
1105 }
1106 if (sp->cmn.priority_tagging)
1107 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
1108 LPFC_VMID_TYPE_PRIO);
1109
1110 /*
1111 * Address a timing race with dev_loss. If dev_loss is active on
1112 * this FPort node, put the initial ref count back to stop premature
1113 * node release actions.
1114 */
1115 lpfc_check_nlp_post_devloss(vport, ndlp);
1116 if (vport->port_state == LPFC_FLOGI) {
1117 /*
1118 * If Common Service Parameters indicate Nport
1119 * we are point to point, if Fport we are Fabric.
1120 */
1121 if (sp->cmn.fPort)
1122 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
1123 ulp_word4);
1124 else if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag))
1125 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1126 else {
1127 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1128 "2831 FLOGI response with cleared Fabric "
1129 "bit fcf_index 0x%x "
1130 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1131 "Fabric Name "
1132 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
1133 phba->fcf.current_rec.fcf_indx,
1134 phba->fcf.current_rec.switch_name[0],
1135 phba->fcf.current_rec.switch_name[1],
1136 phba->fcf.current_rec.switch_name[2],
1137 phba->fcf.current_rec.switch_name[3],
1138 phba->fcf.current_rec.switch_name[4],
1139 phba->fcf.current_rec.switch_name[5],
1140 phba->fcf.current_rec.switch_name[6],
1141 phba->fcf.current_rec.switch_name[7],
1142 phba->fcf.current_rec.fabric_name[0],
1143 phba->fcf.current_rec.fabric_name[1],
1144 phba->fcf.current_rec.fabric_name[2],
1145 phba->fcf.current_rec.fabric_name[3],
1146 phba->fcf.current_rec.fabric_name[4],
1147 phba->fcf.current_rec.fabric_name[5],
1148 phba->fcf.current_rec.fabric_name[6],
1149 phba->fcf.current_rec.fabric_name[7]);
1150
1151 lpfc_nlp_put(ndlp);
1152 spin_lock_irq(&phba->hbalock);
1153 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1154 spin_unlock_irq(&phba->hbalock);
1155 clear_bit(FCF_RR_INPROG, &phba->hba_flag);
1156 clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag);
1157 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1158 goto out;
1159 }
1160 if (!rc) {
1161 /* Mark the FCF discovery process done */
1162 if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag))
1163 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1164 LOG_ELS,
1165 "2769 FLOGI to FCF (x%x) "
1166 "completed successfully\n",
1167 phba->fcf.current_rec.fcf_indx);
1168 spin_lock_irq(&phba->hbalock);
1169 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1170 spin_unlock_irq(&phba->hbalock);
1171 clear_bit(FCF_RR_INPROG, &phba->hba_flag);
1172 clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag);
1173 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1174 goto out;
1175 }
1176 } else if (vport->port_state > LPFC_FLOGI &&
1177 test_bit(FC_PT2PT, &vport->fc_flag)) {
1178 /*
1179 * In a p2p topology, it is possible that discovery has
1180 * already progressed, and this completion can be ignored.
1181 * Recheck the indicated topology.
1182 */
1183 if (!sp->cmn.fPort)
1184 goto out;
1185 }
1186
1187 flogifail:
1188 spin_lock_irq(&phba->hbalock);
1189 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1190 spin_unlock_irq(&phba->hbalock);
1191
1192 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
1193 /* FLOGI failed, so just use loop map to make discovery list */
1194 lpfc_disc_list_loopmap(vport);
1195
1196 /* Start discovery */
1197 lpfc_disc_start(vport);
1198 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
1199 (((ulp_word4 & IOERR_PARAM_MASK) !=
1200 IOERR_SLI_ABORTED) &&
1201 ((ulp_word4 & IOERR_PARAM_MASK) !=
1202 IOERR_SLI_DOWN))) &&
1203 (phba->link_state != LPFC_CLEAR_LA)) {
1204 /* If FLOGI failed enable link interrupt. */
1205 lpfc_issue_clear_la(phba, vport);
1206 }
1207 out:
1208 if (!flogi_in_retry)
1209 clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
1210
1211 lpfc_els_free_iocb(phba, cmdiocb);
1212 lpfc_nlp_put(ndlp);
1213 }
1214
1215 /**
1216 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
1217 * aborted during a link down
1218 * @phba: pointer to lpfc hba data structure.
1219 * @cmdiocb: pointer to lpfc command iocb data structure.
1220 * @rspiocb: pointer to lpfc response iocb data structure.
1221 *
1222 */
1223 static void
1224 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1225 struct lpfc_iocbq *rspiocb)
1226 {
1227 uint32_t *pcmd;
1228 uint32_t cmd;
1229 u32 ulp_status, ulp_word4;
1230
1231 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
1232 cmd = *pcmd;
1233
1234 ulp_status = get_job_ulpstatus(phba, rspiocb);
1235 ulp_word4 = get_job_word4(phba, rspiocb);
1236
1237 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1238 "6445 ELS completes after LINK_DOWN: "
1239 " Status %x/%x cmd x%x flg x%x\n",
1240 ulp_status, ulp_word4, cmd,
1241 cmdiocb->cmd_flag);
1242
1243 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
1244 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
1245 atomic_dec(&phba->fabric_iocb_count);
1246 }
1247 lpfc_els_free_iocb(phba, cmdiocb);
1248 }
1249
1250 /**
1251 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1252 * @vport: pointer to a host virtual N_Port data structure.
1253 * @ndlp: pointer to a node-list data structure.
1254 * @retry: number of retries to the command IOCB.
1255 *
1256 * This routine issues a Fabric Login (FLOGI) Request ELS command
1257 * for a @vport. The initiator service parameters are put into the payload
1258 * of the FLOGI Request IOCB and the top-level callback function pointer
1259 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1260 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1261 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1262 *
1263 * Note that the ndlp reference count will be incremented by 1 for holding the
1264 * ndlp and the reference to ndlp will be stored into the ndlp field of
1265 * the IOCB for the completion callback function to the FLOGI ELS command.
1266 *
1267 * Return code
1268 * 0 - successfully issued flogi iocb for @vport
1269 * 1 - failed to issue flogi iocb for @vport
1270 **/
1271 static int
1272 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1273 uint8_t retry)
1274 {
1275 struct lpfc_hba *phba = vport->phba;
1276 struct serv_parm *sp;
1277 union lpfc_wqe128 *wqe = NULL;
1278 IOCB_t *icmd = NULL;
1279 struct lpfc_iocbq *elsiocb;
1280 struct lpfc_iocbq defer_flogi_acc;
1281 u8 *pcmd, ct;
1282 uint16_t cmdsize;
1283 uint32_t tmo, did;
1284 int rc;
1285
1286 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1287 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1288 ndlp->nlp_DID, ELS_CMD_FLOGI);
1289
1290 if (!elsiocb)
1291 return 1;
1292
1293 wqe = &elsiocb->wqe;
1294 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
1295 icmd = &elsiocb->iocb;
1296
1297 /* For FLOGI request, remainder of payload is service parameters */
1298 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1299 pcmd += sizeof(uint32_t);
1300 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1301 sp = (struct serv_parm *) pcmd;
1302
1303 /* Setup CSPs accordingly for Fabric */
1304 sp->cmn.e_d_tov = 0;
1305 sp->cmn.w2.r_a_tov = 0;
1306 sp->cmn.virtual_fabric_support = 0;
1307 sp->cls1.classValid = 0;
1308 if (sp->cmn.fcphLow < FC_PH3)
1309 sp->cmn.fcphLow = FC_PH3;
1310 if (sp->cmn.fcphHigh < FC_PH3)
1311 sp->cmn.fcphHigh = FC_PH3;
1312
1313 /* Determine if switch supports priority tagging */
1314 if (phba->cfg_vmid_priority_tagging) {
1315 sp->cmn.priority_tagging = 1;
1316 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
1317 if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
1318 sizeof(vport->lpfc_vmid_host_uuid))) {
1319 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
1320 sizeof(phba->wwpn));
1321 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
1322 sizeof(phba->wwnn));
1323 }
1324 }
1325
1326 if (phba->sli_rev == LPFC_SLI_REV4) {
1327 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1328 LPFC_SLI_INTF_IF_TYPE_0) {
1329 /* FLOGI needs to be 3 for WQE FCFI */
1330 ct = SLI4_CT_FCFI;
1331 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
1332
1333 /* Set the fcfi to the fcfi we registered with */
1334 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
1335 phba->fcf.fcfi);
1336 }
1337
1338 /* Can't do SLI4 class2 without support sequence coalescing */
1339 sp->cls2.classValid = 0;
1340 sp->cls2.seqDelivery = 0;
1341 } else {
1342 /* Historical, setting sequential-delivery bit for SLI3 */
1343 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1344 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1345 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1346 sp->cmn.request_multiple_Nport = 1;
1347 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1348 icmd->ulpCt_h = 1;
1349 icmd->ulpCt_l = 0;
1350 } else {
1351 sp->cmn.request_multiple_Nport = 0;
1352 }
1353
1354 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1355 icmd->un.elsreq64.myID = 0;
1356 icmd->un.elsreq64.fl = 1;
1357 }
1358 }
1359
1360 tmo = phba->fc_ratov;
1361 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1362 lpfc_set_disctmo(vport);
1363 phba->fc_ratov = tmo;
1364
1365 phba->fc_stat.elsXmitFLOGI++;
1366 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
1367
1368 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1369 "Issue FLOGI: opt:x%x",
1370 phba->sli3_options, 0, 0);
1371
1372 elsiocb->ndlp = lpfc_nlp_get(ndlp);
1373 if (!elsiocb->ndlp) {
1374 lpfc_els_free_iocb(phba, elsiocb);
1375 return 1;
1376 }
1377
1378 /* Avoid race with FLOGI completion and hba_flags. */
1379 set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
1380 set_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
1381
1382 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1383 if (rc == IOCB_ERROR) {
1384 clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
1385 clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag);
1386 lpfc_els_free_iocb(phba, elsiocb);
1387 lpfc_nlp_put(ndlp);
1388 return 1;
1389 }
1390
1391 /* Clear external loopback plug detected flag */
1392 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
1393
1394 /* Check for a deferred FLOGI ACC condition */
1395 if (phba->defer_flogi_acc.flag) {
1396 /* lookup ndlp for received FLOGI */
1397 ndlp = lpfc_findnode_did(vport, 0);
1398 if (!ndlp)
1399 return 0;
1400
1401 did = vport->fc_myDID;
1402 vport->fc_myDID = Fabric_DID;
1403
1404 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1405
1406 if (phba->sli_rev == LPFC_SLI_REV4) {
1407 bf_set(wqe_ctxt_tag,
1408 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
1409 phba->defer_flogi_acc.rx_id);
1410 bf_set(wqe_rcvoxid,
1411 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
1412 phba->defer_flogi_acc.ox_id);
1413 } else {
1414 icmd = &defer_flogi_acc.iocb;
1415 icmd->ulpContext = phba->defer_flogi_acc.rx_id;
1416 icmd->unsli3.rcvsli3.ox_id =
1417 phba->defer_flogi_acc.ox_id;
1418 }
1419
1420 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1421 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1422 " ox_id: x%x, hba_flag x%lx\n",
1423 phba->defer_flogi_acc.rx_id,
1424 phba->defer_flogi_acc.ox_id, phba->hba_flag);
1425
1426 /* Send deferred FLOGI ACC */
1427 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1428 ndlp, NULL);
1429
1430 phba->defer_flogi_acc.flag = false;
1431
1432 /* Decrement the held ndlp that was incremented when the
1433 * deferred flogi acc flag was set.
1434 */
1435 if (phba->defer_flogi_acc.ndlp) {
1436 lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
1437 phba->defer_flogi_acc.ndlp = NULL;
1438 }
1439
1440 vport->fc_myDID = did;
1441 }
1442
1443 return 0;
1444 }
1445
1446 /**
1447 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1448 * @phba: pointer to lpfc hba data structure.
1449 *
1450 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1451 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1452 * list and issues an abort IOCB command on each outstanding IOCB that
1453 * contains an active Fabric_DID ndlp. Note that this function only issues
1454 * the abort command on the outstanding IOCBs; when it returns, there is
1455 * no guarantee that all of those IOCBs have actually been aborted.
1456 *
1457 * Return code
1458 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1459 **/
1460 int
1461 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1462 {
1463 struct lpfc_sli_ring *pring;
1464 struct lpfc_iocbq *iocb, *next_iocb;
1465 struct lpfc_nodelist *ndlp;
1466 u32 ulp_command;
1467
1468 /* Abort outstanding I/O on NPort <nlp_DID> */
1469 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1470 "0201 Abort outstanding I/O on NPort x%x\n",
1471 Fabric_DID);
1472
1473 pring = lpfc_phba_elsring(phba);
1474 if (unlikely(!pring))
1475 return -EIO;
1476
1477 /*
1478 * Check the txcmplq for an iocb that matches the nport the driver is
1479 * searching for.
1480 */
1481 spin_lock_irq(&phba->hbalock);
1482 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1483 ulp_command = get_job_cmnd(phba, iocb);
1484 if (ulp_command == CMD_ELS_REQUEST64_CR) {
1485 ndlp = iocb->ndlp;
1486 if (ndlp && ndlp->nlp_DID == Fabric_DID) {
1487 if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
1488 !test_bit(FC_PT2PT_PLOGI,
1489 &phba->pport->fc_flag))
1490 iocb->fabric_cmd_cmpl =
1491 lpfc_ignore_els_cmpl;
1492 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
1493 NULL);
1494 }
1495 }
1496 }
1497 /* Make sure HBA is alive */
1498 lpfc_issue_hb_tmo(phba);
1499
1500 spin_unlock_irq(&phba->hbalock);
1501
1502 return 0;
1503 }
1504
1505 /**
1506 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1507 * @vport: pointer to a host virtual N_Port data structure.
1508 *
1509 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1510 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1511 * the @vport's ndlp list. If no such ndlp is found, it creates one and puts
1512 * it on the @vport's ndlp list. If an inactive ndlp is found on the list,
1513 * it is simply enabled and made active. The lpfc_issue_els_flogi() routine
1514 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1515 * @vport.
1516 *
1517 * Return code
1518 * 0 - failed to issue initial flogi for @vport
1519 * 1 - successfully issued initial flogi for @vport
1520 **/
1521 int
1522 lpfc_initial_flogi(struct lpfc_vport *vport)
1523 {
1524 struct lpfc_nodelist *ndlp;
1525
1526 vport->port_state = LPFC_FLOGI;
1527 lpfc_set_disctmo(vport);
1528
1529 /* First look for the Fabric ndlp */
1530 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1531 if (!ndlp) {
1532 /* Cannot find existing Fabric ndlp, so allocate a new one */
1533 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1534 if (!ndlp)
1535 return 0;
1536 /* Set the node type */
1537 ndlp->nlp_type |= NLP_FABRIC;
1538
1539 /* Put ndlp onto node list */
1540 lpfc_enqueue_node(vport, ndlp);
1541 }
1542
1543 /* Reset the Fabric flag, topology change may have happened */
1544 clear_bit(FC_FABRIC, &vport->fc_flag);
1545 if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1546 /* A node reference should be retained while registered with a
1547 * transport or dev-loss-evt work is pending.
1548 * Otherwise, decrement node reference to trigger release.
1549 */
1550 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
1551 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
1552 lpfc_nlp_put(ndlp);
1553 return 0;
1554 }
1555 return 1;
1556 }
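/*
 * Editorial note: unlike most kernel interfaces, lpfc_initial_flogi() and
 * lpfc_initial_fdisc() return 1 on success and 0 on failure, so a caller
 * (hypothetical sketch) checks them as:
 *
 *	if (!lpfc_initial_flogi(vport))
 *		... handle failure to start the fabric login ...
 */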
1557
1558 /**
1559 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1560 * @vport: pointer to a host virtual N_Port data structure.
1561 *
1562 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1563 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1564 * the @vport's ndlp list. If no such ndlp is found, it creates one and puts
1565 * it on the @vport's ndlp list. If an inactive ndlp is found on the list,
1566 * it is simply enabled and made active. The lpfc_issue_els_fdisc() routine
1567 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1568 * @vport.
1569 *
1570 * Return code
1571 * 0 - failed to issue initial fdisc for @vport
1572 * 1 - successfully issued initial fdisc for @vport
1573 **/
1574 int
1575 lpfc_initial_fdisc(struct lpfc_vport *vport)
1576 {
1577 struct lpfc_nodelist *ndlp;
1578
1579 /* First look for the Fabric ndlp */
1580 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1581 if (!ndlp) {
1582 /* Cannot find existing Fabric ndlp, so allocate a new one */
1583 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1584 if (!ndlp)
1585 return 0;
1586
1587 /* NPIV is only supported in Fabrics. */
1588 ndlp->nlp_type |= NLP_FABRIC;
1589
1590 /* Put ndlp onto node list */
1591 lpfc_enqueue_node(vport, ndlp);
1592 }
1593
1594 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1595 /* A node reference should be retained while registered with a
1596 * transport or dev-loss-evt work is pending.
1597 * Otherwise, decrement node reference to trigger release.
1598 */
1599 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
1600 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
1601 lpfc_nlp_put(ndlp);
1602 return 0;
1603 }
1604 return 1;
1605 }
1606
1607 /**
1608 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1609 * @vport: pointer to a host virtual N_Port data structure.
1610 *
1611 * This routine checks whether there are more remaining Port Logins
1612 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1613 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1614 * to issue ELS PLOGIs up to the configured discover threads with the
1615 * @vport (@vport->cfg_discovery_threads). The function also decrements
1616 * the @vport's num_disc_nodes by 1 if it is not already 0.
1617 **/
1618 void
1619 lpfc_more_plogi(struct lpfc_vport *vport)
1620 {
1621 if (vport->num_disc_nodes)
1622 vport->num_disc_nodes--;
1623
1624 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1625 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1626 "0232 Continue discovery with %d PLOGIs to go "
1627 "Data: x%x x%lx x%x\n",
1628 vport->num_disc_nodes,
1629 atomic_read(&vport->fc_plogi_cnt),
1630 vport->fc_flag, vport->port_state);
1631 /* Check to see if there are more PLOGIs to be sent */
1632 if (test_bit(FC_NLP_MORE, &vport->fc_flag))
1633 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1634 lpfc_els_disc_plogi(vport);
1635
1636 return;
1637 }
1638
1639 /**
1640 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1641 * @phba: pointer to lpfc hba data structure.
1642 * @prsp: pointer to response IOCB payload.
1643 * @ndlp: pointer to a node-list data structure.
1644 *
1645 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1646 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1647 * The following cases are considered N_Port confirmed:
1648 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and matches
1649 * the WWPN of the N_Port logged into; 3) The @ndlp is not on the vport list but
1650 * does not have a WWPN assigned either. If the WWPN is confirmed, the
1651 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1652 * 1) if there is a node on vport list other than the @ndlp with the same
1653 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1654 * on that node to release the RPI associated with the node; 2) if there is
1655 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1656 * into, a new node shall be allocated (or activated). In either case, the
1657 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1658 * be released and the new_ndlp shall be put on to the vport node list and
1659 * its pointer returned as the confirmed node.
1660 *
1661 * Note that before the @ndlp is "released", the keepDID from the not-matching
1662 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1663 * of the @ndlp. This is because releasing the @ndlp actually puts it
1664 * into an inactive state on the vport node list, and the vport node list
1665 * management algorithm does not allow two nodes with the same DID.
1666 *
1667 * Return code
1668 * pointer to the PLOGI N_Port @ndlp
1669 **/
1670 static struct lpfc_nodelist *
1671 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1672 struct lpfc_nodelist *ndlp)
1673 {
1674 struct lpfc_vport *vport = ndlp->vport;
1675 struct lpfc_nodelist *new_ndlp;
1676 struct serv_parm *sp;
1677 uint8_t name[sizeof(struct lpfc_name)];
1678 uint32_t keepDID = 0, keep_nlp_flag = 0;
1679 int rc;
1680 uint32_t keep_new_nlp_flag = 0;
1681 uint16_t keep_nlp_state;
1682 u32 keep_nlp_fc4_type = 0;
1683 struct lpfc_nvme_rport *keep_nrport = NULL;
1684 unsigned long *active_rrqs_xri_bitmap = NULL;
1685
1686 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1687 memset(name, 0, sizeof(struct lpfc_name));
1688
1689 /* Now we find out if the NPort we are logging into matches the WWPN
1690 * we have for that ndlp. If not, we have some work to do.
1691 */
1692 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1693
1694 /* return immediately if the WWPN matches ndlp */
1695 if (new_ndlp == ndlp)
1696 return ndlp;
1697
1698 if (phba->sli_rev == LPFC_SLI_REV4) {
1699 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1700 GFP_KERNEL);
1701 if (active_rrqs_xri_bitmap)
1702 memset(active_rrqs_xri_bitmap, 0,
1703 phba->cfg_rrq_xri_bitmap_sz);
1704 }
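	/* Scratch bitmap (SLI4 only): preserves new_ndlp's outstanding RRQ XRI
	 * state across the DID swap below so it can be restored onto whichever
	 * node ends up holding keepDID.
	 */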
1705
1706 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1707 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
1708 "new_ndlp x%x x%x x%x\n",
1709 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
1710 (new_ndlp ? new_ndlp->nlp_DID : 0),
1711 (new_ndlp ? new_ndlp->nlp_flag : 0),
1712 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
1713
1714 if (!new_ndlp) {
1715 rc = memcmp(&ndlp->nlp_portname, name,
1716 sizeof(struct lpfc_name));
1717 if (!rc) {
1718 if (active_rrqs_xri_bitmap)
1719 mempool_free(active_rrqs_xri_bitmap,
1720 phba->active_rrq_pool);
1721 return ndlp;
1722 }
1723 new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
1724 if (!new_ndlp) {
1725 if (active_rrqs_xri_bitmap)
1726 mempool_free(active_rrqs_xri_bitmap,
1727 phba->active_rrq_pool);
1728 return ndlp;
1729 }
1730 } else {
1731 if (phba->sli_rev == LPFC_SLI_REV4 &&
1732 active_rrqs_xri_bitmap)
1733 memcpy(active_rrqs_xri_bitmap,
1734 new_ndlp->active_rrqs_xri_bitmap,
1735 phba->cfg_rrq_xri_bitmap_sz);
1736
1737 /*
1738 * Unregister from backend if not done yet. Could have been
1739 * skipped due to ADISC
1740 */
1741 lpfc_nlp_unreg_node(vport, new_ndlp);
1742 }
1743
1744 keepDID = new_ndlp->nlp_DID;
1745
1746 /* At this point in this routine, we know new_ndlp will be
1747 * returned. However, any previous GID_FTs that were done
1748 * would have updated nlp_fc4_type in ndlp, so we must ensure
1749 * new_ndlp has the right value.
1750 */
1751 if (test_bit(FC_FABRIC, &vport->fc_flag)) {
1752 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
1753 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1754 }
1755
1756 lpfc_unreg_rpi(vport, new_ndlp);
1757 new_ndlp->nlp_DID = ndlp->nlp_DID;
1758 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1759 if (phba->sli_rev == LPFC_SLI_REV4)
1760 memcpy(new_ndlp->active_rrqs_xri_bitmap,
1761 ndlp->active_rrqs_xri_bitmap,
1762 phba->cfg_rrq_xri_bitmap_sz);
1763
1764 /* Lock both ndlps */
1765 spin_lock_irq(&ndlp->lock);
1766 spin_lock_irq(&new_ndlp->lock);
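	/* The two nodes now trade nlp_flag values. Flags that track per-node
	 * mailbox/RPI state (NLP_UNREG_INP, NLP_RPI_REGISTERED) and the
	 * initial-reference marker (NLP_DROPPED) must stay with the node they
	 * were originally set on, so they are re-applied from the saved copies
	 * below.
	 */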
1767 keep_new_nlp_flag = new_ndlp->nlp_flag;
1768 keep_nlp_flag = ndlp->nlp_flag;
1769 new_ndlp->nlp_flag = ndlp->nlp_flag;
1770
1771 /* if new_ndlp had NLP_UNREG_INP set, keep it */
1772 if (keep_new_nlp_flag & NLP_UNREG_INP)
1773 new_ndlp->nlp_flag |= NLP_UNREG_INP;
1774 else
1775 new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
1776
1777 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
1778 if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
1779 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1780 else
1781 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1782
1783 /*
1784 * Retain the DROPPED flag. This will take care of the init
1785 * refcount when affecting the state change
1786 */
1787 if (keep_new_nlp_flag & NLP_DROPPED)
1788 new_ndlp->nlp_flag |= NLP_DROPPED;
1789 else
1790 new_ndlp->nlp_flag &= ~NLP_DROPPED;
1791
1792 ndlp->nlp_flag = keep_new_nlp_flag;
1793
1794 /* if ndlp had NLP_UNREG_INP set, keep it */
1795 if (keep_nlp_flag & NLP_UNREG_INP)
1796 ndlp->nlp_flag |= NLP_UNREG_INP;
1797 else
1798 ndlp->nlp_flag &= ~NLP_UNREG_INP;
1799
1800 /* if ndlp had NLP_RPI_REGISTERED set, keep it */
1801 if (keep_nlp_flag & NLP_RPI_REGISTERED)
1802 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1803 else
1804 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1805
1806 /*
1807 * Retain the DROPPED flag. This will take care of the init
1808 * refcount when affecting the state change
1809 */
1810 if (keep_nlp_flag & NLP_DROPPED)
1811 ndlp->nlp_flag |= NLP_DROPPED;
1812 else
1813 ndlp->nlp_flag &= ~NLP_DROPPED;
1814
1815 spin_unlock_irq(&new_ndlp->lock);
1816 spin_unlock_irq(&ndlp->lock);
1817
1818 /* Set nlp_states accordingly */
1819 keep_nlp_state = new_ndlp->nlp_state;
1820 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1821
1822 /* interchange the nvme remoteport structs */
1823 keep_nrport = new_ndlp->nrport;
1824 new_ndlp->nrport = ndlp->nrport;
1825
1826 /* Move this back to NPR state */
1827 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1828 /* The ndlp doesn't have a portname yet, but does have an
1829 * NPort ID. The new_ndlp portname matches the Rport's
1830 * portname. Reinstantiate the new_ndlp and reset the ndlp.
1831 */
1832 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1833 "3179 PLOGI confirm NEW: %x %x\n",
1834 new_ndlp->nlp_DID, keepDID);
1835
1836 /* Two ndlps cannot have the same did on the nodelist.
1837 * The KeepDID and keep_nlp_fc4_type need to be swapped
1838 * because ndlp is inflight with no WWPN.
1839 */
1840 ndlp->nlp_DID = keepDID;
1841 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1842 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1843 if (phba->sli_rev == LPFC_SLI_REV4 &&
1844 active_rrqs_xri_bitmap)
1845 memcpy(ndlp->active_rrqs_xri_bitmap,
1846 active_rrqs_xri_bitmap,
1847 phba->cfg_rrq_xri_bitmap_sz);
1848
1849 } else {
1850 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1851 "3180 PLOGI confirm SWAP: %x %x\n",
1852 new_ndlp->nlp_DID, keepDID);
1853
1854 lpfc_unreg_rpi(vport, ndlp);
1855
1856 /* The ndlp and new_ndlp both have WWPNs but are swapping
1857 * NPort Ids and attributes.
1858 */
1859 ndlp->nlp_DID = keepDID;
1860 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1861
1862 if (phba->sli_rev == LPFC_SLI_REV4 &&
1863 active_rrqs_xri_bitmap)
1864 memcpy(ndlp->active_rrqs_xri_bitmap,
1865 active_rrqs_xri_bitmap,
1866 phba->cfg_rrq_xri_bitmap_sz);
1867
1868 /* Since we are switching over to the new_ndlp,
1869 * reset the old ndlp state
1870 */
1871 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1872 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1873 keep_nlp_state = NLP_STE_NPR_NODE;
1874 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1875 ndlp->nrport = keep_nrport;
1876 }
1877
1878 /*
1879 * If ndlp is not associated with any rport we can drop it here else
1880 * let dev_loss_tmo_callbk trigger DEVICE_RM event
1881 */
1882 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE))
1883 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
1884
1885 if (phba->sli_rev == LPFC_SLI_REV4 &&
1886 active_rrqs_xri_bitmap)
1887 mempool_free(active_rrqs_xri_bitmap,
1888 phba->active_rrq_pool);
1889
1890 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1891 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
1892 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
1893 new_ndlp->nlp_fc4_type);
1894
1895 return new_ndlp;
1896 }
1897
1898 /**
1899 * lpfc_end_rscn - Check and handle more rscn for a vport
1900 * @vport: pointer to a host virtual N_Port data structure.
1901 *
1902 * This routine checks whether more Registration State Change
1903 * Notifications (RSCNs) came in while the discovery state machine was in
1904 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1905 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1906 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1907 * handling the RSCNs.
1908 **/
1909 void
1910 lpfc_end_rscn(struct lpfc_vport *vport)
1911 {
1912
1913 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
1914 /*
1915 * Check to see if more RSCNs came in while we were
1916 * processing this one.
1917 */
1918 if (vport->fc_rscn_id_cnt ||
1919 test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag))
1920 lpfc_els_handle_rscn(vport);
1921 else
1922 clear_bit(FC_RSCN_MODE, &vport->fc_flag);
1923 }
1924 }
1925
1926 /**
1927 * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
1928 * @phba: pointer to lpfc hba data structure.
1929 * @cmdiocb: pointer to lpfc command iocb data structure.
1930 * @rspiocb: pointer to lpfc response iocb data structure.
1931 *
1932 * This routine will call the clear rrq function to free the rrq and
1933 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1934 * exist then the clear_rrq is still called because the rrq needs to
1935 * be freed.
1936 **/
1937
1938 static void
1939 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1940 struct lpfc_iocbq *rspiocb)
1941 {
1942 struct lpfc_vport *vport = cmdiocb->vport;
1943 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
1944 struct lpfc_node_rrq *rrq;
1945 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1946 u32 ulp_word4 = get_job_word4(phba, rspiocb);
1947
1948 /* we pass cmdiocb to state machine which needs rspiocb as well */
1949 rrq = cmdiocb->context_un.rrq;
1950 cmdiocb->rsp_iocb = rspiocb;
1951
1952 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1953 "RRQ cmpl: status:x%x/x%x did:x%x",
1954 ulp_status, ulp_word4,
1955 get_job_els_rsp64_did(phba, cmdiocb));
1956
1957
1958 /* rrq completes to NPort <nlp_DID> */
1959 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1960 "2880 RRQ completes to DID x%x "
1961 "Data: x%x x%x x%x x%x x%x\n",
1962 ndlp->nlp_DID, ulp_status, ulp_word4,
1963 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
1964
1965 if (ulp_status) {
1966 /* Check for retry */
1967 /* Warn RRQ status. Don't print the vport to vport rjts. */
1968 if (ulp_status != IOSTAT_LS_RJT ||
1969 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
1970 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
1971 (phba)->pport->cfg_log_verbose & LOG_ELS)
1972 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
1973 "2881 RRQ DID:%06X Status:"
1974 "x%x/x%x\n",
1975 ndlp->nlp_DID, ulp_status,
1976 ulp_word4);
1977 }
1978
1979 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1980 lpfc_els_free_iocb(phba, cmdiocb);
1981 lpfc_nlp_put(ndlp);
1982 return;
1983 }
1984 /**
1985 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1986 * @phba: pointer to lpfc hba data structure.
1987 * @cmdiocb: pointer to lpfc command iocb data structure.
1988 * @rspiocb: pointer to lpfc response iocb data structure.
1989 *
1990 * This routine is the completion callback function for issuing the Port
1991 * Login (PLOGI) command. For PLOGI completion, there must be an active
1992 * ndlp on the vport node list that matches the remote node ID from the
1993 * PLOGI response IOCB. If such an ndlp does not exist, the PLOGI is simply
1994 * ignored and the command IOCB released. The PLOGI response IOCB status is
1995 * checked for error conditions. If there is error status reported, PLOGI
1996 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1997 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1998 * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State
1999 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
2000 * there are additional N_Port nodes with the vport that need to perform
2001 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
2002 * PLOGIs.
2003 **/
2004 static void
2005 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2006 struct lpfc_iocbq *rspiocb)
2007 {
2008 struct lpfc_vport *vport = cmdiocb->vport;
2009 IOCB_t *irsp;
2010 struct lpfc_nodelist *ndlp, *free_ndlp;
2011 struct lpfc_dmabuf *prsp;
2012 int disc;
2013 struct serv_parm *sp = NULL;
2014 u32 ulp_status, ulp_word4, did, iotag;
2015 bool release_node = false;
2016
2017 /* we pass cmdiocb to state machine which needs rspiocb as well */
2018 cmdiocb->rsp_iocb = rspiocb;
2019
2020 ulp_status = get_job_ulpstatus(phba, rspiocb);
2021 ulp_word4 = get_job_word4(phba, rspiocb);
2022 did = get_job_els_rsp64_did(phba, cmdiocb);
2023
2024 if (phba->sli_rev == LPFC_SLI_REV4) {
2025 iotag = get_wqe_reqtag(cmdiocb);
2026 } else {
2027 irsp = &rspiocb->iocb;
2028 iotag = irsp->ulpIoTag;
2029 }
2030
2031 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2032 "PLOGI cmpl: status:x%x/x%x did:x%x",
2033 ulp_status, ulp_word4, did);
2034
2035 ndlp = lpfc_findnode_did(vport, did);
2036 if (!ndlp) {
2037 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2038 "0136 PLOGI completes to NPort x%x "
2039 "with no ndlp. Data: x%x x%x x%x\n",
2040 did, ulp_status, ulp_word4, iotag);
2041 goto out_freeiocb;
2042 }
2043
2044 /* Since ndlp can be freed in the disc state machine, note if this node
2045 * is being used during discovery.
2046 */
2047 spin_lock_irq(&ndlp->lock);
2048 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2049 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2050 spin_unlock_irq(&ndlp->lock);
2051
2052 /* PLOGI completes to NPort <nlp_DID> */
2053 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2054 "0102 PLOGI completes to NPort x%06x "
2055 "IoTag x%x Data: x%x x%x x%x x%x x%x\n",
2056 ndlp->nlp_DID, iotag,
2057 ndlp->nlp_fc4_type,
2058 ulp_status, ulp_word4,
2059 disc, vport->num_disc_nodes);
2060
2061 /* Check to see if link went down during discovery */
2062 if (lpfc_els_chk_latt(vport)) {
2063 spin_lock_irq(&ndlp->lock);
2064 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2065 spin_unlock_irq(&ndlp->lock);
2066 goto out;
2067 }
2068
2069 if (ulp_status) {
2070 /* Check for retry */
2071 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2072 /* ELS command is being retried */
2073 if (disc) {
2074 spin_lock_irq(&ndlp->lock);
2075 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2076 spin_unlock_irq(&ndlp->lock);
2077 }
2078 goto out;
2079 }
2080 /* Warn PLOGI status. Don't print the vport to vport rjts. */
2081 if (ulp_status != IOSTAT_LS_RJT ||
2082 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
2083 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
2084 (phba)->pport->cfg_log_verbose & LOG_ELS)
2085 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
2086 "2753 PLOGI DID:%06X "
2087 "Status:x%x/x%x\n",
2088 ndlp->nlp_DID, ulp_status,
2089 ulp_word4);
2090
2091 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2092 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
2093 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2094 NLP_EVT_CMPL_PLOGI);
2095
2096 /* If a PLOGI collision occurred, the node needs to continue
2097 * with the reglogin process.
2098 */
2099 spin_lock_irq(&ndlp->lock);
2100 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
2101 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
2102 spin_unlock_irq(&ndlp->lock);
2103 goto out;
2104 }
2105
2106 /* No PLOGI collision and the node is not registered with the
2107 * scsi or nvme transport. It is no longer an active node. Just
2108 * start the device remove process.
2109 */
2110 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2111 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2112 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2113 release_node = true;
2114 }
2115 spin_unlock_irq(&ndlp->lock);
2116
2117 if (release_node)
2118 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2119 NLP_EVT_DEVICE_RM);
2120 } else {
2121 /* Good status, call state machine */
2122 prsp = list_get_first(&cmdiocb->cmd_dmabuf->list,
2123 struct lpfc_dmabuf, list);
2124 if (!prsp)
2125 goto out;
2126 if (!lpfc_is_els_acc_rsp(prsp))
2127 goto out;
2128 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2129
2130 sp = (struct serv_parm *)((u8 *)prsp->virt +
2131 sizeof(u32));
2132
2133 ndlp->vmid_support = 0;
2134 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
2135 (phba->cfg_vmid_priority_tagging &&
2136 sp->cmn.priority_tagging)) {
2137 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS,
2138 "4018 app_hdr_support %d tagging %d DID x%x\n",
2139 sp->cmn.app_hdr_support,
2140 sp->cmn.priority_tagging,
2141 ndlp->nlp_DID);
2142 /* if the dest port supports VMID, mark it in ndlp */
2143 ndlp->vmid_support = 1;
2144 }
2145
2146 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2147 NLP_EVT_CMPL_PLOGI);
2148 }
2149
2150 if (disc && vport->num_disc_nodes) {
2151 /* Check to see if there are more PLOGIs to be sent */
2152 lpfc_more_plogi(vport);
2153
2154 if (vport->num_disc_nodes == 0) {
2155 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
2156
2157 lpfc_can_disctmo(vport);
2158 lpfc_end_rscn(vport);
2159 }
2160 }
2161
2162 out:
2163 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2164 "PLOGI Cmpl PUT: did:x%x refcnt %d",
2165 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2166
2167 out_freeiocb:
2168 /* Release the reference on the original I/O request. */
2169 free_ndlp = cmdiocb->ndlp;
2170
2171 lpfc_els_free_iocb(phba, cmdiocb);
2172 lpfc_nlp_put(free_ndlp);
2173 return;
2174 }
2175
2176 /**
2177 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
2178 * @vport: pointer to a host virtual N_Port data structure.
2179 * @did: destination port identifier.
2180 * @retry: number of retries to the command IOCB.
2181 *
2182 * This routine issues a Port Login (PLOGI) command to a remote N_Port
2183 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
2184 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
2185 * This routine constructs the proper fields of the PLOGI IOCB and invokes
2186 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
2187 *
2188 * Note that the ndlp reference count will be incremented by 1 for holding
2189 * the ndlp and the reference to ndlp will be stored into the ndlp field
2190 * of the IOCB for the completion callback function to the PLOGI ELS command.
2191 *
2192 * Return code
2193 * 0 - Successfully issued a plogi for @vport
2194 * 1 - failed to issue a plogi for @vport
2195 **/
2196 int
2197 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2198 {
2199 struct lpfc_hba *phba = vport->phba;
2200 struct serv_parm *sp;
2201 struct lpfc_nodelist *ndlp;
2202 struct lpfc_iocbq *elsiocb;
2203 uint8_t *pcmd;
2204 uint16_t cmdsize;
2205 int ret;
2206
2207 ndlp = lpfc_findnode_did(vport, did);
2208 if (!ndlp)
2209 return 1;
2210
2211 /* Defer the processing of the issue PLOGI until after the
2212 * outstanding UNREG_RPI mbox command completes, unless we
2213 * are going offline. This logic does not apply for Fabric DIDs
2214 */
2215 if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) &&
2216 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
2217 !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
2218 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2219 "4110 Issue PLOGI x%x deferred "
2220 "on NPort x%x rpi x%x flg x%x Data:"
2221 " x%px\n",
2222 ndlp->nlp_defer_did, ndlp->nlp_DID,
2223 ndlp->nlp_rpi, ndlp->nlp_flag, ndlp);
2224
2225 /* We can only defer 1st PLOGI */
2226 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
2227 ndlp->nlp_defer_did = did;
2228 return 0;
2229 }
2230
2231 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
2232 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2233 ELS_CMD_PLOGI);
2234 if (!elsiocb)
2235 return 1;
2236
2237 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2238
2239 /* For PLOGI request, remainder of payload is service parameters */
2240 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
2241 pcmd += sizeof(uint32_t);
2242 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2243 sp = (struct serv_parm *) pcmd;
2244
2245 /*
2246 * If we are an N_Port connected to a Fabric, fix up parameters so logins
2247 * to devices on remote loops work.
2248 */
2249 if (test_bit(FC_FABRIC, &vport->fc_flag) &&
2250 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
2251 sp->cmn.altBbCredit = 1;
2252
2253 if (sp->cmn.fcphLow < FC_PH_4_3)
2254 sp->cmn.fcphLow = FC_PH_4_3;
2255
2256 if (sp->cmn.fcphHigh < FC_PH3)
2257 sp->cmn.fcphHigh = FC_PH3;
2258
2259 sp->cmn.valid_vendor_ver_level = 0;
2260 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
2261 sp->cmn.bbRcvSizeMsb &= 0xF;
2262
2263 /* Check if the destination port supports VMID */
2264 ndlp->vmid_support = 0;
2265 if (vport->vmid_priority_tagging)
2266 sp->cmn.priority_tagging = 1;
2267 else if (phba->cfg_vmid_app_header &&
2268 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
2269 sp->cmn.app_hdr_support = 1;
2270
2271 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2272 "Issue PLOGI: did:x%x",
2273 did, 0, 0);
2274
2275 /* If our firmware supports this feature, convey that
2276 * information to the target using the vendor specific field.
2277 */
2278 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
2279 sp->cmn.valid_vendor_ver_level = 1;
2280 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
2281 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
2282 }
2283
2284 phba->fc_stat.elsXmitPLOGI++;
2285 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
2286
2287 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2288 "Issue PLOGI: did:x%x refcnt %d",
2289 did, kref_read(&ndlp->kref), 0);
2290 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2291 if (!elsiocb->ndlp) {
2292 lpfc_els_free_iocb(phba, elsiocb);
2293 return 1;
2294 }
2295
2296 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2297 if (ret) {
2298 lpfc_els_free_iocb(phba, elsiocb);
2299 lpfc_nlp_put(ndlp);
2300 return 1;
2301 }
2302
2303 return 0;
2304 }
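/*
 * Illustrative usage sketch (an assumption, not verbatim driver code):
 * discovery paths issue a PLOGI by DID after moving the node into the
 * PLOGI_ISSUE state and treat a non-zero return as a failure to start the
 * exchange, e.g.
 *
 *	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 *	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0))
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 */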
2305
2306 /**
2307 * lpfc_cmpl_els_prli - Completion callback function for prli
2308 * @phba: pointer to lpfc hba data structure.
2309 * @cmdiocb: pointer to lpfc command iocb data structure.
2310 * @rspiocb: pointer to lpfc response iocb data structure.
2311 *
2312 * This routine is the completion callback function for a Process Login
2313 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2314 * status. If there is error status reported, PRLI retry shall be attempted
2315 * by invoking the lpfc_els_retry() routine. Otherwise, the state
2316 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2317 * ndlp to mark the PRLI completion.
2318 **/
2319 static void
2320 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2321 struct lpfc_iocbq *rspiocb)
2322 {
2323 struct lpfc_vport *vport = cmdiocb->vport;
2324 struct lpfc_nodelist *ndlp;
2325 char *mode;
2326 u32 ulp_status;
2327 u32 ulp_word4;
2328 bool release_node = false;
2329
2330 /* we pass cmdiocb to state machine which needs rspiocb as well */
2331 cmdiocb->rsp_iocb = rspiocb;
2332
2333 ndlp = cmdiocb->ndlp;
2334
2335 ulp_status = get_job_ulpstatus(phba, rspiocb);
2336 ulp_word4 = get_job_word4(phba, rspiocb);
2337
2338 spin_lock_irq(&ndlp->lock);
2339 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2340
2341 /* Driver supports multiple FC4 types. Counters matter. */
2342 vport->fc_prli_sent--;
2343 ndlp->fc4_prli_sent--;
2344 spin_unlock_irq(&ndlp->lock);
2345
2346 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2347 "PRLI cmpl: status:x%x/x%x did:x%x",
2348 ulp_status, ulp_word4,
2349 ndlp->nlp_DID);
2350
2351 /* PRLI completes to NPort <nlp_DID> */
2352 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2353 "0103 PRLI completes to NPort x%06x "
2354 "Data: x%x x%x x%x x%x x%x\n",
2355 ndlp->nlp_DID, ulp_status, ulp_word4,
2356 vport->num_disc_nodes, ndlp->fc4_prli_sent,
2357 ndlp->fc4_xpt_flags);
2358
2359 /* Check to see if link went down during discovery */
2360 if (lpfc_els_chk_latt(vport))
2361 goto out;
2362
2363 if (ulp_status) {
2364 /* Check for retry */
2365 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2366 /* ELS command is being retried */
2367 goto out;
2368 }
2369
2370 /* If we don't send GFT_ID to Fabric, a PRLI error
2371 * could be expected.
2372 */
2373 if (test_bit(FC_FABRIC, &vport->fc_flag) ||
2374 vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)
2375 mode = KERN_WARNING;
2376 else
2377 mode = KERN_INFO;
2378
2379 /* Warn PRLI status */
2380 lpfc_printf_vlog(vport, mode, LOG_ELS,
2381 "2754 PRLI DID:%06X Status:x%x/x%x, "
2382 "data: x%x x%x x%x\n",
2383 ndlp->nlp_DID, ulp_status,
2384 ulp_word4, ndlp->nlp_state,
2385 ndlp->fc4_prli_sent, ndlp->nlp_flag);
2386
2387 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2388 if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
2389 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2390 NLP_EVT_CMPL_PRLI);
2391
2392 /* The following condition catches an inflight transition
2393 * mismatch typically caused by an RSCN. Skip any
2394 * processing to allow recovery.
2395 */
2396 if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
2397 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) ||
2398 (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2399 ndlp->nlp_flag & NLP_DELAY_TMO)) {
2400 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
2401 "2784 PRLI cmpl: Allow Node recovery "
2402 "DID x%06x nstate x%x nflag x%x\n",
2403 ndlp->nlp_DID, ndlp->nlp_state,
2404 ndlp->nlp_flag);
2405 goto out;
2406 }
2407
2408 /*
2409 * For P2P topology, retain the node so that PLOGI can be
2410 * attempted on it again.
2411 */
2412 if (test_bit(FC_PT2PT, &vport->fc_flag))
2413 goto out;
2414
2415 /* As long as this node is not registered with the SCSI
2416 * or NVMe transport and no other PRLIs are outstanding,
2417 * it is no longer an active node. Otherwise devloss
2418 * handles the final cleanup.
2419 */
2420 spin_lock_irq(&ndlp->lock);
2421 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
2422 !ndlp->fc4_prli_sent) {
2423 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2424 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2425 release_node = true;
2426 }
2427 spin_unlock_irq(&ndlp->lock);
2428
2429 if (release_node)
2430 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2431 NLP_EVT_DEVICE_RM);
2432 } else {
2433 /* Good status, call state machine. However, if another
2434 * PRLI is outstanding, don't call the state machine
2435 * because final disposition to Mapped or Unmapped is
2436 * completed there.
2437 */
2438 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2439 NLP_EVT_CMPL_PRLI);
2440 }
2441
2442 out:
2443 lpfc_els_free_iocb(phba, cmdiocb);
2444 lpfc_nlp_put(ndlp);
2445 return;
2446 }
2447
2448 /**
2449 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2450 * @vport: pointer to a host virtual N_Port data structure.
2451 * @ndlp: pointer to a node-list data structure.
2452 * @retry: number of retries to the command IOCB.
2453 *
2454 * This routine issues a Process Login (PRLI) ELS command for the
2455 * @vport. The PRLI service parameters are set up in the payload of the
2456 * PRLI Request command and the pointer to the lpfc_cmpl_els_prli() routine
2457 * is put into the IOCB completion callback func field before invoking the
2458 * lpfc_sli_issue_iocb() routine to send out the PRLI command.
2459 *
2460 * Note that the ndlp reference count will be incremented by 1 for holding the
2461 * ndlp and the reference to ndlp will be stored into the ndlp field of
2462 * the IOCB for the completion callback function to the PRLI ELS command.
2463 *
2464 * Return code
2465 * 0 - successfully issued prli iocb command for @vport
2466 * 1 - failed to issue prli iocb command for @vport
2467 **/
2468 int
2469 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2470 uint8_t retry)
2471 {
2472 int rc = 0;
2473 struct lpfc_hba *phba = vport->phba;
2474 PRLI *npr;
2475 struct lpfc_nvme_prli *npr_nvme;
2476 struct lpfc_iocbq *elsiocb;
2477 uint8_t *pcmd;
2478 uint16_t cmdsize;
2479 u32 local_nlp_type, elscmd;
2480
2481 /*
2482 * If we are in RSCN mode, the FC4 types supported from a
2483 * previous GFT_ID command may not be accurate. So, if we
2484 * are an NVME Initiator, always look for the possibility of
2485 * the remote NPort being an NVME Target.
2486 */
2487 if (phba->sli_rev == LPFC_SLI_REV4 &&
2488 test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
2489 vport->nvmei_support)
2490 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
2491 local_nlp_type = ndlp->nlp_fc4_type;
2492
2493 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2494 * fields here before any of them can complete.
2495 */
2496 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2497 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2498 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2499 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2500 ndlp->nvme_fb_size = 0;
2501
2502 send_next_prli:
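	/* One PRLI is built per outstanding FC4 type. On SLI4 rports that
	 * support both FCP and NVME, control jumps back to this label after
	 * the first PRLI has been queued so the second type is sent as well.
	 */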
2503 if (local_nlp_type & NLP_FC4_FCP) {
2504 /* Payload is 4 + 16 = 20 (0x14) bytes. */
2505 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2506 elscmd = ELS_CMD_PRLI;
2507 } else if (local_nlp_type & NLP_FC4_NVME) {
2508 /* Payload is 4 + 20 = 24 (0x18) bytes. */
2509 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2510 elscmd = ELS_CMD_NVMEPRLI;
2511 } else {
2512 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2513 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2514 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2515 return 1;
2516 }
2517
2518 /* SLI3 ports don't support NVME. If this rport is a strict NVME
2519 * FC4 type, implicitly LOGO.
2520 */
2521 if (phba->sli_rev == LPFC_SLI_REV3 &&
2522 ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2523 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2524 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2525 ndlp->nlp_type);
2526 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2527 return 1;
2528 }
2529
2530 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2531 ndlp->nlp_DID, elscmd);
2532 if (!elsiocb)
2533 return 1;
2534
2535 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2536
2537 /* For PRLI request, remainder of payload is service parameters */
2538 memset(pcmd, 0, cmdsize);
2539
2540 if (local_nlp_type & NLP_FC4_FCP) {
2541 /* Remainder of payload is FCP PRLI parameter page.
2542 * Note: this data structure is defined as
2543 * BE/LE in the structure definition so no
2544 * byte swap call is made.
2545 */
2546 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
2547 pcmd += sizeof(uint32_t);
2548 npr = (PRLI *)pcmd;
2549
2550 /*
2551 * If our firmware version is 3.20 or later,
2552 * set the following bits for FC-TAPE support.
2553 */
2554 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2555 npr->ConfmComplAllowed = 1;
2556 npr->Retry = 1;
2557 npr->TaskRetryIdReq = 1;
2558 }
2559 npr->estabImagePair = 1;
2560 npr->readXferRdyDis = 1;
2561 if (vport->cfg_first_burst_size)
2562 npr->writeXferRdyDis = 1;
2563
2564 /* For FCP support */
2565 npr->prliType = PRLI_FCP_TYPE;
2566 npr->initiatorFunc = 1;
2567 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ;
2568
2569 /* Remove FCP type - processed. */
2570 local_nlp_type &= ~NLP_FC4_FCP;
2571 } else if (local_nlp_type & NLP_FC4_NVME) {
2572 /* Remainder of payload is NVME PRLI parameter page.
2573 * This data structure is the newer definition that
2574 * uses bf macros so a byte swap is required.
2575 */
2576 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
2577 pcmd += sizeof(uint32_t);
2578 npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2579 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2580 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
2581 if (phba->nsler) {
2582 bf_set(prli_nsler, npr_nvme, 1);
2583 bf_set(prli_conf, npr_nvme, 1);
2584 }
2585
2586 /* Only initiators request first burst. */
2587 if ((phba->cfg_nvme_enable_fb) &&
2588 !phba->nvmet_support)
2589 bf_set(prli_fba, npr_nvme, 1);
2590
2591 if (phba->nvmet_support) {
2592 bf_set(prli_tgt, npr_nvme, 1);
2593 bf_set(prli_disc, npr_nvme, 1);
2594 } else {
2595 bf_set(prli_init, npr_nvme, 1);
2596 bf_set(prli_conf, npr_nvme, 1);
2597 }
2598
2599 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
2600 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
2601 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ;
2602
2603 /* Remove NVME type - processed. */
2604 local_nlp_type &= ~NLP_FC4_NVME;
2605 }
2606
2607 phba->fc_stat.elsXmitPRLI++;
2608 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
2609
2610 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2611 "Issue PRLI: did:x%x refcnt %d",
2612 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2613 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2614 if (!elsiocb->ndlp) {
2615 lpfc_els_free_iocb(phba, elsiocb);
2616 return 1;
2617 }
2618
2619 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2620 if (rc == IOCB_ERROR) {
2621 lpfc_els_free_iocb(phba, elsiocb);
2622 lpfc_nlp_put(ndlp);
2623 return 1;
2624 }
2625
2626 /* The vport counters are used for lpfc_scan_finished, but
2627 * the ndlp is used to track outstanding PRLIs for different
2628 * FC4 types.
2629 */
2630 spin_lock_irq(&ndlp->lock);
2631 ndlp->nlp_flag |= NLP_PRLI_SND;
2632 vport->fc_prli_sent++;
2633 ndlp->fc4_prli_sent++;
2634 spin_unlock_irq(&ndlp->lock);
2635
2636 /* The driver supports 2 FC4 types. Make sure
2637 * a PRLI is issued for all types before exiting.
2638 */
2639 if (phba->sli_rev == LPFC_SLI_REV4 &&
2640 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
2641 goto send_next_prli;
2642 else
2643 return 0;
2644 }
2645
2646 /**
2647 * lpfc_rscn_disc - Perform rscn discovery for a vport
2648 * @vport: pointer to a host virtual N_Port data structure.
2649 *
2650 * This routine performs Registration State Change Notification (RSCN)
2651 * discovery for a @vport. If the @vport's node port recovery count is not
2652 * zero, it will invoke lpfc_els_disc_plogi() to perform PLOGI for all
2653 * the nodes that need recovery. If no PLOGIs were needed through
2654 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2655 * invoked to check for and handle any additional RSCNs that came in while
2656 * the current ones were being processed.
2657 **/
2658 static void
2659 lpfc_rscn_disc(struct lpfc_vport *vport)
2660 {
2661 lpfc_can_disctmo(vport);
2662
2663 /* RSCN discovery */
2664 /* go thru NPR nodes and issue ELS PLOGIs */
2665 if (atomic_read(&vport->fc_npr_cnt))
2666 if (lpfc_els_disc_plogi(vport))
2667 return;
2668
2669 lpfc_end_rscn(vport);
2670 }
2671
2672 /**
2673 * lpfc_adisc_done - Complete the adisc phase of discovery
2674 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2675 *
2676 * This function is called when the final ADISC is completed during discovery.
2677 * This function handles clearing link attention or issuing reg_vpi depending
2678 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2679 * discovery.
2680 * This function is called with no locks held.
2681 **/
2682 static void
2683 lpfc_adisc_done(struct lpfc_vport *vport)
2684 {
2685 struct lpfc_hba *phba = vport->phba;
2686
2687 /*
2688 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2689 * and continue discovery.
2690 */
2691 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2692 !test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
2693 (phba->sli_rev < LPFC_SLI_REV4)) {
2694
2695 /*
2696 * If link is down, clear_la and reg_vpi will be done after
2697 * flogi following a link up event
2698 */
2699 if (!lpfc_is_link_up(phba))
2700 return;
2701
2702 /* The ADISCs are complete. Doesn't matter if they
2703 * succeeded or failed because the ADISC completion
2704 * routine guarantees to call the state machine and
2705 * the RPI is either unregistered (failed ADISC response)
2706 * or the RPI is still valid and the node is marked
2707 * mapped for a target. The exchanges should be in the
2708 * correct state. This code is specific to SLI3.
2709 */
2710 lpfc_issue_clear_la(phba, vport);
2711 lpfc_issue_reg_vpi(phba, vport);
2712 return;
2713 }
2714 /*
2715 * For SLI2, we need to set port_state to READY
2716 * and continue discovery.
2717 */
2718 if (vport->port_state < LPFC_VPORT_READY) {
2719 /* If we get here, there is nothing to ADISC */
2720 lpfc_issue_clear_la(phba, vport);
2721 if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) {
2722 vport->num_disc_nodes = 0;
2723 /* go thru NPR list, issue ELS PLOGIs */
2724 if (atomic_read(&vport->fc_npr_cnt))
2725 lpfc_els_disc_plogi(vport);
2726 if (!vport->num_disc_nodes) {
2727 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
2728 lpfc_can_disctmo(vport);
2729 lpfc_end_rscn(vport);
2730 }
2731 }
2732 vport->port_state = LPFC_VPORT_READY;
2733 } else
2734 lpfc_rscn_disc(vport);
2735 }
2736
2737 /**
2738 * lpfc_more_adisc - Issue more adisc as needed
2739 * @vport: pointer to a host virtual N_Port data structure.
2740 *
2741 * This routine determines whether there are more ndlps on a @vport's
2742 * node list that need to have Address Discover (ADISC) issued. If so, it will
2743 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2744 * remaining nodes which need to have ADISC sent.
2745 **/
2746 void
2747 lpfc_more_adisc(struct lpfc_vport *vport)
2748 {
2749 if (vport->num_disc_nodes)
2750 vport->num_disc_nodes--;
2751 /* Continue discovery with <num_disc_nodes> ADISCs to go */
2752 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2753 "0210 Continue discovery with %d ADISCs to go "
2754 "Data: x%x x%lx x%x\n",
2755 vport->num_disc_nodes,
2756 atomic_read(&vport->fc_adisc_cnt),
2757 vport->fc_flag, vport->port_state);
2758 /* Check to see if there are more ADISCs to be sent */
2759 if (test_bit(FC_NLP_MORE, &vport->fc_flag)) {
2760 lpfc_set_disctmo(vport);
2761 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2762 lpfc_els_disc_adisc(vport);
2763 }
2764 if (!vport->num_disc_nodes)
2765 lpfc_adisc_done(vport);
2766 return;
2767 }
2768
2769 /**
2770 * lpfc_cmpl_els_adisc - Completion callback function for adisc
2771 * @phba: pointer to lpfc hba data structure.
2772 * @cmdiocb: pointer to lpfc command iocb data structure.
2773 * @rspiocb: pointer to lpfc response iocb data structure.
2774 *
2775 * This routine is the completion function for issuing the Address Discover
2776 * (ADISC) command. It first checks to see whether link went down during
2777 * the discovery process. If so, the node will be marked for node port
2778 * recovery, for the link attention handler to reissue discovery, and the
2779 * routine exits. Otherwise, the response status is checked. If error was reported
2780 * in the response status, the ADISC command shall be retried by invoking
2781 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2782 * the response status, the state machine is invoked to set transition
2783 * with respect to NLP_EVT_CMPL_ADISC event.
2784 **/
2785 static void
2786 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2787 struct lpfc_iocbq *rspiocb)
2788 {
2789 struct lpfc_vport *vport = cmdiocb->vport;
2790 IOCB_t *irsp;
2791 struct lpfc_nodelist *ndlp;
2792 int disc;
2793 u32 ulp_status, ulp_word4, tmo, iotag;
2794 bool release_node = false;
2795
2796 /* we pass cmdiocb to state machine which needs rspiocb as well */
2797 cmdiocb->rsp_iocb = rspiocb;
2798
2799 ndlp = cmdiocb->ndlp;
2800
2801 ulp_status = get_job_ulpstatus(phba, rspiocb);
2802 ulp_word4 = get_job_word4(phba, rspiocb);
2803
2804 if (phba->sli_rev == LPFC_SLI_REV4) {
2805 tmo = get_wqe_tmo(cmdiocb);
2806 iotag = get_wqe_reqtag(cmdiocb);
2807 } else {
2808 irsp = &rspiocb->iocb;
2809 tmo = irsp->ulpTimeout;
2810 iotag = irsp->ulpIoTag;
2811 }
2812
2813 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2814 "ADISC cmpl: status:x%x/x%x did:x%x",
2815 ulp_status, ulp_word4,
2816 ndlp->nlp_DID);
2817
2818 /* Since ndlp can be freed in the disc state machine, note if this node
2819 * is being used during discovery.
2820 */
2821 spin_lock_irq(&ndlp->lock);
2822 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2823 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2824 spin_unlock_irq(&ndlp->lock);
2825 /* ADISC completes to NPort <nlp_DID> */
2826 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2827 "0104 ADISC completes to NPort x%x "
2828 "IoTag x%x Data: x%x x%x x%x x%x x%x\n",
2829 ndlp->nlp_DID, iotag,
2830 ulp_status, ulp_word4,
2831 tmo, disc, vport->num_disc_nodes);
2832
2833 /* Check to see if link went down during discovery */
2834 if (lpfc_els_chk_latt(vport)) {
2835 spin_lock_irq(&ndlp->lock);
2836 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2837 spin_unlock_irq(&ndlp->lock);
2838 goto out;
2839 }
2840
2841 if (ulp_status) {
2842 /* Check for retry */
2843 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2844 /* ELS command is being retried */
2845 if (disc) {
2846 spin_lock_irq(&ndlp->lock);
2847 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2848 spin_unlock_irq(&ndlp->lock);
2849 lpfc_set_disctmo(vport);
2850 }
2851 goto out;
2852 }
2853 /* Warn ADISC status */
2854 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
2855 "2755 ADISC DID:%06X Status:x%x/x%x\n",
2856 ndlp->nlp_DID, ulp_status,
2857 ulp_word4);
2858 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2859 NLP_EVT_CMPL_ADISC);
2860
2861 /* As long as this node is not registered with the SCSI or NVMe
2862 * transport, it is no longer an active node. Otherwise
2863 * devloss handles the final cleanup.
2864 */
2865 spin_lock_irq(&ndlp->lock);
2866 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2867 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2868 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2869 release_node = true;
2870 }
2871 spin_unlock_irq(&ndlp->lock);
2872
2873 if (release_node)
2874 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2875 NLP_EVT_DEVICE_RM);
2876 } else
2877 /* Good status, call state machine */
2878 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2879 NLP_EVT_CMPL_ADISC);
2880
2881 /* Check to see if there are more ADISCs to be sent */
2882 if (disc && vport->num_disc_nodes)
2883 lpfc_more_adisc(vport);
2884 out:
2885 lpfc_els_free_iocb(phba, cmdiocb);
2886 lpfc_nlp_put(ndlp);
2887 return;
2888 }
2889
2890 /**
2891 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2892 * @vport: pointer to a virtual N_Port data structure.
2893 * @ndlp: pointer to a node-list data structure.
2894 * @retry: number of retries to the command IOCB.
2895 *
2896 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2897 * @vport. It prepares the payload of the ADISC ELS command, updates the
2898 * flags and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2899 * to issue the ADISC ELS command.
2900 *
2901 * Note that the ndlp reference count will be incremented by 1 for holding the
2902 * ndlp and the reference to ndlp will be stored into the ndlp field of
2903 * the IOCB for the completion callback function to the ADISC ELS command.
2904 *
2905 * Return code
2906 * 0 - successfully issued adisc
2907 * 1 - failed to issue adisc
2908 **/
2909 int
2910 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2911 uint8_t retry)
2912 {
2913 int rc = 0;
2914 struct lpfc_hba *phba = vport->phba;
2915 ADISC *ap;
2916 struct lpfc_iocbq *elsiocb;
2917 uint8_t *pcmd;
2918 uint16_t cmdsize;
2919
2920 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2921 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2922 ndlp->nlp_DID, ELS_CMD_ADISC);
2923 if (!elsiocb)
2924 return 1;
2925
2926 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2927
2928 /* For ADISC request, remainder of payload is service parameters */
2929 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2930 pcmd += sizeof(uint32_t);
2931
2932 /* Fill in ADISC payload */
2933 ap = (ADISC *) pcmd;
2934 ap->hardAL_PA = phba->fc_pref_ALPA;
2935 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2936 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2937 ap->DID = be32_to_cpu(vport->fc_myDID);
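	/* The ADISC payload carries the local WWPN, WWNN and N_Port ID so the
	 * remote port can verify that its cached address information for this
	 * port is still valid.
	 */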
2938
2939 phba->fc_stat.elsXmitADISC++;
2940 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
2941 spin_lock_irq(&ndlp->lock);
2942 ndlp->nlp_flag |= NLP_ADISC_SND;
2943 spin_unlock_irq(&ndlp->lock);
2944 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2945 if (!elsiocb->ndlp) {
2946 lpfc_els_free_iocb(phba, elsiocb);
2947 goto err;
2948 }
2949
2950 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2951 "Issue ADISC: did:x%x refcnt %d",
2952 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2953
2954 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2955 if (rc == IOCB_ERROR) {
2956 lpfc_els_free_iocb(phba, elsiocb);
2957 lpfc_nlp_put(ndlp);
2958 goto err;
2959 }
2960
2961 return 0;
2962
2963 err:
2964 spin_lock_irq(&ndlp->lock);
2965 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2966 spin_unlock_irq(&ndlp->lock);
2967 return 1;
2968 }
2969
2970 /**
2971 * lpfc_cmpl_els_logo - Completion callback function for logo
2972 * @phba: pointer to lpfc hba data structure.
2973 * @cmdiocb: pointer to lpfc command iocb data structure.
2974 * @rspiocb: pointer to lpfc response iocb data structure.
2975 *
2976 * This routine is the completion function for issuing the ELS Logout (LOGO)
2977 * command. If no error status was reported from the LOGO response, the
2978 * state machine of the associated ndlp shall be invoked for transition with
2979 * respect to NLP_EVT_CMPL_LOGO event.
2980 **/
2981 static void
2982 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2983 struct lpfc_iocbq *rspiocb)
2984 {
2985 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
2986 struct lpfc_vport *vport = ndlp->vport;
2987 IOCB_t *irsp;
2988 unsigned long flags;
2989 uint32_t skip_recovery = 0;
2990 int wake_up_waiter = 0;
2991 u32 ulp_status;
2992 u32 ulp_word4;
2993 u32 tmo, iotag;
2994
2995 /* we pass cmdiocb to state machine which needs rspiocb as well */
2996 cmdiocb->rsp_iocb = rspiocb;
2997
2998 ulp_status = get_job_ulpstatus(phba, rspiocb);
2999 ulp_word4 = get_job_word4(phba, rspiocb);
3000
3001 if (phba->sli_rev == LPFC_SLI_REV4) {
3002 tmo = get_wqe_tmo(cmdiocb);
3003 iotag = get_wqe_reqtag(cmdiocb);
3004 } else {
3005 irsp = &rspiocb->iocb;
3006 tmo = irsp->ulpTimeout;
3007 iotag = irsp->ulpIoTag;
3008 }
3009
3010 spin_lock_irq(&ndlp->lock);
3011 ndlp->nlp_flag &= ~NLP_LOGO_SND;
3012 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
3013 wake_up_waiter = 1;
3014 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
3015 }
3016 spin_unlock_irq(&ndlp->lock);
3017
3018 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3019 "LOGO cmpl: status:x%x/x%x did:x%x",
3020 ulp_status, ulp_word4,
3021 ndlp->nlp_DID);
3022
3023 /* LOGO completes to NPort <nlp_DID> */
3024 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3025 "0105 LOGO completes to NPort x%x "
3026 "IoTag x%x refcnt %d nflags x%x xflags x%x "
3027 "Data: x%x x%x x%x x%x\n",
3028 ndlp->nlp_DID, iotag,
3029 kref_read(&ndlp->kref), ndlp->nlp_flag,
3030 ndlp->fc4_xpt_flags, ulp_status, ulp_word4,
3031 tmo, vport->num_disc_nodes);
3032
3033 if (lpfc_els_chk_latt(vport)) {
3034 skip_recovery = 1;
3035 goto out;
3036 }
3037
3038 /* The LOGO will not be retried on failure. A LOGO was
3039 * issued to the remote rport and an ACC, an RJT, or no answer are
3040 * all acceptable. Note the failure and move forward with
3041 * discovery. The PLOGI will retry.
3042 */
3043 if (ulp_status) {
3044 /* Warn LOGO status */
3045 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
3046 "2756 LOGO, No Retry DID:%06X "
3047 "Status:x%x/x%x\n",
3048 ndlp->nlp_DID, ulp_status,
3049 ulp_word4);
3050
3051 if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
3052 skip_recovery = 1;
3053 }
3054
3055 /* Call state machine. This will unregister the rpi if needed. */
3056 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
3057
3058 if (skip_recovery)
3059 goto out;
3060
3061 /* The driver sets this flag for an NPIV instance that doesn't want to
3062 * log into the remote port.
3063 */
3064 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
3065 spin_lock_irq(&ndlp->lock);
3066 if (phba->sli_rev == LPFC_SLI_REV4)
3067 ndlp->nlp_flag |= NLP_RELEASE_RPI;
3068 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3069 spin_unlock_irq(&ndlp->lock);
3070 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3071 NLP_EVT_DEVICE_RM);
3072 goto out_rsrc_free;
3073 }
3074
3075 out:
3076 /* At this point, the LOGO processing is complete. NOTE: For a
3077 * pt2pt topology, we are assuming the NPortID will only change
3078 * on link up processing. For a LOGO / PLOGI initiated by the
3079 * Initiator, we are assuming the NPortID is not going to change.
3080 */
3081
3082 if (wake_up_waiter && ndlp->logo_waitq)
3083 wake_up(ndlp->logo_waitq);
3084 /*
3085 * If the node is a target, the handling attempts to recover the port.
3086 * For any other port type, the rpi is unregistered as an implicit
3087 * LOGO.
3088 */
3089 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3090 skip_recovery == 0) {
3091 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3092 spin_lock_irqsave(&ndlp->lock, flags);
3093 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3094 spin_unlock_irqrestore(&ndlp->lock, flags);
3095
3096 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3097 "3187 LOGO completes to NPort x%x: Start "
3098 "Recovery Data: x%x x%x x%x x%x\n",
3099 ndlp->nlp_DID, ulp_status,
3100 ulp_word4, tmo,
3101 vport->num_disc_nodes);
3102
3103 lpfc_els_free_iocb(phba, cmdiocb);
3104 lpfc_nlp_put(ndlp);
3105
3106 lpfc_disc_start(vport);
3107 return;
3108 }
3109
3110 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3111 * driver sends a LOGO to the rport to clean up. For fabric and
3112 * initiator ports, clean up the node as long as the node is not
3113 * registered with the transport.
3114 */
3115 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3116 spin_lock_irq(&ndlp->lock);
3117 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3118 spin_unlock_irq(&ndlp->lock);
3119 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3120 NLP_EVT_DEVICE_RM);
3121 }
3122 out_rsrc_free:
3123 /* Driver is done with the I/O. */
3124 lpfc_els_free_iocb(phba, cmdiocb);
3125 lpfc_nlp_put(ndlp);
3126 }
3127
3128 /**
3129 * lpfc_issue_els_logo - Issue a logo to a node on a vport
3130 * @vport: pointer to a virtual N_Port data structure.
3131 * @ndlp: pointer to a node-list data structure.
3132 * @retry: number of retries to the command IOCB.
3133 *
3134 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3135 * to a remote node, referred by an @ndlp on a @vport. It constructs the
3136 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3137 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3138 *
3139 * Note that the ndlp reference count will be incremented by 1 for holding the
3140 * ndlp and the reference to ndlp will be stored into the ndlp field of
3141 * the IOCB for the completion callback function to the LOGO ELS command.
3142 *
3143 * Callers of this routine are expected to unregister the RPI first
3144 *
3145 * Return code
3146 * 0 - successfully issued logo
3147 * 1 - failed to issue logo
3148 **/
3149 int
3150 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3151 uint8_t retry)
3152 {
3153 struct lpfc_hba *phba = vport->phba;
3154 struct lpfc_iocbq *elsiocb;
3155 uint8_t *pcmd;
3156 uint16_t cmdsize;
3157 int rc;
3158
3159 spin_lock_irq(&ndlp->lock);
3160 if (ndlp->nlp_flag & NLP_LOGO_SND) {
3161 spin_unlock_irq(&ndlp->lock);
3162 return 0;
3163 }
3164 spin_unlock_irq(&ndlp->lock);
3165
3166 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
3167 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3168 ndlp->nlp_DID, ELS_CMD_LOGO);
3169 if (!elsiocb)
3170 return 1;
3171
3172 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3173 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
3174 pcmd += sizeof(uint32_t);
3175
3176 /* Fill in LOGO payload */
3177 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
3178 pcmd += sizeof(uint32_t);
3179 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
3180
3181 phba->fc_stat.elsXmitLOGO++;
3182 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
3183 spin_lock_irq(&ndlp->lock);
3184 ndlp->nlp_flag |= NLP_LOGO_SND;
3185 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
3186 spin_unlock_irq(&ndlp->lock);
3187 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3188 if (!elsiocb->ndlp) {
3189 lpfc_els_free_iocb(phba, elsiocb);
3190 goto err;
3191 }
3192
3193 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3194 "Issue LOGO: did:x%x refcnt %d",
3195 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3196
3197 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3198 if (rc == IOCB_ERROR) {
3199 lpfc_els_free_iocb(phba, elsiocb);
3200 lpfc_nlp_put(ndlp);
3201 goto err;
3202 }
3203
3204 spin_lock_irq(&ndlp->lock);
3205 ndlp->nlp_prev_state = ndlp->nlp_state;
3206 spin_unlock_irq(&ndlp->lock);
3207 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3208 return 0;
3209
3210 err:
3211 spin_lock_irq(&ndlp->lock);
3212 ndlp->nlp_flag &= ~NLP_LOGO_SND;
3213 spin_unlock_irq(&ndlp->lock);
3214 return 1;
3215 }
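/*
 * Illustrative sketch (an assumption, not a verbatim caller): a path that must
 * wait for the LOGO to complete can park on a wait queue that
 * lpfc_cmpl_els_logo() wakes once NLP_WAIT_FOR_LOGO is cleared, e.g.
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 *
 *	ndlp->logo_waitq = &waitq;
 *	spin_lock_irq(&ndlp->lock);
 *	ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
 *	spin_unlock_irq(&ndlp->lock);
 *	if (!lpfc_issue_els_logo(vport, ndlp, 0))
 *		wait_event_timeout(waitq,
 *				   !(ndlp->save_flags & NLP_WAIT_FOR_LOGO),
 *				   msecs_to_jiffies(30 * 1000));
 *	ndlp->logo_waitq = NULL;
 */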
3216
3217 /**
3218 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
3219 * @phba: pointer to lpfc hba data structure.
3220 * @cmdiocb: pointer to lpfc command iocb data structure.
3221 * @rspiocb: pointer to lpfc response iocb data structure.
3222 *
3223 * This routine is a generic completion callback function for ELS commands.
3224 * Specifically, it is the callback function which does not need to perform
3225 * any command specific operations. It is currently used by the ELS command
3226 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
3227 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
3228 * Other than certain debug logging, this callback function simply invokes the
3229 * lpfc_els_chk_latt() routine to check whether link went down during the
3230 * discovery process.
3231 **/
3232 static void
3233 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3234 struct lpfc_iocbq *rspiocb)
3235 {
3236 struct lpfc_vport *vport = cmdiocb->vport;
3237 struct lpfc_nodelist *free_ndlp;
3238 IOCB_t *irsp;
3239 u32 ulp_status, ulp_word4, tmo, did, iotag;
3240
3241 ulp_status = get_job_ulpstatus(phba, rspiocb);
3242 ulp_word4 = get_job_word4(phba, rspiocb);
3243 did = get_job_els_rsp64_did(phba, cmdiocb);
3244
3245 if (phba->sli_rev == LPFC_SLI_REV4) {
3246 tmo = get_wqe_tmo(cmdiocb);
3247 iotag = get_wqe_reqtag(cmdiocb);
3248 } else {
3249 irsp = &rspiocb->iocb;
3250 tmo = irsp->ulpTimeout;
3251 iotag = irsp->ulpIoTag;
3252 }
3253
3254 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3255 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3256 ulp_status, ulp_word4, did);
3257
3258 /* ELS cmd tag <ulpIoTag> completes */
3259 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3260 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
3261 iotag, ulp_status, ulp_word4, tmo);
3262
3263 /* Check to see if link went down during discovery */
3264 lpfc_els_chk_latt(vport);
3265
3266 free_ndlp = cmdiocb->ndlp;
3267
3268 lpfc_els_free_iocb(phba, cmdiocb);
3269 lpfc_nlp_put(free_ndlp);
3270 }
3271
3272 /**
3273 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
3274 * @vport: pointer to lpfc_vport data structure.
3275 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
3276 *
3277 * This routine registers the rpi assigned to the fabric controller
3278 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED
3279 * state triggering a registration with the SCSI transport.
3280 *
3281 * This routine is singled out because the fabric controller node
3282 * does not receive a PLOGI. This routine is consumed by the
3283 * SCR and RDF ELS commands. Callers are expected to qualify
3284 * with SLI4 first.
3285 **/
3286 static int
3287 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
3288 {
3289 int rc = 0;
3290 struct lpfc_hba *phba = vport->phba;
3291 struct lpfc_nodelist *ns_ndlp;
3292 LPFC_MBOXQ_t *mbox;
3293
3294 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
3295 return rc;
3296
3297 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
3298 if (!ns_ndlp)
3299 return -ENODEV;
3300
3301 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3302 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n",
3303 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID,
3304 ns_ndlp->nlp_state);
3305 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
3306 return -ENODEV;
3307
3308 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3309 if (!mbox) {
3310 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3311 "0936 %s: no memory for reg_login "
3312 "Data: x%x x%x x%x x%x\n", __func__,
3313 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3314 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3315 return -ENOMEM;
3316 }
3317 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
3318 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
3319 if (rc) {
3320 rc = -EACCES;
3321 goto out;
3322 }
3323
3324 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
3325 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
3326 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
3327 if (!mbox->ctx_ndlp) {
3328 rc = -ENOMEM;
3329 goto out;
3330 }
3331
3332 mbox->vport = vport;
3333 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3334 if (rc == MBX_NOT_FINISHED) {
3335 rc = -ENODEV;
3336 lpfc_nlp_put(fc_ndlp);
3337 goto out;
3338 }
3339 /* Success path. Exit. */
3340 lpfc_nlp_set_state(vport, fc_ndlp,
3341 NLP_STE_REG_LOGIN_ISSUE);
3342 return 0;
3343
3344 out:
3345 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
3346 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3347 "0938 %s: failed to format reg_login "
3348 "Data: x%x x%x x%x x%x\n", __func__,
3349 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3350 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3351 return rc;
3352 }
3353
3354 /**
3355 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
3356 * @phba: pointer to lpfc hba data structure.
3357 * @cmdiocb: pointer to lpfc command iocb data structure.
3358 * @rspiocb: pointer to lpfc response iocb data structure.
3359 *
3360 * This routine is a generic completion callback function for Discovery ELS cmd.
3361 * Currently used by the ELS command issuing routines for the ELS State Change
3362 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
3363 * These commands will be retried once only for ELS timeout errors.
3364 **/
3365 static void
3366 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3367 struct lpfc_iocbq *rspiocb)
3368 {
3369 struct lpfc_vport *vport = cmdiocb->vport;
3370 IOCB_t *irsp;
3371 struct lpfc_els_rdf_rsp *prdf;
3372 struct lpfc_dmabuf *pcmd, *prsp;
3373 u32 *pdata;
3374 u32 cmd;
3375 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
3376 u32 ulp_status, ulp_word4, tmo, did, iotag;
3377
3378 ulp_status = get_job_ulpstatus(phba, rspiocb);
3379 ulp_word4 = get_job_word4(phba, rspiocb);
3380 did = get_job_els_rsp64_did(phba, cmdiocb);
3381
3382 if (phba->sli_rev == LPFC_SLI_REV4) {
3383 tmo = get_wqe_tmo(cmdiocb);
3384 iotag = get_wqe_reqtag(cmdiocb);
3385 } else {
3386 irsp = &rspiocb->iocb;
3387 tmo = irsp->ulpTimeout;
3388 iotag = irsp->ulpIoTag;
3389 }
3390
3391 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3392 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3393 ulp_status, ulp_word4, did);
3394
3395 /* ELS cmd tag <ulpIoTag> completes */
3396 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3397 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
3398 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
3399
3400 pcmd = cmdiocb->cmd_dmabuf;
3401 if (!pcmd)
3402 goto out;
3403
3404 pdata = (u32 *)pcmd->virt;
3405 if (!pdata)
3406 goto out;
3407 cmd = *pdata;
3408
3409 /* Only 1 retry for ELS Timeout only */
3410 if (ulp_status == IOSTAT_LOCAL_REJECT &&
3411 ((ulp_word4 & IOERR_PARAM_MASK) ==
3412 IOERR_SEQUENCE_TIMEOUT)) {
3413 cmdiocb->retry++;
3414 if (cmdiocb->retry <= 1) {
3415 switch (cmd) {
3416 case ELS_CMD_SCR:
3417 lpfc_issue_els_scr(vport, cmdiocb->retry);
3418 break;
3419 case ELS_CMD_EDC:
3420 lpfc_issue_els_edc(vport, cmdiocb->retry);
3421 break;
3422 case ELS_CMD_RDF:
3423 lpfc_issue_els_rdf(vport, cmdiocb->retry);
3424 break;
3425 }
3426 goto out;
3427 }
3428 phba->fc_stat.elsRetryExceeded++;
3429 }
3430 if (cmd == ELS_CMD_EDC) {
3431 /* must be called before checking ulp_status and returning */
3432 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
3433 return;
3434 }
3435 if (ulp_status) {
3436 /* ELS discovery cmd completes with error */
3437 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
3438 "4203 ELS cmd x%x error: x%x x%X\n", cmd,
3439 ulp_status, ulp_word4);
3440 goto out;
3441 }
3442
3443 /* The RDF response doesn't have any impact on the running driver
3444 * but the notification descriptors are dumped here for support.
3445 */
3446 if (cmd == ELS_CMD_RDF) {
3447 int i;
3448
3449 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3450 if (!prsp)
3451 goto out;
3452
3453 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
3454 if (!prdf)
3455 goto out;
3456 if (!lpfc_is_els_acc_rsp(prsp))
3457 goto out;
3458
3459 for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
3460 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
3461 lpfc_printf_vlog(vport, KERN_INFO,
3462 LOG_ELS | LOG_CGN_MGMT,
3463 "4677 Fabric RDF Notification Grant "
3464 "Data: 0x%08x Reg: %x %x\n",
3465 be32_to_cpu(
3466 prdf->reg_d1.desc_tags[i]),
3467 phba->cgn_reg_signal,
3468 phba->cgn_reg_fpin);
3469 }
3470
3471 out:
3472 /* Check to see if link went down during discovery */
3473 lpfc_els_chk_latt(vport);
3474 lpfc_els_free_iocb(phba, cmdiocb);
3475 lpfc_nlp_put(ndlp);
3476 return;
3477 }
3478
3479 /**
3480 * lpfc_issue_els_scr - Issue an SCR to a node on a vport
3481 * @vport: pointer to a host virtual N_Port data structure.
3482 * @retry: retry counter for the command IOCB.
3483 *
3484 * This routine issues a State Change Request (SCR) to a fabric node
3485 * on a @vport. The remote node is Fabric Controller (0xfffffd). It
3486 * first searches the @vport node list to find the matching ndlp. If no such
3487 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
3488 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
3489 * routine is invoked to send the SCR IOCB.
3490 *
3491 * Note that the ndlp reference count will be incremented by 1 for holding the
3492 * ndlp and the reference to ndlp will be stored into the ndlp field of
3493 * the IOCB for the completion callback function to the SCR ELS command.
3494 *
3495 * Return code
3496 * 0 - Successfully issued scr command
3497 * 1 - Failed to issue scr command
3498 **/
3499 int
3500 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
3501 {
3502 int rc = 0;
3503 struct lpfc_hba *phba = vport->phba;
3504 struct lpfc_iocbq *elsiocb;
3505 uint8_t *pcmd;
3506 uint16_t cmdsize;
3507 struct lpfc_nodelist *ndlp;
3508
3509 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
3510
3511 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3512 if (!ndlp) {
3513 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3514 if (!ndlp)
3515 return 1;
3516 lpfc_enqueue_node(vport, ndlp);
3517 }
3518
3519 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3520 ndlp->nlp_DID, ELS_CMD_SCR);
3521 if (!elsiocb)
3522 return 1;
3523
3524 if (phba->sli_rev == LPFC_SLI_REV4) {
3525 rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
3526 if (rc) {
3527 lpfc_els_free_iocb(phba, elsiocb);
3528 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3529 "0937 %s: Failed to reg fc node, rc %d\n",
3530 __func__, rc);
3531 return 1;
3532 }
3533 }
3534 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3535
3536 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
3537 pcmd += sizeof(uint32_t);
3538
3539 /* For SCR, remainder of payload is SCR parameter page */
3540 memset(pcmd, 0, sizeof(SCR));
3541 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
3542
3543 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3544 "Issue SCR: did:x%x",
3545 ndlp->nlp_DID, 0, 0);
3546
3547 phba->fc_stat.elsXmitSCR++;
3548 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
3549 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3550 if (!elsiocb->ndlp) {
3551 lpfc_els_free_iocb(phba, elsiocb);
3552 return 1;
3553 }
3554
3555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3556 "Issue SCR: did:x%x refcnt %d",
3557 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3558
3559 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3560 if (rc == IOCB_ERROR) {
3561 lpfc_els_free_iocb(phba, elsiocb);
3562 lpfc_nlp_put(ndlp);
3563 return 1;
3564 }
3565
3566 return 0;
3567 }
3568
3569 /**
3570 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
3571 * or the other nport (pt2pt).
3572 * @vport: pointer to a host virtual N_Port data structure.
3573 * @retry: number of retries to the command IOCB.
3574 *
3575 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD)
3576 * when connected to a fabric, or to the remote port when connected
3577 * in point-to-point mode. When sent to the Fabric Controller, it will
3578 * replay the RSCN to registered recipients.
3579 *
3580 * Note that the ndlp reference count will be incremented by 1 for holding the
3581 * ndlp and the reference to ndlp will be stored into the ndlp field of
3582 * the IOCB for the completion callback function to the RSCN ELS command.
3583 *
3584 * Return code
3585 * 0 - Successfully issued RSCN command
3586 * 1 - Failed to issue RSCN command
3587 **/
3588 int
3589 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
3590 {
3591 int rc = 0;
3592 struct lpfc_hba *phba = vport->phba;
3593 struct lpfc_iocbq *elsiocb;
3594 struct lpfc_nodelist *ndlp;
3595 struct {
3596 struct fc_els_rscn rscn;
3597 struct fc_els_rscn_page portid;
3598 } *event;
3599 uint32_t nportid;
3600 uint16_t cmdsize = sizeof(*event);
3601
3602 /* Not supported for private loop */
3603 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
3604 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
3605 return 1;
3606
3607 if (test_bit(FC_PT2PT, &vport->fc_flag)) {
3608 /* find any mapped nport - that would be the other nport */
3609 ndlp = lpfc_findnode_mapped(vport);
3610 if (!ndlp)
3611 return 1;
3612 } else {
3613 nportid = FC_FID_FCTRL;
3614 /* find the fabric controller node */
3615 ndlp = lpfc_findnode_did(vport, nportid);
3616 if (!ndlp) {
3617 /* if one didn't exist, make one */
3618 ndlp = lpfc_nlp_init(vport, nportid);
3619 if (!ndlp)
3620 return 1;
3621 lpfc_enqueue_node(vport, ndlp);
3622 }
3623 }
3624
3625 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3626 ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
3627
3628 if (!elsiocb)
3629 return 1;
3630
3631 event = elsiocb->cmd_dmabuf->virt;
3632
3633 event->rscn.rscn_cmd = ELS_RSCN;
3634 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
3635 event->rscn.rscn_plen = cpu_to_be16(cmdsize);
3636
3637 nportid = vport->fc_myDID;
3638 /* appears that page flags must be 0 for fabric to broadcast RSCN */
3639 event->portid.rscn_page_flags = 0;
3640 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
3641 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
3642 event->portid.rscn_fid[2] = nportid & 0x000000FF;
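/*
 * Worked example (illustrative only): for a hypothetical local DID of
 * 0x012345, the page built above carries rscn_fid[0] = 0x01,
 * rscn_fid[1] = 0x23 and rscn_fid[2] = 0x45, i.e. the 24-bit N_Port ID
 * laid out in big-endian byte order as required on the wire.
 */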
3643
3644 phba->fc_stat.elsXmitRSCN++;
3645 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3646 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3647 if (!elsiocb->ndlp) {
3648 lpfc_els_free_iocb(phba, elsiocb);
3649 return 1;
3650 }
3651
3652 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3653 "Issue RSCN: did:x%x",
3654 ndlp->nlp_DID, 0, 0);
3655
3656 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3657 if (rc == IOCB_ERROR) {
3658 lpfc_els_free_iocb(phba, elsiocb);
3659 lpfc_nlp_put(ndlp);
3660 return 1;
3661 }
3662
3663 return 0;
3664 }
3665
3666 /**
3667 * lpfc_issue_els_farpr - Issue a FARPR to a node on a vport
3668 * @vport: pointer to a host virtual N_Port data structure.
3669 * @nportid: N_Port identifier to the remote node.
3670 * @retry: number of retries to the command IOCB.
3671 *
3672 * This routine issues a Fibre Channel Address Resolution Response
3673 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3674 * is passed into the function. It first searches the @vport node list to find
3675 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3676 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3677 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3678 *
3679 * Note that the ndlp reference count will be incremented by 1 for holding the
3680 * ndlp and the reference to ndlp will be stored into the ndlp field of
3681 * the IOCB for the completion callback function to the FARPR ELS command.
3682 *
3683 * Return code
3684 * 0 - Successfully issued farpr command
3685 * 1 - Failed to issue farpr command
3686 **/
3687 static int
3688 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3689 {
3690 int rc = 0;
3691 struct lpfc_hba *phba = vport->phba;
3692 struct lpfc_iocbq *elsiocb;
3693 FARP *fp;
3694 uint8_t *pcmd;
3695 uint32_t *lp;
3696 uint16_t cmdsize;
3697 struct lpfc_nodelist *ondlp;
3698 struct lpfc_nodelist *ndlp;
3699
3700 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3701
3702 ndlp = lpfc_findnode_did(vport, nportid);
3703 if (!ndlp) {
3704 ndlp = lpfc_nlp_init(vport, nportid);
3705 if (!ndlp)
3706 return 1;
3707 lpfc_enqueue_node(vport, ndlp);
3708 }
3709
3710 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3711 ndlp->nlp_DID, ELS_CMD_FARPR);
3712 if (!elsiocb)
3713 return 1;
3714
3715 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3716
3717 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3718 pcmd += sizeof(uint32_t);
3719
3720 /* Fill in FARPR payload */
3721 fp = (FARP *) (pcmd);
3722 memset(fp, 0, sizeof(FARP));
3723 lp = (uint32_t *) pcmd;
3724 *lp++ = be32_to_cpu(nportid);
3725 *lp++ = be32_to_cpu(vport->fc_myDID);
3726 fp->Rflags = 0;
3727 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3728
3729 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3730 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3731 ondlp = lpfc_findnode_did(vport, nportid);
3732 if (ondlp) {
3733 memcpy(&fp->OportName, &ondlp->nlp_portname,
3734 sizeof(struct lpfc_name));
3735 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3736 sizeof(struct lpfc_name));
3737 }
3738
3739 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3740 "Issue FARPR: did:x%x",
3741 ndlp->nlp_DID, 0, 0);
3742
3743 phba->fc_stat.elsXmitFARPR++;
3744 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3745 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3746 if (!elsiocb->ndlp) {
3747 lpfc_els_free_iocb(phba, elsiocb);
3748 return 1;
3749 }
3750
3751 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3752 if (rc == IOCB_ERROR) {
3753 /* The additional lpfc_nlp_put will cause the following
3754 * lpfc_els_free_iocb routine to trigger the release of
3755 * the node.
3756 */
3757 lpfc_els_free_iocb(phba, elsiocb);
3758 lpfc_nlp_put(ndlp);
3759 return 1;
3760 }
3761 /* This will cause the callback-function lpfc_cmpl_els_cmd to
3762 * trigger the release of the node.
3763 */
3764 /* Don't release reference count as RDF is likely outstanding */
3765 return 0;
3766 }
3767
3768 /**
3769 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
3770 * @vport: pointer to a host virtual N_Port data structure.
3771 * @retry: retry counter for the command IOCB.
3772 *
3773 * This routine issues an ELS RDF to the Fabric Controller to register
3774 * for diagnostic functions.
3775 *
3776 * Note that the ndlp reference count will be incremented by 1 for holding the
3777 * ndlp and the reference to ndlp will be stored into the ndlp field of
3778 * the IOCB for the completion callback function to the RDF ELS command.
3779 *
3780 * Return code
3781 * 0 - Successfully issued rdf command
3782 * 1 - Failed to issue rdf command
3783 **/
3784 int
3785 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
3786 {
3787 struct lpfc_hba *phba = vport->phba;
3788 struct lpfc_iocbq *elsiocb;
3789 struct lpfc_els_rdf_req *prdf;
3790 struct lpfc_nodelist *ndlp;
3791 uint16_t cmdsize;
3792 int rc;
3793
3794 cmdsize = sizeof(*prdf);
3795
3796 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3797 if (!ndlp) {
3798 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3799 if (!ndlp)
3800 return -ENODEV;
3801 lpfc_enqueue_node(vport, ndlp);
3802 }
3803
3804 /* RDF ELS is not required on an NPIV VN_Port. */
3805 if (vport->port_type == LPFC_NPIV_PORT)
3806 return -EACCES;
3807
3808 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3809 ndlp->nlp_DID, ELS_CMD_RDF);
3810 if (!elsiocb)
3811 return -ENOMEM;
3812
3813 /* Configure the payload for the supported FPIN events. */
3814 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
3815 memset(prdf, 0, cmdsize);
3816 prdf->rdf.fpin_cmd = ELS_RDF;
3817 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
3818 sizeof(struct fc_els_rdf));
3819 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
3820 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
3821 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
3822 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
3823 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
3824 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
3825 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
3826 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
3827
3828 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3829 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
3830 ndlp->nlp_DID, phba->cgn_reg_signal,
3831 phba->cgn_reg_fpin);
3832
3833 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
3834 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
3835 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3836 if (!elsiocb->ndlp) {
3837 lpfc_els_free_iocb(phba, elsiocb);
3838 return -EIO;
3839 }
3840
3841 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3842 "Issue RDF: did:x%x refcnt %d",
3843 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3844
3845 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3846 if (rc == IOCB_ERROR) {
3847 lpfc_els_free_iocb(phba, elsiocb);
3848 lpfc_nlp_put(ndlp);
3849 return -EIO;
3850 }
3851 return 0;
3852 }
3853
3854 /**
3855 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
3856 * @vport: pointer to a host virtual N_Port data structure.
3857 * @cmdiocb: pointer to lpfc command iocb data structure.
3858 * @ndlp: pointer to a node-list data structure.
3859 *
3860 * A received RDF implies a possible change to fabric supported diagnostic
3861 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
3862 * RDF request to reregister for supported diagnostic functions.
3863 *
3864 * Return code
3865 * 0 - Success
3866 * -EIO - Failed to process received RDF
3867 **/
3868 static int
3869 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3870 struct lpfc_nodelist *ndlp)
3871 {
3872 /* Send LS_ACC */
3873 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3874 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3875 "1623 Failed to RDF_ACC from x%x for x%x\n",
3876 ndlp->nlp_DID, vport->fc_myDID);
3877 return -EIO;
3878 }
3879
3880 /* Issue new RDF for reregistering */
3881 if (lpfc_issue_els_rdf(vport, 0)) {
3882 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3883 "2623 Failed to re register RDF for x%x\n",
3884 vport->fc_myDID);
3885 return -EIO;
3886 }
3887
3888 return 0;
3889 }
3890
3891 /**
3892 * lpfc_least_capable_settings - helper function for EDC rsp processing
3893 * @phba: pointer to lpfc hba data structure.
3894 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3895 *
3896 * This helper routine determines the least capable setting for
3897 * congestion signals, signal freq, including scale, from the
3898 * congestion detection descriptor in the EDC rsp. The routine
3899 * sets @phba values in preparation for a set_features mailbox.
3900 **/
3901 static void
3902 lpfc_least_capable_settings(struct lpfc_hba *phba,
3903 struct fc_diag_cg_sig_desc *pcgd)
3904 {
3905 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3906 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3907
3908 /* Get rsp signal and frequency capabilities. */
3909 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3910 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3911 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3912
3913 /* If the Fport does not support signals, set FPIN only */
3914 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3915 goto out_no_support;
3916
3917 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3918 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3919 * to milliseconds.
3920 */
3921 switch (rsp_sig_freq_scale) {
3922 case EDC_CG_SIGFREQ_SEC:
3923 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3924 break;
3925 case EDC_CG_SIGFREQ_MSEC:
3926 rsp_sig_freq_cyc = 1;
3927 break;
3928 default:
3929 goto out_no_support;
3930 }
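/*
 * Worked example (illustrative only): a response advertising a count of 5
 * in EDC_CG_SIGFREQ_SEC units becomes 5 * MSEC_PER_SEC = 5000 ms above,
 * while a response already in EDC_CG_SIGFREQ_MSEC units is treated as a
 * 1 ms cycle; any other scale is handled as "no signal support".
 */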
3931
3932 /* Convenient shorthand. */
3933 drv_sig_cap = phba->cgn_reg_signal;
3934
3935 /* Choose the least capable frequency. */
3936 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3937 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3938
3939 /* There should be some common signal support. Settle on the least capable
3940 * signal and adjust FPIN values. Initialize defaults to ease the
3941 * decision.
3942 */
3943 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
3944 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
3945 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY &&
3946 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY ||
3947 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) {
3948 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
3949 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
3950 }
3951 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) {
3952 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) {
3953 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM;
3954 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE;
3955 }
3956 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) {
3957 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
3958 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
3959 }
3960 }
3961
3962 /* We are NOT recording signal frequency in congestion info buffer */
3963 return;
3964
3965 out_no_support:
3966 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
3967 phba->cgn_sig_freq = 0;
3968 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
3969 }
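/*
 * Summary of the negotiation above (illustrative): if the F_Port replies
 * WARN_ONLY and the driver supports WARN_ONLY or WARN_ALARM, the port
 * settles on WARN_ONLY signals and keeps FPIN alarms; only when both sides
 * support WARN_ALARM are all FPINs replaced by hardware signals. Any other
 * combination falls back to FPIN-only operation.
 */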
3970
3971 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
3972 FC_LS_TLV_DTAG_INIT);
3973
3974 /**
3975 * lpfc_cmpl_els_edc - Completion callback function for EDC
3976 * @phba: pointer to lpfc hba data structure.
3977 * @cmdiocb: pointer to lpfc command iocb data structure.
3978 * @rspiocb: pointer to lpfc response iocb data structure.
3979 *
3980 * This routine is the completion callback function for issuing the Exchange
3981 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to
3982 * notify the FPort of its Congestion and Link Fault capabilities. This
3983 * routine parses the FPort's response and decides on the least common
3984 * values applicable to both FPort and NPort for Warnings and Alarms that
3985 * are communicated via hardware signals.
3986 **/
3987 static void
3988 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3989 struct lpfc_iocbq *rspiocb)
3990 {
3991 IOCB_t *irsp_iocb;
3992 struct fc_els_edc_resp *edc_rsp;
3993 struct fc_tlv_desc *tlv;
3994 struct fc_diag_cg_sig_desc *pcgd;
3995 struct fc_diag_lnkflt_desc *plnkflt;
3996 struct lpfc_dmabuf *pcmd, *prsp;
3997 const char *dtag_nm;
3998 u32 *pdata, dtag;
3999 int desc_cnt = 0, bytes_remain;
4000 bool rcv_cap_desc = false;
4001 struct lpfc_nodelist *ndlp;
4002 u32 ulp_status, ulp_word4, tmo, did, iotag;
4003
4004 ndlp = cmdiocb->ndlp;
4005
4006 ulp_status = get_job_ulpstatus(phba, rspiocb);
4007 ulp_word4 = get_job_word4(phba, rspiocb);
4008 did = get_job_els_rsp64_did(phba, rspiocb);
4009
4010 if (phba->sli_rev == LPFC_SLI_REV4) {
4011 tmo = get_wqe_tmo(rspiocb);
4012 iotag = get_wqe_reqtag(rspiocb);
4013 } else {
4014 irsp_iocb = &rspiocb->iocb;
4015 tmo = irsp_iocb->ulpTimeout;
4016 iotag = irsp_iocb->ulpIoTag;
4017 }
4018
4019 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
4020 "EDC cmpl: status:x%x/x%x did:x%x",
4021 ulp_status, ulp_word4, did);
4022
4023 /* ELS cmd tag <ulpIoTag> completes */
4024 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4025 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
4026 iotag, ulp_status, ulp_word4, tmo);
4027
4028 pcmd = cmdiocb->cmd_dmabuf;
4029 if (!pcmd)
4030 goto out;
4031
4032 pdata = (u32 *)pcmd->virt;
4033 if (!pdata)
4034 goto out;
4035
4036 /* Need to clear signal values, send features MB and RDF with FPIN. */
4037 if (ulp_status)
4038 goto out;
4039
4040 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
4041 if (!prsp)
4042 goto out;
4043
4044 edc_rsp = prsp->virt;
4045 if (!edc_rsp)
4046 goto out;
4047
4048 /* ELS cmd tag <ulpIoTag> completes */
4049 lpfc_printf_log(phba, KERN_INFO,
4050 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
4051 "4676 Fabric EDC Rsp: "
4052 "0x%02x, 0x%08x\n",
4053 edc_rsp->acc_hdr.la_cmd,
4054 be32_to_cpu(edc_rsp->desc_list_len));
4055
4056 if (!lpfc_is_els_acc_rsp(prsp))
4057 goto out;
4058
4059 /*
4060 * Payload length in bytes is the response descriptor list
4061 * length minus the 12 bytes of Link Service Request
4062 * Information descriptor in the reply.
4063 */
4064 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) -
4065 sizeof(struct fc_els_lsri_desc);
4066 if (bytes_remain <= 0)
4067 goto out;
4068
4069 tlv = edc_rsp->desc;
4070
4071 /*
4072 * cycle through EDC diagnostic descriptors to find the
4073 * congestion signaling capability descriptor
4074 */
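/*
 * Descriptor layout assumed by the walk below (per fc_els.h): each TLV
 * starts with a 4-byte tag and a 4-byte length, where the length excludes
 * the 8-byte header itself, so FC_TLV_DESC_SZ_FROM_LENGTH(tlv) evaluates
 * to be32_to_cpu(tlv->desc_len) + FC_TLV_DESC_HDR_SZ and
 * fc_tlv_next_desc() advances by that amount.
 */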
4075 while (bytes_remain) {
4076 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
4077 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
4078 "6461 Truncated TLV hdr on "
4079 "Diagnostic descriptor[%d]\n",
4080 desc_cnt);
4081 goto out;
4082 }
4083
4084 dtag = be32_to_cpu(tlv->desc_tag);
4085 switch (dtag) {
4086 case ELS_DTAG_LNK_FAULT_CAP:
4087 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
4088 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
4089 sizeof(struct fc_diag_lnkflt_desc)) {
4090 lpfc_printf_log(phba, KERN_WARNING,
4091 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
4092 "6462 Truncated Link Fault Diagnostic "
4093 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
4094 desc_cnt, bytes_remain,
4095 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
4096 sizeof(struct fc_diag_lnkflt_desc));
4097 goto out;
4098 }
4099 plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
4100 lpfc_printf_log(phba, KERN_INFO,
4101 LOG_ELS | LOG_LDS_EVENT,
4102 "4617 Link Fault Desc Data: 0x%08x 0x%08x "
4103 "0x%08x 0x%08x 0x%08x\n",
4104 be32_to_cpu(plnkflt->desc_tag),
4105 be32_to_cpu(plnkflt->desc_len),
4106 be32_to_cpu(
4107 plnkflt->degrade_activate_threshold),
4108 be32_to_cpu(
4109 plnkflt->degrade_deactivate_threshold),
4110 be32_to_cpu(plnkflt->fec_degrade_interval));
4111 break;
4112 case ELS_DTAG_CG_SIGNAL_CAP:
4113 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
4114 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
4115 sizeof(struct fc_diag_cg_sig_desc)) {
4116 lpfc_printf_log(
4117 phba, KERN_WARNING, LOG_CGN_MGMT,
4118 "6463 Truncated Cgn Signal Diagnostic "
4119 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
4120 desc_cnt, bytes_remain,
4121 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
4122 sizeof(struct fc_diag_cg_sig_desc));
4123 goto out;
4124 }
4125
4126 pcgd = (struct fc_diag_cg_sig_desc *)tlv;
4127 lpfc_printf_log(
4128 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4129 "4616 CGN Desc Data: 0x%08x 0x%08x "
4130 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n",
4131 be32_to_cpu(pcgd->desc_tag),
4132 be32_to_cpu(pcgd->desc_len),
4133 be32_to_cpu(pcgd->xmt_signal_capability),
4134 be16_to_cpu(pcgd->xmt_signal_frequency.count),
4135 be16_to_cpu(pcgd->xmt_signal_frequency.units),
4136 be32_to_cpu(pcgd->rcv_signal_capability),
4137 be16_to_cpu(pcgd->rcv_signal_frequency.count),
4138 be16_to_cpu(pcgd->rcv_signal_frequency.units));
4139
4140 /* Compare driver and Fport capabilities and choose
4141 * least common.
4142 */
4143 lpfc_least_capable_settings(phba, pcgd);
4144 rcv_cap_desc = true;
4145 break;
4146 default:
4147 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
4148 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
4149 "4919 unknown Diagnostic "
4150 "Descriptor[%d]: tag x%x (%s)\n",
4151 desc_cnt, dtag, dtag_nm);
4152 }
4153
4154 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
4155 tlv = fc_tlv_next_desc(tlv);
4156 desc_cnt++;
4157 }
4158
4159 out:
4160 if (!rcv_cap_desc) {
4161 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
4162 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4163 phba->cgn_sig_freq = 0;
4164 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
4165 "4202 EDC rsp error - sending RDF "
4166 "for FPIN only.\n");
4167 }
4168
4169 lpfc_config_cgn_signal(phba);
4170
4171 /* Check to see if link went down during discovery */
4172 lpfc_els_chk_latt(phba->pport);
4173 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
4174 "EDC Cmpl: did:x%x refcnt %d",
4175 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4176 lpfc_els_free_iocb(phba, cmdiocb);
4177 lpfc_nlp_put(ndlp);
4178 }
4179
4180 static void
4181 lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
4182 {
4183 struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv;
4184
4185 lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP);
4186 lft->desc_len = cpu_to_be32(
4187 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc));
4188
4189 lft->degrade_activate_threshold =
4190 cpu_to_be32(phba->degrade_activate_threshold);
4191 lft->degrade_deactivate_threshold =
4192 cpu_to_be32(phba->degrade_deactivate_threshold);
4193 lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval);
4194 }
4195
4196 static void
4197 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
4198 {
4199 struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv;
4200
4201 /* We are assuming cgd was zeroed before calling this routine */
4202
4203 /* Configure the congestion detection capability */
4204 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP);
4205
4206 /* Descriptor len doesn't include the tag or len fields. */
4207 cgd->desc_len = cpu_to_be32(
4208 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc));
4209
4210 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
4211 * xmt_signal_frequency.count already set to 0.
4212 * xmt_signal_frequency.units already set to 0.
4213 */
4214
4215 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
4216 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
4217 * rcv_signal_frequency.count already set to 0.
4218 * rcv_signal_frequency.units already set to 0.
4219 */
4220 phba->cgn_sig_freq = 0;
4221 return;
4222 }
4223 switch (phba->cgn_reg_signal) {
4224 case EDC_CG_SIG_WARN_ONLY:
4225 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY);
4226 break;
4227 case EDC_CG_SIG_WARN_ALARM:
4228 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM);
4229 break;
4230 default:
4231 /* rcv_signal_capability left 0 thus no support */
4232 break;
4233 }
4234
4235 /* We start negotiation with lpfc_fabric_cgn_frequency, after
4236 * the completion we settle on the higher frequency.
4237 */
4238 cgd->rcv_signal_frequency.count =
4239 cpu_to_be16(lpfc_fabric_cgn_frequency);
4240 cgd->rcv_signal_frequency.units =
4241 cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
4242 }
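/*
 * Illustrative result (not driver code): for a port registered for
 * EDC_CG_SIG_WARN_ALARM with lpfc_fabric_cgn_frequency of, say, 100, the
 * descriptor built above reports tag ELS_DTAG_CG_SIGNAL_CAP, a receive
 * capability of WARN_ALARM, and a receive frequency of 100 in
 * EDC_CG_SIGFREQ_MSEC units, while the transmit fields stay zero
 * (not supported).
 */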
4243
4244 static bool
4245 lpfc_link_is_lds_capable(struct lpfc_hba *phba)
4246 {
4247 if (!(phba->lmt & LMT_64Gb))
4248 return false;
4249 if (phba->sli_rev != LPFC_SLI_REV4)
4250 return false;
4251
4252 if (phba->sli4_hba.conf_trunk) {
4253 if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
4254 return true;
4255 } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
4256 return true;
4257 }
4258 return false;
4259 }
4260
4261 /**
4262 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
4263 * @vport: pointer to a host virtual N_Port data structure.
4264 * @retry: retry counter for the command iocb.
4265 *
4266 * This routine issues an ELS EDC to the F-Port Controller to communicate
4267 * this N_Port's support of hardware signals in its Congestion
4268 * Capabilities Descriptor.
4269 *
4270 * Note: This routine does not check if one or more signals are
4271 * set in the cgn_reg_signal parameter. The caller makes the
4272 * decision to enforce cgn_reg_signal as nonzero or zero depending
4273 * on the conditions. During Fabric requests, the driver
4274 * requires cgn_reg_signals to be nonzero. But a dynamic request
4275 * to set the congestion mode to OFF from Monitor or Manage
4276 * would correctly issue an EDC with no signals enabled to
4277 * turn off switch functionality and then update the FW.
4278 *
4279 * Return code
4280 * 0 - Successfully issued edc command
4281 * 1 - Failed to issue edc command
4282 **/
4283 int
4284 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
4285 {
4286 struct lpfc_hba *phba = vport->phba;
4287 struct lpfc_iocbq *elsiocb;
4288 struct fc_els_edc *edc_req;
4289 struct fc_tlv_desc *tlv;
4290 u16 cmdsize;
4291 struct lpfc_nodelist *ndlp;
4292 u8 *pcmd = NULL;
4293 u32 cgn_desc_size, lft_desc_size;
4294 int rc;
4295
4296 if (vport->port_type == LPFC_NPIV_PORT)
4297 return -EACCES;
4298
4299 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4300 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
4301 return -ENODEV;
4302
4303 cgn_desc_size = (phba->cgn_init_reg_signal) ?
4304 sizeof(struct fc_diag_cg_sig_desc) : 0;
4305 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
4306 sizeof(struct fc_diag_lnkflt_desc) : 0;
4307 cmdsize = cgn_desc_size + lft_desc_size;
4308
4309 /* Skip EDC if no applicable descriptors */
4310 if (!cmdsize)
4311 goto try_rdf;
4312
4313 cmdsize += sizeof(struct fc_els_edc);
4314 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
4315 ndlp->nlp_DID, ELS_CMD_EDC);
4316 if (!elsiocb)
4317 goto try_rdf;
4318
4319 /* Configure the payload for the supported Diagnostics capabilities. */
4320 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
4321 memset(pcmd, 0, cmdsize);
4322 edc_req = (struct fc_els_edc *)pcmd;
4323 edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
4324 edc_req->edc_cmd = ELS_EDC;
4325 tlv = edc_req->desc;
4326
4327 if (cgn_desc_size) {
4328 lpfc_format_edc_cgn_desc(phba, tlv);
4329 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
4330 tlv = fc_tlv_next_desc(tlv);
4331 }
4332
4333 if (lft_desc_size)
4334 lpfc_format_edc_lft_desc(phba, tlv);
4335
4336 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4337 "4623 Xmit EDC to remote "
4338 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
4339 ndlp->nlp_DID, phba->cgn_reg_signal,
4340 phba->cgn_reg_fpin);
4341
4342 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
4343 elsiocb->ndlp = lpfc_nlp_get(ndlp);
4344 if (!elsiocb->ndlp) {
4345 lpfc_els_free_iocb(phba, elsiocb);
4346 return -EIO;
4347 }
4348
4349 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4350 "Issue EDC: did:x%x refcnt %d",
4351 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4352 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4353 if (rc == IOCB_ERROR) {
4354 /* The additional lpfc_nlp_put will cause the following
4355 * lpfc_els_free_iocb routine to trigger the release of
4356 * the node.
4357 */
4358 lpfc_els_free_iocb(phba, elsiocb);
4359 lpfc_nlp_put(ndlp);
4360 goto try_rdf;
4361 }
4362 return 0;
4363 try_rdf:
4364 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
4365 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4366 rc = lpfc_issue_els_rdf(vport, 0);
4367 return rc;
4368 }
4369
4370 /**
4371 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
4372 * @vport: pointer to a host virtual N_Port data structure.
4373 * @nlp: pointer to a node-list data structure.
4374 *
4375 * This routine cancels the timer with a delayed IOCB-command retry for
4376 * a @vport's @ndlp. It stops the timer for the delayed function retrial and
4377 * removes the ELS retry event if one is pending. In addition, if the
4378 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
4379 * commands are sent for the @vport's nodes that require issuing discovery
4380 * ADISC.
4381 **/
4382 void
4383 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
4384 {
4385 struct lpfc_work_evt *evtp;
4386
4387 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
4388 return;
4389 spin_lock_irq(&nlp->lock);
4390 nlp->nlp_flag &= ~NLP_DELAY_TMO;
4391 spin_unlock_irq(&nlp->lock);
4392 del_timer_sync(&nlp->nlp_delayfunc);
4393 nlp->nlp_last_elscmd = 0;
4394 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
4395 list_del_init(&nlp->els_retry_evt.evt_listp);
4396 /* Decrement nlp reference count held for the delayed retry */
4397 evtp = &nlp->els_retry_evt;
4398 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
4399 }
4400 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
4401 spin_lock_irq(&nlp->lock);
4402 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4403 spin_unlock_irq(&nlp->lock);
4404 if (vport->num_disc_nodes) {
4405 if (vport->port_state < LPFC_VPORT_READY) {
4406 /* Check if there are more ADISCs to be sent */
4407 lpfc_more_adisc(vport);
4408 } else {
4409 /* Check if there are more PLOGIs to be sent */
4410 lpfc_more_plogi(vport);
4411 if (vport->num_disc_nodes == 0) {
4412 clear_bit(FC_NDISC_ACTIVE,
4413 &vport->fc_flag);
4414 lpfc_can_disctmo(vport);
4415 lpfc_end_rscn(vport);
4416 }
4417 }
4418 }
4419 }
4420 return;
4421 }
4422
4423 /**
4424 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
4425 * @t: pointer to the timer function associated data (ndlp).
4426 *
4427 * This routine is invoked by the ndlp delayed-function timer to check
4428 * whether there is any pending ELS retry event(s) with the node. If not, it
4429 * simply returns. Otherwise, if there is at least one ELS delayed event, it
4430 * adds the delayed events to the HBA work list and invokes the
4431 * lpfc_worker_wake_up() routine to wake up worker thread to process the
4432 * event. Note that lpfc_nlp_get() is called before posting the event to
4433 * the work list to hold a reference count on the ndlp, which guarantees the
4434 * reference to the ndlp will still be available when the worker thread gets
4435 * to the event associated with the ndlp.
4436 **/
4437 void
4438 lpfc_els_retry_delay(struct timer_list *t)
4439 {
4440 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
4441 struct lpfc_vport *vport = ndlp->vport;
4442 struct lpfc_hba *phba = vport->phba;
4443 unsigned long flags;
4444 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
4445
4446 /* Hold a node reference for outstanding queued work */
4447 if (!lpfc_nlp_get(ndlp))
4448 return;
4449
4450 spin_lock_irqsave(&phba->hbalock, flags);
4451 if (!list_empty(&evtp->evt_listp)) {
4452 spin_unlock_irqrestore(&phba->hbalock, flags);
4453 lpfc_nlp_put(ndlp);
4454 return;
4455 }
4456
4457 evtp->evt_arg1 = ndlp;
4458 evtp->evt = LPFC_EVT_ELS_RETRY;
4459 list_add_tail(&evtp->evt_listp, &phba->work_list);
4460 spin_unlock_irqrestore(&phba->hbalock, flags);
4461
4462 lpfc_worker_wake_up(phba);
4463 }
4464
4465 /**
4466 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
4467 * @ndlp: pointer to a node-list data structure.
4468 *
4469 * This routine is the worker-thread handler for processing the @ndlp delayed
4470 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
4471 * the last ELS command from the associated ndlp and invokes the proper ELS
4472 * function according to the delayed ELS command to retry the command.
4473 **/
4474 void
4475 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
4476 {
4477 struct lpfc_vport *vport = ndlp->vport;
4478 uint32_t cmd, retry;
4479
4480 spin_lock_irq(&ndlp->lock);
4481 cmd = ndlp->nlp_last_elscmd;
4482 ndlp->nlp_last_elscmd = 0;
4483
4484 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
4485 spin_unlock_irq(&ndlp->lock);
4486 return;
4487 }
4488
4489 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4490 spin_unlock_irq(&ndlp->lock);
4491 /*
4492 * If a discovery event re-added nlp_delayfunc after the timer
4493 * firing and before processing the timer, cancel the
4494 * nlp_delayfunc.
4495 */
4496 del_timer_sync(&ndlp->nlp_delayfunc);
4497 retry = ndlp->nlp_retry;
4498 ndlp->nlp_retry = 0;
4499
4500 switch (cmd) {
4501 case ELS_CMD_FLOGI:
4502 lpfc_issue_els_flogi(vport, ndlp, retry);
4503 break;
4504 case ELS_CMD_PLOGI:
4505 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
4506 ndlp->nlp_prev_state = ndlp->nlp_state;
4507 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4508 }
4509 break;
4510 case ELS_CMD_ADISC:
4511 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
4512 ndlp->nlp_prev_state = ndlp->nlp_state;
4513 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4514 }
4515 break;
4516 case ELS_CMD_PRLI:
4517 case ELS_CMD_NVMEPRLI:
4518 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
4519 ndlp->nlp_prev_state = ndlp->nlp_state;
4520 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
4521 }
4522 break;
4523 case ELS_CMD_LOGO:
4524 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
4525 ndlp->nlp_prev_state = ndlp->nlp_state;
4526 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
4527 }
4528 break;
4529 case ELS_CMD_FDISC:
4530 if (!test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag))
4531 lpfc_issue_els_fdisc(vport, ndlp, retry);
4532 break;
4533 }
4534 return;
4535 }
4536
4537 /**
4538 * lpfc_link_reset - Issue link reset
4539 * @vport: pointer to a virtual N_Port data structure.
4540 *
4541 * This routine performs link reset by sending INIT_LINK mailbox command.
4542 * For SLI-3 adapter, link attention interrupt is enabled before issuing
4543 * INIT_LINK mailbox command.
4544 *
4545 * Return code
4546 * 0 - Link reset initiated successfully
4547 * 1 - Failed to initiate link reset
4548 **/
4549 int
4550 lpfc_link_reset(struct lpfc_vport *vport)
4551 {
4552 struct lpfc_hba *phba = vport->phba;
4553 LPFC_MBOXQ_t *mbox;
4554 uint32_t control;
4555 int rc;
4556
4557 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4558 "2851 Attempt link reset\n");
4559 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4560 if (!mbox) {
4561 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4562 "2852 Failed to allocate mbox memory");
4563 return 1;
4564 }
4565
4566 /* Enable Link attention interrupts */
4567 if (phba->sli_rev <= LPFC_SLI_REV3) {
4568 spin_lock_irq(&phba->hbalock);
4569 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4570 control = readl(phba->HCregaddr);
4571 control |= HC_LAINT_ENA;
4572 writel(control, phba->HCregaddr);
4573 readl(phba->HCregaddr); /* flush */
4574 spin_unlock_irq(&phba->hbalock);
4575 }
4576
4577 lpfc_init_link(phba, mbox, phba->cfg_topology,
4578 phba->cfg_link_speed);
4579 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4580 mbox->vport = vport;
4581 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4582 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4583 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4584 "2853 Failed to issue INIT_LINK "
4585 "mbox command, rc:x%x\n", rc);
4586 mempool_free(mbox, phba->mbox_mem_pool);
4587 return 1;
4588 }
4589
4590 return 0;
4591 }
4592
4593 /**
4594 * lpfc_els_retry - Make retry decision on an els command iocb
4595 * @phba: pointer to lpfc hba data structure.
4596 * @cmdiocb: pointer to lpfc command iocb data structure.
4597 * @rspiocb: pointer to lpfc response iocb data structure.
4598 *
4599 * This routine makes a retry decision on an ELS command IOCB, which has
4600 * failed. The following ELS IOCBs use this function for retrying the command
4601 * when a previously issued command responded with an error status: FLOGI, PLOGI,
4602 * PRLI, ADISC and FDISC. Based on the ELS command type and the
4603 * returned error status, it makes the decision whether a retry shall be
4604 * issued for the command, and whether a retry shall be made immediately or
4605 * delayed. In the former case, the corresponding ELS command issuing-function
4606 * is called to retry the command. In the latter case, the ELS command shall
4607 * be posted to the ndlp delayed event and the delayed function timer set on the
4608 * ndlp for the delayed command issuing.
4609 *
4610 * Return code
4611 * 0 - No retry of els command is made
4612 * 1 - Immediate or delayed retry of els command is made
4613 **/
4614 static int
4615 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4616 struct lpfc_iocbq *rspiocb)
4617 {
4618 struct lpfc_vport *vport = cmdiocb->vport;
4619 union lpfc_wqe128 *irsp = &rspiocb->wqe;
4620 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
4621 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
4622 uint32_t *elscmd;
4623 struct ls_rjt stat;
4624 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4625 int logerr = 0;
4626 uint32_t cmd = 0;
4627 uint32_t did;
4628 int link_reset = 0, rc;
4629 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
4630 u32 ulp_word4 = get_job_word4(phba, rspiocb);
4631
4632
4633 /* Note: cmd_dmabuf may be 0 for internal driver abort
4634 * of a delayed ELS command.
4635 */
4636
4637 if (pcmd && pcmd->virt) {
4638 elscmd = (uint32_t *) (pcmd->virt);
4639 cmd = *elscmd++;
4640 }
4641
4642 if (ndlp)
4643 did = ndlp->nlp_DID;
4644 else {
4645 /* We should only hit this case for retrying PLOGI */
4646 did = get_job_els_rsp64_did(phba, rspiocb);
4647 ndlp = lpfc_findnode_did(vport, did);
4648 if (!ndlp && (cmd != ELS_CMD_PLOGI))
4649 return 0;
4650 }
4651
4652 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4653 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
4654 *(((uint32_t *)irsp) + 7), ulp_word4, did);
4655
4656 switch (ulp_status) {
4657 case IOSTAT_FCP_RSP_ERROR:
4658 break;
4659 case IOSTAT_REMOTE_STOP:
4660 if (phba->sli_rev == LPFC_SLI_REV4) {
4661 /* This IO was aborted by the target, we don't
4662 * know the rxid and because we did not send the
4663 * ABTS we cannot generate an RRQ.
4664 */
4665 lpfc_set_rrq_active(phba, ndlp,
4666 cmdiocb->sli4_lxritag, 0, 0);
4667 }
4668 break;
4669 case IOSTAT_LOCAL_REJECT:
4670 switch ((ulp_word4 & IOERR_PARAM_MASK)) {
4671 case IOERR_LOOP_OPEN_FAILURE:
4672 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
4673 delay = 1000;
4674 retry = 1;
4675 break;
4676
4677 case IOERR_ILLEGAL_COMMAND:
4678 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4679 "0124 Retry illegal cmd x%x "
4680 "retry:x%x delay:x%x\n",
4681 cmd, cmdiocb->retry, delay);
4682 retry = 1;
4683 /* All command's retry policy */
4684 maxretry = 8;
4685 if (cmdiocb->retry > 2)
4686 delay = 1000;
4687 break;
4688
4689 case IOERR_NO_RESOURCES:
4690 logerr = 1; /* HBA out of resources */
4691 retry = 1;
4692 if (cmdiocb->retry > 100)
4693 delay = 100;
4694 maxretry = 250;
4695 break;
4696
4697 case IOERR_ILLEGAL_FRAME:
4698 delay = 100;
4699 retry = 1;
4700 break;
4701
4702 case IOERR_INVALID_RPI:
4703 if (cmd == ELS_CMD_PLOGI &&
4704 did == NameServer_DID) {
4705 /* Continue forever if plogi to */
4706 /* the nameserver fails */
4707 maxretry = 0;
4708 delay = 100;
4709 } else if (cmd == ELS_CMD_PRLI &&
4710 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
4711 /* State-command disagreement. The PRLI was
4712 * rejected with an invalid rpi, meaning there was
4713 * some unexpected state change. Don't retry.
4714 */
4715 maxretry = 0;
4716 retry = 0;
4717 break;
4718 }
4719 retry = 1;
4720 break;
4721
4722 case IOERR_SEQUENCE_TIMEOUT:
4723 if (cmd == ELS_CMD_PLOGI &&
4724 did == NameServer_DID &&
4725 (cmdiocb->retry + 1) == maxretry) {
4726 /* Reset the Link */
4727 link_reset = 1;
4728 break;
4729 }
4730 retry = 1;
4731 delay = 100;
4732 break;
4733 case IOERR_SLI_ABORTED:
4734 /* Retry ELS PLOGI command?
4735 * Possibly the rport just wasn't ready.
4736 */
4737 if (cmd == ELS_CMD_PLOGI) {
4738 /* No retry if state change */
4739 if (ndlp &&
4740 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
4741 goto out_retry;
4742 retry = 1;
4743 maxretry = 2;
4744 }
4745 break;
4746 }
4747 break;
4748
4749 case IOSTAT_NPORT_RJT:
4750 case IOSTAT_FABRIC_RJT:
4751 if (ulp_word4 & RJT_UNAVAIL_TEMP) {
4752 retry = 1;
4753 break;
4754 }
4755 break;
4756
4757 case IOSTAT_NPORT_BSY:
4758 case IOSTAT_FABRIC_BSY:
4759 logerr = 1; /* Fabric / Remote NPort out of resources */
4760 retry = 1;
4761 break;
4762
4763 case IOSTAT_LS_RJT:
4764 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
4765 /* Added for Vendor specific support
4766 * Just keep retrying for these Rsn / Exp codes
4767 */
4768 if (test_bit(FC_PT2PT, &vport->fc_flag) &&
4769 cmd == ELS_CMD_NVMEPRLI) {
4770 switch (stat.un.b.lsRjtRsnCode) {
4771 case LSRJT_UNABLE_TPC:
4772 case LSRJT_INVALID_CMD:
4773 case LSRJT_LOGICAL_ERR:
4774 case LSRJT_CMD_UNSUPPORTED:
4775 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
4776 "0168 NVME PRLI LS_RJT "
4777 "reason %x port doesn't "
4778 "support NVME, disabling NVME\n",
4779 stat.un.b.lsRjtRsnCode);
4780 retry = 0;
4781 set_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);
4782 goto out_retry;
4783 }
4784 }
4785 switch (stat.un.b.lsRjtRsnCode) {
4786 case LSRJT_UNABLE_TPC:
4787 /* Special case for PRLI LS_RJTs. Recall that lpfc
4788 * uses a single routine to issue both PRLI FC4 types.
4789 * If the PRLI is rejected because that FC4 type
4790 * isn't really supported, don't retry and cause
4791 * multiple transport registrations. Otherwise, parse
4792 * the reason code/reason code explanation and take the
4793 * appropriate action.
4794 */
4795 lpfc_printf_vlog(vport, KERN_INFO,
4796 LOG_DISCOVERY | LOG_ELS | LOG_NODE,
4797 "0153 ELS cmd x%x LS_RJT by x%x. "
4798 "RsnCode x%x RsnCodeExp x%x\n",
4799 cmd, did, stat.un.b.lsRjtRsnCode,
4800 stat.un.b.lsRjtRsnCodeExp);
4801
4802 switch (stat.un.b.lsRjtRsnCodeExp) {
4803 case LSEXP_CANT_GIVE_DATA:
4804 case LSEXP_CMD_IN_PROGRESS:
4805 if (cmd == ELS_CMD_PLOGI) {
4806 delay = 1000;
4807 maxretry = 48;
4808 }
4809 retry = 1;
4810 break;
4811 case LSEXP_REQ_UNSUPPORTED:
4812 case LSEXP_NO_RSRC_ASSIGN:
4813 /* These explanation codes get no retry. */
4814 if (cmd == ELS_CMD_PRLI ||
4815 cmd == ELS_CMD_NVMEPRLI)
4816 break;
4817 fallthrough;
4818 default:
4819 /* Limit the delay and retry action to a limited
4820 * cmd set. There are other ELS commands where
4821 * a retry is not expected.
4822 */
4823 if (cmd == ELS_CMD_PLOGI ||
4824 cmd == ELS_CMD_PRLI ||
4825 cmd == ELS_CMD_NVMEPRLI) {
4826 delay = 1000;
4827 maxretry = lpfc_max_els_tries + 1;
4828 retry = 1;
4829 }
4830 break;
4831 }
4832
4833 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4834 (cmd == ELS_CMD_FDISC) &&
4835 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
4836 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
4837 "0125 FDISC (x%x). "
4838 "Fabric out of resources\n",
4839 stat.un.lsRjtError);
4840 lpfc_vport_set_state(vport,
4841 FC_VPORT_NO_FABRIC_RSCS);
4842 }
4843 break;
4844
4845 case LSRJT_LOGICAL_BSY:
4846 if ((cmd == ELS_CMD_PLOGI) ||
4847 (cmd == ELS_CMD_PRLI) ||
4848 (cmd == ELS_CMD_NVMEPRLI)) {
4849 delay = 1000;
4850 maxretry = 48;
4851 } else if (cmd == ELS_CMD_FDISC) {
4852 /* FDISC retry policy */
4853 maxretry = 48;
4854 if (cmdiocb->retry >= 32)
4855 delay = 1000;
4856 }
4857 retry = 1;
4858 break;
4859
4860 case LSRJT_LOGICAL_ERR:
4861 /* There are some cases where switches return this
4862 * error when they are not ready and should be returning
4863 * Logical Busy. We should delay every time.
4864 */
4865 if (cmd == ELS_CMD_FDISC &&
4866 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
4867 maxretry = 3;
4868 delay = 1000;
4869 retry = 1;
4870 } else if (cmd == ELS_CMD_FLOGI &&
4871 stat.un.b.lsRjtRsnCodeExp ==
4872 LSEXP_NOTHING_MORE) {
4873 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
4874 retry = 1;
4875 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
4876 "0820 FLOGI (x%x). "
4877 "BBCredit Not Supported\n",
4878 stat.un.lsRjtError);
4879 }
4880 break;
4881
4882 case LSRJT_PROTOCOL_ERR:
4883 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4884 (cmd == ELS_CMD_FDISC) &&
4885 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
4886 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
4887 ) {
4888 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
4889 "0122 FDISC (x%x). "
4890 "Fabric Detected Bad WWN\n",
4891 stat.un.lsRjtError);
4892 lpfc_vport_set_state(vport,
4893 FC_VPORT_FABRIC_REJ_WWN);
4894 }
4895 break;
4896 case LSRJT_VENDOR_UNIQUE:
4897 if ((stat.un.b.vendorUnique == 0x45) &&
4898 (cmd == ELS_CMD_FLOGI)) {
4899 goto out_retry;
4900 }
4901 break;
4902 case LSRJT_CMD_UNSUPPORTED:
4903 /* lpfc nvmet returns this type of LS_RJT when it
4904 * receives an FCP PRLI because lpfc nvmet only
4905 * supports NVME. The ELS request is terminated for FCP4
4906 * on this rport.
4907 */
4908 if (stat.un.b.lsRjtRsnCodeExp ==
4909 LSEXP_REQ_UNSUPPORTED) {
4910 if (cmd == ELS_CMD_PRLI)
4911 goto out_retry;
4912 }
4913 break;
4914 }
4915 break;
4916
4917 case IOSTAT_INTERMED_RSP:
4918 case IOSTAT_BA_RJT:
4919 break;
4920
4921 default:
4922 break;
4923 }
4924
4925 if (link_reset) {
4926 rc = lpfc_link_reset(vport);
4927 if (rc) {
4928 /* Do not give up. Retry PLOGI one more time and attempt
4929 * link reset if PLOGI fails again.
4930 */
4931 retry = 1;
4932 delay = 100;
4933 goto out_retry;
4934 }
4935 return 1;
4936 }
4937
4938 if (did == FDMI_DID)
4939 retry = 1;
4940
4941 if ((cmd == ELS_CMD_FLOGI) &&
4942 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
4943 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
4944 /* FLOGI retry policy */
4945 retry = 1;
4946 /* retry FLOGI forever */
4947 if (phba->link_flag != LS_LOOPBACK_MODE)
4948 maxretry = 0;
4949 else
4950 maxretry = 2;
4951
4952 if (cmdiocb->retry >= 100)
4953 delay = 5000;
4954 else if (cmdiocb->retry >= 32)
4955 delay = 1000;
4956 } else if ((cmd == ELS_CMD_FDISC) &&
4957 !lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
4958 /* retry FDISCs every second up to devloss */
4959 retry = 1;
4960 maxretry = vport->cfg_devloss_tmo;
4961 delay = 1000;
4962 }
4963
4964 cmdiocb->retry++;
4965 if (maxretry && (cmdiocb->retry >= maxretry)) {
4966 phba->fc_stat.elsRetryExceeded++;
4967 retry = 0;
4968 }
4969
4970 if (test_bit(FC_UNLOADING, &vport->load_flag))
4971 retry = 0;
4972
4973 out_retry:
4974 if (retry) {
4975 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
4976 /* Stop retrying PLOGI and FDISC if in FCF discovery */
4977 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4978 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4979 "2849 Stop retry ELS command "
4980 "x%x to remote NPORT x%x, "
4981 "Data: x%x x%x\n", cmd, did,
4982 cmdiocb->retry, delay);
4983 return 0;
4984 }
4985 }
4986
4987 /* Retry ELS command <elsCmd> to remote NPORT <did> */
4988 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4989 "0107 Retry ELS command x%x to remote "
4990 "NPORT x%x Data: x%x x%x\n",
4991 cmd, did, cmdiocb->retry, delay);
4992
4993 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
4994 ((ulp_status != IOSTAT_LOCAL_REJECT) ||
4995 ((ulp_word4 & IOERR_PARAM_MASK) !=
4996 IOERR_NO_RESOURCES))) {
4997 /* Don't reset timer for no resources */
4998
4999 /* If discovery / RSCN timer is running, reset it */
5000 if (timer_pending(&vport->fc_disctmo) ||
5001 test_bit(FC_RSCN_MODE, &vport->fc_flag))
5002 lpfc_set_disctmo(vport);
5003 }
5004
5005 phba->fc_stat.elsXmitRetry++;
5006 if (ndlp && delay) {
5007 phba->fc_stat.elsDelayRetry++;
5008 ndlp->nlp_retry = cmdiocb->retry;
5009
5010 /* delay is specified in milliseconds */
5011 mod_timer(&ndlp->nlp_delayfunc,
5012 jiffies + msecs_to_jiffies(delay));
5013 spin_lock_irq(&ndlp->lock);
5014 ndlp->nlp_flag |= NLP_DELAY_TMO;
5015 spin_unlock_irq(&ndlp->lock);
5016
5017 ndlp->nlp_prev_state = ndlp->nlp_state;
5018 if ((cmd == ELS_CMD_PRLI) ||
5019 (cmd == ELS_CMD_NVMEPRLI))
5020 lpfc_nlp_set_state(vport, ndlp,
5021 NLP_STE_PRLI_ISSUE);
5022 else if (cmd != ELS_CMD_ADISC)
5023 lpfc_nlp_set_state(vport, ndlp,
5024 NLP_STE_NPR_NODE);
5025 ndlp->nlp_last_elscmd = cmd;
5026
5027 return 1;
5028 }
5029 switch (cmd) {
5030 case ELS_CMD_FLOGI:
5031 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
5032 return 1;
5033 case ELS_CMD_FDISC:
5034 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
5035 return 1;
5036 case ELS_CMD_PLOGI:
5037 if (ndlp) {
5038 ndlp->nlp_prev_state = ndlp->nlp_state;
5039 lpfc_nlp_set_state(vport, ndlp,
5040 NLP_STE_PLOGI_ISSUE);
5041 }
5042 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
5043 return 1;
5044 case ELS_CMD_ADISC:
5045 ndlp->nlp_prev_state = ndlp->nlp_state;
5046 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5047 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
5048 return 1;
5049 case ELS_CMD_PRLI:
5050 case ELS_CMD_NVMEPRLI:
5051 ndlp->nlp_prev_state = ndlp->nlp_state;
5052 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
5053 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
5054 return 1;
5055 case ELS_CMD_LOGO:
5056 ndlp->nlp_prev_state = ndlp->nlp_state;
5057 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
5058 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
5059 return 1;
5060 }
5061 }
5062 /* No retry ELS command <elsCmd> to remote NPORT <did> */
5063 if (logerr) {
5064 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5065 "0137 No retry ELS command x%x to remote "
5066 "NPORT x%x: Out of Resources: Error:x%x/%x "
5067 "IoTag x%x\n",
5068 cmd, did, ulp_status, ulp_word4,
5069 cmdiocb->iotag);
5070 }
5071 else {
5072 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5073 "0108 No retry ELS command x%x to remote "
5074 "NPORT x%x Retried:%d Error:x%x/%x "
5075 "IoTag x%x nflags x%x\n",
5076 cmd, did, cmdiocb->retry, ulp_status,
5077 ulp_word4, cmdiocb->iotag,
5078 (ndlp ? ndlp->nlp_flag : 0));
5079 }
5080 return 0;
5081 }
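
/* Illustrative sketch (not part of the driver flow): ELS completion
 * handlers in this file consult the retry logic above before tearing
 * down their state. A minimal caller, assuming a local "out" cleanup
 * label, would look like:
 *
 *	if (ulp_status) {
 *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *			goto out;	(command was reissued; keep the ndlp)
 *	}
 *
 * A nonzero return means a retry was scheduled or reissued, so the
 * caller should not release discovery state for this ndlp yet.
 */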
5082
5083 /**
5084 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
5085 * @phba: pointer to lpfc hba data structure.
5086 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
5087 *
5088 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
5089 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
5090 * checks to see whether there is a lpfc DMA buffer associated with the
5091 * response of the command IOCB. If so, it will be released before releasing
5092 * the lpfc DMA buffer associated with the IOCB itself.
5093 *
5094 * Return code
5095 * 0 - Successfully released lpfc DMA buffer (currently, always returns 0)
5096 **/
5097 static int
5098 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
5099 {
5100 struct lpfc_dmabuf *buf_ptr;
5101
5102 /* Free the response before processing the command. */
5103 if (!list_empty(&buf_ptr1->list)) {
5104 list_remove_head(&buf_ptr1->list, buf_ptr,
5105 struct lpfc_dmabuf,
5106 list);
5107 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
5108 kfree(buf_ptr);
5109 }
5110 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
5111 kfree(buf_ptr1);
5112 return 0;
5113 }
5114
5115 /**
5116 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
5117 * @phba: pointer to lpfc hba data structure.
5118 * @buf_ptr: pointer to the lpfc dma buffer data structure.
5119 *
5120 * This routine releases the lpfc Direct Memory Access (DMA) buffer
5121 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
5122 * pool.
5123 *
5124 * Return code
5125 * 0 - Successfully released lpfc DMA buffer (currently, always returns 0)
5126 **/
5127 static int
5128 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
5129 {
5130 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
5131 kfree(buf_ptr);
5132 return 0;
5133 }
5134
5135 /**
5136 * lpfc_els_free_iocb - Free a command iocb and its associated resources
5137 * @phba: pointer to lpfc hba data structure.
5138 * @elsiocb: pointer to lpfc els command iocb data structure.
5139 *
5140 * This routine frees a command IOCB and its associated resources. The
5141 * command IOCB data structure contains references to various associated
5142 * resources; these fields must be set to NULL if the associated reference
5143 * is not present:
5144 * cmd_dmabuf - reference to cmd.
5145 * cmd_dmabuf->next - reference to rsp
5146 * rsp_dmabuf - unused
5147 * bpl_dmabuf - reference to bpl
5148 *
5149 * It first properly decrements the reference count held on ndlp for the
5150 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
5151 * set, it invokes the lpfc_els_free_data() routine to release the Direct
5152 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
5153 * adds the DMA buffer to the @phba data structure for delayed release.
5154 * If reference to the Buffer Pointer List (BPL) is present, the
5155 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
5156 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
5157 * invoked to release the IOCB data structure back to @phba IOCBQ list.
5158 *
5159 * Return code
5160 * 0 - Success (currently, always returns 0)
5161 **/
5162 int
5163 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
5164 {
5165 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
5166
5167 /* The I/O iocb is complete. Clear the node and first dmabuf */
5168 elsiocb->ndlp = NULL;
5169
5170 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
5171 if (elsiocb->cmd_dmabuf) {
5172 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
5173 /* Firmware could still be in the process of DMAing the
5174 * payload, so don't free the data buffer until after
5175 * a heartbeat.
5176 */
5177 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
5178 buf_ptr = elsiocb->cmd_dmabuf;
5179 elsiocb->cmd_dmabuf = NULL;
5180 if (buf_ptr) {
5181 buf_ptr1 = NULL;
5182 spin_lock_irq(&phba->hbalock);
5183 if (!list_empty(&buf_ptr->list)) {
5184 list_remove_head(&buf_ptr->list,
5185 buf_ptr1, struct lpfc_dmabuf,
5186 list);
5187 INIT_LIST_HEAD(&buf_ptr1->list);
5188 list_add_tail(&buf_ptr1->list,
5189 &phba->elsbuf);
5190 phba->elsbuf_cnt++;
5191 }
5192 INIT_LIST_HEAD(&buf_ptr->list);
5193 list_add_tail(&buf_ptr->list, &phba->elsbuf);
5194 phba->elsbuf_cnt++;
5195 spin_unlock_irq(&phba->hbalock);
5196 }
5197 } else {
5198 buf_ptr1 = elsiocb->cmd_dmabuf;
5199 lpfc_els_free_data(phba, buf_ptr1);
5200 elsiocb->cmd_dmabuf = NULL;
5201 }
5202 }
5203
5204 if (elsiocb->bpl_dmabuf) {
5205 buf_ptr = elsiocb->bpl_dmabuf;
5206 lpfc_els_free_bpl(phba, buf_ptr);
5207 elsiocb->bpl_dmabuf = NULL;
5208 }
5209 lpfc_sli_release_iocbq(phba, elsiocb);
5210 return 0;
5211 }
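
/* Illustrative sketch (hypothetical caller): the issue/cleanup pattern used
 * throughout this file pairs lpfc_nlp_get() with lpfc_els_free_iocb() and
 * lpfc_nlp_put() on the error path, e.g.:
 *
 *	elsiocb->ndlp = lpfc_nlp_get(ndlp);
 *	if (!elsiocb->ndlp) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		lpfc_nlp_put(ndlp);
 *		return 1;
 *	}
 */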
5212
5213 /**
5214 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
5215 * @phba: pointer to lpfc hba data structure.
5216 * @cmdiocb: pointer to lpfc command iocb data structure.
5217 * @rspiocb: pointer to lpfc response iocb data structure.
5218 *
5219 * This routine is the completion callback function to the Logout (LOGO)
5220 * Accept (ACC) Response ELS command. This routine is invoked to indicate
5221 * the completion of the LOGO process. If the node has transitioned to NPR,
5222 * this routine unregisters the RPI if it is still registered. The
5223 * lpfc_els_free_iocb() is invoked to release the IOCB data structure.
5224 **/
5225 static void
5226 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5227 struct lpfc_iocbq *rspiocb)
5228 {
5229 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5230 struct lpfc_vport *vport = cmdiocb->vport;
5231 u32 ulp_status, ulp_word4;
5232
5233 ulp_status = get_job_ulpstatus(phba, rspiocb);
5234 ulp_word4 = get_job_word4(phba, rspiocb);
5235
5236 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5237 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
5238 ulp_status, ulp_word4, ndlp->nlp_DID);
5239 /* ACC to LOGO completes to NPort <nlp_DID> */
5240 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5241 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
5242 "last els x%x Data: x%x x%x x%x\n",
5243 ndlp->nlp_DID, kref_read(&ndlp->kref),
5244 ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state,
5245 ndlp->nlp_rpi);
5246
5247 /* This clause allows the LOGO ACC to complete and free resources
5248 * for the Fabric Domain Controller. It deliberately skips
5249 * the unreg_rpi and release rpi because some fabrics send RDP
5250 * requests after logging out from the initiator.
5251 */
5252 if (ndlp->nlp_type & NLP_FABRIC &&
5253 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
5254 goto out;
5255
5256 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
5257 if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
5258 lpfc_unreg_rpi(vport, ndlp);
5259
5260 /* If came from PRLO, then PRLO_ACC is done.
5261 * Start rediscovery now.
5262 */
5263 if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) {
5264 spin_lock_irq(&ndlp->lock);
5265 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5266 spin_unlock_irq(&ndlp->lock);
5267 ndlp->nlp_prev_state = ndlp->nlp_state;
5268 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
5269 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5270 }
5271 }
5272
5273 out:
5274 /*
5275 * The driver received a LOGO from the rport and has ACK'd it.
5276 * At this point, the driver is done so release the IOCB
5277 */
5278 lpfc_els_free_iocb(phba, cmdiocb);
5279 lpfc_nlp_put(ndlp);
5280 }
5281
5282 /**
5283 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
5284 * @phba: pointer to lpfc hba data structure.
5285 * @pmb: pointer to the driver internal queue element for mailbox command.
5286 *
5287 * This routine is the completion callback function for unregister default
5288 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
5289 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
5290 * decrements the ndlp reference count held for this completion callback
5291 * function. After that, it invokes lpfc_drop_node() to check
5292 * whether it is appropriate to release the node.
5293 **/
5294 void
5295 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5296 {
5297 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
5298 u32 mbx_flag = pmb->mbox_flag;
5299 u32 mbx_cmd = pmb->u.mb.mbxCommand;
5300
5301 if (ndlp) {
5302 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5303 "0006 rpi x%x DID:%x flg:%x %d x%px "
5304 "mbx_cmd x%x mbx_flag x%x x%px\n",
5305 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5306 kref_read(&ndlp->kref), ndlp, mbx_cmd,
5307 mbx_flag, pmb);
5308
5309 /* This ends the default/temporary RPI cleanup logic for this
5310 * ndlp, and the node and rpi need to be released. Free the rpi
5311 * first on an UNREG_LOGIN and then release the final
5312 * references.
5313 */
5314 spin_lock_irq(&ndlp->lock);
5315 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5316 if (mbx_cmd == MBX_UNREG_LOGIN)
5317 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5318 spin_unlock_irq(&ndlp->lock);
5319 lpfc_nlp_put(ndlp);
5320 lpfc_drop_node(ndlp->vport, ndlp);
5321 }
5322
5323 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5324 }
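
/* Illustrative sketch: lpfc_cmpl_els_rsp() below wires this completion up
 * when an immediate unreg of the default RPI is wanted. The essential
 * steps, shown out of context, are:
 *
 *	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 *	mbox->vport = vport;
 *	mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
 *	mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 */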
5325
5326 /**
5327 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
5328 * @phba: pointer to lpfc hba data structure.
5329 * @cmdiocb: pointer to lpfc command iocb data structure.
5330 * @rspiocb: pointer to lpfc response iocb data structure.
5331 *
5332 * This routine is the completion callback function for ELS Response IOCB
5333 * command. In the normal case, this callback function properly sets the
5334 * nlp_flag bitmap in the ndlp data structure. If the mbox command reference
5335 * field in the command IOCB is not NULL, the referenced mailbox command will
5336 * be sent out. It then invokes the lpfc_els_free_iocb() routine to release
5337 * the IOCB.
5338 **/
5339 static void
5340 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5341 struct lpfc_iocbq *rspiocb)
5342 {
5343 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5344 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
5345 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
5346 IOCB_t *irsp;
5347 LPFC_MBOXQ_t *mbox = NULL;
5348 u32 ulp_status, ulp_word4, tmo, did, iotag;
5349
5350 if (!vport) {
5351 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
5352 "3177 null vport in ELS rsp\n");
5353 goto out;
5354 }
5355 if (cmdiocb->context_un.mbox)
5356 mbox = cmdiocb->context_un.mbox;
5357
5358 ulp_status = get_job_ulpstatus(phba, rspiocb);
5359 ulp_word4 = get_job_word4(phba, rspiocb);
5360 did = get_job_els_rsp64_did(phba, cmdiocb);
5361
5362 if (phba->sli_rev == LPFC_SLI_REV4) {
5363 tmo = get_wqe_tmo(cmdiocb);
5364 iotag = get_wqe_reqtag(cmdiocb);
5365 } else {
5366 irsp = &rspiocb->iocb;
5367 tmo = irsp->ulpTimeout;
5368 iotag = irsp->ulpIoTag;
5369 }
5370
5371 /* Check to see if link went down during discovery */
5372 if (!ndlp || lpfc_els_chk_latt(vport)) {
5373 if (mbox)
5374 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
5375 goto out;
5376 }
5377
5378 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5379 "ELS rsp cmpl: status:x%x/x%x did:x%x",
5380 ulp_status, ulp_word4, did);
5381 /* ELS response tag <ulpIoTag> completes */
5382 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5383 "0110 ELS response tag x%x completes "
5384 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
5385 iotag, ulp_status, ulp_word4, tmo,
5386 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5387 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
5388 if (mbox) {
5389 if (ulp_status == 0
5390 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
5391 if (!lpfc_unreg_rpi(vport, ndlp) &&
5392 !test_bit(FC_PT2PT, &vport->fc_flag)) {
5393 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5394 ndlp->nlp_state ==
5395 NLP_STE_REG_LOGIN_ISSUE) {
5396 lpfc_printf_vlog(vport, KERN_INFO,
5397 LOG_DISCOVERY,
5398 "0314 PLOGI recov "
5399 "DID x%x "
5400 "Data: x%x x%x x%x\n",
5401 ndlp->nlp_DID,
5402 ndlp->nlp_state,
5403 ndlp->nlp_rpi,
5404 ndlp->nlp_flag);
5405 goto out_free_mbox;
5406 }
5407 }
5408
5409 /* Increment reference count to ndlp to hold the
5410 * reference to ndlp for the callback function.
5411 */
5412 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
5413 if (!mbox->ctx_ndlp)
5414 goto out_free_mbox;
5415
5416 mbox->vport = vport;
5417 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
5418 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
5419 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
5420 }
5421 else {
5422 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
5423 ndlp->nlp_prev_state = ndlp->nlp_state;
5424 lpfc_nlp_set_state(vport, ndlp,
5425 NLP_STE_REG_LOGIN_ISSUE);
5426 }
5427
5428 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
5429 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5430 != MBX_NOT_FINISHED)
5431 goto out;
5432
5433 /* Decrement the ndlp reference count we
5434 * set for this failed mailbox command.
5435 */
5436 lpfc_nlp_put(ndlp);
5437 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5438
5439 /* ELS rsp: Cannot issue reg_login for <NPortid> */
5440 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5441 "0138 ELS rsp: Cannot issue reg_login for x%x "
5442 "Data: x%x x%x x%x\n",
5443 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5444 ndlp->nlp_rpi);
5445 }
5446 out_free_mbox:
5447 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
5448 }
5449 out:
5450 if (ndlp && shost) {
5451 spin_lock_irq(&ndlp->lock);
5452 if (mbox)
5453 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
5454 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
5455 spin_unlock_irq(&ndlp->lock);
5456 }
5457
5458 /* An SLI4 NPIV instance wants to drop the node at this point under
5459 * these conditions and release the RPI.
5460 */
5461 if (phba->sli_rev == LPFC_SLI_REV4 &&
5462 vport && vport->port_type == LPFC_NPIV_PORT &&
5463 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
5464 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
5465 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
5466 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
5467 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
5468 spin_lock_irq(&ndlp->lock);
5469 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5470 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5471 spin_unlock_irq(&ndlp->lock);
5472 }
5473 lpfc_drop_node(vport, ndlp);
5474 } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
5475 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
5476 ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
5477 /* Drop ndlp if there is no planned or outstanding
5478 * issued PRLI.
5479 *
5480 * In cases when the ndlp is acting as both an initiator
5481 * and target function, let our issued PRLI determine
5482 * the final ndlp kref drop.
5483 */
5484 lpfc_drop_node(vport, ndlp);
5485 }
5486 }
5487
5488 /* Release the originating I/O reference. */
5489 lpfc_els_free_iocb(phba, cmdiocb);
5490 lpfc_nlp_put(ndlp);
5491 return;
5492 }
5493
5494 /**
5495 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
5496 * @vport: pointer to a host virtual N_Port data structure.
5497 * @flag: the els command code to be accepted.
5498 * @oldiocb: pointer to the original lpfc command iocb data structure.
5499 * @ndlp: pointer to a node-list data structure.
5500 * @mbox: pointer to the driver internal queue element for mailbox command.
5501 *
5502 * This routine prepares and issues an Accept (ACC) response IOCB
5503 * command. It uses the @flag to properly set up the IOCB field for the
5504 * specific ACC response command to be issued and invokes the
5505 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
5506 * @mbox pointer is passed in, it will be put into the context_un.mbox
5507 * field of the IOCB for the completion callback function to issue the
5508 * mailbox command to the HBA when the callback is invoked.
5509 *
5510 * Note that the ndlp reference count will be incremented by 1 for holding the
5511 * ndlp and the reference to ndlp will be stored into the ndlp field of
5512 * the IOCB for the completion callback function to the corresponding
5513 * response ELS IOCB command.
5514 *
5515 * Return code
5516 * 0 - Successfully issued acc response
5517 * 1 - Failed to issue acc response
5518 **/
5519 int
5520 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
5521 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5522 LPFC_MBOXQ_t *mbox)
5523 {
5524 struct lpfc_hba *phba = vport->phba;
5525 IOCB_t *icmd;
5526 IOCB_t *oldcmd;
5527 union lpfc_wqe128 *wqe;
5528 union lpfc_wqe128 *oldwqe = &oldiocb->wqe;
5529 struct lpfc_iocbq *elsiocb;
5530 uint8_t *pcmd;
5531 struct serv_parm *sp;
5532 uint16_t cmdsize;
5533 int rc;
5534 ELS_PKT *els_pkt_ptr;
5535 struct fc_els_rdf_resp *rdf_resp;
5536
5537 switch (flag) {
5538 case ELS_CMD_ACC:
5539 cmdsize = sizeof(uint32_t);
5540 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5541 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5542 if (!elsiocb) {
5543 spin_lock_irq(&ndlp->lock);
5544 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5545 spin_unlock_irq(&ndlp->lock);
5546 return 1;
5547 }
5548
5549 if (phba->sli_rev == LPFC_SLI_REV4) {
5550 wqe = &elsiocb->wqe;
5551 /* XRI / rx_id */
5552 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5553 bf_get(wqe_ctxt_tag,
5554 &oldwqe->xmit_els_rsp.wqe_com));
5555
5556 /* oxid */
5557 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5558 bf_get(wqe_rcvoxid,
5559 &oldwqe->xmit_els_rsp.wqe_com));
5560 } else {
5561 icmd = &elsiocb->iocb;
5562 oldcmd = &oldiocb->iocb;
5563 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5564 icmd->unsli3.rcvsli3.ox_id =
5565 oldcmd->unsli3.rcvsli3.ox_id;
5566 }
5567
5568 pcmd = elsiocb->cmd_dmabuf->virt;
5569 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5570 pcmd += sizeof(uint32_t);
5571
5572 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5573 "Issue ACC: did:x%x flg:x%x",
5574 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5575 break;
5576 case ELS_CMD_FLOGI:
5577 case ELS_CMD_PLOGI:
5578 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
5579 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5580 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5581 if (!elsiocb)
5582 return 1;
5583
5584 if (phba->sli_rev == LPFC_SLI_REV4) {
5585 wqe = &elsiocb->wqe;
5586 /* XRI / rx_id */
5587 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5588 bf_get(wqe_ctxt_tag,
5589 &oldwqe->xmit_els_rsp.wqe_com));
5590
5591 /* oxid */
5592 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5593 bf_get(wqe_rcvoxid,
5594 &oldwqe->xmit_els_rsp.wqe_com));
5595 } else {
5596 icmd = &elsiocb->iocb;
5597 oldcmd = &oldiocb->iocb;
5598 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5599 icmd->unsli3.rcvsli3.ox_id =
5600 oldcmd->unsli3.rcvsli3.ox_id;
5601 }
5602
5603 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
5604
5605 if (mbox)
5606 elsiocb->context_un.mbox = mbox;
5607
5608 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5609 pcmd += sizeof(uint32_t);
5610 sp = (struct serv_parm *)pcmd;
5611
5612 if (flag == ELS_CMD_FLOGI) {
5613 /* Copy the received service parameters back */
5614 memcpy(sp, &phba->fc_fabparam,
5615 sizeof(struct serv_parm));
5616
5617 /* Clear the F_Port bit */
5618 sp->cmn.fPort = 0;
5619
5620 /* Mark all class service parameters as invalid */
5621 sp->cls1.classValid = 0;
5622 sp->cls2.classValid = 0;
5623 sp->cls3.classValid = 0;
5624 sp->cls4.classValid = 0;
5625
5626 /* Copy our worldwide names */
5627 memcpy(&sp->portName, &vport->fc_sparam.portName,
5628 sizeof(struct lpfc_name));
5629 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
5630 sizeof(struct lpfc_name));
5631 } else {
5632 memcpy(pcmd, &vport->fc_sparam,
5633 sizeof(struct serv_parm));
5634
5635 sp->cmn.valid_vendor_ver_level = 0;
5636 memset(sp->un.vendorVersion, 0,
5637 sizeof(sp->un.vendorVersion));
5638 sp->cmn.bbRcvSizeMsb &= 0xF;
5639
5640 /* If our firmware supports this feature, convey that
5641 * info to the target using the vendor specific field.
5642 */
5643 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
5644 sp->cmn.valid_vendor_ver_level = 1;
5645 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
5646 sp->un.vv.flags =
5647 cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
5648 }
5649 }
5650
5651 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5652 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
5653 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5654 break;
5655 case ELS_CMD_PRLO:
5656 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
5657 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5658 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
5659 if (!elsiocb)
5660 return 1;
5661
5662 if (phba->sli_rev == LPFC_SLI_REV4) {
5663 wqe = &elsiocb->wqe;
5664 /* XRI / rx_id */
5665 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5666 bf_get(wqe_ctxt_tag,
5667 &oldwqe->xmit_els_rsp.wqe_com));
5668
5669 /* oxid */
5670 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5671 bf_get(wqe_rcvoxid,
5672 &oldwqe->xmit_els_rsp.wqe_com));
5673 } else {
5674 icmd = &elsiocb->iocb;
5675 oldcmd = &oldiocb->iocb;
5676 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5677 icmd->unsli3.rcvsli3.ox_id =
5678 oldcmd->unsli3.rcvsli3.ox_id;
5679 }
5680
5681 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt;
5682
5683 memcpy(pcmd, oldiocb->cmd_dmabuf->virt,
5684 sizeof(uint32_t) + sizeof(PRLO));
5685 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
5686 els_pkt_ptr = (ELS_PKT *) pcmd;
5687 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
5688
5689 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5690 "Issue ACC PRLO: did:x%x flg:x%x",
5691 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5692 break;
5693 case ELS_CMD_RDF:
5694 cmdsize = sizeof(*rdf_resp);
5695 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5696 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5697 if (!elsiocb)
5698 return 1;
5699
5700 if (phba->sli_rev == LPFC_SLI_REV4) {
5701 wqe = &elsiocb->wqe;
5702 /* XRI / rx_id */
5703 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5704 bf_get(wqe_ctxt_tag,
5705 &oldwqe->xmit_els_rsp.wqe_com));
5706
5707 /* oxid */
5708 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5709 bf_get(wqe_rcvoxid,
5710 &oldwqe->xmit_els_rsp.wqe_com));
5711 } else {
5712 icmd = &elsiocb->iocb;
5713 oldcmd = &oldiocb->iocb;
5714 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5715 icmd->unsli3.rcvsli3.ox_id =
5716 oldcmd->unsli3.rcvsli3.ox_id;
5717 }
5718
5719 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
5720 rdf_resp = (struct fc_els_rdf_resp *)pcmd;
5721 memset(rdf_resp, 0, sizeof(*rdf_resp));
5722 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
5723
5724 /* FC-LS-5 specifies desc_list_len shall be set to 12 */
5725 rdf_resp->desc_list_len = cpu_to_be32(12);
5726
5727 /* FC-LS-5 specifies LS REQ Information descriptor */
5728 rdf_resp->lsri.desc_tag = cpu_to_be32(1);
5729 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32));
5730 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF;
5731 break;
5732 default:
5733 return 1;
5734 }
5735 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
5736 spin_lock_irq(&ndlp->lock);
5737 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
5738 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
5739 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5740 spin_unlock_irq(&ndlp->lock);
5741 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
5742 } else {
5743 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5744 }
5745
5746 phba->fc_stat.elsXmitACC++;
5747 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5748 if (!elsiocb->ndlp) {
5749 lpfc_els_free_iocb(phba, elsiocb);
5750 return 1;
5751 }
5752
5753 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5754 if (rc == IOCB_ERROR) {
5755 lpfc_els_free_iocb(phba, elsiocb);
5756 lpfc_nlp_put(ndlp);
5757 return 1;
5758 }
5759
5760 /* Xmit ELS ACC response tag <ulpIoTag> */
5761 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5762 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
5763 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5764 "RPI: x%x, fc_flag x%lx refcnt %d\n",
5765 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5766 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5767 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
5768 return 0;
5769 }
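
/* Illustrative sketch (hypothetical state-machine handler): accepting an
 * unsolicited PLOGI and deferring the REG_LOGIN mailbox to the response
 * completion is done by passing the prepared mailbox in @mbox:
 *
 *	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
 *
 * where login_mbox is a REG_LOGIN mailbox the caller has already built;
 * a plain ACC with no mailbox is issued with ELS_CMD_ACC and a NULL @mbox.
 */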
5770
5771 /**
5772 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
5773 * @vport: pointer to a virtual N_Port data structure.
5774 * @rejectError: reject response to issue
5775 * @oldiocb: pointer to the original lpfc command iocb data structure.
5776 * @ndlp: pointer to a node-list data structure.
5777 * @mbox: pointer to the driver internal queue element for mailbox command.
5778 *
5779 * This routine prepares and issues a Reject (RJT) response IOCB
5780 * command. If a @mbox pointer is passed in, it will be put into the
5781 * context_un.mbox field of the IOCB for the completion callback function
5782 * to issue to the HBA later.
5783 *
5784 * Note that the ndlp reference count will be incremented by 1 for holding the
5785 * ndlp and the reference to ndlp will be stored into the ndlp field of
5786 * the IOCB for the completion callback function to the reject response
5787 * ELS IOCB command.
5788 *
5789 * Return code
5790 * 0 - Successfully issued reject response
5791 * 1 - Failed to issue reject response
5792 **/
5793 int
5794 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
5795 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5796 LPFC_MBOXQ_t *mbox)
5797 {
5798 int rc;
5799 struct lpfc_hba *phba = vport->phba;
5800 IOCB_t *icmd;
5801 IOCB_t *oldcmd;
5802 union lpfc_wqe128 *wqe;
5803 struct lpfc_iocbq *elsiocb;
5804 uint8_t *pcmd;
5805 uint16_t cmdsize;
5806
5807 cmdsize = 2 * sizeof(uint32_t);
5808 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5809 ndlp->nlp_DID, ELS_CMD_LS_RJT);
5810 if (!elsiocb)
5811 return 1;
5812
5813 if (phba->sli_rev == LPFC_SLI_REV4) {
5814 wqe = &elsiocb->wqe;
5815 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5816 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
5817 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5818 get_job_rcvoxid(phba, oldiocb));
5819 } else {
5820 icmd = &elsiocb->iocb;
5821 oldcmd = &oldiocb->iocb;
5822 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5823 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5824 }
5825
5826 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
5827
5828 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5829 pcmd += sizeof(uint32_t);
5830 *((uint32_t *) (pcmd)) = rejectError;
5831
5832 if (mbox)
5833 elsiocb->context_un.mbox = mbox;
5834
5835 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
5836 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5837 "0129 Xmit ELS RJT x%x response tag x%x "
5838 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5839 "rpi x%x\n",
5840 rejectError, elsiocb->iotag,
5841 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
5842 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
5843 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5844 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
5845 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
5846
5847 phba->fc_stat.elsXmitLSRJT++;
5848 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5849 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5850 if (!elsiocb->ndlp) {
5851 lpfc_els_free_iocb(phba, elsiocb);
5852 return 1;
5853 }
5854
5855 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the
5856 * node's assigned RPI gets released provided this node is not already
5857 * registered with the transport.
5858 */
5859 if (phba->sli_rev == LPFC_SLI_REV4 &&
5860 vport->port_type == LPFC_NPIV_PORT &&
5861 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
5862 spin_lock_irq(&ndlp->lock);
5863 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5864 spin_unlock_irq(&ndlp->lock);
5865 }
5866
5867 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5868 if (rc == IOCB_ERROR) {
5869 lpfc_els_free_iocb(phba, elsiocb);
5870 lpfc_nlp_put(ndlp);
5871 return 1;
5872 }
5873
5874 return 0;
5875 }
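
/* Illustrative sketch (hypothetical caller): @rejectError packs the LS_RJT
 * reason and explanation codes, so callers typically fill a struct ls_rjt
 * and hand over its word form:
 *
 *	struct ls_rjt stat;
 *
 *	memset(&stat, 0, sizeof(stat));
 *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 */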
5876
5877 /**
5878 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric.
5879 * @vport: pointer to a host virtual N_Port data structure.
5880 * @cmdiocb: pointer to the original lpfc command iocb data structure.
5881 * @ndlp: NPort to where rsp is directed
5882 *
5883 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate
5884 * this N_Port's support of hardware signals in its Congestion
5885 * Capabilities Descriptor.
5886 *
5887 * Return code
5888 * 0 - Successfully issued edc rsp command
5889 * 1 - Failed to issue edc rsp command
5890 **/
5891 static int
5892 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5893 struct lpfc_nodelist *ndlp)
5894 {
5895 struct lpfc_hba *phba = vport->phba;
5896 struct fc_els_edc_resp *edc_rsp;
5897 struct fc_tlv_desc *tlv;
5898 struct lpfc_iocbq *elsiocb;
5899 IOCB_t *icmd, *cmd;
5900 union lpfc_wqe128 *wqe;
5901 u32 cgn_desc_size, lft_desc_size;
5902 u16 cmdsize;
5903 uint8_t *pcmd;
5904 int rc;
5905
5906 cmdsize = sizeof(struct fc_els_edc_resp);
5907 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
5908 lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
5909 sizeof(struct fc_diag_lnkflt_desc) : 0;
5910 cmdsize += cgn_desc_size + lft_desc_size;
5911 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
5912 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5913 if (!elsiocb)
5914 return 1;
5915
5916 if (phba->sli_rev == LPFC_SLI_REV4) {
5917 wqe = &elsiocb->wqe;
5918 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5919 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */
5920 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5921 get_job_rcvoxid(phba, cmdiocb));
5922 } else {
5923 icmd = &elsiocb->iocb;
5924 cmd = &cmdiocb->iocb;
5925 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
5926 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
5927 }
5928
5929 pcmd = elsiocb->cmd_dmabuf->virt;
5930 memset(pcmd, 0, cmdsize);
5931
5932 edc_rsp = (struct fc_els_edc_resp *)pcmd;
5933 edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC;
5934 edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) +
5935 cgn_desc_size + lft_desc_size);
5936 edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
5937 edc_rsp->lsri.desc_len = cpu_to_be32(
5938 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
5939 edc_rsp->lsri.rqst_w0.cmd = ELS_EDC;
5940 tlv = edc_rsp->desc;
5941 lpfc_format_edc_cgn_desc(phba, tlv);
5942 tlv = fc_tlv_next_desc(tlv);
5943 if (lft_desc_size)
5944 lpfc_format_edc_lft_desc(phba, tlv);
5945
5946 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5947 "Issue EDC ACC: did:x%x flg:x%x refcnt %d",
5948 ndlp->nlp_DID, ndlp->nlp_flag,
5949 kref_read(&ndlp->kref));
5950 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5951
5952 phba->fc_stat.elsXmitACC++;
5953 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5954 if (!elsiocb->ndlp) {
5955 lpfc_els_free_iocb(phba, elsiocb);
5956 return 1;
5957 }
5958
5959 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5960 if (rc == IOCB_ERROR) {
5961 lpfc_els_free_iocb(phba, elsiocb);
5962 lpfc_nlp_put(ndlp);
5963 return 1;
5964 }
5965
5966 /* Xmit ELS ACC response tag <ulpIoTag> */
5967 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5968 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
5969 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5970 "RPI: x%x, fc_flag x%lx\n",
5971 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5972 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5973 ndlp->nlp_rpi, vport->fc_flag);
5974
5975 return 0;
5976 }
5977
5978 /**
5979 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
5980 * @vport: pointer to a virtual N_Port data structure.
5981 * @oldiocb: pointer to the original lpfc command iocb data structure.
5982 * @ndlp: pointer to a node-list data structure.
5983 *
5984 * This routine prepares and issues an Accept (ACC) response to Address
5985 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
5986 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
5987 *
5988 * Note that the ndlp reference count will be incremented by 1 for holding the
5989 * ndlp and the reference to ndlp will be stored into the ndlp field of
5990 * the IOCB for the completion callback function to the ADISC Accept response
5991 * ELS IOCB command.
5992 *
5993 * Return code
5994 * 0 - Successfully issued acc adisc response
5995 * 1 - Failed to issue adisc acc response
5996 **/
5997 int
5998 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5999 struct lpfc_nodelist *ndlp)
6000 {
6001 struct lpfc_hba *phba = vport->phba;
6002 ADISC *ap;
6003 IOCB_t *icmd, *oldcmd;
6004 union lpfc_wqe128 *wqe;
6005 struct lpfc_iocbq *elsiocb;
6006 uint8_t *pcmd;
6007 uint16_t cmdsize;
6008 int rc;
6009 u32 ulp_context;
6010
6011 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
6012 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6013 ndlp->nlp_DID, ELS_CMD_ACC);
6014 if (!elsiocb)
6015 return 1;
6016
6017 if (phba->sli_rev == LPFC_SLI_REV4) {
6018 wqe = &elsiocb->wqe;
6019 /* XRI / rx_id */
6020 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6021 get_job_ulpcontext(phba, oldiocb));
6022 ulp_context = get_job_ulpcontext(phba, elsiocb);
6023 /* oxid */
6024 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6025 get_job_rcvoxid(phba, oldiocb));
6026 } else {
6027 icmd = &elsiocb->iocb;
6028 oldcmd = &oldiocb->iocb;
6029 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6030 ulp_context = elsiocb->iocb.ulpContext;
6031 icmd->unsli3.rcvsli3.ox_id =
6032 oldcmd->unsli3.rcvsli3.ox_id;
6033 }
6034
6035 /* Xmit ADISC ACC response tag <ulpIoTag> */
6036 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6037 "0130 Xmit ADISC ACC response iotag x%x xri: "
6038 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
6039 elsiocb->iotag, ulp_context,
6040 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6041 ndlp->nlp_rpi);
6042 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6043
6044 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6045 pcmd += sizeof(uint32_t);
6046
6047 ap = (ADISC *) (pcmd);
6048 ap->hardAL_PA = phba->fc_pref_ALPA;
6049 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
6050 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
6051 ap->DID = be32_to_cpu(vport->fc_myDID);
6052
6053 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6054 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
6055 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6056
6057 phba->fc_stat.elsXmitACC++;
6058 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6059 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6060 if (!elsiocb->ndlp) {
6061 lpfc_els_free_iocb(phba, elsiocb);
6062 return 1;
6063 }
6064
6065 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6066 if (rc == IOCB_ERROR) {
6067 lpfc_els_free_iocb(phba, elsiocb);
6068 lpfc_nlp_put(ndlp);
6069 return 1;
6070 }
6071
6072 return 0;
6073 }
6074
6075 /**
6076 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
6077 * @vport: pointer to a virtual N_Port data structure.
6078 * @oldiocb: pointer to the original lpfc command iocb data structure.
6079 * @ndlp: pointer to a node-list data structure.
6080 *
6081 * This routine prepares and issues an Accept (ACC) response to Process
6082 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
6083 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
6084 *
6085 * Note that the ndlp reference count will be incremented by 1 for holding the
6086 * ndlp and the reference to ndlp will be stored into the ndlp field of
6087 * the IOCB for the completion callback function to the PRLI Accept response
6088 * ELS IOCB command.
6089 *
6090 * Return code
6091 * 0 - Successfully issued acc prli response
6092 * 1 - Failed to issue acc prli response
6093 **/
6094 int
6095 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
6096 struct lpfc_nodelist *ndlp)
6097 {
6098 struct lpfc_hba *phba = vport->phba;
6099 PRLI *npr;
6100 struct lpfc_nvme_prli *npr_nvme;
6101 lpfc_vpd_t *vpd;
6102 IOCB_t *icmd;
6103 IOCB_t *oldcmd;
6104 union lpfc_wqe128 *wqe;
6105 struct lpfc_iocbq *elsiocb;
6106 uint8_t *pcmd;
6107 uint16_t cmdsize;
6108 uint32_t prli_fc4_req, *req_payload;
6109 struct lpfc_dmabuf *req_buf;
6110 int rc;
6111 u32 elsrspcmd, ulp_context;
6112
6113 /* Need the incoming PRLI payload to determine if the ACC is for an
6114 * FC4 or NVME PRLI type. The PRLI type is at word 1.
6115 */
6116 req_buf = oldiocb->cmd_dmabuf;
6117 req_payload = (((uint32_t *)req_buf->virt) + 1);
6118
6119 /* PRLI type payload is at byte 3 for FCP or NVME. */
6120 prli_fc4_req = be32_to_cpu(*req_payload);
6121 prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
6122 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6123 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n",
6124 prli_fc4_req, *((uint32_t *)req_payload));
6125
6126 if (prli_fc4_req == PRLI_FCP_TYPE) {
6127 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
6128 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
6129 } else if (prli_fc4_req == PRLI_NVME_TYPE) {
6130 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
6131 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
6132 } else {
6133 return 1;
6134 }
6135
6136 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6137 ndlp->nlp_DID, elsrspcmd);
6138 if (!elsiocb)
6139 return 1;
6140
6141 if (phba->sli_rev == LPFC_SLI_REV4) {
6142 wqe = &elsiocb->wqe;
6143 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6144 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6145 ulp_context = get_job_ulpcontext(phba, elsiocb);
6146 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6147 get_job_rcvoxid(phba, oldiocb));
6148 } else {
6149 icmd = &elsiocb->iocb;
6150 oldcmd = &oldiocb->iocb;
6151 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6152 ulp_context = elsiocb->iocb.ulpContext;
6153 icmd->unsli3.rcvsli3.ox_id =
6154 oldcmd->unsli3.rcvsli3.ox_id;
6155 }
6156
6157 /* Xmit PRLI ACC response tag <ulpIoTag> */
6158 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6159 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
6160 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6161 elsiocb->iotag, ulp_context,
6162 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6163 ndlp->nlp_rpi);
6164 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6165 memset(pcmd, 0, cmdsize);
6166
6167 *((uint32_t *)(pcmd)) = elsrspcmd;
6168 pcmd += sizeof(uint32_t);
6169
6170 /* For PRLI, remainder of payload is PRLI parameter page */
6171 vpd = &phba->vpd;
6172
6173 if (prli_fc4_req == PRLI_FCP_TYPE) {
6174 /*
6175 * If the remote port is a target and our firmware version
6176 * is 3.20 or later, set the following bits for FC-TAPE
6177 * support.
6178 */
6179 npr = (PRLI *) pcmd;
6180 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
6181 (vpd->rev.feaLevelHigh >= 0x02)) {
6182 npr->ConfmComplAllowed = 1;
6183 npr->Retry = 1;
6184 npr->TaskRetryIdReq = 1;
6185 }
6186 npr->acceptRspCode = PRLI_REQ_EXECUTED;
6187
6188 /* Set image pair for complementary pairs only. */
6189 if (ndlp->nlp_type & NLP_FCP_TARGET)
6190 npr->estabImagePair = 1;
6191 else
6192 npr->estabImagePair = 0;
6193 npr->readXferRdyDis = 1;
6194 npr->ConfmComplAllowed = 1;
6195 npr->prliType = PRLI_FCP_TYPE;
6196 npr->initiatorFunc = 1;
6197
6198 /* Xmit PRLI ACC response tag <ulpIoTag> */
6199 lpfc_printf_vlog(vport, KERN_INFO,
6200 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
6201 "6014 FCP issue PRLI ACC imgpair %d "
6202 "retry %d task %d\n",
6203 npr->estabImagePair,
6204 npr->Retry, npr->TaskRetryIdReq);
6205
6206 } else if (prli_fc4_req == PRLI_NVME_TYPE) {
6207 /* Respond with an NVME PRLI Type */
6208 npr_nvme = (struct lpfc_nvme_prli *) pcmd;
6209 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
6210 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
6211 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
6212 if (phba->nvmet_support) {
6213 bf_set(prli_tgt, npr_nvme, 1);
6214 bf_set(prli_disc, npr_nvme, 1);
6215 if (phba->cfg_nvme_enable_fb) {
6216 bf_set(prli_fba, npr_nvme, 1);
6217
6218 /* TBD. Target mode needs to post buffers
6219 * that support the configured first burst
6220 * byte size.
6221 */
6222 bf_set(prli_fb_sz, npr_nvme,
6223 phba->cfg_nvmet_fb_size);
6224 }
6225 } else {
6226 bf_set(prli_init, npr_nvme, 1);
6227 }
6228
6229 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
6230 "6015 NVME issue PRLI ACC word1 x%08x "
6231 "word4 x%08x word5 x%08x flag x%x, "
6232 "fcp_info x%x nlp_type x%x\n",
6233 npr_nvme->word1, npr_nvme->word4,
6234 npr_nvme->word5, ndlp->nlp_flag,
6235 ndlp->nlp_fcp_info, ndlp->nlp_type);
6236 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
6237 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
6238 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
6239 } else
6240 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6241 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
6242 prli_fc4_req, ndlp->nlp_fc4_type,
6243 ndlp->nlp_DID);
6244
6245 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6246 "Issue ACC PRLI: did:x%x flg:x%x",
6247 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6248
6249 phba->fc_stat.elsXmitACC++;
6250 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6251 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6252 if (!elsiocb->ndlp) {
6253 lpfc_els_free_iocb(phba, elsiocb);
6254 return 1;
6255 }
6256
6257 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6258 if (rc == IOCB_ERROR) {
6259 lpfc_els_free_iocb(phba, elsiocb);
6260 lpfc_nlp_put(ndlp);
6261 return 1;
6262 }
6263
6264 return 0;
6265 }
6266
6267 /**
6268 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
6269 * @vport: pointer to a virtual N_Port data structure.
6270 * @format: rnid command format.
6271 * @oldiocb: pointer to the original lpfc command iocb data structure.
6272 * @ndlp: pointer to a node-list data structure.
6273 *
6274 * This routine issues a Request Node Identification Data (RNID) Accept
6275 * (ACC) response. It constructs the RNID ACC response command according to
6276 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
6277 * issue the response.
6278 *
6279 * Note that the ndlp reference count will be incremented by 1 for holding the
6280 * ndlp and the reference to ndlp will be stored into the ndlp field of
6281 * the IOCB for the completion callback function.
6282 *
6283 * Return code
6284 * 0 - Successfully issued acc rnid response
6285 * 1 - Failed to issue acc rnid response
6286 **/
6287 static int
6288 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
6289 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
6290 {
6291 struct lpfc_hba *phba = vport->phba;
6292 RNID *rn;
6293 IOCB_t *icmd, *oldcmd;
6294 union lpfc_wqe128 *wqe;
6295 struct lpfc_iocbq *elsiocb;
6296 uint8_t *pcmd;
6297 uint16_t cmdsize;
6298 int rc;
6299 u32 ulp_context;
6300
6301 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
6302 + (2 * sizeof(struct lpfc_name));
6303 if (format)
6304 cmdsize += sizeof(RNID_TOP_DISC);
6305
6306 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6307 ndlp->nlp_DID, ELS_CMD_ACC);
6308 if (!elsiocb)
6309 return 1;
6310
6311 if (phba->sli_rev == LPFC_SLI_REV4) {
6312 wqe = &elsiocb->wqe;
6313 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6314 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6315 ulp_context = get_job_ulpcontext(phba, elsiocb);
6316 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6317 get_job_rcvoxid(phba, oldiocb));
6318 } else {
6319 icmd = &elsiocb->iocb;
6320 oldcmd = &oldiocb->iocb;
6321 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6322 ulp_context = elsiocb->iocb.ulpContext;
6323 icmd->unsli3.rcvsli3.ox_id =
6324 oldcmd->unsli3.rcvsli3.ox_id;
6325 }
6326
6327 /* Xmit RNID ACC response tag <ulpIoTag> */
6328 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6329 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
6330 elsiocb->iotag, ulp_context);
6331 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6332 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6333 pcmd += sizeof(uint32_t);
6334
6335 memset(pcmd, 0, sizeof(RNID));
6336 rn = (RNID *) (pcmd);
6337 rn->Format = format;
6338 rn->CommonLen = (2 * sizeof(struct lpfc_name));
6339 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
6340 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
6341 switch (format) {
6342 case 0:
6343 rn->SpecificLen = 0;
6344 break;
6345 case RNID_TOPOLOGY_DISC:
6346 rn->SpecificLen = sizeof(RNID_TOP_DISC);
6347 memcpy(&rn->un.topologyDisc.portName,
6348 &vport->fc_portname, sizeof(struct lpfc_name));
6349 rn->un.topologyDisc.unitType = RNID_HBA;
6350 rn->un.topologyDisc.physPort = 0;
6351 rn->un.topologyDisc.attachedNodes = 0;
6352 break;
6353 default:
6354 rn->CommonLen = 0;
6355 rn->SpecificLen = 0;
6356 break;
6357 }
6358
6359 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6360 "Issue ACC RNID: did:x%x flg:x%x refcnt %d",
6361 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6362
6363 phba->fc_stat.elsXmitACC++;
6364 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6365 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6366 if (!elsiocb->ndlp) {
6367 lpfc_els_free_iocb(phba, elsiocb);
6368 return 1;
6369 }
6370
6371 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6372 if (rc == IOCB_ERROR) {
6373 lpfc_els_free_iocb(phba, elsiocb);
6374 lpfc_nlp_put(ndlp);
6375 return 1;
6376 }
6377
6378 return 0;
6379 }
6380
6381 /**
6382 * lpfc_els_clear_rrq - Clear the active rrq that this RRQ ELS describes.
6383 * @vport: pointer to a virtual N_Port data structure.
6384 * @iocb: pointer to the lpfc command iocb data structure.
6385 * @ndlp: pointer to a node-list data structure.
6386 *
6387 * Return: none
6388 **/
6389 static void
6390 lpfc_els_clear_rrq(struct lpfc_vport *vport,
6391 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
6392 {
6393 struct lpfc_hba *phba = vport->phba;
6394 uint8_t *pcmd;
6395 struct RRQ *rrq;
6396 uint16_t rxid;
6397 uint16_t xri;
6398 struct lpfc_node_rrq *prrq;
6399
6400
6401 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt;
6402 pcmd += sizeof(uint32_t);
6403 rrq = (struct RRQ *)pcmd;
6404 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
6405 rxid = bf_get(rrq_rxid, rrq);
6406
6407 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6408 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
6409 " x%x x%x\n",
6410 be32_to_cpu(bf_get(rrq_did, rrq)),
6411 bf_get(rrq_oxid, rrq),
6412 rxid,
6413 get_wqe_reqtag(iocb),
6414 get_job_ulpcontext(phba, iocb));
6415
6416 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6417 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
6418 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
6419 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
6420 xri = bf_get(rrq_oxid, rrq);
6421 else
6422 xri = rxid;
6423 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
6424 if (prrq)
6425 lpfc_clr_rrq_active(phba, xri, prrq);
6426 return;
6427 }
6428
6429 /**
6430 * lpfc_els_rsp_echo_acc - Issue echo acc response
6431 * @vport: pointer to a virtual N_Port data structure.
6432 * @data: pointer to echo data to return in the accept.
6433 * @oldiocb: pointer to the original lpfc command iocb data structure.
6434 * @ndlp: pointer to a node-list data structure.
6435 *
6436 * Return code
6437 * 0 - Successfully issued acc echo response
6438 * 1 - Failed to issue acc echo response
6439 **/
6440 static int
6441 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
6442 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
6443 {
6444 struct lpfc_hba *phba = vport->phba;
6445 IOCB_t *icmd, *oldcmd;
6446 union lpfc_wqe128 *wqe;
6447 struct lpfc_iocbq *elsiocb;
6448 uint8_t *pcmd;
6449 uint16_t cmdsize;
6450 int rc;
6451 u32 ulp_context;
6452
6453 if (phba->sli_rev == LPFC_SLI_REV4)
6454 cmdsize = oldiocb->wcqe_cmpl.total_data_placed;
6455 else
6456 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
6457
6458 /* The accumulated length can exceed the BPL_SIZE. For
6459 * now, use this as the limit
6460 */
6461 if (cmdsize > LPFC_BPL_SIZE)
6462 cmdsize = LPFC_BPL_SIZE;
6463 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6464 ndlp->nlp_DID, ELS_CMD_ACC);
6465 if (!elsiocb)
6466 return 1;
6467
6468 if (phba->sli_rev == LPFC_SLI_REV4) {
6469 wqe = &elsiocb->wqe;
6470 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6471 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6472 ulp_context = get_job_ulpcontext(phba, elsiocb);
6473 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6474 get_job_rcvoxid(phba, oldiocb));
6475 } else {
6476 icmd = &elsiocb->iocb;
6477 oldcmd = &oldiocb->iocb;
6478 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6479 ulp_context = elsiocb->iocb.ulpContext;
6480 icmd->unsli3.rcvsli3.ox_id =
6481 oldcmd->unsli3.rcvsli3.ox_id;
6482 }
6483
6484 /* Xmit ECHO ACC response tag <ulpIoTag> */
6485 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6486 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
6487 elsiocb->iotag, ulp_context);
6488 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6489 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6490 pcmd += sizeof(uint32_t);
6491 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
6492
6493 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6494 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
6495 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6496
6497 phba->fc_stat.elsXmitACC++;
6498 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6499 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6500 if (!elsiocb->ndlp) {
6501 lpfc_els_free_iocb(phba, elsiocb);
6502 return 1;
6503 }
6504
6505 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6506 if (rc == IOCB_ERROR) {
6507 lpfc_els_free_iocb(phba, elsiocb);
6508 lpfc_nlp_put(ndlp);
6509 return 1;
6510 }
6511
6512 return 0;
6513 }
6514
6515 /**
6516 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
6517 * @vport: pointer to a host virtual N_Port data structure.
6518 *
6519 * This routine issues Address Discover (ADISC) ELS commands to those
6520 * N_Ports which are in node port recovery state and ADISC has not been issued
6521 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
6522 * lpfc_issue_els_adisc() routine, the per-@vport discovery count
6523 * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches a
6524 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
6525 * be marked with the FC_NLP_MORE bit and the process of issuing the
6526 * remaining ADISC IOCBs is deferred for later pick up. On the other hand,
6527 * if no ADISC IOCB was issued after walking through all the ndlps on the
6528 * @vport, the FC_NLP_MORE bit shall be cleared from the @vport fc_flag,
6529 * indicating no more ADISCs need to be sent.
6530 *
6531 * Return code
6532 * The number of N_Ports with adisc issued.
6533 **/
6534 int
6535 lpfc_els_disc_adisc(struct lpfc_vport *vport)
6536 {
6537 struct lpfc_nodelist *ndlp, *next_ndlp;
6538 int sentadisc = 0;
6539
6540 /* go thru NPR nodes and issue any remaining ELS ADISCs */
6541 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6542
6543 if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
6544 !(ndlp->nlp_flag & NLP_NPR_ADISC))
6545 continue;
6546
6547 spin_lock_irq(&ndlp->lock);
6548 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
6549 spin_unlock_irq(&ndlp->lock);
6550
6551 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
6552 /* This node was marked for ADISC but was not picked
6553 * for discovery. This is possible if the node was
6554 * missing in the GID_FT response.
6555 *
6556 * At the time the node was marked for ADISC, we skipped
6557 * the unreg from the backend.
6558 */
6559 lpfc_nlp_unreg_node(vport, ndlp);
6560 lpfc_unreg_rpi(vport, ndlp);
6561 continue;
6562 }
6563
6564 ndlp->nlp_prev_state = ndlp->nlp_state;
6565 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6566 lpfc_issue_els_adisc(vport, ndlp, 0);
6567 sentadisc++;
6568 vport->num_disc_nodes++;
6569 if (vport->num_disc_nodes >=
6570 vport->cfg_discovery_threads) {
6571 set_bit(FC_NLP_MORE, &vport->fc_flag);
6572 break;
6573 }
6574
6575 }
6576 if (sentadisc == 0)
6577 clear_bit(FC_NLP_MORE, &vport->fc_flag);
6578 return sentadisc;
6579 }
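/*
 * Illustrative sketch of the discovery throttle used above (not driver
 * code; assumes cfg_discovery_threads == 32):
 *
 *	for each NPR node still needing ADISC:
 *		issue ADISC; num_disc_nodes++;
 *		if (num_disc_nodes >= 32) { set FC_NLP_MORE; stop; }
 *
 * The remaining nodes are picked up on a later pass, typically when an
 * ADISC completion re-enters this routine (e.g. via lpfc_more_adisc()),
 * until a pass issues nothing and FC_NLP_MORE is cleared.
 */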
6580
6581 /**
6582 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
6583 * @vport: pointer to a host virtual N_Port data structure.
6584 *
6585 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
6586 * which are in node port recovery state on a @vport. Each time an ELS
6587 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
6588 * the per-@vport discovery count (num_disc_nodes) is incremented. If
6589 * num_disc_nodes reaches the pre-configured threshold
6590 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
6591 * fc_flag and the issuing of the remaining PLOGI IOCBs is deferred for a
6592 * later pass. On the other hand, if the walk of the @vport's ndlps
6593 * completes without issuing any PLOGI IOCB, the FC_NLP_MORE bit is
6594 * cleared from the @vport fc_flag, indicating that no more PLOGIs need
6595 * to be sent.
6596 *
6597 * Return code
6598 * The number of N_Ports with plogi issued.
6599 **/
6600 int
6601 lpfc_els_disc_plogi(struct lpfc_vport *vport)
6602 {
6603 struct lpfc_nodelist *ndlp, *next_ndlp;
6604 int sentplogi = 0;
6605
6606 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
6607 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6608 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
6609 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
6610 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
6611 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
6612 ndlp->nlp_prev_state = ndlp->nlp_state;
6613 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6614 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
6615 sentplogi++;
6616 vport->num_disc_nodes++;
6617 if (vport->num_disc_nodes >=
6618 vport->cfg_discovery_threads) {
6619 set_bit(FC_NLP_MORE, &vport->fc_flag);
6620 break;
6621 }
6622 }
6623 }
6624
6625 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6626 "6452 Discover PLOGI %d flag x%lx\n",
6627 sentplogi, vport->fc_flag);
6628
6629 if (sentplogi)
6630 lpfc_set_disctmo(vport);
6631 else
6632 clear_bit(FC_NLP_MORE, &vport->fc_flag);
6633 return sentplogi;
6634 }
6635
6636 static uint32_t
6637 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
6638 uint32_t word0)
6639 {
6640
6641 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
6642 desc->payload.els_req = word0;
6643 desc->length = cpu_to_be32(sizeof(desc->payload));
6644
6645 return sizeof(struct fc_rdp_link_service_desc);
6646 }
6647
6648 static uint32_t
6649 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
6650 uint8_t *page_a0, uint8_t *page_a2)
6651 {
6652 uint16_t wavelength;
6653 uint16_t temperature;
6654 uint16_t rx_power;
6655 uint16_t tx_bias;
6656 uint16_t tx_power;
6657 uint16_t vcc;
6658 uint16_t flag = 0;
6659 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
6660 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
6661
6662 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
6663
6664 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
6665 &page_a0[SSF_TRANSCEIVER_CODE_B4];
6666 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
6667 &page_a0[SSF_TRANSCEIVER_CODE_B5];
6668
6669 if ((trasn_code_byte4->fc_sw_laser) ||
6670 (trasn_code_byte5->fc_sw_laser_sl) ||
6671 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */
6672 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
6673 } else if (trasn_code_byte4->fc_lw_laser) {
6674 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
6675 page_a0[SSF_WAVELENGTH_B0];
6676 if (wavelength == SFP_WAVELENGTH_LC1310)
6677 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
6678 if (wavelength == SFP_WAVELENGTH_LL1550)
6679 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
6680 }
6681 /* check if its SFP+ */
6682 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
6683 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
6684 << SFP_FLAG_CT_SHIFT;
6685
6686 /* check if its OPTICAL */
6687 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
6688 SFP_FLAG_IS_OPTICAL_PORT : 0)
6689 << SFP_FLAG_IS_OPTICAL_SHIFT;
6690
6691 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
6692 page_a2[SFF_TEMPERATURE_B0]);
6693 vcc = (page_a2[SFF_VCC_B1] << 8 |
6694 page_a2[SFF_VCC_B0]);
6695 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
6696 page_a2[SFF_TXPOWER_B0]);
6697 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
6698 page_a2[SFF_TX_BIAS_CURRENT_B0]);
6699 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
6700 page_a2[SFF_RXPOWER_B0]);
6701 desc->sfp_info.temperature = cpu_to_be16(temperature);
6702 desc->sfp_info.rx_power = cpu_to_be16(rx_power);
6703 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
6704 desc->sfp_info.tx_power = cpu_to_be16(tx_power);
6705 desc->sfp_info.vcc = cpu_to_be16(vcc);
6706
6707 desc->sfp_info.flags = cpu_to_be16(flag);
6708 desc->length = cpu_to_be32(sizeof(desc->sfp_info));
6709
6710 return sizeof(struct fc_rdp_sfp_desc);
6711 }
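/*
 * Note on the SFP descriptor above: page_a0/page_a2 are raw copies of the
 * SFF-8472 identification (A0h) and diagnostics (A2h) pages. Each live
 * diagnostic value is assembled from its MSB/LSB byte pair into a 16-bit
 * quantity and stored big-endian in the descriptor. As a worked example
 * (assuming an internally calibrated module; per SFF-8472 one temperature
 * LSB is 1/256 degC): raw bytes 0x1A 0x80 -> 0x1A80 = 6784 -> 26.5 degC.
 */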
6712
6713 static uint32_t
6714 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
6715 READ_LNK_VAR *stat)
6716 {
6717 uint32_t type;
6718
6719 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
6720
6721 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
6722
6723 desc->info.port_type = cpu_to_be32(type);
6724
6725 desc->info.link_status.link_failure_cnt =
6726 cpu_to_be32(stat->linkFailureCnt);
6727 desc->info.link_status.loss_of_synch_cnt =
6728 cpu_to_be32(stat->lossSyncCnt);
6729 desc->info.link_status.loss_of_signal_cnt =
6730 cpu_to_be32(stat->lossSignalCnt);
6731 desc->info.link_status.primitive_seq_proto_err =
6732 cpu_to_be32(stat->primSeqErrCnt);
6733 desc->info.link_status.invalid_trans_word =
6734 cpu_to_be32(stat->invalidXmitWord);
6735 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
6736
6737 desc->length = cpu_to_be32(sizeof(desc->info));
6738
6739 return sizeof(struct fc_rdp_link_error_status_desc);
6740 }
6741
6742 static uint32_t
6743 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
6744 struct lpfc_vport *vport)
6745 {
6746 uint32_t bbCredit;
6747
6748 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
6749
6750 bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
6751 (vport->fc_sparam.cmn.bbCreditMsb << 8);
6752 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
6753 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
6754 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
6755 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
6756 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
6757 } else {
6758 desc->bbc_info.attached_port_bbc = 0;
6759 }
6760
6761 desc->bbc_info.rtt = 0;
6762 desc->length = cpu_to_be32(sizeof(desc->bbc_info));
6763
6764 return sizeof(struct fc_rdp_bbc_desc);
6765 }
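/*
 * BB credit example for the descriptor above: the service parameters keep
 * the buffer-to-buffer credit split across two bytes, so bbCreditMsb = 0x00
 * and bbCreditLsb = 0x40 yield (0x00 << 8) | 0x40 = 64 credits. The
 * attached port's credit is only reported when the topology is not a loop.
 */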
6766
6767 static uint32_t
6768 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
6769 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
6770 {
6771 uint32_t flags = 0;
6772
6773 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6774
6775 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
6776 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
6777 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
6778 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
6779
6780 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
6781 flags |= RDP_OET_HIGH_ALARM;
6782 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
6783 flags |= RDP_OET_LOW_ALARM;
6784 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
6785 flags |= RDP_OET_HIGH_WARNING;
6786 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
6787 flags |= RDP_OET_LOW_WARNING;
6788
6789 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
6790 desc->oed_info.function_flags = cpu_to_be32(flags);
6791 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6792 return sizeof(struct fc_rdp_oed_sfp_desc);
6793 }
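/*
 * The lpfc_rdp_res_oed_*_desc() routines here and below (temperature,
 * voltage, tx bias, tx power, rx power) all follow the same pattern: copy
 * the four alarm/warning thresholds from SFF-8472 page A2, translate the
 * cached phba->sfp_alarm / phba->sfp_warning transgression bits into
 * RDP_OET_* flags, and encode the measurement type in the upper nibble via
 * RDP_OED_TYPE_SHIFT.
 */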
6794
6795 static uint32_t
6796 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
6797 struct fc_rdp_oed_sfp_desc *desc,
6798 uint8_t *page_a2)
6799 {
6800 uint32_t flags = 0;
6801
6802 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6803
6804 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
6805 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
6806 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
6807 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
6808
6809 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
6810 flags |= RDP_OET_HIGH_ALARM;
6811 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
6812 flags |= RDP_OET_LOW_ALARM;
6813 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
6814 flags |= RDP_OET_HIGH_WARNING;
6815 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
6816 flags |= RDP_OET_LOW_WARNING;
6817
6818 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
6819 desc->oed_info.function_flags = cpu_to_be32(flags);
6820 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6821 return sizeof(struct fc_rdp_oed_sfp_desc);
6822 }
6823
6824 static uint32_t
6825 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
6826 struct fc_rdp_oed_sfp_desc *desc,
6827 uint8_t *page_a2)
6828 {
6829 uint32_t flags = 0;
6830
6831 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6832
6833 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
6834 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
6835 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
6836 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
6837
6838 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
6839 flags |= RDP_OET_HIGH_ALARM;
6840 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
6841 flags |= RDP_OET_LOW_ALARM;
6842 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
6843 flags |= RDP_OET_HIGH_WARNING;
6844 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
6845 flags |= RDP_OET_LOW_WARNING;
6846
6847 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
6848 desc->oed_info.function_flags = cpu_to_be32(flags);
6849 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6850 return sizeof(struct fc_rdp_oed_sfp_desc);
6851 }
6852
6853 static uint32_t
6854 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
6855 struct fc_rdp_oed_sfp_desc *desc,
6856 uint8_t *page_a2)
6857 {
6858 uint32_t flags = 0;
6859
6860 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6861
6862 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
6863 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
6864 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
6865 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
6866
6867 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
6868 flags |= RDP_OET_HIGH_ALARM;
6869 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
6870 flags |= RDP_OET_LOW_ALARM;
6871 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
6872 flags |= RDP_OET_HIGH_WARNING;
6873 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
6874 flags |= RDP_OET_LOW_WARNING;
6875
6876 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
6877 desc->oed_info.function_flags = cpu_to_be32(flags);
6878 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6879 return sizeof(struct fc_rdp_oed_sfp_desc);
6880 }
6881
6882
6883 static uint32_t
6884 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
6885 struct fc_rdp_oed_sfp_desc *desc,
6886 uint8_t *page_a2)
6887 {
6888 uint32_t flags = 0;
6889
6890 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6891
6892 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
6893 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
6894 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
6895 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
6896
6897 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6898 flags |= RDP_OET_HIGH_ALARM;
6899 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
6900 flags |= RDP_OET_LOW_ALARM;
6901 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6902 flags |= RDP_OET_HIGH_WARNING;
6903 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
6904 flags |= RDP_OET_LOW_WARNING;
6905
6906 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
6907 desc->oed_info.function_flags = cpu_to_be32(flags);
6908 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6909 return sizeof(struct fc_rdp_oed_sfp_desc);
6910 }
6911
6912 static uint32_t
6913 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
6914 uint8_t *page_a0, struct lpfc_vport *vport)
6915 {
6916 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
6917 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
6918 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
6919 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
6920 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
6921 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
6922 desc->length = cpu_to_be32(sizeof(desc->opd_info));
6923 return sizeof(struct fc_rdp_opd_sfp_desc);
6924 }
6925
6926 static uint32_t
6927 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
6928 {
6929 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
6930 return 0;
6931 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
6932
6933 desc->info.CorrectedBlocks =
6934 cpu_to_be32(stat->fecCorrBlkCount);
6935 desc->info.UncorrectableBlocks =
6936 cpu_to_be32(stat->fecUncorrBlkCount);
6937
6938 desc->length = cpu_to_be32(sizeof(desc->info));
6939
6940 return sizeof(struct fc_fec_rdp_desc);
6941 }
6942
6943 static uint32_t
6944 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
6945 {
6946 uint16_t rdp_cap = 0;
6947 uint16_t rdp_speed;
6948
6949 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
6950
6951 switch (phba->fc_linkspeed) {
6952 case LPFC_LINK_SPEED_1GHZ:
6953 rdp_speed = RDP_PS_1GB;
6954 break;
6955 case LPFC_LINK_SPEED_2GHZ:
6956 rdp_speed = RDP_PS_2GB;
6957 break;
6958 case LPFC_LINK_SPEED_4GHZ:
6959 rdp_speed = RDP_PS_4GB;
6960 break;
6961 case LPFC_LINK_SPEED_8GHZ:
6962 rdp_speed = RDP_PS_8GB;
6963 break;
6964 case LPFC_LINK_SPEED_10GHZ:
6965 rdp_speed = RDP_PS_10GB;
6966 break;
6967 case LPFC_LINK_SPEED_16GHZ:
6968 rdp_speed = RDP_PS_16GB;
6969 break;
6970 case LPFC_LINK_SPEED_32GHZ:
6971 rdp_speed = RDP_PS_32GB;
6972 break;
6973 case LPFC_LINK_SPEED_64GHZ:
6974 rdp_speed = RDP_PS_64GB;
6975 break;
6976 case LPFC_LINK_SPEED_128GHZ:
6977 rdp_speed = RDP_PS_128GB;
6978 break;
6979 case LPFC_LINK_SPEED_256GHZ:
6980 rdp_speed = RDP_PS_256GB;
6981 break;
6982 default:
6983 rdp_speed = RDP_PS_UNKNOWN;
6984 break;
6985 }
6986
6987 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
6988
6989 if (phba->lmt & LMT_256Gb)
6990 rdp_cap |= RDP_PS_256GB;
6991 if (phba->lmt & LMT_128Gb)
6992 rdp_cap |= RDP_PS_128GB;
6993 if (phba->lmt & LMT_64Gb)
6994 rdp_cap |= RDP_PS_64GB;
6995 if (phba->lmt & LMT_32Gb)
6996 rdp_cap |= RDP_PS_32GB;
6997 if (phba->lmt & LMT_16Gb)
6998 rdp_cap |= RDP_PS_16GB;
6999 if (phba->lmt & LMT_10Gb)
7000 rdp_cap |= RDP_PS_10GB;
7001 if (phba->lmt & LMT_8Gb)
7002 rdp_cap |= RDP_PS_8GB;
7003 if (phba->lmt & LMT_4Gb)
7004 rdp_cap |= RDP_PS_4GB;
7005 if (phba->lmt & LMT_2Gb)
7006 rdp_cap |= RDP_PS_2GB;
7007 if (phba->lmt & LMT_1Gb)
7008 rdp_cap |= RDP_PS_1GB;
7009
7010 if (rdp_cap == 0)
7011 rdp_cap = RDP_CAP_UNKNOWN;
7012 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
7013 rdp_cap |= RDP_CAP_USER_CONFIGURED;
7014
7015 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
7016 desc->length = cpu_to_be32(sizeof(desc->info));
7017 return sizeof(struct fc_rdp_port_speed_desc);
7018 }
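/*
 * Example for the port-speed descriptor above: an adapter whose link-mode
 * table (phba->lmt) advertises 8/16/32G and that is currently linked at
 * 16G reports speed = RDP_PS_16GB and capabilities =
 * RDP_PS_32GB | RDP_PS_16GB | RDP_PS_8GB, with RDP_CAP_USER_CONFIGURED
 * OR'ed in only when a fixed link speed has been configured.
 */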
7019
7020 static uint32_t
7021 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
7022 struct lpfc_vport *vport)
7023 {
7024
7025 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
7026
7027 memcpy(desc->port_names.wwnn, &vport->fc_nodename,
7028 sizeof(desc->port_names.wwnn));
7029
7030 memcpy(desc->port_names.wwpn, &vport->fc_portname,
7031 sizeof(desc->port_names.wwpn));
7032
7033 desc->length = cpu_to_be32(sizeof(desc->port_names));
7034 return sizeof(struct fc_rdp_port_name_desc);
7035 }
7036
7037 static uint32_t
7038 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
7039 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
7040 {
7041
7042 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
7043 if (test_bit(FC_FABRIC, &vport->fc_flag)) {
7044 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
7045 sizeof(desc->port_names.wwnn));
7046
7047 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
7048 sizeof(desc->port_names.wwpn));
7049 } else { /* Point to Point */
7050 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
7051 sizeof(desc->port_names.wwnn));
7052
7053 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
7054 sizeof(desc->port_names.wwpn));
7055 }
7056
7057 desc->length = cpu_to_be32(sizeof(desc->port_names));
7058 return sizeof(struct fc_rdp_port_name_desc);
7059 }
7060
7061 static void
7062 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
7063 int status)
7064 {
7065 struct lpfc_nodelist *ndlp = rdp_context->ndlp;
7066 struct lpfc_vport *vport = ndlp->vport;
7067 struct lpfc_iocbq *elsiocb;
7068 struct ulp_bde64 *bpl;
7069 IOCB_t *icmd;
7070 union lpfc_wqe128 *wqe;
7071 uint8_t *pcmd;
7072 struct ls_rjt *stat;
7073 struct fc_rdp_res_frame *rdp_res;
7074 uint32_t cmdsize, len;
7075 uint16_t *flag_ptr;
7076 int rc;
7077 u32 ulp_context;
7078
7079 if (status != SUCCESS)
7080 goto error;
7081
7082 /* This will change once we know the true size of the RDP payload */
7083 cmdsize = sizeof(struct fc_rdp_res_frame);
7084
7085 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
7086 lpfc_max_els_tries, rdp_context->ndlp,
7087 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
7088 if (!elsiocb)
7089 goto free_rdp_context;
7090
7091 ulp_context = get_job_ulpcontext(phba, elsiocb);
7092 if (phba->sli_rev == LPFC_SLI_REV4) {
7093 wqe = &elsiocb->wqe;
7094 /* ox-id of the frame */
7095 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7096 rdp_context->ox_id);
7097 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7098 rdp_context->rx_id);
7099 } else {
7100 icmd = &elsiocb->iocb;
7101 icmd->ulpContext = rdp_context->rx_id;
7102 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
7103 }
7104
7105 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7106 "2171 Xmit RDP response tag x%x xri x%x, "
7107 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
7108 elsiocb->iotag, ulp_context,
7109 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7110 ndlp->nlp_rpi);
7111 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt;
7112 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7113 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
7114 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7115
7116 /* Update Alarm and Warning */
7117 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
7118 phba->sfp_alarm |= *flag_ptr;
7119 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
7120 phba->sfp_warning |= *flag_ptr;
7121
7122 /* For RDP payload */
7123 len = 8;
7124 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
7125 (len + pcmd), ELS_CMD_RDP);
7126
7127 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
7128 rdp_context->page_a0, rdp_context->page_a2);
7129 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
7130 phba);
7131 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
7132 (len + pcmd), &rdp_context->link_stat);
7133 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
7134 (len + pcmd), vport);
7135 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
7136 (len + pcmd), vport, ndlp);
7137 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
7138 &rdp_context->link_stat);
7139 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
7140 &rdp_context->link_stat, vport);
7141 len += lpfc_rdp_res_oed_temp_desc(phba,
7142 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7143 rdp_context->page_a2);
7144 len += lpfc_rdp_res_oed_voltage_desc(phba,
7145 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7146 rdp_context->page_a2);
7147 len += lpfc_rdp_res_oed_txbias_desc(phba,
7148 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7149 rdp_context->page_a2);
7150 len += lpfc_rdp_res_oed_txpower_desc(phba,
7151 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7152 rdp_context->page_a2);
7153 len += lpfc_rdp_res_oed_rxpower_desc(phba,
7154 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7155 rdp_context->page_a2);
7156 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
7157 rdp_context->page_a0, vport);
7158
7159 rdp_res->length = cpu_to_be32(len - 8);
7160 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7161
7162 /* Now that we know the true size of the payload, update the BPL */
7163 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
7164 bpl->tus.f.bdeSize = len;
7165 bpl->tus.f.bdeFlags = 0;
7166 bpl->tus.w = le32_to_cpu(bpl->tus.w);
7167
7168 phba->fc_stat.elsXmitACC++;
7169 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7170 if (!elsiocb->ndlp) {
7171 lpfc_els_free_iocb(phba, elsiocb);
7172 goto free_rdp_context;
7173 }
7174
7175 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7176 if (rc == IOCB_ERROR) {
7177 lpfc_els_free_iocb(phba, elsiocb);
7178 lpfc_nlp_put(ndlp);
7179 }
7180
7181 goto free_rdp_context;
7182
7183 error:
7184 cmdsize = 2 * sizeof(uint32_t);
7185 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
7186 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
7187 if (!elsiocb)
7188 goto free_rdp_context;
7189
7190 if (phba->sli_rev == LPFC_SLI_REV4) {
7191 wqe = &elsiocb->wqe;
7192 /* ox-id of the frame */
7193 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7194 rdp_context->ox_id);
7195 bf_set(wqe_ctxt_tag,
7196 &wqe->xmit_els_rsp.wqe_com,
7197 rdp_context->rx_id);
7198 } else {
7199 icmd = &elsiocb->iocb;
7200 icmd->ulpContext = rdp_context->rx_id;
7201 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
7202 }
7203
7204 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7205
7206 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
7207 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
7208 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7209
7210 phba->fc_stat.elsXmitLSRJT++;
7211 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7212 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7213 if (!elsiocb->ndlp) {
7214 lpfc_els_free_iocb(phba, elsiocb);
7215 goto free_rdp_context;
7216 }
7217
7218 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7219 if (rc == IOCB_ERROR) {
7220 lpfc_els_free_iocb(phba, elsiocb);
7221 lpfc_nlp_put(ndlp);
7222 }
7223
7224 free_rdp_context:
7225 /* This reference put is for the original unsolicited RDP. If the
7226 * prep failed, there is no reference to remove.
7227 */
7228 lpfc_nlp_put(ndlp);
7229 kfree(rdp_context);
7230 }
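/*
 * Payload assembly in lpfc_els_rdp_cmpl() above: 'len' starts at 8 to
 * cover the ELS command word plus the descriptor-list length word, each
 * lpfc_rdp_res_*() helper appends one descriptor and returns its size, and
 * rdp_res->length is finally set to (len - 8), i.e. the length of the
 * descriptor list only:
 *
 *	word 0		ELS_CMD_ACC
 *	word 1		descriptor list length (len - 8)
 *	words 2..n	link service, SFP, speed, link error, port name,
 *			FEC, BBC, OED and OPD descriptors
 *
 * The BPL bdeSize is then rewritten so only the bytes actually built are
 * transmitted.
 */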
7231
7232 static int
7233 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
7234 {
7235 LPFC_MBOXQ_t *mbox = NULL;
7236 int rc;
7237
7238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7239 if (!mbox) {
7240 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
7241 "7105 failed to allocate mailbox memory");
7242 return 1;
7243 }
7244
7245 if (lpfc_sli4_dump_page_a0(phba, mbox))
7246 goto rdp_fail;
7247 mbox->vport = rdp_context->ndlp->vport;
7248 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
7249 mbox->ctx_u.rdp = rdp_context;
7250 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7251 if (rc == MBX_NOT_FINISHED) {
7252 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
7253 return 1;
7254 }
7255
7256 return 0;
7257
7258 rdp_fail:
7259 mempool_free(mbox, phba->mbox_mem_pool);
7260 return 1;
7261 }
7262
7263 int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
7264 struct lpfc_rdp_context *rdp_context)
7265 {
7266 LPFC_MBOXQ_t *mbox = NULL;
7267 int rc;
7268 struct lpfc_dmabuf *mp;
7269 struct lpfc_dmabuf *mpsave;
7270 void *virt;
7271 MAILBOX_t *mb;
7272
7273 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7274 if (!mbox) {
7275 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
7276 "7205 failed to allocate mailbox memory");
7277 return 1;
7278 }
7279
7280 if (lpfc_sli4_dump_page_a0(phba, mbox))
7281 goto sfp_fail;
7282 mp = mbox->ctx_buf;
7283 mpsave = mp;
7284 virt = mp->virt;
7285 if (phba->sli_rev < LPFC_SLI_REV4) {
7286 mb = &mbox->u.mb;
7287 mb->un.varDmp.cv = 1;
7288 mb->un.varDmp.co = 1;
7289 mb->un.varWords[2] = 0;
7290 mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4;
7291 mb->un.varWords[4] = 0;
7292 mb->un.varWords[5] = 0;
7293 mb->un.varWords[6] = 0;
7294 mb->un.varWords[7] = 0;
7295 mb->un.varWords[8] = 0;
7296 mb->un.varWords[9] = 0;
7297 mb->un.varWords[10] = 0;
7298 mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
7299 mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
7300 mbox->mbox_offset_word = 5;
7301 mbox->ext_buf = virt;
7302 } else {
7303 bf_set(lpfc_mbx_memory_dump_type3_length,
7304 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
7305 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
7306 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
7307 }
7308 mbox->vport = phba->pport;
7309 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);
7310 if (rc == MBX_NOT_FINISHED) {
7311 rc = 1;
7312 goto error;
7313 }
7314 if (rc == MBX_TIMEOUT)
7315 goto error;
7316 if (phba->sli_rev == LPFC_SLI_REV4)
7317 mp = mbox->ctx_buf;
7318 else
7319 mp = mpsave;
7320
7321 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
7322 rc = 1;
7323 goto error;
7324 }
7325
7326 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
7327 DMP_SFF_PAGE_A0_SIZE);
7328
7329 memset(mbox, 0, sizeof(*mbox));
7330 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
7331 INIT_LIST_HEAD(&mp->list);
7332
7333 /* save address for completion */
7334 mbox->ctx_buf = mp;
7335 mbox->vport = phba->pport;
7336
7337 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
7338 bf_set(lpfc_mbx_memory_dump_type3_type,
7339 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
7340 bf_set(lpfc_mbx_memory_dump_type3_link,
7341 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
7342 bf_set(lpfc_mbx_memory_dump_type3_page_no,
7343 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
7344 if (phba->sli_rev < LPFC_SLI_REV4) {
7345 mb = &mbox->u.mb;
7346 mb->un.varDmp.cv = 1;
7347 mb->un.varDmp.co = 1;
7348 mb->un.varWords[2] = 0;
7349 mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4;
7350 mb->un.varWords[4] = 0;
7351 mb->un.varWords[5] = 0;
7352 mb->un.varWords[6] = 0;
7353 mb->un.varWords[7] = 0;
7354 mb->un.varWords[8] = 0;
7355 mb->un.varWords[9] = 0;
7356 mb->un.varWords[10] = 0;
7357 mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
7358 mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
7359 mbox->mbox_offset_word = 5;
7360 mbox->ext_buf = virt;
7361 } else {
7362 bf_set(lpfc_mbx_memory_dump_type3_length,
7363 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
7364 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
7365 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
7366 }
7367
7368 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);
7369
7370 if (rc == MBX_TIMEOUT)
7371 goto error;
7372 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
7373 rc = 1;
7374 goto error;
7375 }
7376 rc = 0;
7377
7378 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
7379 DMP_SFF_PAGE_A2_SIZE);
7380
7381 error:
7382 if (mbox->mbox_flag & LPFC_MBX_WAKE) {
7383 mbox->ctx_buf = mpsave;
7384 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
7385 }
7386
7387 return rc;
7388
7389 sfp_fail:
7390 mempool_free(mbox, phba->mbox_mem_pool);
7391 return 1;
7392 }
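/*
 * lpfc_get_sfp_info_wait() above is the synchronous counterpart of
 * lpfc_get_rdp_info(): it dumps SFF-8472 page A0 and then page A2 with
 * lpfc_sli_issue_mbox_wait(), reusing the same mailbox and DMA buffer for
 * the second dump, and copies the results into rdp_context->page_a0 /
 * page_a2 with lpfc_sli_bemem_bcopy(). The mailbox resources are released
 * only when the wait actually completed (LPFC_MBX_WAKE), presumably so a
 * late completion cannot reference a freed buffer.
 */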
7393
7394 /*
7395 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
7396 * @vport: pointer to a host virtual N_Port data structure.
7397 * @cmdiocb: pointer to lpfc command iocb data structure.
7398 * @ndlp: pointer to a node-list data structure.
7399 *
7400 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
7401 * IOCB. First, the payload of the unsolicited RDP is validated.
7402 * Then it will (1) send MBX_DUMP_MEMORY, embedded DMP_LMSD sub-command
7403 * TYPE-3 for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
7404 * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
7405 * lpfc_els_rdp_cmpl to gather all the data and send the RDP response.
7406 *
7407 * Return code
7408 * 0 - Sent the acc response
7409 * 1 - Sent the reject response.
7410 */
7411 static int
7412 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7413 struct lpfc_nodelist *ndlp)
7414 {
7415 struct lpfc_hba *phba = vport->phba;
7416 struct lpfc_dmabuf *pcmd;
7417 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
7418 struct fc_rdp_req_frame *rdp_req;
7419 struct lpfc_rdp_context *rdp_context;
7420 union lpfc_wqe128 *cmd = NULL;
7421 struct ls_rjt stat;
7422
7423 if (phba->sli_rev < LPFC_SLI_REV4 ||
7424 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7425 LPFC_SLI_INTF_IF_TYPE_2) {
7426 rjt_err = LSRJT_UNABLE_TPC;
7427 rjt_expl = LSEXP_REQ_UNSUPPORTED;
7428 goto error;
7429 }
7430
7431 if (phba->sli_rev < LPFC_SLI_REV4 ||
7432 test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
7433 rjt_err = LSRJT_UNABLE_TPC;
7434 rjt_expl = LSEXP_REQ_UNSUPPORTED;
7435 goto error;
7436 }
7437
7438 pcmd = cmdiocb->cmd_dmabuf;
7439 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
7440
7441 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7442 "2422 ELS RDP Request "
7443 "dec len %d tag x%x port_id %d len %d\n",
7444 be32_to_cpu(rdp_req->rdp_des_length),
7445 be32_to_cpu(rdp_req->nport_id_desc.tag),
7446 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
7447 be32_to_cpu(rdp_req->nport_id_desc.length));
7448
7449 if (sizeof(struct fc_rdp_nport_desc) !=
7450 be32_to_cpu(rdp_req->rdp_des_length))
7451 goto rjt_logerr;
7452 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
7453 goto rjt_logerr;
7454 if (RDP_NPORT_ID_SIZE !=
7455 be32_to_cpu(rdp_req->nport_id_desc.length))
7456 goto rjt_logerr;
7457 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
7458 if (!rdp_context) {
7459 rjt_err = LSRJT_UNABLE_TPC;
7460 goto error;
7461 }
7462
7463 cmd = &cmdiocb->wqe;
7464 rdp_context->ndlp = lpfc_nlp_get(ndlp);
7465 if (!rdp_context->ndlp) {
7466 kfree(rdp_context);
7467 rjt_err = LSRJT_UNABLE_TPC;
7468 goto error;
7469 }
7470 rdp_context->ox_id = bf_get(wqe_rcvoxid,
7471 &cmd->xmit_els_rsp.wqe_com);
7472 rdp_context->rx_id = bf_get(wqe_ctxt_tag,
7473 &cmd->xmit_els_rsp.wqe_com);
7474 rdp_context->cmpl = lpfc_els_rdp_cmpl;
7475 if (lpfc_get_rdp_info(phba, rdp_context)) {
7476 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
7477 "2423 Unable to send mailbox");
7478 kfree(rdp_context);
7479 rjt_err = LSRJT_UNABLE_TPC;
7480 lpfc_nlp_put(ndlp);
7481 goto error;
7482 }
7483
7484 return 0;
7485
7486 rjt_logerr:
7487 rjt_err = LSRJT_LOGICAL_ERR;
7488
7489 error:
7490 memset(&stat, 0, sizeof(stat));
7491 stat.un.b.lsRjtRsnCode = rjt_err;
7492 stat.un.b.lsRjtRsnCodeExp = rjt_expl;
7493 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7494 return 1;
7495 }
7496
7497
7498 static void
7499 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7500 {
7501 MAILBOX_t *mb;
7502 IOCB_t *icmd;
7503 union lpfc_wqe128 *wqe;
7504 uint8_t *pcmd;
7505 struct lpfc_iocbq *elsiocb;
7506 struct lpfc_nodelist *ndlp;
7507 struct ls_rjt *stat;
7508 union lpfc_sli4_cfg_shdr *shdr;
7509 struct lpfc_lcb_context *lcb_context;
7510 struct fc_lcb_res_frame *lcb_res;
7511 uint32_t cmdsize, shdr_status, shdr_add_status;
7512 int rc;
7513
7514 mb = &pmb->u.mb;
7515 lcb_context = pmb->ctx_u.lcb;
7516 ndlp = lcb_context->ndlp;
7517 memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u));
7518 pmb->ctx_buf = NULL;
7519
7520 shdr = (union lpfc_sli4_cfg_shdr *)
7521 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
7522 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7523 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7524
7525 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
7526 "0194 SET_BEACON_CONFIG mailbox "
7527 "completed with status x%x add_status x%x,"
7528 " mbx status x%x\n",
7529 shdr_status, shdr_add_status, mb->mbxStatus);
7530
7531 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
7532 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
7533 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
7534 mempool_free(pmb, phba->mbox_mem_pool);
7535 goto error;
7536 }
7537
7538 mempool_free(pmb, phba->mbox_mem_pool);
7539 cmdsize = sizeof(struct fc_lcb_res_frame);
7540 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7541 lpfc_max_els_tries, ndlp,
7542 ndlp->nlp_DID, ELS_CMD_ACC);
7543
7544 /* Decrement the ndlp reference count from previous mbox command */
7545 lpfc_nlp_put(ndlp);
7546
7547 if (!elsiocb)
7548 goto free_lcb_context;
7549
7550 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt;
7551
7552 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
7553
7554 if (phba->sli_rev == LPFC_SLI_REV4) {
7555 wqe = &elsiocb->wqe;
7556 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
7557 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7558 lcb_context->ox_id);
7559 } else {
7560 icmd = &elsiocb->iocb;
7561 icmd->ulpContext = lcb_context->rx_id;
7562 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
7563 }
7564
7565 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7566 *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
7567 lcb_res->lcb_sub_command = lcb_context->sub_command;
7568 lcb_res->lcb_type = lcb_context->type;
7569 lcb_res->capability = lcb_context->capability;
7570 lcb_res->lcb_frequency = lcb_context->frequency;
7571 lcb_res->lcb_duration = lcb_context->duration;
7572 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7573 phba->fc_stat.elsXmitACC++;
7574
7575 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7576 if (!elsiocb->ndlp) {
7577 lpfc_els_free_iocb(phba, elsiocb);
7578 goto out;
7579 }
7580
7581 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7582 if (rc == IOCB_ERROR) {
7583 lpfc_els_free_iocb(phba, elsiocb);
7584 lpfc_nlp_put(ndlp);
7585 }
7586 out:
7587 kfree(lcb_context);
7588 return;
7589
7590 error:
7591 cmdsize = sizeof(struct fc_lcb_res_frame);
7592 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7593 lpfc_max_els_tries, ndlp,
7594 ndlp->nlp_DID, ELS_CMD_LS_RJT);
7595 lpfc_nlp_put(ndlp);
7596 if (!elsiocb)
7597 goto free_lcb_context;
7598
7599 if (phba->sli_rev == LPFC_SLI_REV4) {
7600 wqe = &elsiocb->wqe;
7601 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
7602 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7603 lcb_context->ox_id);
7604 } else {
7605 icmd = &elsiocb->iocb;
7606 icmd->ulpContext = lcb_context->rx_id;
7607 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
7608 }
7609
7610 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7611
7612 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
7613 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
7614 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7615
7616 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
7617 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
7618
7619 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7620 phba->fc_stat.elsXmitLSRJT++;
7621 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7622 if (!elsiocb->ndlp) {
7623 lpfc_els_free_iocb(phba, elsiocb);
7624 goto free_lcb_context;
7625 }
7626
7627 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7628 if (rc == IOCB_ERROR) {
7629 lpfc_els_free_iocb(phba, elsiocb);
7630 lpfc_nlp_put(ndlp);
7631 }
7632 free_lcb_context:
7633 kfree(lcb_context);
7634 }
7635
7636 static int
7637 lpfc_sli4_set_beacon(struct lpfc_vport *vport,
7638 struct lpfc_lcb_context *lcb_context,
7639 uint32_t beacon_state)
7640 {
7641 struct lpfc_hba *phba = vport->phba;
7642 union lpfc_sli4_cfg_shdr *cfg_shdr;
7643 LPFC_MBOXQ_t *mbox = NULL;
7644 uint32_t len;
7645 int rc;
7646
7647 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7648 if (!mbox)
7649 return 1;
7650
7651 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
7652 len = sizeof(struct lpfc_mbx_set_beacon_config) -
7653 sizeof(struct lpfc_sli4_cfg_mhdr);
7654 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7655 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
7656 LPFC_SLI4_MBX_EMBED);
7657 mbox->ctx_u.lcb = lcb_context;
7658 mbox->vport = phba->pport;
7659 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
7660 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
7661 phba->sli4_hba.physical_port);
7662 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
7663 beacon_state);
7664 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
7665
7666 /*
7667 * Check bv1s bit before issuing the mailbox
7668 * if bv1s == 1, LCB V1 supported
7669 * else, LCB V0 supported
7670 */
7671
7672 if (phba->sli4_hba.pc_sli4_params.bv1s) {
7673 /* COMMON_SET_BEACON_CONFIG_V1 */
7674 cfg_shdr->request.word9 = BEACON_VERSION_V1;
7675 lcb_context->capability |= LCB_CAPABILITY_DURATION;
7676 bf_set(lpfc_mbx_set_beacon_port_type,
7677 &mbox->u.mqe.un.beacon_config, 0);
7678 bf_set(lpfc_mbx_set_beacon_duration_v1,
7679 &mbox->u.mqe.un.beacon_config,
7680 be16_to_cpu(lcb_context->duration));
7681 } else {
7682 /* COMMON_SET_BEACON_CONFIG_V0 */
7683 if (be16_to_cpu(lcb_context->duration) != 0) {
7684 mempool_free(mbox, phba->mbox_mem_pool);
7685 return 1;
7686 }
7687 cfg_shdr->request.word9 = BEACON_VERSION_V0;
7688 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
7689 bf_set(lpfc_mbx_set_beacon_state,
7690 &mbox->u.mqe.un.beacon_config, beacon_state);
7691 bf_set(lpfc_mbx_set_beacon_port_type,
7692 &mbox->u.mqe.un.beacon_config, 1);
7693 bf_set(lpfc_mbx_set_beacon_duration,
7694 &mbox->u.mqe.un.beacon_config,
7695 be16_to_cpu(lcb_context->duration));
7696 }
7697
7698 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7699 if (rc == MBX_NOT_FINISHED) {
7700 mempool_free(mbox, phba->mbox_mem_pool);
7701 return 1;
7702 }
7703
7704 return 0;
7705 }
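/*
 * Beacon config selection above: when pc_sli4_params.bv1s is set, the
 * COMMON_SET_BEACON_CONFIG_V1 form is used and the requested duration is
 * passed through (LCB_CAPABILITY_DURATION advertised); otherwise the V0
 * form is used, which cannot express a duration, so a non-zero requested
 * duration is rejected before the mailbox is issued.
 */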
7706
7707
7708 /**
7709 * lpfc_els_rcv_lcb - Process an unsolicited LCB
7710 * @vport: pointer to a host virtual N_Port data structure.
7711 * @cmdiocb: pointer to lpfc command iocb data structure.
7712 * @ndlp: pointer to a node-list data structure.
7713 *
7714 * This routine processes an unsolicited LCB (LINK CABLE BEACON) IOCB.
7715 * First, the payload of the unsolicited LCB is validated. Then, based on
7716 * the sub-command, the beacon is turned either on or off.
7717 *
7718 * Return code
7719 * 0 - Sent the acc response
7720 * 1 - Sent the reject response.
7721 **/
7722 static int
7723 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7724 struct lpfc_nodelist *ndlp)
7725 {
7726 struct lpfc_hba *phba = vport->phba;
7727 struct lpfc_dmabuf *pcmd;
7728 uint8_t *lp;
7729 struct fc_lcb_request_frame *beacon;
7730 struct lpfc_lcb_context *lcb_context;
7731 u8 state, rjt_err = 0;
7732 struct ls_rjt stat;
7733
7734 pcmd = cmdiocb->cmd_dmabuf;
7735 lp = (uint8_t *)pcmd->virt;
7736 beacon = (struct fc_lcb_request_frame *)pcmd->virt;
7737
7738 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7739 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
7740 "type x%x frequency %x duration x%x\n",
7741 lp[0], lp[1], lp[2],
7742 beacon->lcb_command,
7743 beacon->lcb_sub_command,
7744 beacon->lcb_type,
7745 beacon->lcb_frequency,
7746 be16_to_cpu(beacon->lcb_duration));
7747
7748 if (beacon->lcb_sub_command != LPFC_LCB_ON &&
7749 beacon->lcb_sub_command != LPFC_LCB_OFF) {
7750 rjt_err = LSRJT_CMD_UNSUPPORTED;
7751 goto rjt;
7752 }
7753
7754 if (phba->sli_rev < LPFC_SLI_REV4 ||
7755 test_bit(HBA_FCOE_MODE, &phba->hba_flag) ||
7756 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7757 LPFC_SLI_INTF_IF_TYPE_2)) {
7758 rjt_err = LSRJT_CMD_UNSUPPORTED;
7759 goto rjt;
7760 }
7761
7762 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
7763 if (!lcb_context) {
7764 rjt_err = LSRJT_UNABLE_TPC;
7765 goto rjt;
7766 }
7767
7768 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
7769 lcb_context->sub_command = beacon->lcb_sub_command;
7770 lcb_context->capability = 0;
7771 lcb_context->type = beacon->lcb_type;
7772 lcb_context->frequency = beacon->lcb_frequency;
7773 lcb_context->duration = beacon->lcb_duration;
7774 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb);
7775 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb);
7776 lcb_context->ndlp = lpfc_nlp_get(ndlp);
7777 if (!lcb_context->ndlp) {
7778 rjt_err = LSRJT_UNABLE_TPC;
7779 goto rjt_free;
7780 }
7781
7782 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
7783 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
7784 "0193 failed to send mail box");
7785 lpfc_nlp_put(ndlp);
7786 rjt_err = LSRJT_UNABLE_TPC;
7787 goto rjt_free;
7788 }
7789 return 0;
7790
7791 rjt_free:
7792 kfree(lcb_context);
7793 rjt:
7794 memset(&stat, 0, sizeof(stat));
7795 stat.un.b.lsRjtRsnCode = rjt_err;
7796 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7797 return 1;
7798 }
7799
7800
7801 /**
7802 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
7803 * @vport: pointer to a host virtual N_Port data structure.
7804 *
7805 * This routine cleans up any Registration State Change Notification
7806 * (RSCN) activity on a @vport. Note that the fc_rscn_flush flag of the
7807 * @vport, together with the host_lock, is used to prevent multiple threads
7808 * from accessing the RSCN array on the same @vport at the same time.
7809 **/
7810 void
7811 lpfc_els_flush_rscn(struct lpfc_vport *vport)
7812 {
7813 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7814 struct lpfc_hba *phba = vport->phba;
7815 int i;
7816
7817 spin_lock_irq(shost->host_lock);
7818 if (vport->fc_rscn_flush) {
7819 /* Another thread is walking fc_rscn_id_list on this vport */
7820 spin_unlock_irq(shost->host_lock);
7821 return;
7822 }
7823 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
7824 vport->fc_rscn_flush = 1;
7825 spin_unlock_irq(shost->host_lock);
7826
7827 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
7828 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
7829 vport->fc_rscn_id_list[i] = NULL;
7830 }
7831 clear_bit(FC_RSCN_MODE, &vport->fc_flag);
7832 clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
7833 spin_lock_irq(shost->host_lock);
7834 vport->fc_rscn_id_cnt = 0;
7835 spin_unlock_irq(shost->host_lock);
7836 lpfc_can_disctmo(vport);
7837 /* Indicate we are done walking this fc_rscn_id_list */
7838 vport->fc_rscn_flush = 0;
7839 }
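/*
 * fc_rscn_flush above acts as a simple ownership token: a walker of
 * fc_rscn_id_list sets it to 1 under the host_lock, drops the lock while it
 * walks or frees the list, and clears it when done. Other paths that see it
 * set (see lpfc_rscn_payload_check() and lpfc_els_rcv_rscn() below) back
 * off rather than touching the list concurrently.
 */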
7840
7841 /**
7842 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
7843 * @vport: pointer to a host virtual N_Port data structure.
7844 * @did: remote destination port identifier.
7845 *
7846 * This routine checks whether there is any pending Registration State
7847 * Change Notification (RSCN) to a @did on @vport.
7848 *
7849 * Return code
7850 *   Non-zero - The @did matched a pending rscn
7851 *   0 - not able to match @did with a pending rscn
7852 **/
7853 int
7854 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
7855 {
7856 D_ID ns_did;
7857 D_ID rscn_did;
7858 uint32_t *lp;
7859 uint32_t payload_len, i;
7860 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7861
7862 ns_did.un.word = did;
7863
7864 /* Never match fabric nodes for RSCNs */
7865 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
7866 return 0;
7867
7868 /* If we are doing a FULL RSCN rediscovery, match everything */
7869 if (test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag))
7870 return did;
7871
7872 spin_lock_irq(shost->host_lock);
7873 if (vport->fc_rscn_flush) {
7874 /* Another thread is walking fc_rscn_id_list on this vport */
7875 spin_unlock_irq(shost->host_lock);
7876 return 0;
7877 }
7878 /* Indicate we are walking fc_rscn_id_list on this vport */
7879 vport->fc_rscn_flush = 1;
7880 spin_unlock_irq(shost->host_lock);
7881 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
7882 lp = vport->fc_rscn_id_list[i]->virt;
7883 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
7884 payload_len -= sizeof(uint32_t); /* take off word 0 */
7885 while (payload_len) {
7886 rscn_did.un.word = be32_to_cpu(*lp++);
7887 payload_len -= sizeof(uint32_t);
7888 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
7889 case RSCN_ADDRESS_FORMAT_PORT:
7890 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
7891 && (ns_did.un.b.area == rscn_did.un.b.area)
7892 && (ns_did.un.b.id == rscn_did.un.b.id))
7893 goto return_did_out;
7894 break;
7895 case RSCN_ADDRESS_FORMAT_AREA:
7896 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
7897 && (ns_did.un.b.area == rscn_did.un.b.area))
7898 goto return_did_out;
7899 break;
7900 case RSCN_ADDRESS_FORMAT_DOMAIN:
7901 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7902 goto return_did_out;
7903 break;
7904 case RSCN_ADDRESS_FORMAT_FABRIC:
7905 goto return_did_out;
7906 }
7907 }
7908 }
7909 /* Indicate we are done with walking fc_rscn_id_list on this vport */
7910 vport->fc_rscn_flush = 0;
7911 return 0;
7912 return_did_out:
7913 /* Indicate we are done with walking fc_rscn_id_list on this vport */
7914 vport->fc_rscn_flush = 0;
7915 return did;
7916 }
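/*
 * RSCN address-format matching examples for the switch above (illustrative
 * values only): a port-format entry 0x012345 matches only did 0x012345; an
 * area-format entry 0x012300 matches any did 0x0123xx; a domain-format
 * entry 0x010000 matches any did 0x01xxxx; a fabric-format entry matches
 * every non-fabric did.
 */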
7917
7918 /**
7919 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
7920 * @vport: pointer to a host virtual N_Port data structure.
7921 *
7922 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
7923 * state machine for each of the @vport's nodes that matches a pending RSCN
7924 * (Registration State Change Notification).
7925 *
7926 * Return code
7927 *   0 - Successful (currently always returns 0)
7928 **/
7929 static int
7930 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
7931 {
7932 struct lpfc_nodelist *ndlp = NULL, *n;
7933
7934 /* Move all affected nodes by pending RSCNs to NPR state. */
7935 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
7936 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
7937 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
7938 continue;
7939
7940 /* NVME Target mode does not do RSCN Recovery. */
7941 if (vport->phba->nvmet_support)
7942 continue;
7943
7944 /* If we are in the process of doing discovery on this
7945 * NPort, let it continue on its own.
7946 */
7947 switch (ndlp->nlp_state) {
7948 case NLP_STE_PLOGI_ISSUE:
7949 case NLP_STE_ADISC_ISSUE:
7950 case NLP_STE_REG_LOGIN_ISSUE:
7951 case NLP_STE_PRLI_ISSUE:
7952 case NLP_STE_LOGO_ISSUE:
7953 continue;
7954 }
7955
7956 lpfc_disc_state_machine(vport, ndlp, NULL,
7957 NLP_EVT_DEVICE_RECOVERY);
7958 lpfc_cancel_retry_delay_tmo(vport, ndlp);
7959 }
7960 return 0;
7961 }
7962
7963 /**
7964 * lpfc_send_rscn_event - Send an RSCN event to management application
7965 * @vport: pointer to a host virtual N_Port data structure.
7966 * @cmdiocb: pointer to lpfc command iocb data structure.
7967 *
7968 * lpfc_send_rscn_event sends an RSCN netlink event to management
7969 * applications.
7970 */
7971 static void
7972 lpfc_send_rscn_event(struct lpfc_vport *vport,
7973 struct lpfc_iocbq *cmdiocb)
7974 {
7975 struct lpfc_dmabuf *pcmd;
7976 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7977 uint32_t *payload_ptr;
7978 uint32_t payload_len;
7979 struct lpfc_rscn_event_header *rscn_event_data;
7980
7981 pcmd = cmdiocb->cmd_dmabuf;
7982 payload_ptr = (uint32_t *) pcmd->virt;
7983 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
7984
7985 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
7986 payload_len, GFP_KERNEL);
7987 if (!rscn_event_data) {
7988 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
7989 "0147 Failed to allocate memory for RSCN event\n");
7990 return;
7991 }
7992 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
7993 rscn_event_data->payload_length = payload_len;
7994 memcpy(rscn_event_data->rscn_payload, payload_ptr,
7995 payload_len);
7996
7997 fc_host_post_vendor_event(shost,
7998 fc_get_event_number(),
7999 sizeof(struct lpfc_rscn_event_header) + payload_len,
8000 (char *)rscn_event_data,
8001 LPFC_NL_VENDOR_ID);
8002
8003 kfree(rscn_event_data);
8004 }
8005
8006 /**
8007 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
8008 * @vport: pointer to a host virtual N_Port data structure.
8009 * @cmdiocb: pointer to lpfc command iocb data structure.
8010 * @ndlp: pointer to a node-list data structure.
8011 *
8012 * This routine processes an unsolicited RSCN (Registration State Change
8013 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
8014 * and fc_host_post_event() is invoked to notify the FC transport layer. If
8015 * the discovery state machine is about to begin discovery, the RSCN is
8016 * simply accepted and the discovery process will satisfy it. If this RSCN
8017 * only contains N_Port IDs for other vports on this HBA, the RSCN is
8018 * accepted and its processing is skipped. If the state machine is in the
8019 * recovery state, the fc_rscn_id_list of this @vport is walked and the
8020 * lpfc_rscn_recovery_check() routine is invoked to send a recovery event
8021 * for all nodes that match the RSCN payload. Otherwise, the
8022 * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
8023 *
8024 * Return code
8025 * 0 - Just sent the acc response
8026 * 1 - Sent the acc response and waited for name server completion
8027 **/
8028 static int
8029 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8030 struct lpfc_nodelist *ndlp)
8031 {
8032 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8033 struct lpfc_hba *phba = vport->phba;
8034 struct lpfc_dmabuf *pcmd;
8035 uint32_t *lp, *datap;
8036 uint32_t payload_len, length, nportid, *cmd;
8037 int rscn_cnt;
8038 int rscn_id = 0, hba_id = 0;
8039 int i, tmo;
8040
8041 pcmd = cmdiocb->cmd_dmabuf;
8042 lp = (uint32_t *) pcmd->virt;
8043
8044 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
8045 payload_len -= sizeof(uint32_t); /* take off word 0 */
8046 /* RSCN received */
8047 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8048 "0214 RSCN received Data: x%lx x%x x%x x%x\n",
8049 vport->fc_flag, payload_len, *lp,
8050 vport->fc_rscn_id_cnt);
8051
8052 /* Send an RSCN event to the management application */
8053 lpfc_send_rscn_event(vport, cmdiocb);
8054
8055 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
8056 fc_host_post_event(shost, fc_get_event_number(),
8057 FCH_EVT_RSCN, lp[i]);
8058
8059 /* Check if RSCN is coming from a direct-connected remote NPort */
8060 if (test_bit(FC_PT2PT, &vport->fc_flag)) {
8061 /* If so, just ACC it, no other action needed for now */
8062 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8063 "2024 pt2pt RSCN %08x Data: x%lx x%x\n",
8064 *lp, vport->fc_flag, payload_len);
8065 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8066
8067 /* Check to see if we need to NVME rescan this target
8068 * remoteport.
8069 */
8070 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
8071 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
8072 lpfc_nvme_rescan_port(vport, ndlp);
8073 return 0;
8074 }
8075
8076 /* If we are about to begin discovery, just ACC the RSCN.
8077 * Discovery processing will satisfy it.
8078 */
8079 if (vport->port_state <= LPFC_NS_QRY) {
8080 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8081 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
8082 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8083
8084 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8085 return 0;
8086 }
8087
8088 /* If this RSCN just contains NPortIDs for other vports on this HBA,
8089 * just ACC and ignore it.
8090 */
8091 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
8092 !(vport->cfg_peer_port_login)) {
8093 i = payload_len;
8094 datap = lp;
8095 while (i > 0) {
8096 nportid = *datap++;
8097 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
8098 i -= sizeof(uint32_t);
8099 rscn_id++;
8100 if (lpfc_find_vport_by_did(phba, nportid))
8101 hba_id++;
8102 }
8103 if (rscn_id == hba_id) {
8104 /* ALL NPortIDs in RSCN are on HBA */
8105 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8106 "0219 Ignore RSCN "
8107 "Data: x%lx x%x x%x x%x\n",
8108 vport->fc_flag, payload_len,
8109 *lp, vport->fc_rscn_id_cnt);
8110 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8111 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
8112 ndlp->nlp_DID, vport->port_state,
8113 ndlp->nlp_flag);
8114
8115 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
8116 ndlp, NULL);
8117 			/* Restart disctmo if it's already running */
8118 if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
8119 tmo = ((phba->fc_ratov * 3) + 3);
8120 mod_timer(&vport->fc_disctmo,
8121 jiffies +
8122 msecs_to_jiffies(1000 * tmo));
8123 }
8124 return 0;
8125 }
8126 }
8127
8128 spin_lock_irq(shost->host_lock);
8129 if (vport->fc_rscn_flush) {
8130 /* Another thread is walking fc_rscn_id_list on this vport */
8131 spin_unlock_irq(shost->host_lock);
8132 set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
8133 /* Send back ACC */
8134 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8135 return 0;
8136 }
8137 /* Indicate we are walking fc_rscn_id_list on this vport */
8138 vport->fc_rscn_flush = 1;
8139 spin_unlock_irq(shost->host_lock);
8140 	/* Get the array count after successfully acquiring the token */
8141 rscn_cnt = vport->fc_rscn_id_cnt;
8142 /* If we are already processing an RSCN, save the received
8143 * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later.
8144 */
8145 if (test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
8146 test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) {
8147 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8148 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
8149 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8150
8151 set_bit(FC_RSCN_DEFERRED, &vport->fc_flag);
8152
8153 		/* Restart disctmo if it's already running */
8154 if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
8155 tmo = ((phba->fc_ratov * 3) + 3);
8156 mod_timer(&vport->fc_disctmo,
8157 jiffies + msecs_to_jiffies(1000 * tmo));
8158 }
8159 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
8160 !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
8161 set_bit(FC_RSCN_MODE, &vport->fc_flag);
8162 if (rscn_cnt) {
8163 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
8164 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
8165 }
8166 if ((rscn_cnt) &&
8167 (payload_len + length <= LPFC_BPL_SIZE)) {
8168 *cmd &= ELS_CMD_MASK;
8169 *cmd |= cpu_to_be32(payload_len + length);
8170 memcpy(((uint8_t *)cmd) + length, lp,
8171 payload_len);
8172 } else {
8173 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
8174 vport->fc_rscn_id_cnt++;
8175 /* If we zero cmdiocb->cmd_dmabuf, the calling
8176 * routine will not try to free it.
8177 */
8178 cmdiocb->cmd_dmabuf = NULL;
8179 }
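/* Coalescing sketch (illustrative; assumes ELS_CMD_MASK isolates the
 * RSCN opcode in word 0 of the saved buffer): appending another RSCN of
 * payload_len bytes to an existing entry is just
 *
 *   *cmd &= ELS_CMD_MASK;
 *   *cmd |= cpu_to_be32(payload_len + length);
 *   memcpy((uint8_t *)cmd + length, lp, payload_len);
 *
 * as done in the branch above; when the combined payload would exceed
 * LPFC_BPL_SIZE, the new dmabuf is kept as its own fc_rscn_id_list[]
 * entry instead.
 */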
8180 /* Deferred RSCN */
8181 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8182 "0235 Deferred RSCN "
8183 "Data: x%x x%lx x%x\n",
8184 vport->fc_rscn_id_cnt, vport->fc_flag,
8185 vport->port_state);
8186 } else {
8187 set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
8188 /* ReDiscovery RSCN */
8189 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8190 "0234 ReDiscovery RSCN "
8191 "Data: x%x x%lx x%x\n",
8192 vport->fc_rscn_id_cnt, vport->fc_flag,
8193 vport->port_state);
8194 }
8195 /* Indicate we are done walking fc_rscn_id_list on this vport */
8196 vport->fc_rscn_flush = 0;
8197 /* Send back ACC */
8198 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8199 /* send RECOVERY event for ALL nodes that match RSCN payload */
8200 lpfc_rscn_recovery_check(vport);
8201 return 0;
8202 }
8203 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8204 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
8205 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
8206
8207 set_bit(FC_RSCN_MODE, &vport->fc_flag);
8208 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
8209 /* Indicate we are done walking fc_rscn_id_list on this vport */
8210 vport->fc_rscn_flush = 0;
8211 /*
8212 * If we zero cmdiocb->cmd_dmabuf, the calling routine will
8213 * not try to free it.
8214 */
8215 cmdiocb->cmd_dmabuf = NULL;
8216 lpfc_set_disctmo(vport);
8217 /* Send back ACC */
8218 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8219 /* send RECOVERY event for ALL nodes that match RSCN payload */
8220 lpfc_rscn_recovery_check(vport);
8221 return lpfc_els_handle_rscn(vport);
8222 }
8223
8224 /**
8225 * lpfc_els_handle_rscn - Handle rscn for a vport
8226 * @vport: pointer to a host virtual N_Port data structure.
8227 *
8228 * This routine handles the Registration State Change Notification
8229 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
8230 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
8231 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
8232 * NameServer shall be issued. If CT command to the NameServer fails to be
8233 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
8234 * RSCN activities with the @vport.
8235 *
8236 * Return code
8237 * 0 - Cleaned up rscn on the @vport
8238 * 1 - Wait for plogi to name server before proceeding
8239 **/
8240 int
8241 lpfc_els_handle_rscn(struct lpfc_vport *vport)
8242 {
8243 struct lpfc_nodelist *ndlp;
8244 struct lpfc_hba *phba = vport->phba;
8245
8246 /* Ignore RSCN if the port is being torn down. */
8247 if (test_bit(FC_UNLOADING, &vport->load_flag)) {
8248 lpfc_els_flush_rscn(vport);
8249 return 0;
8250 }
8251
8252 /* Start timer for RSCN processing */
8253 lpfc_set_disctmo(vport);
8254
8255 /* RSCN processed */
8256 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8257 "0215 RSCN processed Data: x%lx x%x x%x x%x x%x x%x\n",
8258 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
8259 vport->port_state, vport->num_disc_nodes,
8260 vport->gidft_inp);
8261
8262 /* To process RSCN, first compare RSCN data with NameServer */
8263 vport->fc_ns_retry = 0;
8264 vport->num_disc_nodes = 0;
8265
8266 ndlp = lpfc_findnode_did(vport, NameServer_DID);
8267 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
8268 /* Good ndlp, issue CT Request to NameServer. Need to
8269 * know how many gidfts were issued. If none, then just
8270 * flush the RSCN. Otherwise, the outstanding requests
8271 * need to complete.
8272 */
8273 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
8274 if (lpfc_issue_gidft(vport) > 0)
8275 return 1;
8276 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
8277 if (lpfc_issue_gidpt(vport) > 0)
8278 return 1;
8279 } else {
8280 return 1;
8281 }
8282 } else {
8283 /* Nameserver login in question. Revalidate. */
8284 if (ndlp) {
8285 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
8286 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8287 } else {
8288 ndlp = lpfc_nlp_init(vport, NameServer_DID);
8289 if (!ndlp) {
8290 lpfc_els_flush_rscn(vport);
8291 return 0;
8292 }
8293 ndlp->nlp_prev_state = ndlp->nlp_state;
8294 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8295 }
8296 ndlp->nlp_type |= NLP_FABRIC;
8297 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
8298 /* Wait for NameServer login cmpl before we can
8299 * continue
8300 */
8301 return 1;
8302 }
8303
8304 lpfc_els_flush_rscn(vport);
8305 return 0;
8306 }
8307
8308 /**
8309 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
8310 * @vport: pointer to a host virtual N_Port data structure.
8311 * @cmdiocb: pointer to lpfc command iocb data structure.
8312 * @ndlp: pointer to a node-list data structure.
8313 *
8314 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
8315 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
8316 * point topology. Since an unsolicited FLOGI should not be received in loop
8317 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
8318 * lpfc_check_sparm() routine is invoked to check the parameters in the
8319 * unsolicited FLOGI. If parameters validation failed, the routine
8320 * lpfc_els_rsp_reject() shall be called with reject reason code set to
8321 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
8322 * FLOGI shall be compared with the Port WWN of the @vport to determine who
8323 * will initiate PLOGI. The party with the higher lexicographical value shall have
8324 * higher priority (as the winning port) and will initiate PLOGI and
8325 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
8326 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
8327 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
8328 *
8329 * Return code
8330 * 0 - Successfully processed the unsolicited flogi
8331 * 1 - Failed to process the unsolicited flogi
8332 **/
8333 static int
8334 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8335 struct lpfc_nodelist *ndlp)
8336 {
8337 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8338 struct lpfc_hba *phba = vport->phba;
8339 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
8340 uint32_t *lp = (uint32_t *) pcmd->virt;
8341 union lpfc_wqe128 *wqe = &cmdiocb->wqe;
8342 struct serv_parm *sp;
8343 LPFC_MBOXQ_t *mbox;
8344 uint32_t cmd, did;
8345 int rc;
8346 unsigned long fc_flag = 0;
8347 uint32_t port_state = 0;
8348
8349 /* Clear external loopback plug detected flag */
8350 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
8351
8352 cmd = *lp++;
8353 sp = (struct serv_parm *) lp;
8354
8355 /* FLOGI received */
8356
8357 lpfc_set_disctmo(vport);
8358
8359 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8360 /* We should never receive a FLOGI in loop mode, ignore it */
8361 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
8362
8363 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
8364 * Loop Mode */
8365 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8366 "0113 An FLOGI ELS command x%x was "
8367 "received from DID x%x in Loop Mode\n",
8368 cmd, did);
8369 return 1;
8370 }
8371
8372 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
8373
8374 /*
8375 * If our portname is greater than the remote portname,
8376 * then we initiate Nport login.
8377 */
8378
8379 rc = memcmp(&vport->fc_portname, &sp->portName,
8380 sizeof(struct lpfc_name));
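/* The WWPN bytes are kept in wire (big-endian) order in struct lpfc_name,
 * so this memcmp() amounts to comparing the two names as unsigned 64-bit
 * values: rc > 0 means our Port WWN is the larger one and we win the
 * point-to-point PLOGI election handled below.  (The byte-order note is an
 * assumption based on how the names are used here, not spec text.)
 */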
8381
8382 if (!rc) {
8383 if (phba->sli_rev < LPFC_SLI_REV4) {
8384 mbox = mempool_alloc(phba->mbox_mem_pool,
8385 GFP_KERNEL);
8386 if (!mbox)
8387 return 1;
8388 lpfc_linkdown(phba);
8389 lpfc_init_link(phba, mbox,
8390 phba->cfg_topology,
8391 phba->cfg_link_speed);
8392 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
8393 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
8394 mbox->vport = vport;
8395 rc = lpfc_sli_issue_mbox(phba, mbox,
8396 MBX_NOWAIT);
8397 lpfc_set_loopback_flag(phba);
8398 if (rc == MBX_NOT_FINISHED)
8399 mempool_free(mbox, phba->mbox_mem_pool);
8400 return 1;
8401 }
8402
8403 /* External loopback plug insertion detected */
8404 phba->link_flag |= LS_EXTERNAL_LOOPBACK;
8405
8406 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC,
8407 "1119 External Loopback plug detected\n");
8408
8409 /* abort the flogi coming back to ourselves
8410 * due to external loopback on the port.
8411 */
8412 lpfc_els_abort_flogi(phba);
8413 return 0;
8414
8415 } else if (rc > 0) { /* greater than */
8416 set_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
8417
8418 /* If we have the high WWPN we can assign our own
8419 * myDID; otherwise, we have to WAIT for a PLOGI
8420 * from the remote NPort to find out what it
8421 * will be.
8422 */
8423 vport->fc_myDID = PT2PT_LocalID;
8424 } else {
8425 vport->fc_myDID = PT2PT_RemoteID;
8426 }
8427
8428 /*
8429 * The vport state should go to LPFC_FLOGI only
8430 * AFTER we issue a FLOGI, not receive one.
8431 */
8432 spin_lock_irq(shost->host_lock);
8433 fc_flag = vport->fc_flag;
8434 port_state = vport->port_state;
8435 /* Acking an unsol FLOGI. Count 1 for link bounce
8436 * work-around.
8437 */
8438 vport->rcv_flogi_cnt++;
8439 spin_unlock_irq(shost->host_lock);
8440 set_bit(FC_PT2PT, &vport->fc_flag);
8441 clear_bit(FC_FABRIC, &vport->fc_flag);
8442 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
8443 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8444 "3311 Rcv Flogi PS x%x new PS x%x "
8445 "fc_flag x%lx new fc_flag x%lx\n",
8446 port_state, vport->port_state,
8447 fc_flag, vport->fc_flag);
8448
8449 /*
8450 * We temporarily set fc_myDID to make it look like we are
8451 * a Fabric. This is done just so we end up with the right
8452 * did / sid on the FLOGI ACC rsp.
8453 */
8454 did = vport->fc_myDID;
8455 vport->fc_myDID = Fabric_DID;
8456
8457 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
8458
8459 /* Defer ACC response until AFTER we issue a FLOGI */
8460 if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) {
8461 phba->defer_flogi_acc.rx_id = bf_get(wqe_ctxt_tag,
8462 &wqe->xmit_els_rsp.wqe_com);
8463 phba->defer_flogi_acc.ox_id = bf_get(wqe_rcvoxid,
8464 &wqe->xmit_els_rsp.wqe_com);
8465
8466 vport->fc_myDID = did;
8467
8468 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8469 "3344 Deferring FLOGI ACC: rx_id: x%x,"
8470 " ox_id: x%x, hba_flag x%lx\n",
8471 phba->defer_flogi_acc.rx_id,
8472 phba->defer_flogi_acc.ox_id, phba->hba_flag);
8473
8474 phba->defer_flogi_acc.flag = true;
8475
8476 /* This nlp_get is paired with nlp_puts that reset the
8477 * defer_flogi_acc.flag back to false. We need to retain
8478 * a kref on the ndlp until the deferred FLOGI ACC is
8479 * processed or cancelled.
8480 */
8481 phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
8482 return 0;
8483 }
8484
8485 /* Send back ACC */
8486 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
8487
8488 /* Now let's put fc_myDID back to what it's supposed to be */
8489 vport->fc_myDID = did;
8490
8491 return 0;
8492 }
8493
8494 /**
8495 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
8496 * @vport: pointer to a host virtual N_Port data structure.
8497 * @cmdiocb: pointer to lpfc command iocb data structure.
8498 * @ndlp: pointer to a node-list data structure.
8499 *
8500 * This routine processes Request Node Identification Data (RNID) IOCB
8501 * received as an ELS unsolicited event. Only when the RNID specified format
8502 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
8503 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
8504 * Accept (ACC) the RNID ELS command. All the other RNID formats are
8505 * rejected by invoking the lpfc_els_rsp_reject() routine.
8506 *
8507 * Return code
8508 * 0 - Successfully processed rnid iocb (currently always return 0)
8509 **/
8510 static int
8511 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8512 struct lpfc_nodelist *ndlp)
8513 {
8514 struct lpfc_dmabuf *pcmd;
8515 uint32_t *lp;
8516 RNID *rn;
8517 struct ls_rjt stat;
8518
8519 pcmd = cmdiocb->cmd_dmabuf;
8520 lp = (uint32_t *) pcmd->virt;
8521
8522 lp++;
8523 rn = (RNID *) lp;
8524
8525 /* RNID received */
8526
8527 switch (rn->Format) {
8528 case 0:
8529 case RNID_TOPOLOGY_DISC:
8530 /* Send back ACC */
8531 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
8532 break;
8533 default:
8534 /* Reject this request because format not supported */
8535 stat.un.b.lsRjtRsvd0 = 0;
8536 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8537 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8538 stat.un.b.vendorUnique = 0;
8539 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
8540 NULL);
8541 }
8542 return 0;
8543 }
8544
8545 /**
8546 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
8547 * @vport: pointer to a host virtual N_Port data structure.
8548 * @cmdiocb: pointer to lpfc command iocb data structure.
8549 * @ndlp: pointer to a node-list data structure.
8550 *
8551 * Return code
8552 * 0 - Successfully processed echo iocb (currently always return 0)
8553 **/
8554 static int
8555 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8556 struct lpfc_nodelist *ndlp)
8557 {
8558 uint8_t *pcmd;
8559
8560 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;
8561
8562 /* skip over first word of echo command to find echo data */
8563 pcmd += sizeof(uint32_t);
8564
8565 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
8566 return 0;
8567 }
8568
8569 /**
8570 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
8571 * @vport: pointer to a host virtual N_Port data structure.
8572 * @cmdiocb: pointer to lpfc command iocb data structure.
8573 * @ndlp: pointer to a node-list data structure.
8574 *
8575 * This routine processes a Link Incident Report Registration(LIRR) IOCB
8576 * received as an ELS unsolicited event. Currently, this function just invokes
8577 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
8578 *
8579 * Return code
8580 * 0 - Successfully processed lirr iocb (currently always return 0)
8581 **/
8582 static int
8583 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8584 struct lpfc_nodelist *ndlp)
8585 {
8586 struct ls_rjt stat;
8587
8588 /* For now, unconditionally reject this command */
8589 stat.un.b.lsRjtRsvd0 = 0;
8590 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8591 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8592 stat.un.b.vendorUnique = 0;
8593 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8594 return 0;
8595 }
8596
8597 /**
8598 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
8599 * @vport: pointer to a host virtual N_Port data structure.
8600 * @cmdiocb: pointer to lpfc command iocb data structure.
8601 * @ndlp: pointer to a node-list data structure.
8602 *
8603 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
8604 * received as an ELS unsolicited event. A request to RRQ shall only
8605 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
8606 * Nx_Port N_Port_ID of the target Exchange is the same as the
8607 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
8608 * not accepted, an LS_RJT with reason code "Unable to perform
8609 * command request" and reason code explanation "Invalid Originator
8610 * S_ID" shall be returned. For now, we just unconditionally accept
8611 * RRQ from the target.
8612 **/
8613 static void
8614 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8615 struct lpfc_nodelist *ndlp)
8616 {
8617 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8618 if (vport->phba->sli_rev == LPFC_SLI_REV4)
8619 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
8620 }
8621
8622 /**
8623 * lpfc_els_rsp_rls_acc - Completion callback func for MBX_READ_LNK_STAT mbox cmd
8624 * @phba: pointer to lpfc hba data structure.
8625 * @pmb: pointer to the driver internal queue element for mailbox command.
8626 *
8627 * This routine is the completion callback function for the MBX_READ_LNK_STAT
8628 * mailbox command. This callback function is to actually send the Accept
8629 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
8630 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
8631 * mailbox command, constructs the RLS response with the link statistics
8632 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
8633 * response to the RLS.
8634 *
8635 * Note that the ndlp reference count will be incremented by 1 for holding the
8636 * ndlp and the reference to ndlp will be stored into the ndlp field of
8637 * the IOCB for the completion callback function to the RLS Accept Response
8638 * ELS IOCB command.
8639 *
8640 **/
8641 static void
8642 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8643 {
8644 int rc = 0;
8645 MAILBOX_t *mb;
8646 IOCB_t *icmd;
8647 union lpfc_wqe128 *wqe;
8648 struct RLS_RSP *rls_rsp;
8649 uint8_t *pcmd;
8650 struct lpfc_iocbq *elsiocb;
8651 struct lpfc_nodelist *ndlp;
8652 uint16_t oxid;
8653 uint16_t rxid;
8654 uint32_t cmdsize;
8655 u32 ulp_context;
8656
8657 mb = &pmb->u.mb;
8658
8659 ndlp = pmb->ctx_ndlp;
8660 rxid = (uint16_t)(pmb->ctx_u.ox_rx_id & 0xffff);
8661 oxid = (uint16_t)((pmb->ctx_u.ox_rx_id >> 16) & 0xffff);
8662 memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u));
8663 pmb->ctx_ndlp = NULL;
8664
8665 if (mb->mbxStatus) {
8666 mempool_free(pmb, phba->mbox_mem_pool);
8667 return;
8668 }
8669
8670 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
8671 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8672 lpfc_max_els_tries, ndlp,
8673 ndlp->nlp_DID, ELS_CMD_ACC);
8674
8675 /* Decrement the ndlp reference count from previous mbox command */
8676 lpfc_nlp_put(ndlp);
8677
8678 if (!elsiocb) {
8679 mempool_free(pmb, phba->mbox_mem_pool);
8680 return;
8681 }
8682
8683 ulp_context = get_job_ulpcontext(phba, elsiocb);
8684 if (phba->sli_rev == LPFC_SLI_REV4) {
8685 wqe = &elsiocb->wqe;
8686 /* Xri / rx_id */
8687 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid);
8688 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid);
8689 } else {
8690 icmd = &elsiocb->iocb;
8691 icmd->ulpContext = rxid;
8692 icmd->unsli3.rcvsli3.ox_id = oxid;
8693 }
8694
8695 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8696 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8697 pcmd += sizeof(uint32_t); /* Skip past command */
8698 rls_rsp = (struct RLS_RSP *)pcmd;
8699
8700 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
8701 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
8702 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
8703 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
8704 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
8705 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
8706 mempool_free(pmb, phba->mbox_mem_pool);
8707 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
8708 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8709 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
8710 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
8711 elsiocb->iotag, ulp_context,
8712 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8713 ndlp->nlp_rpi);
8714 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8715 phba->fc_stat.elsXmitACC++;
8716 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8717 if (!elsiocb->ndlp) {
8718 lpfc_els_free_iocb(phba, elsiocb);
8719 return;
8720 }
8721
8722 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8723 if (rc == IOCB_ERROR) {
8724 lpfc_els_free_iocb(phba, elsiocb);
8725 lpfc_nlp_put(ndlp);
8726 }
8727 return;
8728 }
8729
8730 /**
8731 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
8732 * @vport: pointer to a host virtual N_Port data structure.
8733 * @cmdiocb: pointer to lpfc command iocb data structure.
8734 * @ndlp: pointer to a node-list data structure.
8735 *
8736 * This routine processes Read Link Status (RLS) IOCB received as an
8737 * ELS unsolicited event. It first checks the remote port state. If the
8738 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8739 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8740 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8741 * to read the HBA link statistics. The callback function,
8742 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
8743 * then actually sends out the RLS Accept (ACC) response.
8744 *
8745 * Return codes
8746 * 0 - Successfully processed rls iocb (currently always return 0)
8747 **/
8748 static int
8749 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8750 struct lpfc_nodelist *ndlp)
8751 {
8752 struct lpfc_hba *phba = vport->phba;
8753 LPFC_MBOXQ_t *mbox;
8754 struct ls_rjt stat;
8755 u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8756 u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8757
8758 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8759 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8760 /* reject the unsolicited RLS request and done with it */
8761 goto reject_out;
8762
8763 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8764 if (mbox) {
8765 lpfc_read_lnk_stat(phba, mbox);
8766 mbox->ctx_u.ox_rx_id = ox_id << 16 | ctx;
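/* Both exchange IDs are packed into the single mailbox context word and
 * unpacked the same way by lpfc_els_rsp_rls_acc().  Layout, with
 * illustrative values:
 *
 *   ox_rx_id = (ox_id << 16) | rx_id
 *   e.g. ox_id 0x1234, rx_id 0x00ab -> 0x123400ab
 */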
8767 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8768 if (!mbox->ctx_ndlp)
8769 goto node_err;
8770 mbox->vport = vport;
8771 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8772 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8773 != MBX_NOT_FINISHED)
8774 /* Mbox completion will send ELS Response */
8775 return 0;
8776 /* Decrement reference count used for the failed mbox
8777 * command.
8778 */
8779 lpfc_nlp_put(ndlp);
8780 node_err:
8781 mempool_free(mbox, phba->mbox_mem_pool);
8782 }
8783 reject_out:
8784 /* issue rejection response */
8785 stat.un.b.lsRjtRsvd0 = 0;
8786 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8787 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8788 stat.un.b.vendorUnique = 0;
8789 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8790 return 0;
8791 }
8792
8793 /**
8794 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8795 * @vport: pointer to a host virtual N_Port data structure.
8796 * @cmdiocb: pointer to lpfc command iocb data structure.
8797 * @ndlp: pointer to a node-list data structure.
8798 *
8799 * This routine processes Read Timeout Value (RTV) IOCB received as an
8800 * ELS unsolicited event. It first checks the remote port state. If the
8801 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8802 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8803 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
8804 * Value (RTV) unsolicited IOCB event.
8805 *
8806 * Note that the ndlp reference count will be incremented by 1 for holding the
8807 * ndlp and the reference to ndlp will be stored into the ndlp field of
8808 * the IOCB for the completion callback function to the RTV Accept Response
8809 * ELS IOCB command.
8810 *
8811 * Return codes
8812 * 0 - Successfully processed rtv iocb (currently always return 0)
8813 **/
8814 static int
8815 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8816 struct lpfc_nodelist *ndlp)
8817 {
8818 int rc = 0;
8819 IOCB_t *icmd;
8820 union lpfc_wqe128 *wqe;
8821 struct lpfc_hba *phba = vport->phba;
8822 struct ls_rjt stat;
8823 struct RTV_RSP *rtv_rsp;
8824 uint8_t *pcmd;
8825 struct lpfc_iocbq *elsiocb;
8826 uint32_t cmdsize;
8827 u32 ulp_context;
8828
8829 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8830 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8831 /* reject the unsolicited RTV request and done with it */
8832 goto reject_out;
8833
8834 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8835 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8836 lpfc_max_els_tries, ndlp,
8837 ndlp->nlp_DID, ELS_CMD_ACC);
8838
8839 if (!elsiocb)
8840 return 1;
8841
8842 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8843 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8844 pcmd += sizeof(uint32_t); /* Skip past command */
8845
8846 ulp_context = get_job_ulpcontext(phba, elsiocb);
8847 /* use the command's xri in the response */
8848 if (phba->sli_rev == LPFC_SLI_REV4) {
8849 wqe = &elsiocb->wqe;
8850 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8851 get_job_ulpcontext(phba, cmdiocb));
8852 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8853 get_job_rcvoxid(phba, cmdiocb));
8854 } else {
8855 icmd = &elsiocb->iocb;
8856 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8857 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
8858 }
8859
8860 rtv_rsp = (struct RTV_RSP *)pcmd;
8861
8862 /* populate RTV payload */
8863 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8864 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8865 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8866 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
8867 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
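/* Resulting RTV_RSP sketch (units per the code above; the meaning of the
 * qtov resolution bit is an assumption from the RTV definition): ratov is
 * reported in milliseconds (e.g. fc_ratov = 10 -> 10000), edtov is passed
 * through unchanged, and qtov carries the E_D_TOV resolution flag so the
 * requester knows how to interpret edtov.
 */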
8868
8869 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
8870 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8871 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8872 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8873 "Data: x%x x%x x%x\n",
8874 elsiocb->iotag, ulp_context,
8875 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8876 ndlp->nlp_rpi,
8877 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8878 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8879 phba->fc_stat.elsXmitACC++;
8880 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8881 if (!elsiocb->ndlp) {
8882 lpfc_els_free_iocb(phba, elsiocb);
8883 return 0;
8884 }
8885
8886 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8887 if (rc == IOCB_ERROR) {
8888 lpfc_els_free_iocb(phba, elsiocb);
8889 lpfc_nlp_put(ndlp);
8890 }
8891 return 0;
8892
8893 reject_out:
8894 /* issue rejection response */
8895 stat.un.b.lsRjtRsvd0 = 0;
8896 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8897 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8898 stat.un.b.vendorUnique = 0;
8899 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8900 return 0;
8901 }
8902
8903 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command
8904 * @vport: pointer to a host virtual N_Port data structure.
8905 * @ndlp: pointer to a node-list data structure.
8906 * @did: DID of the target.
8907 * @rrq: Pointer to the rrq struct.
8908 *
8909 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8910 * successful, the completion handler will clear the RRQ.
8911 *
8912 * Return codes
8913 * 0 - Successfully sent rrq els iocb.
8914 * 1 - Failed to send rrq els iocb.
8915 **/
8916 static int
8917 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
8918 uint32_t did, struct lpfc_node_rrq *rrq)
8919 {
8920 struct lpfc_hba *phba = vport->phba;
8921 struct RRQ *els_rrq;
8922 struct lpfc_iocbq *elsiocb;
8923 uint8_t *pcmd;
8924 uint16_t cmdsize;
8925 int ret;
8926
8927 if (!ndlp)
8928 return 1;
8929
8930 /* If ndlp is not NULL, we will bump the reference count on it */
8931 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
8932 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
8933 ELS_CMD_RRQ);
8934 if (!elsiocb)
8935 return 1;
8936
8937 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8938
8939 /* For RRQ request, remainder of payload is Exchange IDs */
8940 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
8941 pcmd += sizeof(uint32_t);
8942 els_rrq = (struct RRQ *) pcmd;
8943
8944 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
8945 bf_set(rrq_rxid, els_rrq, rrq->rxid);
8946 bf_set(rrq_did, els_rrq, vport->fc_myDID);
8947 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
8948 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
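/* Resulting RRQ payload (field mapping taken from the assignments above):
 *
 *   rrq_oxid : physical XRI looked up via sli4_hba.xri_ids[rrq->xritag]
 *   rrq_rxid : responder exchange id saved in rrq->rxid
 *   rrq_did  : our own N_Port ID (vport->fc_myDID)
 *
 * with the words then swapped to wire (big-endian) order.
 */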
8949
8950
8951 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8952 "Issue RRQ: did:x%x",
8953 did, rrq->xritag, rrq->rxid);
8954 elsiocb->context_un.rrq = rrq;
8955 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
8956
8957 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8958 if (!elsiocb->ndlp)
8959 goto io_err;
8960
8961 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8962 if (ret == IOCB_ERROR) {
8963 lpfc_nlp_put(ndlp);
8964 goto io_err;
8965 }
8966 return 0;
8967
8968 io_err:
8969 lpfc_els_free_iocb(phba, elsiocb);
8970 return 1;
8971 }
8972
8973 /**
8974 * lpfc_send_rrq - Sends ELS RRQ if needed.
8975 * @phba: pointer to lpfc hba data structure.
8976 * @rrq: pointer to the active rrq.
8977 *
8978 * This routine will call the lpfc_issue_els_rrq if the rrq is
8979 * still active for the xri. If this function returns a failure then
8980 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
8981 *
8982 * Returns 0 Success.
8983 * 1 Failure.
8984 **/
8985 int
8986 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
8987 {
8988 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
8989 rrq->nlp_DID);
8990 if (!ndlp)
8991 return 1;
8992
8993 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
8994 return lpfc_issue_els_rrq(rrq->vport, ndlp,
8995 rrq->nlp_DID, rrq);
8996 else
8997 return 1;
8998 }
8999
9000 /**
9001 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
9002 * @vport: pointer to a host virtual N_Port data structure.
9003 * @cmdsize: size of the ELS command.
9004 * @oldiocb: pointer to the original lpfc command iocb data structure.
9005 * @ndlp: pointer to a node-list data structure.
9006 *
9007 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
9008 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
9009 *
9010 * Note that the ndlp reference count will be incremented by 1 for holding the
9011 * ndlp and the reference to ndlp will be stored into the ndlp field of
9012 * the IOCB for the completion callback function to the RPL Accept Response
9013 * ELS command.
9014 *
9015 * Return code
9016 * 0 - Successfully issued ACC RPL ELS command
9017 * 1 - Failed to issue ACC RPL ELS command
9018 **/
9019 static int
9020 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
9021 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
9022 {
9023 int rc = 0;
9024 struct lpfc_hba *phba = vport->phba;
9025 IOCB_t *icmd;
9026 union lpfc_wqe128 *wqe;
9027 RPL_RSP rpl_rsp;
9028 struct lpfc_iocbq *elsiocb;
9029 uint8_t *pcmd;
9030 u32 ulp_context;
9031
9032 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
9033 ndlp->nlp_DID, ELS_CMD_ACC);
9034
9035 if (!elsiocb)
9036 return 1;
9037
9038 ulp_context = get_job_ulpcontext(phba, elsiocb);
9039 if (phba->sli_rev == LPFC_SLI_REV4) {
9040 wqe = &elsiocb->wqe;
9041 /* Xri / rx_id */
9042 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
9043 get_job_ulpcontext(phba, oldiocb));
9044 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9045 get_job_rcvoxid(phba, oldiocb));
9046 } else {
9047 icmd = &elsiocb->iocb;
9048 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb);
9049 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
9050 }
9051
9052 pcmd = elsiocb->cmd_dmabuf->virt;
9053 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
9054 pcmd += sizeof(uint16_t);
9055 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
9056 pcmd += sizeof(uint16_t);
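/* Layout note (as observed from the pointer math above, not from a spec):
 * the 32-bit ACC command word is written first, then pcmd advances by only
 * 16 bits so the 16-bit RPL ACC length lands in bytes 2-3 of that same
 * first word; the remaining payload follows from the current pcmd.
 */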
9057
9058 /* Setup the RPL ACC payload */
9059 rpl_rsp.listLen = be32_to_cpu(1);
9060 rpl_rsp.index = 0;
9061 rpl_rsp.port_num_blk.portNum = 0;
9062 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
9063 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
9064 sizeof(struct lpfc_name));
9065 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
9066 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
9067 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9068 "0120 Xmit ELS RPL ACC response tag x%x "
9069 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
9070 "rpi x%x\n",
9071 elsiocb->iotag, ulp_context,
9072 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
9073 ndlp->nlp_rpi);
9074 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
9075 phba->fc_stat.elsXmitACC++;
9076 elsiocb->ndlp = lpfc_nlp_get(ndlp);
9077 if (!elsiocb->ndlp) {
9078 lpfc_els_free_iocb(phba, elsiocb);
9079 return 1;
9080 }
9081
9082 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
9083 if (rc == IOCB_ERROR) {
9084 lpfc_els_free_iocb(phba, elsiocb);
9085 lpfc_nlp_put(ndlp);
9086 return 1;
9087 }
9088
9089 return 0;
9090 }
9091
9092 /**
9093 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
9094 * @vport: pointer to a host virtual N_Port data structure.
9095 * @cmdiocb: pointer to lpfc command iocb data structure.
9096 * @ndlp: pointer to a node-list data structure.
9097 *
9098 * This routine processes Read Port List (RPL) IOCB received as an ELS
9099 * unsolicited event. It first checks the remote port state. If the remote
9100 * port is not in NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE state, it
9101 * invokes the lpfc_els_rsp_reject() routine to send reject response.
9102 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
9103 * to accept the RPL.
9104 *
9105 * Return code
9106 * 0 - Successfully processed rpl iocb (currently always return 0)
9107 **/
9108 static int
9109 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9110 struct lpfc_nodelist *ndlp)
9111 {
9112 struct lpfc_dmabuf *pcmd;
9113 uint32_t *lp;
9114 uint32_t maxsize;
9115 uint16_t cmdsize;
9116 RPL *rpl;
9117 struct ls_rjt stat;
9118
9119 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
9120 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
9121 /* issue rejection response */
9122 stat.un.b.lsRjtRsvd0 = 0;
9123 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
9124 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
9125 stat.un.b.vendorUnique = 0;
9126 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
9127 NULL);
9128 /* rejected the unsolicited RPL request and done with it */
9129 return 0;
9130 }
9131
9132 pcmd = cmdiocb->cmd_dmabuf;
9133 lp = (uint32_t *) pcmd->virt;
9134 rpl = (RPL *) (lp + 1);
9135 maxsize = be32_to_cpu(rpl->maxsize);
9136
9137 /* We support only one port */
9138 if ((rpl->index == 0) &&
9139 ((maxsize == 0) ||
9140 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
9141 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
9142 } else {
9143 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
9144 }
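/* Sizing rationale (sketch): if the requester did not cap the response
 * (maxsize == 0) or its cap is large enough for a full RPL_RSP, reply
 * with the whole structure; otherwise clamp the ACC payload to maxsize
 * 32-bit words so the response never exceeds what was asked for.
 */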
9145 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
9146
9147 return 0;
9148 }
9149
9150 /**
9151 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
9152 * @vport: pointer to a virtual N_Port data structure.
9153 * @cmdiocb: pointer to lpfc command iocb data structure.
9154 * @ndlp: pointer to a node-list data structure.
9155 *
9156 * This routine processes Fibre Channel Address Resolution Protocol
9157 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
9158 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
9159 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
9160 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
9161 * remote PortName is compared against the FC PortName stored in the @vport
9162 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
9163 * compared against the FC NodeName stored in the @vport data structure.
9164 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
9165 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
9166 * invoked to send out FARP Response to the remote node. Before sending the
9167 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
9168 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
9169 * routine is invoked to log into the remote port first.
9170 *
9171 * Return code
9172 * 0 - Either the FARP Match Mode not supported or successfully processed
9173 **/
9174 static int
9175 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9176 struct lpfc_nodelist *ndlp)
9177 {
9178 struct lpfc_dmabuf *pcmd;
9179 uint32_t *lp;
9180 FARP *fp;
9181 uint32_t cnt, did;
9182
9183 did = get_job_els_rsp64_did(vport->phba, cmdiocb);
9184 pcmd = cmdiocb->cmd_dmabuf;
9185 lp = (uint32_t *) pcmd->virt;
9186
9187 lp++;
9188 fp = (FARP *) lp;
9189 /* FARP-REQ received from DID <did> */
9190 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9191 "0601 FARP-REQ received from DID x%x\n", did);
9192 /* We will only support match on WWPN or WWNN */
9193 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
9194 return 0;
9195 }
9196
9197 cnt = 0;
9198 /* If this FARP command is searching for my portname */
9199 if (fp->Mflags & FARP_MATCH_PORT) {
9200 if (memcmp(&fp->RportName, &vport->fc_portname,
9201 sizeof(struct lpfc_name)) == 0)
9202 cnt = 1;
9203 }
9204
9205 /* If this FARP command is searching for my nodename */
9206 if (fp->Mflags & FARP_MATCH_NODE) {
9207 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
9208 sizeof(struct lpfc_name)) == 0)
9209 cnt = 1;
9210 }
9211
9212 if (cnt) {
9213 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
9214 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
9215 /* Log back into the node before sending the FARP. */
9216 if (fp->Rflags & FARP_REQUEST_PLOGI) {
9217 ndlp->nlp_prev_state = ndlp->nlp_state;
9218 lpfc_nlp_set_state(vport, ndlp,
9219 NLP_STE_PLOGI_ISSUE);
9220 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
9221 }
9222
9223 /* Send a FARP response to that node */
9224 if (fp->Rflags & FARP_REQUEST_FARPR)
9225 lpfc_issue_els_farpr(vport, did, 0);
9226 }
9227 }
9228 return 0;
9229 }
9230
9231 /**
9232 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
9233 * @vport: pointer to a host virtual N_Port data structure.
9234 * @cmdiocb: pointer to lpfc command iocb data structure.
9235 * @ndlp: pointer to a node-list data structure.
9236 *
9237 * This routine processes Fibre Channel Address Resolution Protocol
9238 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
9239 * invokes the lpfc_els_rsp_acc() routine to send an Accept (ACC) to the
9240 * remote node for the FARP response request.
9241 *
9242 * Return code
9243 * 0 - Successfully processed FARPR IOCB (currently always return 0)
9244 **/
9245 static int
9246 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9247 struct lpfc_nodelist *ndlp)
9248 {
9249 uint32_t did;
9250
9251 did = get_job_els_rsp64_did(vport->phba, cmdiocb);
9252
9253 /* FARP-RSP received from DID <did> */
9254 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9255 "0600 FARP-RSP received from DID x%x\n", did);
9256 /* ACCEPT the Farp resp request */
9257 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
9258
9259 return 0;
9260 }
9261
9262 /**
9263 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
9264 * @vport: pointer to a host virtual N_Port data structure.
9265 * @cmdiocb: pointer to lpfc command iocb data structure.
9266 * @fan_ndlp: pointer to a node-list data structure.
9267 *
9268 * This routine processes a Fabric Address Notification (FAN) IOCB
9269 * command received as an ELS unsolicited event. The FAN ELS command will
9270 * only be processed on a physical port (i.e., the @vport represents the
9271 * physical port). The fabric NodeName and PortName from the FAN IOCB are
9272 * compared against those in the phba data structure. If any of those is
9273 * different, the lpfc_initial_flogi() routine is invoked to initialize
9274 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
9275 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
9276 * is invoked to register login to the fabric.
9277 *
9278 * Return code
9279 * 0 - Successfully processed fan iocb (currently always return 0).
9280 **/
9281 static int
9282 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9283 struct lpfc_nodelist *fan_ndlp)
9284 {
9285 struct lpfc_hba *phba = vport->phba;
9286 uint32_t *lp;
9287 FAN *fp;
9288
9289 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9290 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9291 fp = (FAN *) ++lp;
9292 /* FAN received; Fan does not have a reply sequence */
9293 if ((vport == phba->pport) &&
9294 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9295 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9296 sizeof(struct lpfc_name))) ||
9297 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9298 sizeof(struct lpfc_name)))) {
9299 /* This port has switched fabrics. FLOGI is required */
9300 lpfc_issue_init_vfi(vport);
9301 } else {
9302 /* FAN verified - skip FLOGI */
9303 vport->fc_myDID = vport->fc_prevDID;
9304 if (phba->sli_rev < LPFC_SLI_REV4)
9305 lpfc_issue_fabric_reglogin(vport);
9306 else {
9307 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9308 "3138 Need register VFI: (x%x/%x)\n",
9309 vport->fc_prevDID, vport->fc_myDID);
9310 lpfc_issue_reg_vfi(vport);
9311 }
9312 }
9313 }
9314 return 0;
9315 }
9316
9317 /**
9318 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9319 * @vport: pointer to a host virtual N_Port data structure.
9320 * @cmdiocb: pointer to lpfc command iocb data structure.
9321 * @ndlp: pointer to a node-list data structure.
9322 *
9323 * Return code
9324 * 0 - Successfully processed edc iocb (currently always return 0)
9325 **/
9326 static int
9327 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9328 struct lpfc_nodelist *ndlp)
9329 {
9330 struct lpfc_hba *phba = vport->phba;
9331 struct fc_els_edc *edc_req;
9332 struct fc_tlv_desc *tlv;
9333 uint8_t *payload;
9334 uint32_t *ptr, dtag;
9335 const char *dtag_nm;
9336 int desc_cnt = 0, bytes_remain;
9337 struct fc_diag_lnkflt_desc *plnkflt;
9338
9339 payload = cmdiocb->cmd_dmabuf->virt;
9340
9341 edc_req = (struct fc_els_edc *)payload;
9342 bytes_remain = be32_to_cpu(edc_req->desc_len);
9343
9344 ptr = (uint32_t *)payload;
9345 lpfc_printf_vlog(vport, KERN_INFO,
9346 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9347 "3319 Rcv EDC payload len %d: x%x x%x x%x\n",
9348 bytes_remain, be32_to_cpu(*ptr),
9349 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
9350
9351 /* No signal support unless there is a congestion descriptor */
9352 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9353 phba->cgn_sig_freq = 0;
9354 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
9355
9356 if (bytes_remain <= 0)
9357 goto out;
9358
9359 tlv = edc_req->desc;
9360
9361 /*
9362 * cycle through EDC diagnostic descriptors to find the
9363 * congestion signaling capability descriptor
9364 */
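/* Each descriptor is a plain TLV (32-bit tag, 32-bit length, then the
 * value).  The helpers already used in this loop do the walking, e.g.:
 *
 *   sz  = FC_TLV_DESC_SZ_FROM_LENGTH(tlv);  (header + value bytes)
 *   tlv = fc_tlv_next_desc(tlv);            (advance to next descriptor)
 *
 * (illustrative use of the existing helpers, not new driver code)
 */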
9365 while (bytes_remain) {
9366 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
9367 lpfc_printf_log(phba, KERN_WARNING,
9368 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9369 "6464 Truncated TLV hdr on "
9370 "Diagnostic descriptor[%d]\n",
9371 desc_cnt);
9372 goto out;
9373 }
9374
9375 dtag = be32_to_cpu(tlv->desc_tag);
9376 switch (dtag) {
9377 case ELS_DTAG_LNK_FAULT_CAP:
9378 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9379 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9380 sizeof(struct fc_diag_lnkflt_desc)) {
9381 lpfc_printf_log(phba, KERN_WARNING,
9382 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9383 "6465 Truncated Link Fault Diagnostic "
9384 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9385 desc_cnt, bytes_remain,
9386 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9387 sizeof(struct fc_diag_lnkflt_desc));
9388 goto out;
9389 }
9390 plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
9391 lpfc_printf_log(phba, KERN_INFO,
9392 LOG_ELS | LOG_LDS_EVENT,
9393 "4626 Link Fault Desc Data: x%08x len x%x "
9394 "da x%x dd x%x interval x%x\n",
9395 be32_to_cpu(plnkflt->desc_tag),
9396 be32_to_cpu(plnkflt->desc_len),
9397 be32_to_cpu(
9398 plnkflt->degrade_activate_threshold),
9399 be32_to_cpu(
9400 plnkflt->degrade_deactivate_threshold),
9401 be32_to_cpu(plnkflt->fec_degrade_interval));
9402 break;
9403 case ELS_DTAG_CG_SIGNAL_CAP:
9404 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9405 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9406 sizeof(struct fc_diag_cg_sig_desc)) {
9407 lpfc_printf_log(
9408 phba, KERN_WARNING, LOG_CGN_MGMT,
9409 "6466 Truncated cgn signal Diagnostic "
9410 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9411 desc_cnt, bytes_remain,
9412 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9413 sizeof(struct fc_diag_cg_sig_desc));
9414 goto out;
9415 }
9416
9417 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
9418 phba->cgn_reg_signal = phba->cgn_init_reg_signal;
9419
9420 /* We start negotiation with lpfc_fabric_cgn_frequency.
9421 * When we process the EDC, we will settle on the
9422 * higher frequency.
9423 */
9424 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9425
9426 lpfc_least_capable_settings(
9427 phba, (struct fc_diag_cg_sig_desc *)tlv);
9428 break;
9429 default:
9430 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
9431 lpfc_printf_log(phba, KERN_WARNING,
9432 LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
9433 "6467 unknown Diagnostic "
9434 "Descriptor[%d]: tag x%x (%s)\n",
9435 desc_cnt, dtag, dtag_nm);
9436 }
9437 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
9438 tlv = fc_tlv_next_desc(tlv);
9439 desc_cnt++;
9440 }
9441 out:
9442 /* Need to send back an ACC */
9443 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
9444
9445 lpfc_config_cgn_signal(phba);
9446 return 0;
9447 }
9448
9449 /**
9450 * lpfc_els_timeout - Handler function for the els timer
9451 * @t: timer context used to obtain the vport.
9452 *
9453 * This routine is invoked by the ELS timer after timeout. It posts the ELS
9454 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
9455 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
9456 * up the worker thread. It is for the worker thread to invoke the routine
9457 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
9458 **/
9459 void
9460 lpfc_els_timeout(struct timer_list *t)
9461 {
9462 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
9463 struct lpfc_hba *phba = vport->phba;
9464 uint32_t tmo_posted;
9465 unsigned long iflag;
9466
9467 spin_lock_irqsave(&vport->work_port_lock, iflag);
9468 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
9469 if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
9470 vport->work_port_events |= WORKER_ELS_TMO;
9471 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
9472
9473 if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
9474 lpfc_worker_wake_up(phba);
9475 return;
9476 }
9477
9478
9479 /**
9480 * lpfc_els_timeout_handler - Process an els timeout event
9481 * @vport: pointer to a virtual N_Port data structure.
9482 *
9483 * This routine is the actual handler function that processes an ELS timeout
9484 * event. It walks the ELS ring to get and abort all the IOCBs (except the
9485 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
9486 * invoking the lpfc_sli_issue_abort_iotag() routine.
9487 **/
9488 void
9489 lpfc_els_timeout_handler(struct lpfc_vport *vport)
9490 {
9491 struct lpfc_hba *phba = vport->phba;
9492 struct lpfc_sli_ring *pring;
9493 struct lpfc_iocbq *tmp_iocb, *piocb;
9494 IOCB_t *cmd = NULL;
9495 struct lpfc_dmabuf *pcmd;
9496 uint32_t els_command = 0;
9497 uint32_t timeout;
9498 uint32_t remote_ID = 0xffffffff;
9499 LIST_HEAD(abort_list);
9500 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0;
9501
9502
9503 timeout = (uint32_t)(phba->fc_ratov << 1);
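/* The sweep interval is twice R_A_TOV (the << 1 above); assuming the
 * common R_A_TOV of 10 seconds, each pass runs roughly every 20 seconds.
 * Entries whose drvrTimeout has not yet expired are only decremented in
 * the walk below; fully expired ones are collected on abort_list.
 */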
9504
9505 pring = lpfc_phba_elsring(phba);
9506 if (unlikely(!pring))
9507 return;
9508
9509 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
9510 return;
9511
9512 spin_lock_irq(&phba->hbalock);
9513 if (phba->sli_rev == LPFC_SLI_REV4)
9514 spin_lock(&pring->ring_lock);
9515
9516 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9517 ulp_command = get_job_cmnd(phba, piocb);
9518 ulp_context = get_job_ulpcontext(phba, piocb);
9519 did = get_job_els_rsp64_did(phba, piocb);
9520
9521 if (phba->sli_rev == LPFC_SLI_REV4) {
9522 iotag = get_wqe_reqtag(piocb);
9523 } else {
9524 cmd = &piocb->iocb;
9525 iotag = cmd->ulpIoTag;
9526 }
9527
9528 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 ||
9529 ulp_command == CMD_ABORT_XRI_CX ||
9530 ulp_command == CMD_ABORT_XRI_CN ||
9531 ulp_command == CMD_CLOSE_XRI_CN)
9532 continue;
9533
9534 if (piocb->vport != vport)
9535 continue;
9536
9537 pcmd = piocb->cmd_dmabuf;
9538 if (pcmd)
9539 els_command = *(uint32_t *) (pcmd->virt);
9540
9541 if (els_command == ELS_CMD_FARP ||
9542 els_command == ELS_CMD_FARPR ||
9543 els_command == ELS_CMD_FDISC)
9544 continue;
9545
9546 if (piocb->drvrTimeout > 0) {
9547 if (piocb->drvrTimeout >= timeout)
9548 piocb->drvrTimeout -= timeout;
9549 else
9550 piocb->drvrTimeout = 0;
9551 continue;
9552 }
9553
9554 remote_ID = 0xffffffff;
9555 if (ulp_command != CMD_GEN_REQUEST64_CR) {
9556 remote_ID = did;
9557 } else {
9558 struct lpfc_nodelist *ndlp;
9559 ndlp = __lpfc_findnode_rpi(vport, ulp_context);
9560 if (ndlp)
9561 remote_ID = ndlp->nlp_DID;
9562 }
9563 list_add_tail(&piocb->dlist, &abort_list);
9564 }
9565 if (phba->sli_rev == LPFC_SLI_REV4)
9566 spin_unlock(&pring->ring_lock);
9567 spin_unlock_irq(&phba->hbalock);
9568
9569 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
9570 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9571 "0127 ELS timeout Data: x%x x%x x%x "
9572 "x%x\n", els_command,
9573 remote_ID, ulp_command, iotag);
9574
9575 spin_lock_irq(&phba->hbalock);
9576 list_del_init(&piocb->dlist);
9577 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
9578 spin_unlock_irq(&phba->hbalock);
9579 }
9580
9581 /* Make sure HBA is alive */
9582 lpfc_issue_hb_tmo(phba);
9583
9584 if (!list_empty(&pring->txcmplq))
9585 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
9586 mod_timer(&vport->els_tmofunc,
9587 jiffies + msecs_to_jiffies(1000 * timeout));
9588 }
9589
9590 /**
9591 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
9592 * @vport: pointer to a host virtual N_Port data structure.
9593 *
9594 * This routine is used to clean up all the outstanding ELS commands on a
9595 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
9596 * routine. After that, it walks the ELS transmit queue to remove all the
9597 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
9598 * the IOCBs with a non-NULL completion callback function, the callback
9599 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9600 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
9601 * callback function, the IOCB will simply be released. Finally, it walks
9602 * the ELS transmit completion queue to issue an abort IOCB to any transmit
9603 * completion queue IOCB that is associated with the @vport and is not
9604 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
9605 * part of the discovery state machine) out to HBA by invoking the
9606 * lpfc_sli_issue_abort_iotag() routine. Note that although this function issues
9607 * an abort IOCB for each transmit completion queued IOCB, it does not guarantee
9608 * that the IOCBs are aborted by the time this function returns.
9609 **/
9610 void
9611 lpfc_els_flush_cmd(struct lpfc_vport *vport)
9612 {
9613 LIST_HEAD(abort_list);
9614 LIST_HEAD(cancel_list);
9615 struct lpfc_hba *phba = vport->phba;
9616 struct lpfc_sli_ring *pring;
9617 struct lpfc_iocbq *tmp_iocb, *piocb;
9618 u32 ulp_command;
9619 unsigned long iflags = 0;
9620 bool mbx_tmo_err;
9621
9622 lpfc_fabric_abort_vport(vport);
9623
9624 /*
9625 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
9626 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
9627 * ultimately grabs the ring_lock, the driver must splice the list into
9628 * a working list and release the locks before calling the abort.
9629 */
9630 spin_lock_irqsave(&phba->hbalock, iflags);
9631 pring = lpfc_phba_elsring(phba);
9632
9633 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
9634 if (unlikely(!pring)) {
9635 spin_unlock_irqrestore(&phba->hbalock, iflags);
9636 return;
9637 }
9638
9639 if (phba->sli_rev == LPFC_SLI_REV4)
9640 spin_lock(&pring->ring_lock);
9641
9642 mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
9643 /* First we need to issue aborts to outstanding cmds on txcmpl */
9644 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9645 if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
9646 continue;
9647
9648 if (piocb->vport != vport)
9649 continue;
9650
9651 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
9652 continue;
9653
9654 /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs,
9655 * or GEN_REQUESTs waiting for a CQE response.
9656 */
9657 ulp_command = get_job_cmnd(phba, piocb);
9658 if (ulp_command == CMD_ELS_REQUEST64_WQE ||
9659 ulp_command == CMD_XMIT_ELS_RSP64_WQE) {
9660 list_add_tail(&piocb->dlist, &abort_list);
9661
9662 /* If the link is down when flushing ELS commands
9663 * the firmware will not complete them till after
9664 * the link comes back up. This may confuse
9665 * discovery for the new link up, so we need to
9666 * change the compl routine to just clean up the iocb
9667 * and avoid any retry logic.
9668 */
9669 if (phba->link_state == LPFC_LINK_DOWN)
9670 piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
9671 } else if (ulp_command == CMD_GEN_REQUEST64_CR ||
9672 mbx_tmo_err)
9673 list_add_tail(&piocb->dlist, &abort_list);
9674 }
9675
9676 if (phba->sli_rev == LPFC_SLI_REV4)
9677 spin_unlock(&pring->ring_lock);
9678 spin_unlock_irqrestore(&phba->hbalock, iflags);
9679
9680 /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
9681 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
9682 spin_lock_irqsave(&phba->hbalock, iflags);
9683 list_del_init(&piocb->dlist);
9684 if (mbx_tmo_err || !(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
9685 list_move_tail(&piocb->list, &cancel_list);
9686 else
9687 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
9688
9689 spin_unlock_irqrestore(&phba->hbalock, iflags);
9690 }
9691 if (!list_empty(&cancel_list))
9692 lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
9693 IOERR_SLI_ABORTED);
9694 else
9695 /* Make sure HBA is alive */
9696 lpfc_issue_hb_tmo(phba);
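/* Design note (rationale inferred from the code above, not documented
 * elsewhere): when a mailbox timeout is being handled or the SLI layer is
 * no longer active, issuing ABORT requests to the firmware is pointless,
 * so those IOCBs were moved to cancel_list and completed locally with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED instead.
 */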
9697
9698 if (!list_empty(&abort_list))
9699 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9700 "3387 abort list for txq not empty\n");
9701 INIT_LIST_HEAD(&abort_list);
9702
9703 spin_lock_irqsave(&phba->hbalock, iflags);
9704 if (phba->sli_rev == LPFC_SLI_REV4)
9705 spin_lock(&pring->ring_lock);
9706
9707 /* No need to abort the txq list,
9708 * just queue them up for lpfc_sli_cancel_iocbs
9709 */
9710 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
9711 ulp_command = get_job_cmnd(phba, piocb);
9712
9713 if (piocb->cmd_flag & LPFC_IO_LIBDFC)
9714 continue;
9715
9716 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
9717 if (ulp_command == CMD_QUE_RING_BUF_CN ||
9718 ulp_command == CMD_QUE_RING_BUF64_CN ||
9719 ulp_command == CMD_CLOSE_XRI_CN ||
9720 ulp_command == CMD_ABORT_XRI_CN ||
9721 ulp_command == CMD_ABORT_XRI_CX)
9722 continue;
9723
9724 if (piocb->vport != vport)
9725 continue;
9726
9727 list_del_init(&piocb->list);
9728 list_add_tail(&piocb->list, &abort_list);
9729 }
9730
9731 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
9732 if (vport == phba->pport) {
9733 list_for_each_entry_safe(piocb, tmp_iocb,
9734 &phba->fabric_iocb_list, list) {
9735 list_del_init(&piocb->list);
9736 list_add_tail(&piocb->list, &abort_list);
9737 }
9738 }
9739
9740 if (phba->sli_rev == LPFC_SLI_REV4)
9741 spin_unlock(&pring->ring_lock);
9742 spin_unlock_irqrestore(&phba->hbalock, iflags);
9743
9744 /* Cancel all the IOCBs from the completions list */
9745 lpfc_sli_cancel_iocbs(phba, &abort_list,
9746 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
9747
9748 return;
9749 }
9750
9751 /**
9752 * lpfc_els_flush_all_cmd - Clean up all the outstanding ELS commands on an HBA
9753 * @phba: pointer to lpfc hba data structure.
9754 *
9755 * This routine is used to clean up all the outstanding ELS commands on a
9756 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
9757 * routine. After that, it walks the ELS transmit queue to remove all the
9758 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
9759 * the IOCBs with the completion callback function associated, the callback
9760 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9761 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
9762 * callback function associated, the IOCB will simply be released. Finally,
9763 * it walks the ELS transmit completion queue to issue an abort IOCB to any
9764 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
9765 * management plane IOCBs that are not part of the discovery state machine)
9766 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
9767 **/
9768 void
9769 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
9770 {
9771 struct lpfc_vport *vport;
9772
9773 spin_lock_irq(&phba->port_list_lock);
9774 list_for_each_entry(vport, &phba->port_list, listentry)
9775 lpfc_els_flush_cmd(vport);
9776 spin_unlock_irq(&phba->port_list_lock);
9777
9778 return;
9779 }
9780
9781 /**
9782 * lpfc_send_els_failure_event - Posts an ELS command failure event
9783 * @phba: Pointer to hba context object.
9784 * @cmdiocbp: Pointer to command iocb which reported error.
9785 * @rspiocbp: Pointer to response iocb which reported error.
9786 *
9787 * This function sends an event when there is an ELS command
9788 * failure.
9789 **/
9790 void
9791 lpfc_send_els_failure_event(struct lpfc_hba *phba,
9792 struct lpfc_iocbq *cmdiocbp,
9793 struct lpfc_iocbq *rspiocbp)
9794 {
9795 struct lpfc_vport *vport = cmdiocbp->vport;
9796 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9797 struct lpfc_lsrjt_event lsrjt_event;
9798 struct lpfc_fabric_event_header fabric_event;
9799 struct ls_rjt stat;
9800 struct lpfc_nodelist *ndlp;
9801 uint32_t *pcmd;
9802 u32 ulp_status, ulp_word4;
9803
9804 ndlp = cmdiocbp->ndlp;
9805 if (!ndlp)
9806 return;
9807
9808 ulp_status = get_job_ulpstatus(phba, rspiocbp);
9809 ulp_word4 = get_job_word4(phba, rspiocbp);
9810
9811 if (ulp_status == IOSTAT_LS_RJT) {
9812 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
9813 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
9814 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
9815 sizeof(struct lpfc_name));
9816 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
9817 sizeof(struct lpfc_name));
9818 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt;
9819 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
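/* For an LS_RJT completion, ulp_word4 carries the reason and explanation codes */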
9820 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
9821 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
9822 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
9823 fc_host_post_vendor_event(shost,
9824 fc_get_event_number(),
9825 sizeof(lsrjt_event),
9826 (char *)&lsrjt_event,
9827 LPFC_NL_VENDOR_ID);
9828 return;
9829 }
9830 if (ulp_status == IOSTAT_NPORT_BSY ||
9831 ulp_status == IOSTAT_FABRIC_BSY) {
9832 fabric_event.event_type = FC_REG_FABRIC_EVENT;
9833 if (ulp_status == IOSTAT_NPORT_BSY)
9834 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
9835 else
9836 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
9837 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
9838 sizeof(struct lpfc_name));
9839 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
9840 sizeof(struct lpfc_name));
9841 fc_host_post_vendor_event(shost,
9842 fc_get_event_number(),
9843 sizeof(fabric_event),
9844 (char *)&fabric_event,
9845 LPFC_NL_VENDOR_ID);
9846 return;
9847 }
9848
9849 }
9850
9851 /**
9852 * lpfc_send_els_event - Posts unsolicited els event
9853 * @vport: Pointer to vport object.
9854 * @ndlp: Pointer FC node object.
9855 * @payload: ELS command code type.
9856 *
9857 * This function posts an event when there is an incoming
9858 * unsolicited ELS command.
9859 **/
9860 static void
9861 lpfc_send_els_event(struct lpfc_vport *vport,
9862 struct lpfc_nodelist *ndlp,
9863 uint32_t *payload)
9864 {
9865 struct lpfc_els_event_header *els_data = NULL;
9866 struct lpfc_logo_event *logo_data = NULL;
9867 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9868
9869 if (*payload == ELS_CMD_LOGO) {
9870 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
9871 if (!logo_data) {
9872 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9873 "0148 Failed to allocate memory "
9874 "for LOGO event\n");
9875 return;
9876 }
9877 els_data = &logo_data->header;
9878 } else {
9879 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
9880 GFP_KERNEL);
9881 if (!els_data) {
9882 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9883 "0149 Failed to allocate memory "
9884 "for ELS event\n");
9885 return;
9886 }
9887 }
9888 els_data->event_type = FC_REG_ELS_EVENT;
9889 switch (*payload) {
9890 case ELS_CMD_PLOGI:
9891 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
9892 break;
9893 case ELS_CMD_PRLO:
9894 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
9895 break;
9896 case ELS_CMD_ADISC:
9897 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
9898 break;
9899 case ELS_CMD_LOGO:
9900 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
9901 /* Copy the WWPN in the LOGO payload */
9902 memcpy(logo_data->logo_wwpn, &payload[2],
9903 sizeof(struct lpfc_name));
9904 break;
9905 default:
9906 kfree(els_data);
9907 return;
9908 }
9909 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
9910 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
9911 if (*payload == ELS_CMD_LOGO) {
9912 fc_host_post_vendor_event(shost,
9913 fc_get_event_number(),
9914 sizeof(struct lpfc_logo_event),
9915 (char *)logo_data,
9916 LPFC_NL_VENDOR_ID);
9917 kfree(logo_data);
9918 } else {
9919 fc_host_post_vendor_event(shost,
9920 fc_get_event_number(),
9921 sizeof(struct lpfc_els_event_header),
9922 (char *)els_data,
9923 LPFC_NL_VENDOR_ID);
9924 kfree(els_data);
9925 }
9926
9927 return;
9928 }
9929
9930
9931 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
9932 FC_FPIN_LI_EVT_TYPES_INIT);
9933
9934 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types,
9935 FC_FPIN_DELI_EVT_TYPES_INIT);
9936
9937 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types,
9938 FC_FPIN_CONGN_EVT_TYPES_INIT);
9939
9940 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm,
9941 fc_fpin_congn_severity_types,
9942 FC_FPIN_CONGN_SEVERITY_INIT);
9943
9944
9945 /**
9946 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port
9947 * @phba: Pointer to phba object.
9948 * @wwnlist: Pointer to list of WWPNs in FPIN payload
9949 * @cnt: count of WWPNs in FPIN payload
9950 *
9951 * This routine is called for LI and PC descriptors.
9952 * Output is limited to 6 log messages, with up to 6 WWPNs per log message.
9953 */
9954 static void
9955 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
9956 {
9957 char buf[LPFC_FPIN_WWPN_LINE_SZ];
9958 __be64 wwn;
9959 u64 wwpn;
9960 int i, len;
9961 int line = 0;
9962 int wcnt = 0;
9963 bool endit = false;
9964
9965 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:");
9966 for (i = 0; i < cnt; i++) {
9967 /* Are we on the last WWPN */
9968 if (i == (cnt - 1))
9969 endit = true;
9970
9971 /* Extract the next WWPN from the payload */
9972 wwn = *wwnlist++;
9973 wwpn = be64_to_cpu(wwn);
9974 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
9975 " %016llx", wwpn);
9976
9977 /* Log a message if we are on the last WWPN
9978 * or if we hit the max allowed per message.
9979 */
9980 wcnt++;
9981 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) {
9982 buf[len] = 0;
9983 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9984 "4686 %s\n", buf);
9985
9986 /* Check if we reached the last WWPN */
9987 if (endit)
9988 return;
9989
9990 /* Limit the number of log message displayed per FPIN */
9991 line++;
9992 if (line == LPFC_FPIN_WWPN_NUM_LINE) {
9993 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9994 "4687 %d WWPNs Truncated\n",
9995 cnt - i - 1);
9996 return;
9997 }
9998
9999 /* Start over with next log message */
10000 wcnt = 0;
10001 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ,
10002 "Additional WWPNs:");
10003 }
10004 }
10005 }
10006
10007 /**
10008 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
10009 * @phba: Pointer to phba object.
10010 * @tlv: Pointer to the Link Integrity Notification Descriptor.
10011 *
10012 * This function processes a Link Integrity FPIN event by logging a message.
10013 **/
10014 static void
10015 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10016 {
10017 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
10018 const char *li_evt_str;
10019 u32 li_evt, cnt;
10020
10021 li_evt = be16_to_cpu(li->event_type);
10022 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
10023 cnt = be32_to_cpu(li->pname_count);
10024
10025 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10026 "4680 FPIN Link Integrity %s (x%x) "
10027 "Detecting PN x%016llx Attached PN x%016llx "
10028 "Duration %d mSecs Count %d Port Cnt %d\n",
10029 li_evt_str, li_evt,
10030 be64_to_cpu(li->detecting_wwpn),
10031 be64_to_cpu(li->attached_wwpn),
10032 be32_to_cpu(li->event_threshold),
10033 be32_to_cpu(li->event_count), cnt);
10034
10035 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt);
10036 }
10037
10038 /**
10039 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event.
10040 * @phba: Pointer to hba object.
10041 * @tlv: Pointer to the Delivery Notification Descriptor TLV
10042 *
10043 * This function processes a Delivery FPIN event by logging a message.
10044 **/
10045 static void
10046 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10047 {
10048 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv;
10049 const char *del_rsn_str;
10050 u32 del_rsn;
10051 __be32 *frame;
10052
10053 del_rsn = be16_to_cpu(del->deli_reason_code);
10054 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn);
10055
10056 /* Skip over desc_tag/desc_len header to payload */
10057 frame = (__be32 *)(del + 1);
10058
10059 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10060 "4681 FPIN Delivery %s (x%x) "
10061 "Detecting PN x%016llx Attached PN x%016llx "
10062 "DiscHdr0 x%08x "
10063 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x "
10064 "DiscHdr4 x%08x DiscHdr5 x%08x\n",
10065 del_rsn_str, del_rsn,
10066 be64_to_cpu(del->detecting_wwpn),
10067 be64_to_cpu(del->attached_wwpn),
10068 be32_to_cpu(frame[0]),
10069 be32_to_cpu(frame[1]),
10070 be32_to_cpu(frame[2]),
10071 be32_to_cpu(frame[3]),
10072 be32_to_cpu(frame[4]),
10073 be32_to_cpu(frame[5]));
10074 }
10075
10076 /**
10077 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event.
10078 * @phba: Pointer to hba object.
10079 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV
10080 *
10081 * This function processes a Peer Congestion FPIN event by logging a message.
10082 **/
10083 static void
10084 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10085 {
10086 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv;
10087 const char *pc_evt_str;
10088 u32 pc_evt, cnt;
10089
10090 pc_evt = be16_to_cpu(pc->event_type);
10091 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
10092 cnt = be32_to_cpu(pc->pname_count);
10093
10094 /* Capture FPIN frequency */
10095 phba->cgn_fpin_frequency = be32_to_cpu(pc->event_period);
10096
10097 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
10098 "4684 FPIN Peer Congestion %s (x%x) "
10099 "Duration %d mSecs "
10100 "Detecting PN x%016llx Attached PN x%016llx "
10101 "Impacted Port Cnt %d\n",
10102 pc_evt_str, pc_evt,
10103 be32_to_cpu(pc->event_period),
10104 be64_to_cpu(pc->detecting_wwpn),
10105 be64_to_cpu(pc->attached_wwpn),
10106 cnt);
10107
10108 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt);
10109 }
10110
10111 /**
10112 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification
10113 * @phba: Pointer to hba object.
10114 * @tlv: Pointer to the Congestion Notification Descriptor TLV
10115 *
10116 * This function processes an FPIN Congestion Notification. The notification
10117 * could be an Alarm or a Warning. This routine feeds that data into the
10118 * driver's running congestion algorithm and also logs a message for the
10119 * FPIN. It returns 1 if the message should be delivered to the upper
10120 * layer, or 0 if it should not.
10121 **/
10122 static int
10123 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
10124 {
10125 struct lpfc_cgn_info *cp;
10126 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv;
10127 const char *cgn_evt_str;
10128 u32 cgn_evt;
10129 const char *cgn_sev_str;
10130 u32 cgn_sev;
10131 uint16_t value;
10132 u32 crc;
10133 bool nm_log = false;
10134 int rc = 1;
10135
10136 cgn_evt = be16_to_cpu(cgn->event_type);
10137 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt);
10138 cgn_sev = cgn->severity;
10139 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev);
10140
10141 /* The driver only takes action on a Credit Stall or Oversubscription
10142 * event type to engage the IO algorithm. The driver prints an
10143 * unmaskable message only for Lost Credit and Credit Stall.
10144 * TODO: Still need to have definition of host action on clear,
10145 * lost credit and device specific event types.
10146 */
10147 switch (cgn_evt) {
10148 case FPIN_CONGN_LOST_CREDIT:
10149 nm_log = true;
10150 break;
10151 case FPIN_CONGN_CREDIT_STALL:
10152 nm_log = true;
10153 fallthrough;
10154 case FPIN_CONGN_OVERSUBSCRIPTION:
10155 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION)
10156 nm_log = false;
10157 switch (cgn_sev) {
10158 case FPIN_CONGN_SEVERITY_ERROR:
10159 /* Take action here for an Alarm event */
10160 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
10161 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
10162 /* Track alarm cnt for SYNC_WQE */
10163 atomic_inc(&phba->cgn_sync_alarm_cnt);
10164 }
10165 /* Track alarm cnt for cgn_info regardless
10166 * of whether CMF is configured for Signals
10167 * or FPINs.
10168 */
10169 atomic_inc(&phba->cgn_fabric_alarm_cnt);
10170 goto cleanup;
10171 }
10172 break;
10173 case FPIN_CONGN_SEVERITY_WARNING:
10174 /* Take action here for a Warning event */
10175 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
10176 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
10177 /* Track warning cnt for SYNC_WQE */
10178 atomic_inc(&phba->cgn_sync_warn_cnt);
10179 }
10180 /* Track warning cnt and freq for cgn_info
10181 * regardless of whether CMF is configured for
10182 * Signals or FPINs.
10183 */
10184 atomic_inc(&phba->cgn_fabric_warn_cnt);
10185 cleanup:
10186 /* Save frequency in ms */
10187 phba->cgn_fpin_frequency =
10188 be32_to_cpu(cgn->event_period);
10189 value = phba->cgn_fpin_frequency;
10190 if (phba->cgn_i) {
10191 cp = (struct lpfc_cgn_info *)
10192 phba->cgn_i->virt;
10193 cp->cgn_alarm_freq =
10194 cpu_to_le16(value);
10195 cp->cgn_warn_freq =
10196 cpu_to_le16(value);
10197 crc = lpfc_cgn_calc_crc32
10198 (cp,
10199 LPFC_CGN_INFO_SZ,
10200 LPFC_CGN_CRC32_SEED);
10201 cp->cgn_info_crc = cpu_to_le32(crc);
10202 }
10203
10204 /* Don't deliver to upper layer since
10205 * driver took action on this tlv.
10206 */
10207 rc = 0;
10208 }
10209 break;
10210 }
10211 break;
10212 }
10213
10214 /* Change the log level to unmaskable for the following event types. */
10215 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO),
10216 LOG_CGN_MGMT | LOG_ELS,
10217 "4683 FPIN CONGESTION %s type %s (x%x) Event "
10218 "Duration %d mSecs\n",
10219 cgn_sev_str, cgn_evt_str, cgn_evt,
10220 be32_to_cpu(cgn->event_period));
10221 return rc;
10222 }
10223
10224 void
10225 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length)
10226 {
10227 struct lpfc_hba *phba = vport->phba;
10228 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p;
10229 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv;
10230 const char *dtag_nm;
10231 int desc_cnt = 0, bytes_remain, cnt;
10232 u32 dtag, deliver = 0;
10233 int len;
10234
10235 /* FPINs handled only if we are in the right discovery state */
10236 if (vport->port_state < LPFC_DISC_AUTH)
10237 return;
10238
10239 /* make sure there is the full fpin header */
10240 if (fpin_length < sizeof(struct fc_els_fpin))
10241 return;
10242
10243 /* Sanity check descriptor length. The desc_len value does not
10244 * include space for the ELS command and the desc_len fields.
10245 */
10246 len = be32_to_cpu(fpin->desc_len);
10247 if (fpin_length < len + sizeof(struct fc_els_fpin)) {
10248 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10249 "4671 Bad ELS FPIN length %d: %d\n",
10250 len, fpin_length);
10251 return;
10252 }
10253
10254 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
10255 first_tlv = tlv;
10256 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
10257 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
10258
10259 /* process each descriptor separately */
10260 while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
10261 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
10262 dtag = be32_to_cpu(tlv->desc_tag);
10263 switch (dtag) {
10264 case ELS_DTAG_LNK_INTEGRITY:
10265 lpfc_els_rcv_fpin_li(phba, tlv);
10266 deliver = 1;
10267 break;
10268 case ELS_DTAG_DELIVERY:
10269 lpfc_els_rcv_fpin_del(phba, tlv);
10270 deliver = 1;
10271 break;
10272 case ELS_DTAG_PEER_CONGEST:
10273 lpfc_els_rcv_fpin_peer_cgn(phba, tlv);
10274 deliver = 1;
10275 break;
10276 case ELS_DTAG_CONGESTION:
10277 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv);
10278 break;
10279 default:
10280 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
10281 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10282 "4678 unknown FPIN descriptor[%d]: "
10283 "tag x%x (%s)\n",
10284 desc_cnt, dtag, dtag_nm);
10285
10286 /* If descriptor is bad, drop the rest of the data */
10287 return;
10288 }
10289 lpfc_cgn_update_stat(phba, dtag);
10290 cnt = be32_to_cpu(tlv->desc_len);
10291
10292 /* Sanity check descriptor length. The desc_len value does not
10293 * include space for the desc_tag and the desc_len fields.
10294 */
10295 len -= (cnt + sizeof(struct fc_tlv_desc));
10296 if (len < 0) {
10297 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
10298 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10299 "4672 Bad FPIN descriptor TLV length "
10300 "%d: %d %d %s\n",
10301 cnt, len, fpin_length, dtag_nm);
10302 return;
10303 }
10304
10305 current_tlv = tlv;
10306 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
10307 tlv = fc_tlv_next_desc(tlv);
10308
10309 /* Format payload such that the FPIN delivered to the
10310 * upper layer is a single descriptor FPIN.
10311 */
10312 if (desc_cnt)
10313 memcpy(first_tlv, current_tlv,
10314 (cnt + sizeof(struct fc_els_fpin)));
10315
10316 /* Adjust the length so that it only reflects a
10317 * single descriptor FPIN.
10318 */
10319 fpin_length = cnt + sizeof(struct fc_els_fpin);
10320 fpin->desc_len = cpu_to_be32(fpin_length);
10321 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */
10322
10323 /* Send every descriptor individually to the upper layer */
10324 if (deliver)
10325 fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
10326 fpin_length, (char *)fpin, 0);
10327 desc_cnt++;
10328 }
10329 }
10330
10331 /**
10332 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
10333 * @phba: pointer to lpfc hba data structure.
10334 * @pring: pointer to a SLI ring.
10335 * @vport: pointer to a host virtual N_Port data structure.
10336 * @elsiocb: pointer to lpfc els command iocb data structure.
10337 *
10338 * This routine is used for processing the IOCB associated with an unsolicited
10339 * event. It first determines whether there is an existing ndlp that matches
10340 * the DID from the unsolicited IOCB. If not, it will create a new one with
10341 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
10342 * IOCB is then used to invoke the proper routine and to set up proper state
10343 * of the discovery state machine.
10344 **/
10345 static void
10346 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10347 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
10348 {
10349 struct lpfc_nodelist *ndlp;
10350 struct ls_rjt stat;
10351 u32 *payload, payload_len;
10352 u32 cmd = 0, did = 0, newnode, status = 0;
10353 uint8_t rjt_exp, rjt_err = 0, init_link = 0;
10354 struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
10355 LPFC_MBOXQ_t *mbox;
10356
10357 if (!vport || !elsiocb->cmd_dmabuf)
10358 goto dropit;
10359
10360 newnode = 0;
10361 wcqe_cmpl = &elsiocb->wcqe_cmpl;
10362 payload = elsiocb->cmd_dmabuf->virt;
10363 if (phba->sli_rev == LPFC_SLI_REV4)
10364 payload_len = wcqe_cmpl->total_data_placed;
10365 else
10366 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
10367 status = get_job_ulpstatus(phba, elsiocb);
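/* The ELS command code is carried in the first word of the payload */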
10368 cmd = *payload;
10369 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
10370 lpfc_sli3_post_buffer(phba, pring, 1);
10371
10372 did = get_job_els_rsp64_did(phba, elsiocb);
10373 if (status) {
10374 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10375 "RCV Unsol ELS: status:x%x/x%x did:x%x",
10376 status, get_job_word4(phba, elsiocb), did);
10377 goto dropit;
10378 }
10379
10380 /* Check to see if link went down during discovery */
10381 if (lpfc_els_chk_latt(vport))
10382 goto dropit;
10383
10384 /* Ignore traffic received during vport shutdown. */
10385 if (test_bit(FC_UNLOADING, &vport->load_flag))
10386 goto dropit;
10387
10388 /* If NPort discovery is delayed drop incoming ELS */
10389 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag) &&
10390 cmd != ELS_CMD_PLOGI)
10391 goto dropit;
10392
10393 ndlp = lpfc_findnode_did(vport, did);
10394 if (!ndlp) {
10395 /* Cannot find existing Fabric ndlp, so allocate a new one */
10396 ndlp = lpfc_nlp_init(vport, did);
10397 if (!ndlp)
10398 goto dropit;
10399 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
10400 newnode = 1;
10401 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
10402 ndlp->nlp_type |= NLP_FABRIC;
10403 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
10404 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
10405 newnode = 1;
10406 }
10407
10408 phba->fc_stat.elsRcvFrame++;
10409
10410 /*
10411 * Do not process any unsolicited ELS commands
10412 * if the ndlp is in DEV_LOSS
10413 */
10414 spin_lock_irq(&ndlp->lock);
10415 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
10416 spin_unlock_irq(&ndlp->lock);
10417 if (newnode)
10418 lpfc_nlp_put(ndlp);
10419 goto dropit;
10420 }
10421 spin_unlock_irq(&ndlp->lock);
10422
10423 elsiocb->ndlp = lpfc_nlp_get(ndlp);
10424 if (!elsiocb->ndlp)
10425 goto dropit;
10426 elsiocb->vport = vport;
10427
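/* An RSCN command word also encodes payload length; mask it off so
 * the switch below can match on the command value alone.
 */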
10428 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
10429 cmd &= ELS_CMD_MASK;
10430 }
10431 /* ELS command <elsCmd> received from NPORT <did> */
10432 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10433 "0112 ELS command x%x received from NPORT x%x "
10434 "refcnt %d Data: x%x x%lx x%x x%x\n",
10435 cmd, did, kref_read(&ndlp->kref), vport->port_state,
10436 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
10437
10438 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
10439 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
10440 (cmd != ELS_CMD_FLOGI) &&
10441 !((cmd == ELS_CMD_PLOGI) && test_bit(FC_PT2PT, &vport->fc_flag))) {
10442 rjt_err = LSRJT_LOGICAL_BSY;
10443 rjt_exp = LSEXP_NOTHING_MORE;
10444 goto lsrjt;
10445 }
10446
10447 switch (cmd) {
10448 case ELS_CMD_PLOGI:
10449 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10450 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
10451 did, vport->port_state, ndlp->nlp_flag);
10452
10453 phba->fc_stat.elsRcvPLOGI++;
10454 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
10455 if (phba->sli_rev == LPFC_SLI_REV4 &&
10456 test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
10457 vport->fc_prevDID = vport->fc_myDID;
10458 /* Our DID needs to be updated before registering
10459 * the vfi. This is done in lpfc_rcv_plogi but
10460 * that is called after the reg_vfi.
10461 */
10462 vport->fc_myDID =
10463 bf_get(els_rsp64_sid,
10464 &elsiocb->wqe.xmit_els_rsp);
10465 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10466 "3312 Remote port assigned DID x%x "
10467 "%x\n", vport->fc_myDID,
10468 vport->fc_prevDID);
10469 }
10470
10471 lpfc_send_els_event(vport, ndlp, payload);
10472
10473 /* If Nport discovery is delayed, reject PLOGIs */
10474 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) {
10475 rjt_err = LSRJT_UNABLE_TPC;
10476 rjt_exp = LSEXP_NOTHING_MORE;
10477 break;
10478 }
10479
10480 if (vport->port_state < LPFC_DISC_AUTH) {
10481 if (!test_bit(FC_PT2PT, &phba->pport->fc_flag) ||
10482 test_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag)) {
10483 rjt_err = LSRJT_UNABLE_TPC;
10484 rjt_exp = LSEXP_NOTHING_MORE;
10485 break;
10486 }
10487 }
10488
10489 spin_lock_irq(&ndlp->lock);
10490 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
10491 spin_unlock_irq(&ndlp->lock);
10492
10493 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10494 NLP_EVT_RCV_PLOGI);
10495
10496 break;
10497 case ELS_CMD_FLOGI:
10498 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10499 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
10500 did, vport->port_state, ndlp->nlp_flag);
10501
10502 phba->fc_stat.elsRcvFLOGI++;
10503
10504 /* If the driver believes fabric discovery is done and is ready,
10505 * bounce the link. There is some discrepancy.
10506 */
10507 if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
10508 test_bit(FC_PT2PT, &vport->fc_flag) &&
10509 vport->rcv_flogi_cnt >= 1) {
10510 rjt_err = LSRJT_LOGICAL_BSY;
10511 rjt_exp = LSEXP_NOTHING_MORE;
10512 init_link++;
10513 goto lsrjt;
10514 }
10515
10516 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
10517 /* retain node if our response is deferred */
10518 if (phba->defer_flogi_acc.flag)
10519 break;
10520 if (newnode)
10521 lpfc_disc_state_machine(vport, ndlp, NULL,
10522 NLP_EVT_DEVICE_RM);
10523 break;
10524 case ELS_CMD_LOGO:
10525 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10526 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
10527 did, vport->port_state, ndlp->nlp_flag);
10528
10529 phba->fc_stat.elsRcvLOGO++;
10530 lpfc_send_els_event(vport, ndlp, payload);
10531 if (vport->port_state < LPFC_DISC_AUTH) {
10532 rjt_err = LSRJT_UNABLE_TPC;
10533 rjt_exp = LSEXP_NOTHING_MORE;
10534 break;
10535 }
10536 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
10537 if (newnode)
10538 lpfc_disc_state_machine(vport, ndlp, NULL,
10539 NLP_EVT_DEVICE_RM);
10540 break;
10541 case ELS_CMD_PRLO:
10542 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10543 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
10544 did, vport->port_state, ndlp->nlp_flag);
10545
10546 phba->fc_stat.elsRcvPRLO++;
10547 lpfc_send_els_event(vport, ndlp, payload);
10548 if (vport->port_state < LPFC_DISC_AUTH) {
10549 rjt_err = LSRJT_UNABLE_TPC;
10550 rjt_exp = LSEXP_NOTHING_MORE;
10551 break;
10552 }
10553 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
10554 break;
10555 case ELS_CMD_LCB:
10556 phba->fc_stat.elsRcvLCB++;
10557 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
10558 break;
10559 case ELS_CMD_RDP:
10560 phba->fc_stat.elsRcvRDP++;
10561 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
10562 break;
10563 case ELS_CMD_RSCN:
10564 phba->fc_stat.elsRcvRSCN++;
10565 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
10566 if (newnode)
10567 lpfc_disc_state_machine(vport, ndlp, NULL,
10568 NLP_EVT_DEVICE_RM);
10569 break;
10570 case ELS_CMD_ADISC:
10571 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10572 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
10573 did, vport->port_state, ndlp->nlp_flag);
10574
10575 lpfc_send_els_event(vport, ndlp, payload);
10576 phba->fc_stat.elsRcvADISC++;
10577 if (vport->port_state < LPFC_DISC_AUTH) {
10578 rjt_err = LSRJT_UNABLE_TPC;
10579 rjt_exp = LSEXP_NOTHING_MORE;
10580 break;
10581 }
10582 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10583 NLP_EVT_RCV_ADISC);
10584 break;
10585 case ELS_CMD_PDISC:
10586 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10587 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
10588 did, vport->port_state, ndlp->nlp_flag);
10589
10590 phba->fc_stat.elsRcvPDISC++;
10591 if (vport->port_state < LPFC_DISC_AUTH) {
10592 rjt_err = LSRJT_UNABLE_TPC;
10593 rjt_exp = LSEXP_NOTHING_MORE;
10594 break;
10595 }
10596 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10597 NLP_EVT_RCV_PDISC);
10598 break;
10599 case ELS_CMD_FARPR:
10600 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10601 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
10602 did, vport->port_state, ndlp->nlp_flag);
10603
10604 phba->fc_stat.elsRcvFARPR++;
10605 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
10606 break;
10607 case ELS_CMD_FARP:
10608 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10609 "RCV FARP: did:x%x/ste:x%x flg:x%x",
10610 did, vport->port_state, ndlp->nlp_flag);
10611
10612 phba->fc_stat.elsRcvFARP++;
10613 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
10614 break;
10615 case ELS_CMD_FAN:
10616 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10617 "RCV FAN: did:x%x/ste:x%x flg:x%x",
10618 did, vport->port_state, ndlp->nlp_flag);
10619
10620 phba->fc_stat.elsRcvFAN++;
10621 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
10622 break;
10623 case ELS_CMD_PRLI:
10624 case ELS_CMD_NVMEPRLI:
10625 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10626 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
10627 did, vport->port_state, ndlp->nlp_flag);
10628
10629 phba->fc_stat.elsRcvPRLI++;
10630 if ((vport->port_state < LPFC_DISC_AUTH) &&
10631 test_bit(FC_FABRIC, &vport->fc_flag)) {
10632 rjt_err = LSRJT_UNABLE_TPC;
10633 rjt_exp = LSEXP_NOTHING_MORE;
10634 break;
10635 }
10636 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
10637 break;
10638 case ELS_CMD_LIRR:
10639 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10640 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
10641 did, vport->port_state, ndlp->nlp_flag);
10642
10643 phba->fc_stat.elsRcvLIRR++;
10644 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
10645 if (newnode)
10646 lpfc_disc_state_machine(vport, ndlp, NULL,
10647 NLP_EVT_DEVICE_RM);
10648 break;
10649 case ELS_CMD_RLS:
10650 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10651 "RCV RLS: did:x%x/ste:x%x flg:x%x",
10652 did, vport->port_state, ndlp->nlp_flag);
10653
10654 phba->fc_stat.elsRcvRLS++;
10655 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
10656 if (newnode)
10657 lpfc_disc_state_machine(vport, ndlp, NULL,
10658 NLP_EVT_DEVICE_RM);
10659 break;
10660 case ELS_CMD_RPL:
10661 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10662 "RCV RPL: did:x%x/ste:x%x flg:x%x",
10663 did, vport->port_state, ndlp->nlp_flag);
10664
10665 phba->fc_stat.elsRcvRPL++;
10666 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
10667 if (newnode)
10668 lpfc_disc_state_machine(vport, ndlp, NULL,
10669 NLP_EVT_DEVICE_RM);
10670 break;
10671 case ELS_CMD_RNID:
10672 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10673 "RCV RNID: did:x%x/ste:x%x flg:x%x",
10674 did, vport->port_state, ndlp->nlp_flag);
10675
10676 phba->fc_stat.elsRcvRNID++;
10677 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
10678 if (newnode)
10679 lpfc_disc_state_machine(vport, ndlp, NULL,
10680 NLP_EVT_DEVICE_RM);
10681 break;
10682 case ELS_CMD_RTV:
10683 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10684 "RCV RTV: did:x%x/ste:x%x flg:x%x",
10685 did, vport->port_state, ndlp->nlp_flag);
10686 phba->fc_stat.elsRcvRTV++;
10687 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
10688 if (newnode)
10689 lpfc_disc_state_machine(vport, ndlp, NULL,
10690 NLP_EVT_DEVICE_RM);
10691 break;
10692 case ELS_CMD_RRQ:
10693 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10694 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
10695 did, vport->port_state, ndlp->nlp_flag);
10696
10697 phba->fc_stat.elsRcvRRQ++;
10698 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
10699 if (newnode)
10700 lpfc_disc_state_machine(vport, ndlp, NULL,
10701 NLP_EVT_DEVICE_RM);
10702 break;
10703 case ELS_CMD_ECHO:
10704 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10705 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
10706 did, vport->port_state, ndlp->nlp_flag);
10707
10708 phba->fc_stat.elsRcvECHO++;
10709 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
10710 if (newnode)
10711 lpfc_disc_state_machine(vport, ndlp, NULL,
10712 NLP_EVT_DEVICE_RM);
10713 break;
10714 case ELS_CMD_REC:
10715 /* received due to a closed exchange */
10716 rjt_err = LSRJT_UNABLE_TPC;
10717 rjt_exp = LSEXP_INVALID_OX_RX;
10718 break;
10719 case ELS_CMD_FPIN:
10720 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10721 "RCV FPIN: did:x%x/ste:x%x flg:x%x",
10722 did, vport->port_state, ndlp->nlp_flag);
10723
10724 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
10725 payload_len);
10726
10727 /* There are no replies, so no rjt codes */
10728 break;
10729 case ELS_CMD_EDC:
10730 lpfc_els_rcv_edc(vport, elsiocb, ndlp);
10731 break;
10732 case ELS_CMD_RDF:
10733 phba->fc_stat.elsRcvRDF++;
10734 /* Accept RDF only from fabric controller */
10735 if (did != Fabric_Cntl_DID) {
10736 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
10737 "1115 Received RDF from invalid DID "
10738 "x%x\n", did);
10739 rjt_err = LSRJT_PROTOCOL_ERR;
10740 rjt_exp = LSEXP_NOTHING_MORE;
10741 goto lsrjt;
10742 }
10743
10744 lpfc_els_rcv_rdf(vport, elsiocb, ndlp);
10745 break;
10746 default:
10747 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10748 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
10749 cmd, did, vport->port_state);
10750
10751 /* Unsupported ELS command, reject */
10752 rjt_err = LSRJT_CMD_UNSUPPORTED;
10753 rjt_exp = LSEXP_NOTHING_MORE;
10754
10755 /* Unknown ELS command <elsCmd> received from NPORT <did> */
10756 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
10757 "0115 Unknown ELS command x%x "
10758 "received from NPORT x%x\n", cmd, did);
10759 if (newnode)
10760 lpfc_disc_state_machine(vport, ndlp, NULL,
10761 NLP_EVT_DEVICE_RM);
10762 break;
10763 }
10764
10765 lsrjt:
10766 /* check if need to LS_RJT received ELS cmd */
10767 if (rjt_err) {
10768 memset(&stat, 0, sizeof(stat));
10769 stat.un.b.lsRjtRsnCode = rjt_err;
10770 stat.un.b.lsRjtRsnCodeExp = rjt_exp;
10771 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
10772 NULL);
10773 /* Remove the reference from above for new nodes. */
10774 if (newnode)
10775 lpfc_disc_state_machine(vport, ndlp, NULL,
10776 NLP_EVT_DEVICE_RM);
10777 }
10778
10779 /* Release the reference on this elsiocb, not the ndlp. */
10780 lpfc_nlp_put(elsiocb->ndlp);
10781 elsiocb->ndlp = NULL;
10782
10783 /* Special case. Driver received an unsolicited command that
10784 * is unsupportable given the driver's current state. Reset the
10785 * link and start over.
10786 */
10787 if (init_link) {
10788 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10789 if (!mbox)
10790 return;
10791 lpfc_linkdown(phba);
10792 lpfc_init_link(phba, mbox,
10793 phba->cfg_topology,
10794 phba->cfg_link_speed);
10795 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
10796 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10797 mbox->vport = vport;
10798 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
10799 MBX_NOT_FINISHED)
10800 mempool_free(mbox, phba->mbox_mem_pool);
10801 }
10802
10803 return;
10804
10805 dropit:
10806 if (vport && !test_bit(FC_UNLOADING, &vport->load_flag))
10807 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10808 "0111 Dropping received ELS cmd "
10809 "Data: x%x x%x x%x x%x\n",
10810 cmd, status, get_job_word4(phba, elsiocb), did);
10811
10812 phba->fc_stat.elsRcvDrop++;
10813 }
10814
10815 /**
10816 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
10817 * @phba: pointer to lpfc hba data structure.
10818 * @pring: pointer to a SLI ring.
10819 * @elsiocb: pointer to lpfc els iocb data structure.
10820 *
10821 * This routine is used to process an unsolicited event received from a SLI
10822 * (Service Level Interface) ring. The actual processing of the data buffer
10823 * associated with the unsolicited event is done by invoking the routine
10824 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
10825 * SLI ring on which the unsolicited event was received.
10826 **/
10827 void
10828 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10829 struct lpfc_iocbq *elsiocb)
10830 {
10831 struct lpfc_vport *vport = elsiocb->vport;
10832 u32 ulp_command, status, parameter, bde_count = 0;
10833 IOCB_t *icmd;
10834 struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
10835 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf;
10836 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf;
10837 dma_addr_t paddr;
10838
10839 elsiocb->cmd_dmabuf = NULL;
10840 elsiocb->rsp_dmabuf = NULL;
10841 elsiocb->bpl_dmabuf = NULL;
10842
10843 wcqe_cmpl = &elsiocb->wcqe_cmpl;
10844 ulp_command = get_job_cmnd(phba, elsiocb);
10845 status = get_job_ulpstatus(phba, elsiocb);
10846 parameter = get_job_word4(phba, elsiocb);
10847 if (phba->sli_rev == LPFC_SLI_REV4)
10848 bde_count = wcqe_cmpl->word3;
10849 else
10850 bde_count = elsiocb->iocb.ulpBdeCount;
10851
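/* Replenish receive buffers if the firmware reported running short */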
10852 if (status == IOSTAT_NEED_BUFFER) {
10853 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
10854 } else if (status == IOSTAT_LOCAL_REJECT &&
10855 (parameter & IOERR_PARAM_MASK) ==
10856 IOERR_RCV_BUFFER_WAITING) {
10857 phba->fc_stat.NoRcvBuf++;
10858 /* Not enough posted buffers; Try posting more buffers */
10859 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
10860 lpfc_sli3_post_buffer(phba, pring, 0);
10861 return;
10862 }
10863
10864 if (phba->sli_rev == LPFC_SLI_REV3) {
10865 icmd = &elsiocb->iocb;
10866 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
10867 (ulp_command == CMD_IOCB_RCV_ELS64_CX ||
10868 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) {
10869 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
10870 vport = phba->pport;
10871 else
10872 vport = lpfc_find_vport_by_vpid(phba,
10873 icmd->unsli3.rcvsli3.vpi);
10874 }
10875 }
10876
10877 /* If there are no BDEs associated
10878 * with this IOCB, there is nothing to do.
10879 */
10880 if (bde_count == 0)
10881 return;
10882
10883 /* Account for SLI2 or SLI3 and later unsolicited buffering */
10884 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
10885 elsiocb->cmd_dmabuf = bdeBuf1;
10886 if (bde_count == 2)
10887 elsiocb->bpl_dmabuf = bdeBuf2;
10888 } else {
10889 icmd = &elsiocb->iocb;
10890 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
10891 icmd->un.cont64[0].addrLow);
10892 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
10893 paddr);
10894 if (bde_count == 2) {
10895 paddr = getPaddr(icmd->un.cont64[1].addrHigh,
10896 icmd->un.cont64[1].addrLow);
10897 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
10898 pring,
10899 paddr);
10900 }
10901 }
10902
10903 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
10904 /*
10905 * The different unsolicited event handlers would tell us
10906 * if they are done with "mp" by setting cmd_dmabuf to NULL.
10907 */
10908 if (elsiocb->cmd_dmabuf) {
10909 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
10910 elsiocb->cmd_dmabuf = NULL;
10911 }
10912
10913 if (elsiocb->bpl_dmabuf) {
10914 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
10915 elsiocb->bpl_dmabuf = NULL;
10916 }
10917
10918 }
10919
10920 static void
10921 lpfc_start_fdmi(struct lpfc_vport *vport)
10922 {
10923 struct lpfc_nodelist *ndlp;
10924
10925 /* If this is the first time, allocate an ndlp and initialize
10926 * it. Otherwise, make sure the node is enabled and then do the
10927 * login.
10928 */
10929 ndlp = lpfc_findnode_did(vport, FDMI_DID);
10930 if (!ndlp) {
10931 ndlp = lpfc_nlp_init(vport, FDMI_DID);
10932 if (ndlp) {
10933 ndlp->nlp_type |= NLP_FABRIC;
10934 } else {
10935 return;
10936 }
10937 }
10938
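/* PLOGI to the fabric management server (FDMI_DID); FDMI registration
 * is kicked off once this login completes.
 */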
10939 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10940 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
10941 }
10942
10943 /**
10944 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
10945 * @phba: pointer to lpfc hba data structure.
10946 * @vport: pointer to a virtual N_Port data structure.
10947 *
10948 * This routine issues a Port Login (PLOGI) to the Name Server with
10949 * State Change Request (SCR) for a @vport. This routine will create an
10950 * ndlp for the Name Server associated to the @vport if such node does
10951 * not already exist. The PLOGI to Name Server is issued by invoking the
10952 * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface
10953 * (FDMI) is configured for the @vport, an FDMI node will be created and
10954 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
10955 **/
10956 void
10957 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
10958 {
10959 struct lpfc_nodelist *ndlp;
10960
10961 /*
10962 * If lpfc_delay_discovery parameter is set and the clean address
10963 * bit is cleared and the FC fabric parameters have changed, delay FC NPort
10964 * discovery.
10965 */
10966 if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) {
10967 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10968 "3334 Delay fc port discovery for %d secs\n",
10969 phba->fc_ratov);
10970 mod_timer(&vport->delayed_disc_tmo,
10971 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
10972 return;
10973 }
10974
10975 ndlp = lpfc_findnode_did(vport, NameServer_DID);
10976 if (!ndlp) {
10977 ndlp = lpfc_nlp_init(vport, NameServer_DID);
10978 if (!ndlp) {
10979 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10980 lpfc_disc_start(vport);
10981 return;
10982 }
10983 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10984 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10985 "0251 NameServer login: no memory\n");
10986 return;
10987 }
10988 }
10989
10990 ndlp->nlp_type |= NLP_FABRIC;
10991
10992 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10993
10994 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
10995 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10996 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10997 "0252 Cannot issue NameServer login\n");
10998 return;
10999 }
11000
11001 if ((phba->cfg_enable_SmartSAN ||
11002 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) &&
11003 test_bit(FC_ALLOW_FDMI, &vport->load_flag))
11004 lpfc_start_fdmi(vport);
11005 }
11006
11007 /**
11008 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
11009 * @phba: pointer to lpfc hba data structure.
11010 * @pmb: pointer to the driver internal queue element for mailbox command.
11011 *
11012 * This routine is the completion callback function to register new vport
11013 * mailbox command. If the new vport mailbox command completes successfully,
11014 * the fabric registration login shall be performed on physical port (the
11015 * new vport created is actually a physical port, with VPI 0) or the port
11016 * login to Name Server for State Change Request (SCR) will be performed
11017 * on virtual port (real virtual port, with VPI greater than 0).
11018 **/
11019 static void
11020 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
11021 {
11022 struct lpfc_vport *vport = pmb->vport;
11023 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11024 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
11025 MAILBOX_t *mb = &pmb->u.mb;
11026 int rc;
11027
11028 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
11029
11030 if (mb->mbxStatus) {
11031 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11032 "0915 Register VPI failed : Status: x%x"
11033 " upd bit: x%x \n", mb->mbxStatus,
11034 mb->un.varRegVpi.upd);
11035 if (phba->sli_rev == LPFC_SLI_REV4 &&
11036 mb->un.varRegVpi.upd)
11037 goto mbox_err_exit;
11038
11039 switch (mb->mbxStatus) {
11040 case 0x11: /* unsupported feature */
11041 case 0x9603: /* max_vpi exceeded */
11042 case 0x9602: /* Link event since CLEAR_LA */
11043 /* giving up on vport registration */
11044 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11045 clear_bit(FC_FABRIC, &vport->fc_flag);
11046 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
11047 lpfc_can_disctmo(vport);
11048 break;
11049 /* If reg_vpi fails with an invalid VPI status, re-init the VPI */
11050 case 0x20:
11051 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
11052 lpfc_init_vpi(phba, pmb, vport->vpi);
11053 pmb->vport = vport;
11054 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
11055 rc = lpfc_sli_issue_mbox(phba, pmb,
11056 MBX_NOWAIT);
11057 if (rc == MBX_NOT_FINISHED) {
11058 lpfc_printf_vlog(vport, KERN_ERR,
11059 LOG_TRACE_EVENT,
11060 "2732 Failed to issue INIT_VPI"
11061 " mailbox command\n");
11062 } else {
11063 lpfc_nlp_put(ndlp);
11064 return;
11065 }
11066 fallthrough;
11067 default:
11068 /* Try to recover from this error */
11069 if (phba->sli_rev == LPFC_SLI_REV4)
11070 lpfc_sli4_unreg_all_rpis(vport);
11071 lpfc_mbx_unreg_vpi(vport);
11072 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
11073 if (mb->mbxStatus == MBX_NOT_FINISHED)
11074 break;
11075 if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
11076 !test_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag)) {
11077 if (phba->sli_rev == LPFC_SLI_REV4)
11078 lpfc_issue_init_vfi(vport);
11079 else
11080 lpfc_initial_flogi(vport);
11081 } else {
11082 lpfc_initial_fdisc(vport);
11083 }
11084 break;
11085 }
11086 } else {
11087 spin_lock_irq(shost->host_lock);
11088 vport->vpi_state |= LPFC_VPI_REGISTERED;
11089 spin_unlock_irq(shost->host_lock);
11090 if (vport == phba->pport) {
11091 if (phba->sli_rev < LPFC_SLI_REV4)
11092 lpfc_issue_fabric_reglogin(vport);
11093 else {
11094 /*
11095 * If the physical port is instantiated using
11096 * FDISC, do not start vport discovery.
11097 */
11098 if (vport->port_state != LPFC_FDISC)
11099 lpfc_start_fdiscs(phba);
11100 lpfc_do_scr_ns_plogi(phba, vport);
11101 }
11102 } else {
11103 lpfc_do_scr_ns_plogi(phba, vport);
11104 }
11105 }
11106 mbox_err_exit:
11107 /* Now, we decrement the ndlp reference count held for this
11108 * callback function
11109 */
11110 lpfc_nlp_put(ndlp);
11111
11112 mempool_free(pmb, phba->mbox_mem_pool);
11113
11114 /* Reinitialize the VMID data structure before returning.
11115 * This is specific to vports.
11116 */
11117 if (lpfc_is_vmid_enabled(phba))
11118 lpfc_reinit_vmid(vport);
11119 vport->vmid_flag = vport->phba->pport->vmid_flag;
11120
11121 return;
11122 }
11123
11124 /**
11125 * lpfc_register_new_vport - Register a new vport with an HBA
11126 * @phba: pointer to lpfc hba data structure.
11127 * @vport: pointer to a host virtual N_Port data structure.
11128 * @ndlp: pointer to a node-list data structure.
11129 *
11130 * This routine registers the @vport as a new virtual port with an HBA.
11131 * It is done through a registering vpi mailbox command.
11132 **/
11133 void
11134 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
11135 struct lpfc_nodelist *ndlp)
11136 {
11137 LPFC_MBOXQ_t *mbox;
11138
11139 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11140 if (mbox) {
11141 lpfc_reg_vpi(vport, mbox);
11142 mbox->vport = vport;
11143 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
11144 if (!mbox->ctx_ndlp) {
11145 mempool_free(mbox, phba->mbox_mem_pool);
11146 goto mbox_err_exit;
11147 }
11148
11149 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
11150 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
11151 == MBX_NOT_FINISHED) {
11152 /* mailbox command did not succeed; decrement the ndlp
11153 * reference count taken for this command
11154 */
11155 lpfc_nlp_put(ndlp);
11156 mempool_free(mbox, phba->mbox_mem_pool);
11157
11158 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11159 "0253 Register VPI: Can't send mbox\n");
11160 goto mbox_err_exit;
11161 }
11162 } else {
11163 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11164 "0254 Register VPI: no memory\n");
11165 goto mbox_err_exit;
11166 }
11167 return;
11168
11169 mbox_err_exit:
11170 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11171 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
11172 return;
11173 }
11174
11175 /**
11176 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
11177 * @phba: pointer to lpfc hba data structure.
11178 *
11179 * This routine cancels the retry delay timers for all the vports.
11180 **/
11181 void
11182 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
11183 {
11184 struct lpfc_vport **vports;
11185 struct lpfc_nodelist *ndlp;
11186 uint32_t link_state;
11187 int i;
11188
11189 /* Treat this failure as linkdown for all vports */
11190 link_state = phba->link_state;
11191 lpfc_linkdown(phba);
11192 phba->link_state = link_state;
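/* lpfc_linkdown() above is only used to flush per-vport discovery
 * state; restore the saved link state.
 */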
11193
11194 vports = lpfc_create_vport_work_array(phba);
11195
11196 if (vports) {
11197 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11198 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
11199 if (ndlp)
11200 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
11201 lpfc_els_flush_cmd(vports[i]);
11202 }
11203 lpfc_destroy_vport_work_array(phba, vports);
11204 }
11205 }
11206
11207 /**
11208 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
11209 * @phba: pointer to lpfc hba data structure.
11210 *
11211 * This routine aborts all pending discovery commands and
11212 * starts a timer to retry FLOGI for the physical port
11213 * discovery.
11214 **/
11215 void
11216 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
11217 {
11218 struct lpfc_nodelist *ndlp;
11219
11220 /* Cancel the retry delay timers for all vports */
11221 lpfc_cancel_all_vport_retry_delay_timer(phba);
11222
11223 /* If the fabric requires FLOGI, re-instantiate the physical login */
11224 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
11225 if (!ndlp)
11226 return;
11227
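/* Arm the node's delayed-ELS timer for one second; FLOGI is retried
 * when it fires.
 */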
11228 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
11229 spin_lock_irq(&ndlp->lock);
11230 ndlp->nlp_flag |= NLP_DELAY_TMO;
11231 spin_unlock_irq(&ndlp->lock);
11232 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
11233 phba->pport->port_state = LPFC_FLOGI;
11234 return;
11235 }
11236
11237 /**
11238 * lpfc_fabric_login_reqd - Check if FLOGI required.
11239 * @phba: pointer to lpfc hba data structure.
11240 * @cmdiocb: pointer to FDISC command iocb.
11241 * @rspiocb: pointer to FDISC response iocb.
11242 *
11243 * This routine checks if a FLOGI is required for FDISC
11244 * to succeed.
11245 **/
11246 static int
11247 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
11248 struct lpfc_iocbq *cmdiocb,
11249 struct lpfc_iocbq *rspiocb)
11250 {
11251 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11252 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11253
11254 if (ulp_status != IOSTAT_FABRIC_RJT ||
11255 ulp_word4 != RJT_LOGIN_REQUIRED)
11256 return 0;
11257 else
11258 return 1;
11259 }
11260
11261 /**
11262 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11263 * @phba: pointer to lpfc hba data structure.
11264 * @cmdiocb: pointer to lpfc command iocb data structure.
11265 * @rspiocb: pointer to lpfc response iocb data structure.
11266 *
11267 * This routine is the completion callback function to a Fabric Discover
11268 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11269 * single threaded, each FDISC completion callback function will reset
11270 * the discovery timer for all vports so that the timers do not time out
11271 * unnecessarily. The function checks the FDISC IOCB status. If an error is
11272 * detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the
11273 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID
11274 * assigned to the vport has been changed with the completion of the FDISC
11275 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
11276 * are unregistered from the HBA, and then the lpfc_register_new_vport()
11277 * routine is invoked to register new vport with the HBA. Otherwise, the
11278 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
11279 * Server for State Change Request (SCR).
11280 **/
11281 static void
11282 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11283 struct lpfc_iocbq *rspiocb)
11284 {
11285 struct lpfc_vport *vport = cmdiocb->vport;
11286 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11287 struct lpfc_nodelist *np;
11288 struct lpfc_nodelist *next_np;
11289 struct lpfc_iocbq *piocb;
11290 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
11291 struct serv_parm *sp;
11292 uint8_t fabric_param_changed;
11293 u32 ulp_status, ulp_word4;
11294
11295 ulp_status = get_job_ulpstatus(phba, rspiocb);
11296 ulp_word4 = get_job_word4(phba, rspiocb);
11297
11298 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11299 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
11300 ulp_status, ulp_word4,
11301 vport->fc_prevDID);
11302 /* Since all FDISCs are being single threaded, we
11303 * must reset the discovery timer for ALL vports
11304 * waiting to send FDISC when one completes.
11305 */
11306 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
11307 lpfc_set_disctmo(piocb->vport);
11308 }
11309
11310 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11311 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
11312 ulp_status, ulp_word4, vport->fc_prevDID);
11313
11314 if (ulp_status) {
11315
11316 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
11317 lpfc_retry_pport_discovery(phba);
11318 goto out;
11319 }
11320
11321 /* Check for retry */
11322 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
11323 goto out;
11324 /* Warn FDISC status */
11325 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
11326 "0126 FDISC cmpl status: x%x/x%x)\n",
11327 ulp_status, ulp_word4);
11328 goto fdisc_failed;
11329 }
11330
11331 lpfc_check_nlp_post_devloss(vport, ndlp);
11332
11333 clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
11334 clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
11335 set_bit(FC_FABRIC, &vport->fc_flag);
11336 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
11337 set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
11338
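/* The FDISC ACC returns the fabric-assigned DID for this vport in word4 */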
11339 vport->fc_myDID = ulp_word4 & Mask_DID;
11340 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
11341 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
11342 if (!prsp)
11343 goto out;
11344 if (!lpfc_is_els_acc_rsp(prsp))
11345 goto out;
11346
11347 sp = prsp->virt + sizeof(uint32_t);
11348 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
11349 memcpy(&vport->fabric_portname, &sp->portName,
11350 sizeof(struct lpfc_name));
11351 memcpy(&vport->fabric_nodename, &sp->nodeName,
11352 sizeof(struct lpfc_name));
11353 if (fabric_param_changed &&
11354 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
11355 /* If our NportID changed, we need to ensure all
11356 * remaining NPORTs get unreg_login'ed so we can
11357 * issue unreg_vpi.
11358 */
11359 list_for_each_entry_safe(np, next_np,
11360 &vport->fc_nodes, nlp_listp) {
11361 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
11362 !(np->nlp_flag & NLP_NPR_ADISC))
11363 continue;
11364 spin_lock_irq(&ndlp->lock);
11365 np->nlp_flag &= ~NLP_NPR_ADISC;
11366 spin_unlock_irq(&ndlp->lock);
11367 lpfc_unreg_rpi(vport, np);
11368 }
11369 lpfc_cleanup_pending_mbox(vport);
11370
11371 if (phba->sli_rev == LPFC_SLI_REV4)
11372 lpfc_sli4_unreg_all_rpis(vport);
11373
11374 lpfc_mbx_unreg_vpi(vport);
11375 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
11376 if (phba->sli_rev == LPFC_SLI_REV4)
11377 set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
11378 else
11379 set_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag);
11380 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
11381 !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
11382 /*
11383 * Driver needs to re-reg VPI in order for f/w
11384 * to update the MAC address.
11385 */
11386 lpfc_register_new_vport(phba, vport, ndlp);
11387 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
11388 goto out;
11389 }
11390
11391 if (test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag))
11392 lpfc_issue_init_vpi(vport);
11393 else if (test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
11394 lpfc_register_new_vport(phba, vport, ndlp);
11395 else
11396 lpfc_do_scr_ns_plogi(phba, vport);
11397
11398 /* The FDISC completed successfully. Move the fabric ndlp to
11399 * UNMAPPED state and register with the transport.
11400 */
11401 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
11402 goto out;
11403
11404 fdisc_failed:
11405 if (vport->fc_vport &&
11406 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
11407 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11408 /* Cancel discovery timer */
11409 lpfc_can_disctmo(vport);
11410 out:
11411 lpfc_els_free_iocb(phba, cmdiocb);
11412 lpfc_nlp_put(ndlp);
11413 }
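
/*
 * Editor's illustrative sketch (not part of the driver): the "DID changed"
 * decision described in the comment above essentially compares the N_Port ID
 * the fabric just assigned (the low three bytes of word4) with the DID the
 * vport held before the FDISC. The helper below only demonstrates that
 * comparison; the real check also honors the fabric's clean-address bit via
 * lpfc_check_clean_addr_bit().
 */
static inline bool __maybe_unused
lpfc_sketch_fdisc_did_changed(struct lpfc_vport *vport, u32 ulp_word4)
{
	u32 new_did = ulp_word4 & Mask_DID;	/* newly assigned N_Port ID */

	return vport->fc_prevDID != new_did;
}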
11414
11415 /**
11416 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
11417 * @vport: pointer to a virtual N_Port data structure.
11418 * @ndlp: pointer to a node-list data structure.
11419 * @retry: number of retries to the command IOCB.
11420 *
11421 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
11422 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
11423 * routine to issue the IOCB, which makes sure only one outstanding fabric
11424 * IOCB will be sent off the HBA at any given time.
11425 *
11426 * Note that the ndlp reference count will be incremented by 1 for holding the
11427 * ndlp and the reference to ndlp will be stored into the ndlp field of
11428 * the IOCB for the completion callback function to the FDISC ELS command.
11429 *
11430 * Return code
11431 * 0 - Successfully issued fdisc iocb command
11432 * 1 - Failed to issue fdisc iocb command
11433 **/
11434 static int
11435 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
11436 uint8_t retry)
11437 {
11438 struct lpfc_hba *phba = vport->phba;
11439 IOCB_t *icmd;
11440 union lpfc_wqe128 *wqe = NULL;
11441 struct lpfc_iocbq *elsiocb;
11442 struct serv_parm *sp;
11443 uint8_t *pcmd;
11444 uint16_t cmdsize;
11445 int did = ndlp->nlp_DID;
11446 int rc;
11447
11448 vport->port_state = LPFC_FDISC;
11449 vport->fc_myDID = 0;
11450 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
11451 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
11452 ELS_CMD_FDISC);
11453 if (!elsiocb) {
11454 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11455 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11456 "0255 Issue FDISC: no IOCB\n");
11457 return 1;
11458 }
11459
11460 if (phba->sli_rev == LPFC_SLI_REV4) {
11461 wqe = &elsiocb->wqe;
11462 bf_set(els_req64_sid, &wqe->els_req, 0);
11463 bf_set(els_req64_sp, &wqe->els_req, 1);
11464 } else {
11465 icmd = &elsiocb->iocb;
11466 icmd->un.elsreq64.myID = 0;
11467 icmd->un.elsreq64.fl = 1;
11468 icmd->ulpCt_h = 1;
11469 icmd->ulpCt_l = 0;
11470 }
11471
11472 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
11473 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
11474 pcmd += sizeof(uint32_t); /* CSP Word 1 */
11475 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
11476 sp = (struct serv_parm *) pcmd;
11477 /* Setup CSPs accordingly for Fabric */
11478 sp->cmn.e_d_tov = 0;
11479 sp->cmn.w2.r_a_tov = 0;
11480 sp->cmn.virtual_fabric_support = 0;
11481 sp->cls1.classValid = 0;
11482 sp->cls2.seqDelivery = 1;
11483 sp->cls3.seqDelivery = 1;
11484
11485 pcmd += sizeof(uint32_t); /* CSP Word 2 */
11486 pcmd += sizeof(uint32_t); /* CSP Word 3 */
11487 pcmd += sizeof(uint32_t); /* CSP Word 4 */
11488 pcmd += sizeof(uint32_t); /* Port Name */
11489 memcpy(pcmd, &vport->fc_portname, 8);
11490 pcmd += sizeof(uint32_t); /* Node Name */
11491 pcmd += sizeof(uint32_t); /* Node Name */
11492 memcpy(pcmd, &vport->fc_nodename, 8);
11493 sp->cmn.valid_vendor_ver_level = 0;
11494 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
11495 lpfc_set_disctmo(vport);
11496
11497 phba->fc_stat.elsXmitFDISC++;
11498 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
11499
11500 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11501 "Issue FDISC: did:x%x",
11502 did, 0, 0);
11503
11504 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11505 if (!elsiocb->ndlp)
11506 goto err_out;
11507
11508 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
11509 if (rc == IOCB_ERROR) {
11510 lpfc_nlp_put(ndlp);
11511 goto err_out;
11512 }
11513
11514 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
11515 return 0;
11516
11517 err_out:
11518 lpfc_els_free_iocb(phba, elsiocb);
11519 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11520 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11521 "0256 Issue FDISC: Cannot send IOCB\n");
11522 return 1;
11523 }
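
/*
 * Editor's illustrative sketch (not part of the driver): a caller starting
 * fabric discovery for an NPIV vport would typically look up the fabric
 * node (Fabric_DID) and hand it to lpfc_issue_els_fdisc(). The helper name
 * below is hypothetical; the real driver drives this from its vport
 * discovery paths.
 */
static int __maybe_unused
lpfc_sketch_start_vport_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp)
		return 1;

	/* Retry count starts at 0; lpfc_els_retry() handles any retries */
	return lpfc_issue_els_fdisc(vport, ndlp, 0);
}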
11524
11525 /**
11526 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
11527 * @phba: pointer to lpfc hba data structure.
11528 * @cmdiocb: pointer to lpfc command iocb data structure.
11529 * @rspiocb: pointer to lpfc response iocb data structure.
11530 *
11531 * This routine is the completion callback function to the issuing of a LOGO
11532 * ELS command off a vport. It frees the command IOCB and then decrements the
11533 * reference count held on the ndlp for this completion function, indicating
11534 * that the reference to the ndlp is no longer needed. Note that the
11535 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
11536 * callback function, and an additional explicit ndlp reference decrement
11537 * will trigger the actual release of the ndlp.
11538 **/
11539 static void
11540 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11541 struct lpfc_iocbq *rspiocb)
11542 {
11543 struct lpfc_vport *vport = cmdiocb->vport;
11544 IOCB_t *irsp;
11545 struct lpfc_nodelist *ndlp;
11546 u32 ulp_status, ulp_word4, did, tmo;
11547
11548 ndlp = cmdiocb->ndlp;
11549
11550 ulp_status = get_job_ulpstatus(phba, rspiocb);
11551 ulp_word4 = get_job_word4(phba, rspiocb);
11552
11553 if (phba->sli_rev == LPFC_SLI_REV4) {
11554 did = get_job_els_rsp64_did(phba, cmdiocb);
11555 tmo = get_wqe_tmo(cmdiocb);
11556 } else {
11557 irsp = &rspiocb->iocb;
11558 did = get_job_els_rsp64_did(phba, rspiocb);
11559 tmo = irsp->ulpTimeout;
11560 }
11561
11562 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11563 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
11564 ulp_status, ulp_word4, did);
11565
11566 /* NPIV LOGO completes to NPort <nlp_DID> */
11567 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11568 "2928 NPIV LOGO completes to NPort x%x "
11569 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
11570 ndlp->nlp_DID, ulp_status, ulp_word4,
11571 tmo, vport->num_disc_nodes,
11572 kref_read(&ndlp->kref), ndlp->nlp_flag,
11573 ndlp->fc4_xpt_flags);
11574
11575 if (ulp_status == IOSTAT_SUCCESS) {
11576 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
11577 clear_bit(FC_FABRIC, &vport->fc_flag);
11578 lpfc_can_disctmo(vport);
11579 }
11580
11581 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
11582 /* Wake up lpfc_vport_delete if waiting...*/
11583 if (ndlp->logo_waitq)
11584 wake_up(ndlp->logo_waitq);
11585 spin_lock_irq(&ndlp->lock);
11586 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
11587 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
11588 spin_unlock_irq(&ndlp->lock);
11589 }
11590
11591 /* Safe to release resources now. */
11592 lpfc_els_free_iocb(phba, cmdiocb);
11593 lpfc_nlp_put(ndlp);
11594 }
11595
11596 /**
11597 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
11598 * @vport: pointer to a virtual N_Port data structure.
11599 * @ndlp: pointer to a node-list data structure.
11600 *
11601 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
11602 *
11603 * Note that the ndlp reference count will be incremented by 1 for holding the
11604 * ndlp and the reference to ndlp will be stored into the ndlp field of
11605 * the IOCB for the completion callback function to the LOGO ELS command.
11606 *
11607 * Return codes
11608 * 0 - Successfully issued logo off the @vport
11609 * 1 - Failed to issue logo off the @vport
11610 **/
11611 int
11612 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
11613 {
11614 int rc = 0;
11615 struct lpfc_hba *phba = vport->phba;
11616 struct lpfc_iocbq *elsiocb;
11617 uint8_t *pcmd;
11618 uint16_t cmdsize;
11619
11620 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
11621 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
11622 ELS_CMD_LOGO);
11623 if (!elsiocb)
11624 return 1;
11625
11626 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
11627 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
11628 pcmd += sizeof(uint32_t);
11629
11630 /* Fill in LOGO payload */
11631 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
11632 pcmd += sizeof(uint32_t);
11633 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
11634
11635 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11636 "Issue LOGO npiv did:x%x flg:x%x",
11637 ndlp->nlp_DID, ndlp->nlp_flag, 0);
11638
11639 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
11640 spin_lock_irq(&ndlp->lock);
11641 ndlp->nlp_flag |= NLP_LOGO_SND;
11642 spin_unlock_irq(&ndlp->lock);
11643 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11644 if (!elsiocb->ndlp) {
11645 lpfc_els_free_iocb(phba, elsiocb);
11646 goto err;
11647 }
11648
11649 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
11650 if (rc == IOCB_ERROR) {
11651 lpfc_els_free_iocb(phba, elsiocb);
11652 lpfc_nlp_put(ndlp);
11653 goto err;
11654 }
11655 return 0;
11656
11657 err:
11658 spin_lock_irq(&ndlp->lock);
11659 ndlp->nlp_flag &= ~NLP_LOGO_SND;
11660 spin_unlock_irq(&ndlp->lock);
11661 return 1;
11662 }
11663
11664 /**
11665 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
11666 * @t: timer context used to obtain the lpfc hba.
11667 *
11668 * This routine is invoked by the fabric iocb block timer after
11669 * timeout. It posts the fabric iocb block timeout event by setting the
11670 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
11671 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
11672 * thread then invokes lpfc_unblock_fabric_iocbs() in response to the posted
11673 * WORKER_FABRIC_BLOCK_TMO event.
11674 **/
11675 void
11676 lpfc_fabric_block_timeout(struct timer_list *t)
11677 {
11678 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
11679 unsigned long iflags;
11680 uint32_t tmo_posted;
11681
11682 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11683 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
11684 if (!tmo_posted)
11685 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
11686 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11687
11688 if (!tmo_posted)
11689 lpfc_worker_wake_up(phba);
11690 return;
11691 }
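
/*
 * Editor's illustrative sketch (not part of the driver): the block timer
 * above is assumed to be bound to this handler once during adapter setup
 * (elsewhere in the driver) and armed for the 100 ms block window roughly
 * as shown below. The helper name is hypothetical.
 */
static void __maybe_unused
lpfc_sketch_arm_fabric_block_timer(struct lpfc_hba *phba)
{
	/* Bind the timer to its handler; typically done once at init time */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);

	/* Start (or restart) the 100 ms fabric iocb block window */
	mod_timer(&phba->fabric_block_timer,
		  jiffies + msecs_to_jiffies(100));
}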
11692
11693 /**
11694 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
11695 * @phba: pointer to lpfc hba data structure.
11696 *
11697 * This routine issues one fabric iocb from the driver internal list to
11698 * the HBA. It first checks whether it's ready to issue one fabric iocb to
11699 * the HBA (i.e. whether there is no outstanding fabric iocb). If so, it
11700 * removes one pending fabric iocb from the driver internal list and invokes
11701 * the lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
11702 **/
11703 static void
11704 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
11705 {
11706 struct lpfc_iocbq *iocb;
11707 unsigned long iflags;
11708 int ret;
11709
11710 repeat:
11711 iocb = NULL;
11712 spin_lock_irqsave(&phba->hbalock, iflags);
11713 /* Post any pending iocb to the SLI layer */
11714 if (atomic_read(&phba->fabric_iocb_count) == 0) {
11715 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
11716 list);
11717 if (iocb)
11718 /* Increment fabric iocb count to hold the position */
11719 atomic_inc(&phba->fabric_iocb_count);
11720 }
11721 spin_unlock_irqrestore(&phba->hbalock, iflags);
11722 if (iocb) {
11723 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11724 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11725 iocb->cmd_flag |= LPFC_IO_FABRIC;
11726
11727 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11728 "Fabric sched1: ste:x%x",
11729 iocb->vport->port_state, 0, 0);
11730
11731 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11732
11733 if (ret == IOCB_ERROR) {
11734 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11735 iocb->fabric_cmd_cmpl = NULL;
11736 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11737 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
11738 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
11739 iocb->cmd_cmpl(phba, iocb, iocb);
11740
11741 atomic_dec(&phba->fabric_iocb_count);
11742 goto repeat;
11743 }
11744 }
11745 }
11746
11747 /**
11748 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
11749 * @phba: pointer to lpfc hba data structure.
11750 *
11751 * This routine unblocks the issuing of fabric iocb commands. The function
11752 * clears the fabric iocb block bit and then invokes the routine
11753 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
11754 * from the driver internal fabric iocb list.
11755 **/
11756 void
11757 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
11758 {
11759 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11760
11761 lpfc_resume_fabric_iocbs(phba);
11762 return;
11763 }
11764
11765 /**
11766 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
11767 * @phba: pointer to lpfc hba data structure.
11768 *
11769 * This routine blocks the issuing of fabric iocbs for a specified amount of
11770 * time (currently 100 ms). This is done by setting the fabric iocb block bit
11771 * and setting up a timeout timer for 100 ms. While the block bit is set, no
11772 * more fabric iocbs will be issued out of the HBA.
11773 **/
11774 static void
11775 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
11776 {
11777 int blocked;
11778
11779 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11780 /* Start a timer to unblock fabric iocbs after 100ms */
11781 if (!blocked)
11782 mod_timer(&phba->fabric_block_timer,
11783 jiffies + msecs_to_jiffies(100));
11784
11785 return;
11786 }
11787
11788 /**
11789 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
11790 * @phba: pointer to lpfc hba data structure.
11791 * @cmdiocb: pointer to lpfc command iocb data structure.
11792 * @rspiocb: pointer to lpfc response iocb data structure.
11793 *
11794 * This routine is the callback function installed as the fabric iocb's
11795 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
11796 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
11797 * function first restores and invokes the original iocb's callback function
11798 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11799 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11800 **/
11801 static void
11802 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11803 struct lpfc_iocbq *rspiocb)
11804 {
11805 struct ls_rjt stat;
11806 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11807 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11808
11809 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11810
11811 switch (ulp_status) {
11812 case IOSTAT_NPORT_RJT:
11813 case IOSTAT_FABRIC_RJT:
11814 if (ulp_word4 & RJT_UNAVAIL_TEMP)
11815 lpfc_block_fabric_iocbs(phba);
11816 break;
11817
11818 case IOSTAT_NPORT_BSY:
11819 case IOSTAT_FABRIC_BSY:
11820 lpfc_block_fabric_iocbs(phba);
11821 break;
11822
11823 case IOSTAT_LS_RJT:
11824 stat.un.ls_rjt_error_be =
11825 cpu_to_be32(ulp_word4);
11826 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11827 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11828 lpfc_block_fabric_iocbs(phba);
11829 break;
11830 }
11831
11832 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11833
11834 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
11835 cmdiocb->fabric_cmd_cmpl = NULL;
11836 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
11837 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
11838
11839 atomic_dec(&phba->fabric_iocb_count);
11840 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11841 /* Post any pending iocbs to HBA */
11842 lpfc_resume_fabric_iocbs(phba);
11843 }
11844 }
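
/*
 * Editor's illustrative sketch (not part of the driver): the IOSTAT_LS_RJT
 * case above works because word4 of the response carries the LS_RJT payload,
 * so converting it back to big-endian lets the struct ls_rjt bitfields pick
 * out the reason code. A minimal stand-alone version of that decode:
 */
static inline u8 __maybe_unused
lpfc_sketch_ls_rjt_reason(u32 ulp_word4)
{
	struct ls_rjt stat;

	stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
	return stat.un.b.lsRjtRsnCode;
}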
11845
11846 /**
11847 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11848 * @phba: pointer to lpfc hba data structure.
11849 * @iocb: pointer to lpfc command iocb data structure.
11850 *
11851 * This routine is used as the top-level API for issuing a fabric iocb command
11852 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11853 * function makes sure that only one fabric bound iocb will be outstanding at
11854 * any given time. As such, this function will first check to see whether there
11855 * is already an outstanding fabric iocb on the wire. If so, it will put the
11856 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11857 * issued later. Otherwise, it will issue the iocb on the wire and update the
11858 * fabric iocb count to indicate that there is one fabric iocb on the wire.
11859 *
11860 * Note: this implementation can potentially send fabric IOCBs out of
11861 * order. The problem is that the construction of the "ready" boolean does
11862 * not include the condition that the internal fabric IOCB list is empty. As
11863 * such, a fabric IOCB issued by this routine might "jump" ahead of the
11864 * fabric IOCBs already in the internal list.
11865 *
11866 * Return code
11867 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
11868 * IOCB_ERROR - failed to issue fabric iocb
11869 **/
11870 static int
11871 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
11872 {
11873 unsigned long iflags;
11874 int ready;
11875 int ret;
11876
11877 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
11878
11879 spin_lock_irqsave(&phba->hbalock, iflags);
11880 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
11881 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11882
11883 if (ready)
11884 /* Increment fabric iocb count to hold the position */
11885 atomic_inc(&phba->fabric_iocb_count);
11886 spin_unlock_irqrestore(&phba->hbalock, iflags);
11887 if (ready) {
11888 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11889 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11890 iocb->cmd_flag |= LPFC_IO_FABRIC;
11891
11892 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11893 "Fabric sched2: ste:x%x",
11894 iocb->vport->port_state, 0, 0);
11895
11896 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11897
11898 if (ret == IOCB_ERROR) {
11899 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11900 iocb->fabric_cmd_cmpl = NULL;
11901 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11902 atomic_dec(&phba->fabric_iocb_count);
11903 }
11904 } else {
11905 spin_lock_irqsave(&phba->hbalock, iflags);
11906 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
11907 spin_unlock_irqrestore(&phba->hbalock, iflags);
11908 ret = IOCB_SUCCESS;
11909 }
11910 return ret;
11911 }
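
/*
 * Editor's illustrative sketch (not part of the driver): the out-of-order
 * caveat noted above comes from the "ready" test. A stricter variant that
 * also requires the internal list to be empty (so a newly issued iocb can
 * never jump ahead of queued ones) could look like this; it is a sketch of
 * the idea, not a proposed change.
 */
static inline bool __maybe_unused
lpfc_sketch_fabric_iocb_ready(struct lpfc_hba *phba)
{
	lockdep_assert_held(&phba->hbalock);

	return atomic_read(&phba->fabric_iocb_count) == 0 &&
	       !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags) &&
	       list_empty(&phba->fabric_iocb_list);
}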
11912
11913 /**
11914 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
11915 * @vport: pointer to a virtual N_Port data structure.
11916 *
11917 * This routine aborts all the IOCBs associated with a @vport from the
11918 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11919 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11920 * list, removes each IOCB associated with the @vport off the list, sets the
11921 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11922 * associated with the IOCB.
11923 **/
11924 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
11925 {
11926 LIST_HEAD(completions);
11927 struct lpfc_hba *phba = vport->phba;
11928 struct lpfc_iocbq *tmp_iocb, *piocb;
11929
11930 spin_lock_irq(&phba->hbalock);
11931 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11932 list) {
11933
11934 if (piocb->vport != vport)
11935 continue;
11936
11937 list_move_tail(&piocb->list, &completions);
11938 }
11939 spin_unlock_irq(&phba->hbalock);
11940
11941 /* Cancel all the IOCBs from the completions list */
11942 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11943 IOERR_SLI_ABORTED);
11944 }
11945
11946 /**
11947 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
11948 * @ndlp: pointer to a node-list data structure.
11949 *
11950 * This routine aborts all the IOCBs associated with an @ndlp from the
11951 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11952 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11953 * list, removes each IOCB associated with the @ndlp off the list, sets the
11954 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11955 * associated with the IOCB.
11956 **/
11957 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
11958 {
11959 LIST_HEAD(completions);
11960 struct lpfc_hba *phba = ndlp->phba;
11961 struct lpfc_iocbq *tmp_iocb, *piocb;
11962 struct lpfc_sli_ring *pring;
11963
11964 pring = lpfc_phba_elsring(phba);
11965
11966 if (unlikely(!pring))
11967 return;
11968
11969 spin_lock_irq(&phba->hbalock);
11970 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11971 list) {
11972 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
11973
11974 list_move_tail(&piocb->list, &completions);
11975 }
11976 }
11977 spin_unlock_irq(&phba->hbalock);
11978
11979 /* Cancel all the IOCBs from the completions list */
11980 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11981 IOERR_SLI_ABORTED);
11982 }
11983
11984 /**
11985 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
11986 * @phba: pointer to lpfc hba data structure.
11987 *
11988 * This routine aborts all the IOCBs currently on the driver internal
11989 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
11990 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
11991 * list, removes IOCBs off the list, sets the status field to
11992 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
11993 * the IOCB.
11994 **/
11995 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
11996 {
11997 LIST_HEAD(completions);
11998
11999 spin_lock_irq(&phba->hbalock);
12000 list_splice_init(&phba->fabric_iocb_list, &completions);
12001 spin_unlock_irq(&phba->hbalock);
12002
12003 /* Cancel all the IOCBs from the completions list */
12004 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12005 IOERR_SLI_ABORTED);
12006 }
12007
12008 /**
12009 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
12010 * @vport: pointer to lpfc vport data structure.
12011 *
12012 * This routine is invoked by the vport cleanup for deletions and the cleanup
12013 * for an ndlp on removal.
12014 **/
12015 void
12016 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
12017 {
12018 struct lpfc_hba *phba = vport->phba;
12019 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
12020 struct lpfc_nodelist *ndlp = NULL;
12021 unsigned long iflag = 0;
12022
12023 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
12024 list_for_each_entry_safe(sglq_entry, sglq_next,
12025 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
12026 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
12027 lpfc_nlp_put(sglq_entry->ndlp);
12028 ndlp = sglq_entry->ndlp;
12029 sglq_entry->ndlp = NULL;
12030
12031 /* If the xri on the abts_els_sgl list is for the Fport
12032 * node and the vport is unloading, the xri aborted wcqe
12033 * likely isn't coming back. Just release the sgl.
12034 */
12035 if (test_bit(FC_UNLOADING, &vport->load_flag) &&
12036 ndlp->nlp_DID == Fabric_DID) {
12037 list_del(&sglq_entry->list);
12038 sglq_entry->state = SGL_FREED;
12039 list_add_tail(&sglq_entry->list,
12040 &phba->sli4_hba.lpfc_els_sgl_list);
12041 }
12042 }
12043 }
12044 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
12045 return;
12046 }
12047
12048 /**
12049 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
12050 * @phba: pointer to lpfc hba data structure.
12051 * @axri: pointer to the els xri abort wcqe structure.
12052 *
12053 * This routine is invoked by the worker thread to process a SLI4 slow-path
12054 * ELS aborted xri.
12055 **/
12056 void
12057 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
12058 struct sli4_wcqe_xri_aborted *axri)
12059 {
12060 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
12061 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
12062 uint16_t lxri = 0;
12063
12064 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
12065 unsigned long iflag = 0;
12066 struct lpfc_nodelist *ndlp;
12067 struct lpfc_sli_ring *pring;
12068
12069 pring = lpfc_phba_elsring(phba);
12070
12071 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
12072 list_for_each_entry_safe(sglq_entry, sglq_next,
12073 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
12074 if (sglq_entry->sli4_xritag == xri) {
12075 list_del(&sglq_entry->list);
12076 ndlp = sglq_entry->ndlp;
12077 sglq_entry->ndlp = NULL;
12078 list_add_tail(&sglq_entry->list,
12079 &phba->sli4_hba.lpfc_els_sgl_list);
12080 sglq_entry->state = SGL_FREED;
12081 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
12082 iflag);
12083
12084 if (ndlp) {
12085 lpfc_set_rrq_active(phba, ndlp,
12086 sglq_entry->sli4_lxritag,
12087 rxid, 1);
12088 lpfc_nlp_put(ndlp);
12089 }
12090
12091 /* Check if TXQ queue needs to be serviced */
12092 if (pring && !list_empty(&pring->txq))
12093 lpfc_worker_wake_up(phba);
12094 return;
12095 }
12096 }
12097 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
12098 lxri = lpfc_sli4_xri_inrange(phba, xri);
12099 if (lxri == NO_XRI)
12100 return;
12101
12102 spin_lock_irqsave(&phba->hbalock, iflag);
12103 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
12104 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
12105 spin_unlock_irqrestore(&phba->hbalock, iflag);
12106 return;
12107 }
12108 sglq_entry->state = SGL_XRI_ABORTED;
12109 spin_unlock_irqrestore(&phba->hbalock, iflag);
12110 return;
12111 }
12112
12113 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
12114 * @vport: pointer to virtual port object.
12115 * @ndlp: nodelist pointer for the impacted node.
12116 *
12117 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
12118 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
12119 * the driver is required to send a LOGO to the remote node before it
12120 * attempts to recover its login to the remote node.
12121 */
12122 void
12123 lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
12124 struct lpfc_nodelist *ndlp)
12125 {
12126 struct Scsi_Host *shost;
12127 struct lpfc_hba *phba;
12128 unsigned long flags = 0;
12129
12130 shost = lpfc_shost_from_vport(vport);
12131 phba = vport->phba;
12132 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
12133 lpfc_printf_log(phba, KERN_INFO,
12134 LOG_SLI, "3093 No rport recovery needed. "
12135 "rport in state 0x%x\n", ndlp->nlp_state);
12136 return;
12137 }
12138 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12139 "3094 Start rport recovery on shost id 0x%x "
12140 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
12141 "flags 0x%x\n",
12142 shost->host_no, ndlp->nlp_DID,
12143 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
12144 ndlp->nlp_flag);
12145 /*
12146 * The rport is not responding. Remove the FCP-2 flag to prevent
12147 * an ADISC in the follow-up recovery code.
12148 */
12149 spin_lock_irqsave(&ndlp->lock, flags);
12150 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
12151 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
12152 spin_unlock_irqrestore(&ndlp->lock, flags);
12153 lpfc_unreg_rpi(vport, ndlp);
12154 }
12155
12156 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
12157 {
12158 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
12159 }
12160
12161 static void
12162 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
12163 {
12164 u32 i;
12165
12166 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
12167 return;
12168
12169 for (i = min; i <= max; i++)
12170 set_bit(i, vport->vmid_priority_range);
12171 }
12172
12173 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
12174 {
12175 set_bit(ctcl_vmid, vport->vmid_priority_range);
12176 }
12177
12178 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
12179 {
12180 u32 i;
12181
12182 i = find_first_bit(vport->vmid_priority_range,
12183 LPFC_VMID_MAX_PRIORITY_RANGE);
12184
12185 if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
12186 return 0;
12187
12188 clear_bit(i, vport->vmid_priority_range);
12189 return i;
12190 }
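
/*
 * Editor's illustrative sketch (not part of the driver): the helpers above
 * form a small bitmap allocator for CS_CTL priority values. Assuming the
 * fabric granted the range [min, max], a typical lifecycle would be:
 */
static void __maybe_unused
lpfc_sketch_cs_ctl_lifecycle(struct lpfc_vport *vport, u32 min, u32 max)
{
	u32 ctl;

	lpfc_init_cs_ctl_bitmap(vport);			/* clear all bits */
	lpfc_vmid_set_cs_ctl_range(vport, min, max);	/* mark granted range */

	ctl = lpfc_vmid_get_cs_ctl(vport);	/* take the lowest free value */
	if (ctl)				/* 0 may mean nothing was free */
		lpfc_vmid_put_cs_ctl(vport, ctl);	/* return it when done */
}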
12191
12192 #define MAX_PRIORITY_DESC 255
12193
12194 static void
12195 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12196 struct lpfc_iocbq *rspiocb)
12197 {
12198 struct lpfc_vport *vport = cmdiocb->vport;
12199 struct priority_range_desc *desc;
12200 struct lpfc_dmabuf *prsp = NULL;
12201 struct lpfc_vmid_priority_range *vmid_range = NULL;
12202 u32 *data;
12203 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
12204 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12205 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12206 u8 *pcmd, max_desc;
12207 u32 len, i;
12208 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12209
12210 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
12211 if (!prsp)
12212 goto out;
12213
12214 pcmd = prsp->virt;
12215 data = (u32 *)pcmd;
12216 if (data[0] == ELS_CMD_LS_RJT) {
12217 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12218 "3277 QFPA LS_RJT x%x x%x\n",
12219 data[0], data[1]);
12220 goto out;
12221 }
12222 if (ulp_status) {
12223 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
12224 "6529 QFPA failed with status x%x x%x\n",
12225 ulp_status, ulp_word4);
12226 goto out;
12227 }
12228
12229 if (!vport->qfpa_res) {
12230 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
12231 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
12232 GFP_KERNEL);
12233 if (!vport->qfpa_res)
12234 goto out;
12235 }
12236
12237 len = *((u32 *)(pcmd + 4));
12238 len = be32_to_cpu(len);
12239 memcpy(vport->qfpa_res, pcmd, len + 8);
12240 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;
12241
12242 desc = (struct priority_range_desc *)(pcmd + 8);
12243 vmid_range = vport->vmid_priority.vmid_range;
12244 if (!vmid_range) {
12245 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
12246 GFP_KERNEL);
12247 if (!vmid_range) {
12248 kfree(vport->qfpa_res);
12249 goto out;
12250 }
12251 vport->vmid_priority.vmid_range = vmid_range;
12252 }
12253 vport->vmid_priority.num_descriptors = len;
12254
12255 for (i = 0; i < len; i++, vmid_range++, desc++) {
12256 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
12257 "6539 vmid values low=%d, high=%d, qos=%d, "
12258 "local ve id=%d\n", desc->lo_range,
12259 desc->hi_range, desc->qos_priority,
12260 desc->local_ve_id);
12261
12262 vmid_range->low = desc->lo_range << 1;
12263 if (desc->local_ve_id == QFPA_ODD_ONLY)
12264 vmid_range->low++;
12265 if (desc->qos_priority)
12266 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
12267 vmid_range->qos = desc->qos_priority;
12268
12269 vmid_range->high = desc->hi_range << 1;
12270 if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
12271 (desc->local_ve_id == QFPA_EVEN_ODD))
12272 vmid_range->high++;
12273 }
12274 lpfc_init_cs_ctl_bitmap(vport);
12275 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
12276 lpfc_vmid_set_cs_ctl_range(vport,
12277 vport->vmid_priority.vmid_range[i].low,
12278 vport->vmid_priority.vmid_range[i].high);
12279 }
12280
12281 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
12282 out:
12283 lpfc_els_free_iocb(phba, cmdiocb);
12284 lpfc_nlp_put(ndlp);
12285 }
12286
12287 int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
12288 {
12289 struct lpfc_hba *phba = vport->phba;
12290 struct lpfc_nodelist *ndlp;
12291 struct lpfc_iocbq *elsiocb;
12292 u8 *pcmd;
12293 int ret;
12294
12295 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
12296 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12297 return -ENXIO;
12298
12299 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
12300 ndlp->nlp_DID, ELS_CMD_QFPA);
12301 if (!elsiocb)
12302 return -ENOMEM;
12303
12304 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
12305
12306 *((u32 *)(pcmd)) = ELS_CMD_QFPA;
12307 pcmd += 4;
12308
12309 elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
12310
12311 elsiocb->ndlp = lpfc_nlp_get(ndlp);
12312 if (!elsiocb->ndlp) {
12313 lpfc_els_free_iocb(vport->phba, elsiocb);
12314 return -ENXIO;
12315 }
12316
12317 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
12318 if (ret != IOCB_SUCCESS) {
12319 lpfc_els_free_iocb(phba, elsiocb);
12320 lpfc_nlp_put(ndlp);
12321 return -EIO;
12322 }
12323 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
12324 return 0;
12325 }
12326
12327 int
12328 lpfc_vmid_uvem(struct lpfc_vport *vport,
12329 struct lpfc_vmid *vmid, bool instantiated)
12330 {
12331 struct lpfc_vem_id_desc *vem_id_desc;
12332 struct lpfc_nodelist *ndlp;
12333 struct lpfc_iocbq *elsiocb;
12334 struct instantiated_ve_desc *inst_desc;
12335 struct lpfc_vmid_context *vmid_context;
12336 u8 *pcmd;
12337 u32 *len;
12338 int ret = 0;
12339
12340 ndlp = lpfc_findnode_did(vport, Fabric_DID);
12341 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12342 return -ENXIO;
12343
12344 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
12345 if (!vmid_context)
12346 return -ENOMEM;
12347 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
12348 ndlp, Fabric_DID, ELS_CMD_UVEM);
12349 if (!elsiocb)
12350 goto out;
12351
12352 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
12353 "3427 Host vmid %s %d\n",
12354 vmid->host_vmid, instantiated);
12355 vmid_context->vmp = vmid;
12356 vmid_context->nlp = ndlp;
12357 vmid_context->instantiated = instantiated;
12358 elsiocb->vmid_tag.vmid_context = vmid_context;
12359 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
12360
12361 if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
12362 sizeof(vport->lpfc_vmid_host_uuid)))
12363 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
12364 sizeof(vport->lpfc_vmid_host_uuid));
12365
12366 *((u32 *)(pcmd)) = ELS_CMD_UVEM;
12367 len = (u32 *)(pcmd + 4);
12368 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8);
12369
12370 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
12371 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
12372 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
12373 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
12374 sizeof(vem_id_desc->vem_id));
12375
12376 inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
12377 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
12378 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
12379 memcpy(inst_desc->global_vem_id, vmid->host_vmid,
12380 sizeof(inst_desc->global_vem_id));
12381
12382 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
12383 bf_set(lpfc_instantiated_local_id, inst_desc,
12384 vmid->un.cs_ctl_vmid);
12385 if (instantiated) {
12386 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
12387 } else {
12388 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
12389 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
12390 }
12391 inst_desc->word6 = cpu_to_be32(inst_desc->word6);
12392
12393 elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
12394
12395 elsiocb->ndlp = lpfc_nlp_get(ndlp);
12396 if (!elsiocb->ndlp) {
12397 lpfc_els_free_iocb(vport->phba, elsiocb);
12398 goto out;
12399 }
12400
12401 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
12402 if (ret != IOCB_SUCCESS) {
12403 lpfc_els_free_iocb(vport->phba, elsiocb);
12404 lpfc_nlp_put(ndlp);
12405 goto out;
12406 }
12407
12408 return 0;
12409 out:
12410 kfree(vmid_context);
12411 return -EIO;
12412 }
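
/*
 * Editor's illustrative sketch (not part of the driver): lpfc_vmid_uvem() is
 * called with instantiated == true to announce a VMID's CS_CTL mapping to
 * the fabric and with instantiated == false to tear it down. A caller
 * registering a freshly allocated VMID might do roughly the following; the
 * helper name is hypothetical.
 */
static int __maybe_unused
lpfc_sketch_register_vmid(struct lpfc_vport *vport, struct lpfc_vmid *vmid)
{
	/* Reserve a CS_CTL priority value for this VMID first */
	vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);

	/* Then announce the instantiated VE to the fabric via UVEM */
	return lpfc_vmid_uvem(vport, vmid, true);
}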
12413
12414 static void
12415 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
12416 struct lpfc_iocbq *rspiocb)
12417 {
12418 struct lpfc_vport *vport = icmdiocb->vport;
12419 struct lpfc_dmabuf *prsp = NULL;
12420 struct lpfc_vmid_context *vmid_context =
12421 icmdiocb->vmid_tag.vmid_context;
12422 struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
12423 u8 *pcmd;
12424 u32 *data;
12425 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12426 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12427 struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
12428 struct lpfc_vmid *vmid;
12429
12430 vmid = vmid_context->vmp;
12431 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12432 ndlp = NULL;
12433
12434 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
12435 if (!prsp)
12436 goto out;
12437 pcmd = prsp->virt;
12438 data = (u32 *)pcmd;
12439 if (data[0] == ELS_CMD_LS_RJT) {
12440 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12441 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
12442 goto out;
12443 }
12444 if (ulp_status) {
12445 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12446 "4533 UVEM error status %x: %x\n",
12447 ulp_status, ulp_word4);
12448 goto out;
12449 }
12450 spin_lock(&phba->hbalock);
12451 /* Set IN USE flag */
12452 vport->vmid_flag |= LPFC_VMID_IN_USE;
12453 phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
12454 spin_unlock(&phba->hbalock);
12455
12456 if (vmid_context->instantiated) {
12457 write_lock(&vport->vmid_lock);
12458 vmid->flag |= LPFC_VMID_REGISTERED;
12459 vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
12460 write_unlock(&vport->vmid_lock);
12461 }
12462
12463 out:
12464 kfree(vmid_context);
12465 lpfc_els_free_iocb(phba, icmdiocb);
12466 lpfc_nlp_put(ndlp);
12467 }
12468