xref: /linux/drivers/scsi/lpfc/lpfc_nportdisc.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1  /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44 
45 
46 /* Called to verify a rcv'ed ADISC was intended for us. */
47 static int
48 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49 		 struct lpfc_name *nn, struct lpfc_name *pn)
50 {
51 	/* Compare the WWNN / WWPN from the ADISC payload against our
52 	 * internal node table entry for that node.
53 	 */
54 	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
55 		return 0;
56 
57 	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
58 		return 0;
59 
60 	/* we match, return success */
61 	return 1;
62 }
63 
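/* Validate the service parameters in a PLOGI/FLOGI payload against our own.
 * Receive data field sizes larger than ours are clamped down to our values,
 * the buffer-to-buffer state change bits are preserved, and the remote node
 * and port names are cached in the ndlp.  Returns 1 if the parameters are
 * acceptable, 0 otherwise.
 */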
64 int
65 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
66 		 struct serv_parm *sp, uint32_t class, int flogi)
67 {
68 	volatile struct serv_parm *hsp = &vport->fc_sparam;
69 	uint16_t hsp_value, ssp_value = 0;
70 
71 	/*
72 	 * The receive data field size and buffer-to-buffer receive data field
73 	 * size entries are 16 bits but are represented as two 8-bit fields in
74 	 * the driver data structure to account for rsvd bits and other control
75 	 * bits.  Reconstruct and compare the fields as 16-bit values before
76 	 * correcting the byte values.
77 	 */
78 	if (sp->cls1.classValid) {
79 		if (!flogi) {
80 			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
81 				     hsp->cls1.rcvDataSizeLsb);
82 			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
83 				     sp->cls1.rcvDataSizeLsb);
84 			if (!ssp_value)
85 				goto bad_service_param;
86 			if (ssp_value > hsp_value) {
87 				sp->cls1.rcvDataSizeLsb =
88 					hsp->cls1.rcvDataSizeLsb;
89 				sp->cls1.rcvDataSizeMsb =
90 					hsp->cls1.rcvDataSizeMsb;
91 			}
92 		}
93 	} else if (class == CLASS1)
94 		goto bad_service_param;
95 	if (sp->cls2.classValid) {
96 		if (!flogi) {
97 			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
98 				     hsp->cls2.rcvDataSizeLsb);
99 			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
100 				     sp->cls2.rcvDataSizeLsb);
101 			if (!ssp_value)
102 				goto bad_service_param;
103 			if (ssp_value > hsp_value) {
104 				sp->cls2.rcvDataSizeLsb =
105 					hsp->cls2.rcvDataSizeLsb;
106 				sp->cls2.rcvDataSizeMsb =
107 					hsp->cls2.rcvDataSizeMsb;
108 			}
109 		}
110 	} else if (class == CLASS2)
111 		goto bad_service_param;
112 	if (sp->cls3.classValid) {
113 		if (!flogi) {
114 			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
115 				     hsp->cls3.rcvDataSizeLsb);
116 			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
117 				     sp->cls3.rcvDataSizeLsb);
118 			if (!ssp_value)
119 				goto bad_service_param;
120 			if (ssp_value > hsp_value) {
121 				sp->cls3.rcvDataSizeLsb =
122 					hsp->cls3.rcvDataSizeLsb;
123 				sp->cls3.rcvDataSizeMsb =
124 					hsp->cls3.rcvDataSizeMsb;
125 			}
126 		}
127 	} else if (class == CLASS3)
128 		goto bad_service_param;
129 
130 	/*
131 	 * Preserve the upper four bits of the MSB from the PLOGI response.
132 	 * These bits contain the Buffer-to-Buffer State Change Number
133 	 * from the target and need to be passed to the FW.
134 	 */
135 	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
136 	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
137 	if (ssp_value > hsp_value) {
138 		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
139 		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
140 				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
141 	}
142 
143 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
144 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
145 	return 1;
146 bad_service_param:
147 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
148 			 "0207 Device %x "
149 			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
150 			 "invalid service parameters.  Ignoring device.\n",
151 			 ndlp->nlp_DID,
152 			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
153 			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
154 			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
155 			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
156 	return 0;
157 }
158 
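/* Return a pointer to the ELS payload of the response buffer chained to
 * cmdiocb (just past the command word), or NULL if lpfc_els_abort already
 * cleared context2, in which case an aborted status is forced into rspiocb.
 */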
159 static void *
160 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
161 			struct lpfc_iocbq *rspiocb)
162 {
163 	struct lpfc_dmabuf *pcmd, *prsp;
164 	uint32_t *lp;
165 	void     *ptr = NULL;
166 	IOCB_t   *irsp;
167 
168 	irsp = &rspiocb->iocb;
169 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
170 
171 	/* For lpfc_els_abort, context2 could be zero'ed to delay
172 	 * freeing associated memory till after ABTS completes.
173 	 */
174 	if (pcmd) {
175 		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
176 				       list);
177 		if (prsp) {
178 			lp = (uint32_t *) prsp->virt;
179 			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
180 		}
181 	} else {
182 		/* Force ulpStatus error since we are returning NULL ptr */
183 		if (!(irsp->ulpStatus)) {
184 			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
185 			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
186 		}
187 		ptr = NULL;
188 	}
189 	return ptr;
190 }
191 
192 
193 
194 /*
195  * Free resources / clean up outstanding I/Os
196  * associated with a LPFC_NODELIST entry. This
197  * routine effectively results in a "software abort".
198  */
199 int
200 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
201 {
202 	LIST_HEAD(completions);
203 	LIST_HEAD(txcmplq_completions);
204 	LIST_HEAD(abort_list);
205 	struct lpfc_sli  *psli = &phba->sli;
206 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
207 	struct lpfc_iocbq *iocb, *next_iocb;
208 
209 	/* Abort outstanding I/O on NPort <nlp_DID> */
210 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
211 			 "2819 Abort outstanding I/O on NPort x%x "
212 			 "Data: x%x x%x x%x\n",
213 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
214 			 ndlp->nlp_rpi);
215 
216 	lpfc_fabric_abort_nport(ndlp);
217 
218 	/* First check the txq */
219 	spin_lock_irq(&phba->hbalock);
220 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
221 		/* Check to see if iocb matches the nport we are looking for */
222 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
223 			/* It matches, so dequeue it and complete it with an abort error */
224 			list_move_tail(&iocb->list, &completions);
225 			pring->txq_cnt--;
226 		}
227 	}
228 
229 	/* Next check the txcmplq */
230 	list_splice_init(&pring->txcmplq, &txcmplq_completions);
231 	spin_unlock_irq(&phba->hbalock);
232 
233 	list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
234 		/* Check to see if iocb matches the nport we are looking for */
235 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
236 			list_add_tail(&iocb->dlist, &abort_list);
237 	}
238 	spin_lock_irq(&phba->hbalock);
239 	list_splice(&txcmplq_completions, &pring->txcmplq);
240 	spin_unlock_irq(&phba->hbalock);
241 
242 	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
243 		spin_lock_irq(&phba->hbalock);
244 		list_del_init(&iocb->dlist);
245 		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
246 		spin_unlock_irq(&phba->hbalock);
247 	}
248 
249 	/* Cancel all the IOCBs from the completions list */
250 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
251 			      IOERR_SLI_ABORTED);
252 
253 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
254 	return 0;
255 }
256 
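/* Handle an unsolicited PLOGI: validate the service parameters, set up a
 * REG_LOGIN mailbox and respond with an ACC (or an LS_RJT on NPIV ports with
 * restricted login).  Returns 0 if the PLOGI was rejected outright, 1 if a
 * response carrying the node forward was queued.
 */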
257 static int
258 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
259 	       struct lpfc_iocbq *cmdiocb)
260 {
261 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
262 	struct lpfc_hba    *phba = vport->phba;
263 	struct lpfc_dmabuf *pcmd;
264 	uint32_t *lp;
265 	IOCB_t *icmd;
266 	struct serv_parm *sp;
267 	LPFC_MBOXQ_t *mbox;
268 	struct ls_rjt stat;
269 	int rc;
270 
271 	memset(&stat, 0, sizeof (struct ls_rjt));
272 	if (vport->port_state <= LPFC_FDISC) {
273 		/* Before responding to PLOGI, check for pt2pt mode.
274 		 * If we are pt2pt, with an outstanding FLOGI, abort
275 		 * the FLOGI and resend it first.
276 		 */
277 		if (vport->fc_flag & FC_PT2PT) {
278 			lpfc_els_abort_flogi(phba);
279 			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
280 				/* If the other side is supposed to initiate
281 				 * the PLOGI anyway, just ACC it now and
282 				 * move on with discovery.
283 				 */
284 				phba->fc_edtov = FF_DEF_EDTOV;
285 				phba->fc_ratov = FF_DEF_RATOV;
286 				/* Start discovery - this should just do
287 				   CLEAR_LA */
288 				lpfc_disc_start(vport);
289 			} else
290 				lpfc_initial_flogi(vport);
291 		} else {
292 			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
293 			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
294 			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
295 					    ndlp, NULL);
296 			return 0;
297 		}
298 	}
299 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
300 	lp = (uint32_t *) pcmd->virt;
301 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
302 	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
303 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
304 				 "0140 PLOGI Reject: invalid pname\n");
305 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
306 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
307 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
308 			NULL);
309 		return 0;
310 	}
311 	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
312 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
313 				 "0141 PLOGI Reject: invalid nname\n");
314 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
315 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
316 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
317 			NULL);
318 		return 0;
319 	}
320 	if (lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0) {
321 		/* Reject this request because of invalid parameters */
322 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
323 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
324 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
325 			NULL);
326 		return 0;
327 	}
328 	icmd = &cmdiocb->iocb;
329 
330 	/* PLOGI chkparm OK */
331 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
332 			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
333 			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
334 			 ndlp->nlp_rpi);
335 
336 	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
337 		ndlp->nlp_fcp_info |= CLASS2;
338 	else
339 		ndlp->nlp_fcp_info |= CLASS3;
340 
341 	ndlp->nlp_class_sup = 0;
342 	if (sp->cls1.classValid)
343 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
344 	if (sp->cls2.classValid)
345 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
346 	if (sp->cls3.classValid)
347 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
348 	if (sp->cls4.classValid)
349 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
350 	ndlp->nlp_maxframe =
351 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
352 
353 	/* no need to reg_login if we are already in one of these states */
354 	switch (ndlp->nlp_state) {
355 	case  NLP_STE_NPR_NODE:
356 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
357 			break;
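		/* Fall through */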
358 	case  NLP_STE_REG_LOGIN_ISSUE:
359 	case  NLP_STE_PRLI_ISSUE:
360 	case  NLP_STE_UNMAPPED_NODE:
361 	case  NLP_STE_MAPPED_NODE:
362 		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
363 		return 1;
364 	}
365 
366 	if ((vport->fc_flag & FC_PT2PT) &&
367 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
368 		/* rcv'ed PLOGI decides what our NPortId will be */
369 		vport->fc_myDID = icmd->un.rcvels.parmRo;
370 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
371 		if (mbox == NULL)
372 			goto out;
373 		lpfc_config_link(phba, mbox);
374 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
375 		mbox->vport = vport;
376 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
377 		if (rc == MBX_NOT_FINISHED) {
378 			mempool_free(mbox, phba->mbox_mem_pool);
379 			goto out;
380 		}
381 
382 		lpfc_can_disctmo(vport);
383 	}
384 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
385 	if (!mbox)
386 		goto out;
387 
388 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
389 			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
390 	if (rc) {
391 		mempool_free(mbox, phba->mbox_mem_pool);
392 		goto out;
393 	}
394 
395 	/* ACC PLOGI rsp command needs to execute first,
396 	 * queue this mbox command to be processed later.
397 	 */
398 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
399 	/*
400 	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
401 	 * command issued in lpfc_cmpl_els_acc().
402 	 */
403 	mbox->vport = vport;
404 	spin_lock_irq(shost->host_lock);
405 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
406 	spin_unlock_irq(shost->host_lock);
407 
408 	/*
409 	 * If there is an outstanding PLOGI issued, abort it before
410 	 * sending the ACC rsp for the received PLOGI. If the pending
411 	 * PLOGI is not canceled here, it will be rejected by the
412 	 * remote port and retried. On a configuration with a single
413 	 * discovery thread this causes a huge delay in discovery, and
414 	 * it also leaves multiple state machines running in parallel
415 	 * for this node.
416 	 */
417 	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
418 		/* software abort outstanding PLOGI */
419 		lpfc_els_abort(phba, ndlp);
420 	}
421 
422 	if ((vport->port_type == LPFC_NPIV_PORT &&
423 	     vport->cfg_restrict_login)) {
424 
425 		/* In order to preserve RPIs, we want to clean up
426 		 * the default RPI the firmware created to rcv
427 		 * this ELS request. The only way to do this is
428 		 * to register, then unregister the RPI.
429 		 */
430 		spin_lock_irq(shost->host_lock);
431 		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
432 		spin_unlock_irq(shost->host_lock);
433 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
434 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
435 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
436 			ndlp, mbox);
437 		return 1;
438 	}
439 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
440 	return 1;
441 out:
442 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
443 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
444 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
445 	return 0;
446 }
447 
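/* Handle an unsolicited ADISC or PDISC: ACC it when the WWNs match our
 * cached values, otherwise reject it and drop the node back to NPR with a
 * delayed PLOGI retry.
 */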
448 static int
449 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
450 		struct lpfc_iocbq *cmdiocb)
451 {
452 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
453 	struct lpfc_dmabuf *pcmd;
454 	struct serv_parm   *sp;
455 	struct lpfc_name   *pnn, *ppn;
456 	struct ls_rjt stat;
457 	ADISC *ap;
458 	IOCB_t *icmd;
459 	uint32_t *lp;
460 	uint32_t cmd;
461 
462 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
463 	lp = (uint32_t *) pcmd->virt;
464 
465 	cmd = *lp++;
466 	if (cmd == ELS_CMD_ADISC) {
467 		ap = (ADISC *) lp;
468 		pnn = (struct lpfc_name *) & ap->nodeName;
469 		ppn = (struct lpfc_name *) & ap->portName;
470 	} else {
471 		sp = (struct serv_parm *) lp;
472 		pnn = (struct lpfc_name *) & sp->nodeName;
473 		ppn = (struct lpfc_name *) & sp->portName;
474 	}
475 
476 	icmd = &cmdiocb->iocb;
477 	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
478 		if (cmd == ELS_CMD_ADISC) {
479 			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
480 		} else {
481 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
482 					 NULL);
483 		}
484 		return 1;
485 	}
486 	/* Reject this request because of invalid parameters */
487 	stat.un.b.lsRjtRsvd0 = 0;
488 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
489 	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
490 	stat.un.b.vendorUnique = 0;
491 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
492 
493 	/* 1 sec timeout */
494 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
495 
496 	spin_lock_irq(shost->host_lock);
497 	ndlp->nlp_flag |= NLP_DELAY_TMO;
498 	spin_unlock_irq(shost->host_lock);
499 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
500 	ndlp->nlp_prev_state = ndlp->nlp_state;
501 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
502 	return 0;
503 }
504 
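/* Handle an unsolicited LOGO (or a PRLO treated like one): ACC it, then
 * either re-drive fabric discovery when the LOGO came from the fabric
 * controller or schedule a delayed PLOGI for a non-fabric node.  The node
 * is left in NPR state.
 */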
505 static int
506 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
507 	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
508 {
509 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
510 	struct lpfc_hba    *phba = vport->phba;
511 	struct lpfc_vport **vports;
512 	int i, active_vlink_present = 0;
513 
514 	/* Put ndlp in NPR state with a 1 sec timeout for PLOGI, ACC the LOGO.
515 	 * Only ACC the first LOGO; this avoids sending unnecessary
516 	 * PLOGIs during LOGO storms from a device.
517 	 */
518 	spin_lock_irq(shost->host_lock);
519 	ndlp->nlp_flag |= NLP_LOGO_ACC;
520 	spin_unlock_irq(shost->host_lock);
521 	if (els_cmd == ELS_CMD_PRLO)
522 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
523 	else
524 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
525 	if (ndlp->nlp_DID == Fabric_DID) {
526 		if (vport->port_state <= LPFC_FDISC)
527 			goto out;
528 		lpfc_linkdown_port(vport);
529 		spin_lock_irq(shost->host_lock);
530 		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
531 		spin_unlock_irq(shost->host_lock);
532 		vports = lpfc_create_vport_work_array(phba);
533 		if (vports) {
534 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
535 					i++) {
536 				if ((!(vports[i]->fc_flag &
537 					FC_VPORT_LOGO_RCVD)) &&
538 					(vports[i]->port_state > LPFC_FDISC)) {
539 					active_vlink_present = 1;
540 					break;
541 				}
542 			}
543 			lpfc_destroy_vport_work_array(phba, vports);
544 		}
545 
546 		if (active_vlink_present) {
547 			/*
548 			 * If there are other active VLinks present,
549 			 * re-instantiate the Vlink using FDISC.
550 			 */
551 			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
552 			spin_lock_irq(shost->host_lock);
553 			ndlp->nlp_flag |= NLP_DELAY_TMO;
554 			spin_unlock_irq(shost->host_lock);
555 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
556 			vport->port_state = LPFC_FDISC;
557 		} else {
558 			spin_lock_irq(shost->host_lock);
559 			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
560 			spin_unlock_irq(shost->host_lock);
561 			lpfc_retry_pport_discovery(phba);
562 		}
563 	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
564 		((ndlp->nlp_type & NLP_FCP_TARGET) ||
565 		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
566 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
567 		/* Only try to re-login if this is NOT a Fabric Node */
568 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
569 		spin_lock_irq(shost->host_lock);
570 		ndlp->nlp_flag |= NLP_DELAY_TMO;
571 		spin_unlock_irq(shost->host_lock);
572 
573 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
574 	}
575 out:
576 	ndlp->nlp_prev_state = ndlp->nlp_state;
577 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
578 
579 	spin_lock_irq(shost->host_lock);
580 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
581 	spin_unlock_irq(shost->host_lock);
582 	/* The driver has to wait until the ACC completes before it continues
583 	 * processing the LOGO.  The action will resume in
584 	 * the lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
585 	 * unreg_login, the driver waits so the ACC does not get aborted.
586 	 */
587 	return 0;
588 }
589 
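/* Cache the FCP initiator/target/retry bits carried in an unsolicited PRLI
 * and, if an rport has already been registered, push the updated roles to
 * the FC transport.
 */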
590 static void
591 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
592 	      struct lpfc_iocbq *cmdiocb)
593 {
594 	struct lpfc_dmabuf *pcmd;
595 	uint32_t *lp;
596 	PRLI *npr;
597 	struct fc_rport *rport = ndlp->rport;
598 	u32 roles;
599 
600 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
601 	lp = (uint32_t *) pcmd->virt;
602 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
603 
604 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
605 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
606 	if (npr->prliType == PRLI_FCP_TYPE) {
607 		if (npr->initiatorFunc)
608 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
609 		if (npr->targetFunc)
610 			ndlp->nlp_type |= NLP_FCP_TARGET;
611 		if (npr->Retry)
612 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
613 	}
614 	if (rport) {
615 		/* We need to update the rport role values */
616 		roles = FC_RPORT_ROLE_UNKNOWN;
617 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
618 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
619 		if (ndlp->nlp_type & NLP_FCP_TARGET)
620 			roles |= FC_RPORT_ROLE_FCP_TARGET;
621 
622 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
623 			"rport rolechg:   role:x%x did:x%x flg:x%x",
624 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
625 
626 		fc_remote_port_rolechg(rport, roles);
627 	}
628 }
629 
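/* Decide whether this node can be re-validated with ADISC after a link
 * event: return 1 and set NLP_NPR_ADISC if so, otherwise return 0 and
 * unregister any registered RPI so the node is re-discovered with PLOGI.
 */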
630 static uint32_t
631 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
632 {
633 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
634 
635 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
636 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
637 		return 0;
638 	}
639 
640 	if (!(vport->fc_flag & FC_PT2PT)) {
641 		/* Check config parameter use-adisc or FCP-2 */
642 		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
643 		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
644 		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
645 			spin_lock_irq(shost->host_lock);
646 			ndlp->nlp_flag |= NLP_NPR_ADISC;
647 			spin_unlock_irq(shost->host_lock);
648 			return 1;
649 		}
650 	}
651 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
652 	lpfc_unreg_rpi(vport, ndlp);
653 	return 0;
654 }
655 
656 /**
657  * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
658  * @phba : Pointer to lpfc_hba structure.
659  * @vport: Pointer to lpfc_vport structure.
660  * @rpi  : rpi to be released.
661  *
662  * This function will send an unreg_login mailbox command to the firmware
663  * to release the rpi.
664  **/
665 void
666 lpfc_release_rpi(struct lpfc_hba *phba,
667 		struct lpfc_vport *vport,
668 		uint16_t rpi)
669 {
670 	LPFC_MBOXQ_t *pmb;
671 	int rc;
672 
673 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
674 			GFP_KERNEL);
675 	if (!pmb)
676 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
677 			"2796 mailbox memory allocation failed\n");
678 	else {
679 		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
680 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
681 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
682 		if (rc == MBX_NOT_FINISHED)
683 			mempool_free(pmb, phba->mbox_mem_pool);
684 	}
685 }
686 
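/* Catch-all handler for events that are not legal in the node's current
 * state: log the illegal transition and stay in the same state, releasing
 * the RPI if the event was a successfully completed REG_LOGIN.
 */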
687 static uint32_t
688 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
689 		  void *arg, uint32_t evt)
690 {
691 	struct lpfc_hba *phba;
692 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
693 	MAILBOX_t *mb;
694 	uint16_t rpi;
695 
696 	phba = vport->phba;
697 	/* Release the RPI if reglogin completing */
698 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
699 		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
700 		(!pmb->u.mb.mbxStatus)) {
701 		mb = &pmb->u.mb;
702 		rpi = pmb->u.mb.un.varWords[0];
703 		lpfc_release_rpi(phba, vport, rpi);
704 	}
705 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
706 			 "0271 Illegal State Transition: node x%x "
707 			 "event x%x, state x%x Data: x%x x%x\n",
708 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
709 			 ndlp->nlp_flag);
710 	return ndlp->nlp_state;
711 }
712 
713 static uint32_t
714 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
715 		  void *arg, uint32_t evt)
716 {
717 	/* This transition is only legal if we previously
718 	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
719 	 * working on the same NPortID, do nothing here so that this
720 	 * thread stops.
721 	 */
722 	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
723 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
724 			 "0272 Illegal State Transition: node x%x "
725 			 "event x%x, state x%x Data: x%x x%x\n",
726 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
727 			 ndlp->nlp_flag);
728 	}
729 	return ndlp->nlp_state;
730 }
731 
732 /* Start of Discovery State Machine routines */
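/* Each handler below takes (vport, ndlp, arg, evt) and returns the next node
 * state, or NLP_STE_FREED_NODE when the node has been (or will be) freed.
 */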
733 
734 static uint32_t
735 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
736 			   void *arg, uint32_t evt)
737 {
738 	struct lpfc_iocbq *cmdiocb;
739 
740 	cmdiocb = (struct lpfc_iocbq *) arg;
741 
742 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
743 		return ndlp->nlp_state;
744 	}
745 	return NLP_STE_FREED_NODE;
746 }
747 
748 static uint32_t
749 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
750 			 void *arg, uint32_t evt)
751 {
752 	lpfc_issue_els_logo(vport, ndlp, 0);
753 	return ndlp->nlp_state;
754 }
755 
756 static uint32_t
757 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
758 			  void *arg, uint32_t evt)
759 {
760 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
761 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
762 
763 	spin_lock_irq(shost->host_lock);
764 	ndlp->nlp_flag |= NLP_LOGO_ACC;
765 	spin_unlock_irq(shost->host_lock);
766 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
767 
768 	return ndlp->nlp_state;
769 }
770 
771 static uint32_t
772 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
773 			   void *arg, uint32_t evt)
774 {
775 	return NLP_STE_FREED_NODE;
776 }
777 
778 static uint32_t
779 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
780 			   void *arg, uint32_t evt)
781 {
782 	return NLP_STE_FREED_NODE;
783 }
784 
785 static uint32_t
786 lpfc_device_recov_unused_node(struct lpfc_vport *vport,
787 			struct lpfc_nodelist *ndlp,
788 			   void *arg, uint32_t evt)
789 {
790 	return ndlp->nlp_state;
791 }
792 
793 static uint32_t
794 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
795 			   void *arg, uint32_t evt)
796 {
797 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
798 	struct lpfc_hba   *phba = vport->phba;
799 	struct lpfc_iocbq *cmdiocb = arg;
800 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
801 	uint32_t *lp = (uint32_t *) pcmd->virt;
802 	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
803 	struct ls_rjt stat;
804 	int port_cmp;
805 
806 	memset(&stat, 0, sizeof (struct ls_rjt));
807 
808 	/* For a PLOGI, we only accept if our portname is less
809 	 * than the remote portname.
810 	 */
811 	phba->fc_stat.elsLogiCol++;
812 	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
813 			  sizeof(struct lpfc_name));
814 
815 	if (port_cmp >= 0) {
816 		/* Reject this request because the remote node will accept
817 		   ours */
818 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
819 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
820 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
821 			NULL);
822 	} else {
823 		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
824 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
825 		    (vport->num_disc_nodes)) {
826 			spin_lock_irq(shost->host_lock);
827 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
828 			spin_unlock_irq(shost->host_lock);
829 			/* Check if there are more PLOGIs to be sent */
830 			lpfc_more_plogi(vport);
831 			if (vport->num_disc_nodes == 0) {
832 				spin_lock_irq(shost->host_lock);
833 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
834 				spin_unlock_irq(shost->host_lock);
835 				lpfc_can_disctmo(vport);
836 				lpfc_end_rscn(vport);
837 			}
838 		}
839 	} /* If our portname was less */
840 
841 	return ndlp->nlp_state;
842 }
843 
844 static uint32_t
845 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
846 			  void *arg, uint32_t evt)
847 {
848 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
849 	struct ls_rjt     stat;
850 
851 	memset(&stat, 0, sizeof (struct ls_rjt));
852 	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
853 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
854 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
855 	return ndlp->nlp_state;
856 }
857 
858 static uint32_t
859 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
860 			  void *arg, uint32_t evt)
861 {
862 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
863 
864 	/* software abort outstanding PLOGI */
865 	lpfc_els_abort(vport->phba, ndlp);
866 
867 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
868 	return ndlp->nlp_state;
869 }
870 
871 static uint32_t
872 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
873 			 void *arg, uint32_t evt)
874 {
875 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
876 	struct lpfc_hba   *phba = vport->phba;
877 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
878 
879 	/* software abort outstanding PLOGI */
880 	lpfc_els_abort(phba, ndlp);
881 
882 	if (evt == NLP_EVT_RCV_LOGO) {
883 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
884 	} else {
885 		lpfc_issue_els_logo(vport, ndlp, 0);
886 	}
887 
888 	/* Put ndlp in NPR state; set PLOGI timer for 1 sec */
889 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
890 	spin_lock_irq(shost->host_lock);
891 	ndlp->nlp_flag |= NLP_DELAY_TMO;
892 	spin_unlock_irq(shost->host_lock);
893 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
894 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
895 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
896 
897 	return ndlp->nlp_state;
898 }
899 
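/* Completion of an outgoing PLOGI: validate the returned service parameters
 * and issue REG_LOGIN to move to REG_LOGIN_ISSUE state.  On failure the node
 * is marked NLP_DEFER_RM and reported as freed.
 */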
900 static uint32_t
901 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
902 			    struct lpfc_nodelist *ndlp,
903 			    void *arg,
904 			    uint32_t evt)
905 {
906 	struct lpfc_hba    *phba = vport->phba;
907 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
908 	struct lpfc_iocbq  *cmdiocb, *rspiocb;
909 	struct lpfc_dmabuf *pcmd, *prsp, *mp;
910 	uint32_t *lp;
911 	IOCB_t *irsp;
912 	struct serv_parm *sp;
913 	LPFC_MBOXQ_t *mbox;
914 
915 	cmdiocb = (struct lpfc_iocbq *) arg;
916 	rspiocb = cmdiocb->context_un.rsp_iocb;
917 
918 	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
919 		/* Recovery from PLOGI collision logic */
920 		return ndlp->nlp_state;
921 	}
922 
923 	irsp = &rspiocb->iocb;
924 
925 	if (irsp->ulpStatus)
926 		goto out;
927 
928 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
929 
930 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
931 
932 	lp = (uint32_t *) prsp->virt;
933 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
934 
935 	/* Some switches have FDMI servers returning 0 for WWN */
936 	if ((ndlp->nlp_DID != FDMI_DID) &&
937 		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
938 		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
939 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
940 				 "0142 PLOGI RSP: Invalid WWN.\n");
941 		goto out;
942 	}
943 	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
944 		goto out;
945 	/* PLOGI chkparm OK */
946 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
947 			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
948 			 ndlp->nlp_DID, ndlp->nlp_state,
949 			 ndlp->nlp_flag, ndlp->nlp_rpi);
950 	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
951 		ndlp->nlp_fcp_info |= CLASS2;
952 	else
953 		ndlp->nlp_fcp_info |= CLASS3;
954 
955 	ndlp->nlp_class_sup = 0;
956 	if (sp->cls1.classValid)
957 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
958 	if (sp->cls2.classValid)
959 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
960 	if (sp->cls3.classValid)
961 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
962 	if (sp->cls4.classValid)
963 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
964 	ndlp->nlp_maxframe =
965 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
966 
967 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
968 	if (!mbox) {
969 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
970 			"0133 PLOGI: no memory for reg_login "
971 			"Data: x%x x%x x%x x%x\n",
972 			ndlp->nlp_DID, ndlp->nlp_state,
973 			ndlp->nlp_flag, ndlp->nlp_rpi);
974 		goto out;
975 	}
976 
977 	lpfc_unreg_rpi(vport, ndlp);
978 
979 	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
980 			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
981 		switch (ndlp->nlp_DID) {
982 		case NameServer_DID:
983 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
984 			break;
985 		case FDMI_DID:
986 			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
987 			break;
988 		default:
989 			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
990 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
991 		}
992 		mbox->context2 = lpfc_nlp_get(ndlp);
993 		mbox->vport = vport;
994 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
995 		    != MBX_NOT_FINISHED) {
996 			lpfc_nlp_set_state(vport, ndlp,
997 					   NLP_STE_REG_LOGIN_ISSUE);
998 			return ndlp->nlp_state;
999 		}
1000 		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1001 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1002 		/* decrement the node reference count taken for the failed
1003 		 * mbox command
1004 		 */
1005 		lpfc_nlp_put(ndlp);
1006 		mp = (struct lpfc_dmabuf *) mbox->context1;
1007 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1008 		kfree(mp);
1009 		mempool_free(mbox, phba->mbox_mem_pool);
1010 
1011 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1012 				 "0134 PLOGI: cannot issue reg_login "
1013 				 "Data: x%x x%x x%x x%x\n",
1014 				 ndlp->nlp_DID, ndlp->nlp_state,
1015 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1016 	} else {
1017 		mempool_free(mbox, phba->mbox_mem_pool);
1018 
1019 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1020 				 "0135 PLOGI: cannot format reg_login "
1021 				 "Data: x%x x%x x%x x%x\n",
1022 				 ndlp->nlp_DID, ndlp->nlp_state,
1023 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1024 	}
1025 
1026 
1027 out:
1028 	if (ndlp->nlp_DID == NameServer_DID) {
1029 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1030 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1031 				 "0261 Cannot Register NameServer login\n");
1032 	}
1033 
1034 	spin_lock_irq(shost->host_lock);
1035 	ndlp->nlp_flag |= NLP_DEFER_RM;
1036 	spin_unlock_irq(shost->host_lock);
1037 	return NLP_STE_FREED_NODE;
1038 }
1039 
1040 static uint32_t
1041 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1042 			   void *arg, uint32_t evt)
1043 {
1044 	return ndlp->nlp_state;
1045 }
1046 
1047 static uint32_t
1048 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1049 	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1050 {
1051 	struct lpfc_hba *phba;
1052 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1053 	MAILBOX_t *mb = &pmb->u.mb;
1054 	uint16_t rpi;
1055 
1056 	phba = vport->phba;
1057 	/* Release the RPI */
1058 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
1059 		!mb->mbxStatus) {
1060 		rpi = pmb->u.mb.un.varWords[0];
1061 		lpfc_release_rpi(phba, vport, rpi);
1062 	}
1063 	return ndlp->nlp_state;
1064 }
1065 
1066 static uint32_t
1067 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1068 			   void *arg, uint32_t evt)
1069 {
1070 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1071 
1072 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1073 		spin_lock_irq(shost->host_lock);
1074 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1075 		spin_unlock_irq(shost->host_lock);
1076 		return ndlp->nlp_state;
1077 	} else {
1078 		/* software abort outstanding PLOGI */
1079 		lpfc_els_abort(vport->phba, ndlp);
1080 
1081 		lpfc_drop_node(vport, ndlp);
1082 		return NLP_STE_FREED_NODE;
1083 	}
1084 }
1085 
1086 static uint32_t
1087 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1088 			      struct lpfc_nodelist *ndlp,
1089 			      void *arg,
1090 			      uint32_t evt)
1091 {
1092 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1093 	struct lpfc_hba  *phba = vport->phba;
1094 
1095 	/* Don't do anything that will mess up processing of the
1096 	 * previous RSCN.
1097 	 */
1098 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1099 		return ndlp->nlp_state;
1100 
1101 	/* software abort outstanding PLOGI */
1102 	lpfc_els_abort(phba, ndlp);
1103 
1104 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1105 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1106 	spin_lock_irq(shost->host_lock);
1107 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1108 	spin_unlock_irq(shost->host_lock);
1109 
1110 	return ndlp->nlp_state;
1111 }
1112 
1113 static uint32_t
1114 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1115 			   void *arg, uint32_t evt)
1116 {
1117 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1118 	struct lpfc_hba   *phba = vport->phba;
1119 	struct lpfc_iocbq *cmdiocb;
1120 
1121 	/* software abort outstanding ADISC */
1122 	lpfc_els_abort(phba, ndlp);
1123 
1124 	cmdiocb = (struct lpfc_iocbq *) arg;
1125 
1126 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1127 		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1128 			spin_lock_irq(shost->host_lock);
1129 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1130 			spin_unlock_irq(shost->host_lock);
1131 			if (vport->num_disc_nodes)
1132 				lpfc_more_adisc(vport);
1133 		}
1134 		return ndlp->nlp_state;
1135 	}
1136 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1137 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1138 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1139 
1140 	return ndlp->nlp_state;
1141 }
1142 
1143 static uint32_t
1144 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1145 			  void *arg, uint32_t evt)
1146 {
1147 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1148 
1149 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1150 	return ndlp->nlp_state;
1151 }
1152 
1153 static uint32_t
1154 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1155 			  void *arg, uint32_t evt)
1156 {
1157 	struct lpfc_hba *phba = vport->phba;
1158 	struct lpfc_iocbq *cmdiocb;
1159 
1160 	cmdiocb = (struct lpfc_iocbq *) arg;
1161 
1162 	/* software abort outstanding ADISC */
1163 	lpfc_els_abort(phba, ndlp);
1164 
1165 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1166 	return ndlp->nlp_state;
1167 }
1168 
1169 static uint32_t
1170 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1171 			    struct lpfc_nodelist *ndlp,
1172 			    void *arg, uint32_t evt)
1173 {
1174 	struct lpfc_iocbq *cmdiocb;
1175 
1176 	cmdiocb = (struct lpfc_iocbq *) arg;
1177 
1178 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1179 	return ndlp->nlp_state;
1180 }
1181 
1182 static uint32_t
1183 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1184 			  void *arg, uint32_t evt)
1185 {
1186 	struct lpfc_iocbq *cmdiocb;
1187 
1188 	cmdiocb = (struct lpfc_iocbq *) arg;
1189 
1190 	/* Treat like rcv logo */
1191 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1192 	return ndlp->nlp_state;
1193 }
1194 
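/* Completion of an outgoing ADISC: if the response failed or the WWNs no
 * longer match, fall back to NPR with a delayed PLOGI; otherwise (resuming
 * the RPI on SLI4) move the node to MAPPED or UNMAPPED.
 */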
1195 static uint32_t
1196 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1197 			    struct lpfc_nodelist *ndlp,
1198 			    void *arg, uint32_t evt)
1199 {
1200 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1201 	struct lpfc_hba   *phba = vport->phba;
1202 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1203 	IOCB_t *irsp;
1204 	ADISC *ap;
1205 	int rc;
1206 
1207 	cmdiocb = (struct lpfc_iocbq *) arg;
1208 	rspiocb = cmdiocb->context_un.rsp_iocb;
1209 
1210 	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1211 	irsp = &rspiocb->iocb;
1212 
1213 	if ((irsp->ulpStatus) ||
1214 	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1215 		/* 1 sec timeout */
1216 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1217 		spin_lock_irq(shost->host_lock);
1218 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1219 		spin_unlock_irq(shost->host_lock);
1220 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1221 
1222 		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1223 		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1224 
1225 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1226 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1227 		lpfc_unreg_rpi(vport, ndlp);
1228 		return ndlp->nlp_state;
1229 	}
1230 
1231 	if (phba->sli_rev == LPFC_SLI_REV4) {
1232 		rc = lpfc_sli4_resume_rpi(ndlp);
1233 		if (rc) {
1234 			/* Stay in state and retry. */
1235 			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1236 			return ndlp->nlp_state;
1237 		}
1238 	}
1239 
1240 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1241 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1242 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1243 	} else {
1244 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1245 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1246 	}
1247 
1248 	return ndlp->nlp_state;
1249 }
1250 
1251 static uint32_t
1252 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1253 			   void *arg, uint32_t evt)
1254 {
1255 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1256 
1257 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1258 		spin_lock_irq(shost->host_lock);
1259 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1260 		spin_unlock_irq(shost->host_lock);
1261 		return ndlp->nlp_state;
1262 	} else {
1263 		/* software abort outstanding ADISC */
1264 		lpfc_els_abort(vport->phba, ndlp);
1265 
1266 		lpfc_drop_node(vport, ndlp);
1267 		return NLP_STE_FREED_NODE;
1268 	}
1269 }
1270 
1271 static uint32_t
1272 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1273 			      struct lpfc_nodelist *ndlp,
1274 			      void *arg,
1275 			      uint32_t evt)
1276 {
1277 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1278 	struct lpfc_hba  *phba = vport->phba;
1279 
1280 	/* Don't do anything that will mess up processing of the
1281 	 * previous RSCN.
1282 	 */
1283 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1284 		return ndlp->nlp_state;
1285 
1286 	/* software abort outstanding ADISC */
1287 	lpfc_els_abort(phba, ndlp);
1288 
1289 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1290 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1291 	spin_lock_irq(shost->host_lock);
1292 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1293 	spin_unlock_irq(shost->host_lock);
1294 	lpfc_disc_set_adisc(vport, ndlp);
1295 	return ndlp->nlp_state;
1296 }
1297 
1298 static uint32_t
1299 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1300 			      struct lpfc_nodelist *ndlp,
1301 			      void *arg,
1302 			      uint32_t evt)
1303 {
1304 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1305 
1306 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1307 	return ndlp->nlp_state;
1308 }
1309 
1310 static uint32_t
1311 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1312 			     struct lpfc_nodelist *ndlp,
1313 			     void *arg,
1314 			     uint32_t evt)
1315 {
1316 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1317 
1318 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1319 	return ndlp->nlp_state;
1320 }
1321 
1322 static uint32_t
1323 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1324 			     struct lpfc_nodelist *ndlp,
1325 			     void *arg,
1326 			     uint32_t evt)
1327 {
1328 	struct lpfc_hba   *phba = vport->phba;
1329 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1330 	LPFC_MBOXQ_t	  *mb;
1331 	LPFC_MBOXQ_t	  *nextmb;
1332 	struct lpfc_dmabuf *mp;
1333 
1334 	cmdiocb = (struct lpfc_iocbq *) arg;
1335 
1336 	/* clean up any ndlp on the mbox queue waiting for reglogin cmpl */
1337 	if ((mb = phba->sli.mbox_active)) {
1338 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1339 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1340 			lpfc_nlp_put(ndlp);
1341 			mb->context2 = NULL;
1342 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1343 		}
1344 	}
1345 
1346 	spin_lock_irq(&phba->hbalock);
1347 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1348 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1349 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1350 			mp = (struct lpfc_dmabuf *) (mb->context1);
1351 			if (mp) {
1352 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1353 				kfree(mp);
1354 			}
1355 			lpfc_nlp_put(ndlp);
1356 			list_del(&mb->list);
1357 			phba->sli.mboxq_cnt--;
1358 			mempool_free(mb, phba->mbox_mem_pool);
1359 		}
1360 	}
1361 	spin_unlock_irq(&phba->hbalock);
1362 
1363 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1364 	return ndlp->nlp_state;
1365 }
1366 
1367 static uint32_t
1368 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1369 			       struct lpfc_nodelist *ndlp,
1370 			       void *arg,
1371 			       uint32_t evt)
1372 {
1373 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1374 
1375 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1376 	return ndlp->nlp_state;
1377 }
1378 
1379 static uint32_t
1380 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1381 			     struct lpfc_nodelist *ndlp,
1382 			     void *arg,
1383 			     uint32_t evt)
1384 {
1385 	struct lpfc_iocbq *cmdiocb;
1386 
1387 	cmdiocb = (struct lpfc_iocbq *) arg;
1388 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1389 	return ndlp->nlp_state;
1390 }
1391 
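/* REG_LOGIN mailbox completion while in REG_LOGIN_ISSUE state: on success
 * record the RPI and issue PRLI (or go to UNMAPPED for fabric nodes); on
 * failure drop back to NPR, logging out unless the RPI pool was exhausted.
 */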
1392 static uint32_t
1393 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1394 				  struct lpfc_nodelist *ndlp,
1395 				  void *arg,
1396 				  uint32_t evt)
1397 {
1398 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1399 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1400 	MAILBOX_t *mb = &pmb->u.mb;
1401 	uint32_t did  = mb->un.varWords[1];
1402 
1403 	if (mb->mbxStatus) {
1404 		/* RegLogin failed */
1405 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1406 				"0246 RegLogin failed Data: x%x x%x x%x x%x "
1407 				 "x%x\n",
1408 				 did, mb->mbxStatus, vport->port_state,
1409 				 mb->un.varRegLogin.vpi,
1410 				 mb->un.varRegLogin.rpi);
1411 		/*
1412 		 * If RegLogin failed due to lack of HBA resources, do not
1413 		 * retry discovery.
1414 		 */
1415 		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1416 			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1417 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1418 			return ndlp->nlp_state;
1419 		}
1420 
1421 		/* Put ndlp in NPR state; set PLOGI timer for 1 sec */
1422 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1423 		spin_lock_irq(shost->host_lock);
1424 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1425 		spin_unlock_irq(shost->host_lock);
1426 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1427 
1428 		lpfc_issue_els_logo(vport, ndlp, 0);
1429 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1430 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1431 		return ndlp->nlp_state;
1432 	}
1433 
1434 	/* SLI4 ports have preallocated logical rpis. */
1435 	if (vport->phba->sli_rev < LPFC_SLI_REV4)
1436 		ndlp->nlp_rpi = mb->un.varWords[0];
1437 
1438 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1439 
1440 	/* Only if we are not a fabric nport do we issue PRLI */
1441 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1442 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1443 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1444 		lpfc_issue_els_prli(vport, ndlp, 0);
1445 	} else {
1446 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1447 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1448 	}
1449 	return ndlp->nlp_state;
1450 }
1451 
1452 static uint32_t
1453 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1454 			      struct lpfc_nodelist *ndlp,
1455 			      void *arg,
1456 			      uint32_t evt)
1457 {
1458 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1459 
1460 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1461 		spin_lock_irq(shost->host_lock);
1462 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1463 		spin_unlock_irq(shost->host_lock);
1464 		return ndlp->nlp_state;
1465 	} else {
1466 		lpfc_drop_node(vport, ndlp);
1467 		return NLP_STE_FREED_NODE;
1468 	}
1469 }
1470 
1471 static uint32_t
1472 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1473 				 struct lpfc_nodelist *ndlp,
1474 				 void *arg,
1475 				 uint32_t evt)
1476 {
1477 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1478 
1479 	/* Don't do anything that will mess up processing of the
1480 	 * previous RSCN.
1481 	 */
1482 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1483 		return ndlp->nlp_state;
1484 
1485 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1486 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1487 	spin_lock_irq(shost->host_lock);
1488 	ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1489 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1490 	spin_unlock_irq(shost->host_lock);
1491 	lpfc_disc_set_adisc(vport, ndlp);
1492 	return ndlp->nlp_state;
1493 }
1494 
1495 static uint32_t
1496 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1497 			  void *arg, uint32_t evt)
1498 {
1499 	struct lpfc_iocbq *cmdiocb;
1500 
1501 	cmdiocb = (struct lpfc_iocbq *) arg;
1502 
1503 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1504 	return ndlp->nlp_state;
1505 }
1506 
1507 static uint32_t
1508 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1509 			 void *arg, uint32_t evt)
1510 {
1511 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1512 
1513 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1514 	return ndlp->nlp_state;
1515 }
1516 
1517 static uint32_t
1518 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1519 			 void *arg, uint32_t evt)
1520 {
1521 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1522 
1523 	/* Software abort outstanding PRLI before sending acc */
1524 	lpfc_els_abort(vport->phba, ndlp);
1525 
1526 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1527 	return ndlp->nlp_state;
1528 }
1529 
1530 static uint32_t
1531 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1532 			   void *arg, uint32_t evt)
1533 {
1534 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1535 
1536 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1537 	return ndlp->nlp_state;
1538 }
1539 
1540 /* This routine is invoked when we rcv a PRLO request from a nport
1541  * we are logged into.  We should send back a PRLO rsp setting the
1542  * appropriate bits.
1543  * NEXT STATE = PRLI_ISSUE
1544  */
1545 static uint32_t
1546 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1547 			 void *arg, uint32_t evt)
1548 {
1549 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1550 
1551 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1552 	return ndlp->nlp_state;
1553 }
1554 
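/* Completion of an outgoing PRLI: record the FCP roles from the response and
 * move the node to MAPPED (target) or UNMAPPED.  On restricted-login NPIV
 * ports, non-target nodes are logged out and parked in NPR instead.
 */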
1555 static uint32_t
1556 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1557 			  void *arg, uint32_t evt)
1558 {
1559 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1560 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1561 	struct lpfc_hba   *phba = vport->phba;
1562 	IOCB_t *irsp;
1563 	PRLI *npr;
1564 
1565 	cmdiocb = (struct lpfc_iocbq *) arg;
1566 	rspiocb = cmdiocb->context_un.rsp_iocb;
1567 	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1568 
1569 	irsp = &rspiocb->iocb;
1570 	if (irsp->ulpStatus) {
1571 		if ((vport->port_type == LPFC_NPIV_PORT) &&
1572 		    vport->cfg_restrict_login) {
1573 			goto out;
1574 		}
1575 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1576 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1577 		return ndlp->nlp_state;
1578 	}
1579 
1580 	/* Check out PRLI rsp */
1581 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1582 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1583 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1584 	    (npr->prliType == PRLI_FCP_TYPE)) {
1585 		if (npr->initiatorFunc)
1586 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1587 		if (npr->targetFunc)
1588 			ndlp->nlp_type |= NLP_FCP_TARGET;
1589 		if (npr->Retry)
1590 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1591 	}
1592 	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1593 	    (vport->port_type == LPFC_NPIV_PORT) &&
1594 	     vport->cfg_restrict_login) {
1595 out:
1596 		spin_lock_irq(shost->host_lock);
1597 		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1598 		spin_unlock_irq(shost->host_lock);
1599 		lpfc_issue_els_logo(vport, ndlp, 0);
1600 
1601 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1602 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1603 		return ndlp->nlp_state;
1604 	}
1605 
1606 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1607 	if (ndlp->nlp_type & NLP_FCP_TARGET)
1608 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1609 	else
1610 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1611 	return ndlp->nlp_state;
1612 }
1613 
1614 /*! lpfc_device_rm_prli_issue
1615  *
1616  * \pre
1617  * \post
1618  * \param   phba
1619  * \param   ndlp
1620  * \param   arg
1621  * \param   evt
1622  * \return  uint32_t
1623  *
1624  * \b Description:
1625  *    This routine is invoked when we receive a request to remove an nport we
1626  *    are in the process of PRLIing. We should software abort the outstanding
1627  *    PRLI, unreg login, and send a logout. We change the node state to
1628  *    UNUSED_NODE and put it on the plogi list so it can be freed when LOGO completes.
1629  *
1630  */
1631 
1632 static uint32_t
1633 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1634 			  void *arg, uint32_t evt)
1635 {
1636 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1637 
1638 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1639 		spin_lock_irq(shost->host_lock);
1640 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1641 		spin_unlock_irq(shost->host_lock);
1642 		return ndlp->nlp_state;
1643 	} else {
1644 		/* software abort outstanding PLOGI */
1645 		lpfc_els_abort(vport->phba, ndlp);
1646 
1647 		lpfc_drop_node(vport, ndlp);
1648 		return NLP_STE_FREED_NODE;
1649 	}
1650 }
1651 
1652 
1653 /*! lpfc_device_recov_prli_issue
1654  *
1655  * \pre
1656  * \post
1657  * \param   phba
1658  * \param   ndlp
1659  * \param   arg
1660  * \param   evt
1661  * \return  uint32_t
1662  *
1663  * \b Description:
1664  *    The routine is invoked when the state of a device is unknown, like
1665  *    during a link down. We should remove the nodelist entry from the
1666  *    unmapped list, issue an UNREG_LOGIN, do a software abort of the
1667  *    outstanding PRLI command, then free the node entry.
1668  */
1669 static uint32_t
1670 lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1671 			     struct lpfc_nodelist *ndlp,
1672 			     void *arg,
1673 			     uint32_t evt)
1674 {
1675 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1676 	struct lpfc_hba  *phba = vport->phba;
1677 
1678 	/* Don't do anything that will mess up processing of the
1679 	 * previous RSCN.
1680 	 */
1681 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1682 		return ndlp->nlp_state;
1683 
1684 	/* software abort outstanding PRLI */
1685 	lpfc_els_abort(phba, ndlp);
1686 
1687 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1688 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1689 	spin_lock_irq(shost->host_lock);
1690 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1691 	spin_unlock_irq(shost->host_lock);
1692 	lpfc_disc_set_adisc(vport, ndlp);
1693 	return ndlp->nlp_state;
1694 }
1695 
1696 static uint32_t
1697 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1698 			  void *arg, uint32_t evt)
1699 {
1700 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1701 
1702 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1703 	return ndlp->nlp_state;
1704 }
1705 
1706 static uint32_t
1707 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1708 			 void *arg, uint32_t evt)
1709 {
1710 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1711 
1712 	lpfc_rcv_prli(vport, ndlp, cmdiocb);
1713 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1714 	return ndlp->nlp_state;
1715 }
1716 
1717 static uint32_t
1718 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1719 			 void *arg, uint32_t evt)
1720 {
1721 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1722 
1723 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1724 	return ndlp->nlp_state;
1725 }
1726 
1727 static uint32_t
1728 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1729 			   void *arg, uint32_t evt)
1730 {
1731 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1732 
1733 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1734 	return ndlp->nlp_state;
1735 }
1736 
1737 static uint32_t
1738 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1739 			 void *arg, uint32_t evt)
1740 {
1741 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1742 
1743 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1744 	return ndlp->nlp_state;
1745 }
1746 
1747 static uint32_t
1748 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1749 			     struct lpfc_nodelist *ndlp,
1750 			     void *arg,
1751 			     uint32_t evt)
1752 {
1753 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1754 
1755 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1756 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1757 	spin_lock_irq(shost->host_lock);
1758 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1759 	spin_unlock_irq(shost->host_lock);
1760 	lpfc_disc_set_adisc(vport, ndlp);
1761 
1762 	return ndlp->nlp_state;
1763 }
1764 
1765 static uint32_t
1766 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1767 			   void *arg, uint32_t evt)
1768 {
1769 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1770 
1771 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1772 	return ndlp->nlp_state;
1773 }
1774 
1775 static uint32_t
1776 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1777 			  void *arg, uint32_t evt)
1778 {
1779 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1780 
1781 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1782 	return ndlp->nlp_state;
1783 }
1784 
1785 static uint32_t
1786 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1787 			  void *arg, uint32_t evt)
1788 {
1789 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1790 
1791 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1792 	return ndlp->nlp_state;
1793 }
1794 
1795 static uint32_t
1796 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
1797 			    struct lpfc_nodelist *ndlp,
1798 			    void *arg, uint32_t evt)
1799 {
1800 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1801 
1802 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1803 	return ndlp->nlp_state;
1804 }
1805 
1806 static uint32_t
1807 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1808 			  void *arg, uint32_t evt)
1809 {
1810 	struct lpfc_hba  *phba = vport->phba;
1811 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1812 
1813 	/* flush the target */
1814 	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1815 			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
1816 
1817 	/* Treat like rcv logo */
1818 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1819 	return ndlp->nlp_state;
1820 }
1821 
1822 static uint32_t
1823 lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
1824 			      struct lpfc_nodelist *ndlp,
1825 			      void *arg,
1826 			      uint32_t evt)
1827 {
1828 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1829 
1830 	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
1831 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1832 	spin_lock_irq(shost->host_lock);
1833 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1834 	spin_unlock_irq(shost->host_lock);
1835 	lpfc_disc_set_adisc(vport, ndlp);
1836 	return ndlp->nlp_state;
1837 }
1838 
1839 static uint32_t
1840 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1841 			void *arg, uint32_t evt)
1842 {
1843 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1844 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
1845 
1846 	/* Ignore PLOGI if we have an outstanding LOGO */
1847 	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
1848 		return ndlp->nlp_state;
1849 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1850 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
1851 		spin_lock_irq(shost->host_lock);
1852 		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1853 		spin_unlock_irq(shost->host_lock);
1854 	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1855 		/* send PLOGI immediately, move to PLOGI issue state */
1856 		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1857 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1858 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1859 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1860 		}
1861 	}
1862 	return ndlp->nlp_state;
1863 }
1864 
1865 static uint32_t
1866 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1867 		       void *arg, uint32_t evt)
1868 {
1869 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1870 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1871 	struct ls_rjt     stat;
1872 
1873 	memset(&stat, 0, sizeof (struct ls_rjt));
1874 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1875 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1876 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1877 
1878 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1879 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1880 			spin_lock_irq(shost->host_lock);
1881 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1882 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1883 			spin_unlock_irq(shost->host_lock);
1884 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1885 			lpfc_issue_els_adisc(vport, ndlp, 0);
1886 		} else {
1887 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1888 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1889 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1890 		}
1891 	}
1892 	return ndlp->nlp_state;
1893 }
1894 
1895 static uint32_t
1896 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
1897 		       void *arg, uint32_t evt)
1898 {
1899 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1900 
1901 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1902 	return ndlp->nlp_state;
1903 }
1904 
1905 static uint32_t
1906 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1907 			 void *arg, uint32_t evt)
1908 {
1909 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1910 
1911 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1912 	/*
1913 	 * Do not start discovery if discovery is about to start
1914 	 * or discovery in progress for this node. Starting discovery
1915 	 * here will affect the counting of discovery threads.
1916 	 */
1917 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1918 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1919 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1920 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1921 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1922 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1923 			lpfc_issue_els_adisc(vport, ndlp, 0);
1924 		} else {
1925 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1926 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1927 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1928 		}
1929 	}
1930 	return ndlp->nlp_state;
1931 }
1932 
1933 static uint32_t
1934 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1935 		       void *arg, uint32_t evt)
1936 {
1937 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1938 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1939 
1940 	spin_lock_irq(shost->host_lock);
1941 	ndlp->nlp_flag |= NLP_LOGO_ACC;
1942 	spin_unlock_irq(shost->host_lock);
1943 
1944 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1945 
1946 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
1947 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1948 		spin_lock_irq(shost->host_lock);
1949 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1950 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1951 		spin_unlock_irq(shost->host_lock);
1952 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1953 	} else {
1954 		spin_lock_irq(shost->host_lock);
1955 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1956 		spin_unlock_irq(shost->host_lock);
1957 	}
1958 	return ndlp->nlp_state;
1959 }
1960 
1961 static uint32_t
1962 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1963 			 void *arg, uint32_t evt)
1964 {
1965 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1966 	IOCB_t *irsp;
1967 
1968 	cmdiocb = (struct lpfc_iocbq *) arg;
1969 	rspiocb = cmdiocb->context_un.rsp_iocb;
1970 
1971 	irsp = &rspiocb->iocb;
1972 	if (irsp->ulpStatus) {
1973 		ndlp->nlp_flag |= NLP_DEFER_RM;
1974 		return NLP_STE_FREED_NODE;
1975 	}
1976 	return ndlp->nlp_state;
1977 }
1978 
1979 static uint32_t
1980 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1981 			void *arg, uint32_t evt)
1982 {
1983 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1984 	IOCB_t *irsp;
1985 
1986 	cmdiocb = (struct lpfc_iocbq *) arg;
1987 	rspiocb = cmdiocb->context_un.rsp_iocb;
1988 
1989 	irsp = &rspiocb->iocb;
1990 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1991 		lpfc_drop_node(vport, ndlp);
1992 		return NLP_STE_FREED_NODE;
1993 	}
1994 	return ndlp->nlp_state;
1995 }
1996 
1997 static uint32_t
1998 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1999 			void *arg, uint32_t evt)
2000 {
2001 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2002 	if (ndlp->nlp_DID == Fabric_DID) {
2003 		spin_lock_irq(shost->host_lock);
2004 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2005 		spin_unlock_irq(shost->host_lock);
2006 	}
2007 	lpfc_unreg_rpi(vport, ndlp);
2008 	return ndlp->nlp_state;
2009 }
2010 
2011 static uint32_t
2012 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2013 			 void *arg, uint32_t evt)
2014 {
2015 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2016 	IOCB_t *irsp;
2017 
2018 	cmdiocb = (struct lpfc_iocbq *) arg;
2019 	rspiocb = cmdiocb->context_un.rsp_iocb;
2020 
2021 	irsp = &rspiocb->iocb;
2022 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2023 		lpfc_drop_node(vport, ndlp);
2024 		return NLP_STE_FREED_NODE;
2025 	}
2026 	return ndlp->nlp_state;
2027 }
2028 
2029 static uint32_t
2030 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2031 			    struct lpfc_nodelist *ndlp,
2032 			    void *arg, uint32_t evt)
2033 {
2034 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2035 	MAILBOX_t    *mb = &pmb->u.mb;
2036 
2037 	if (!mb->mbxStatus) {
2038 		/* SLI4 ports have preallocated logical rpis. */
2039 		if (vport->phba->sli_rev < LPFC_SLI_REV4)
2040 			ndlp->nlp_rpi = mb->un.varWords[0];
2041 		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2042 	} else {
2043 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2044 			lpfc_drop_node(vport, ndlp);
2045 			return NLP_STE_FREED_NODE;
2046 		}
2047 	}
2048 	return ndlp->nlp_state;
2049 }
2050 
2051 static uint32_t
2052 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2053 			void *arg, uint32_t evt)
2054 {
2055 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2056 
2057 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2058 		spin_lock_irq(shost->host_lock);
2059 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2060 		spin_unlock_irq(shost->host_lock);
2061 		return ndlp->nlp_state;
2062 	}
2063 	lpfc_drop_node(vport, ndlp);
2064 	return NLP_STE_FREED_NODE;
2065 }
2066 
2067 static uint32_t
2068 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2069 			   void *arg, uint32_t evt)
2070 {
2071 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2072 
2073 	/* Don't do anything that will mess up processing of the
2074 	 * previous RSCN.
2075 	 */
2076 	if (vport->fc_flag & FC_RSCN_DEFERRED)
2077 		return ndlp->nlp_state;
2078 
2079 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
2080 	spin_lock_irq(shost->host_lock);
2081 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2082 	spin_unlock_irq(shost->host_lock);
2083 	return ndlp->nlp_state;
2084 }
2085 
2086 
2087 /* This next section defines the NPort Discovery State Machine */
2088 
2089 /* There are 4 different doubly linked lists nodelist entries can reside on.
2090  * The plogi list and adisc list are used when Link Up discovery or RSCN
2091  * processing is needed. Each list holds the nodes that we will send PLOGI
2092  * or ADISC on. These lists will keep track of what nodes will be affected
2093  * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
2094  * The unmapped_list will contain all nodes that we have successfully logged
2095  * into at the Fibre Channel level. The mapped_list will contain all nodes
2096  * that are mapped FCP targets.
2097  */
2098 /*
2099  * The bind list is a list of undiscovered (potentially non-existent) nodes
2100  * that we have saved binding information on. This information is used when
2101  * nodes transition from the unmapped to the mapped list.
2102  */
2103 /* For UNUSED_NODE state, the node has just been allocated.
2104  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2105  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2106  * and put on the unmapped list. For ADISC processing, the node is taken off
2107  * the ADISC list and placed on either the mapped or unmapped list (depending
2108  * on its previous state). Once on the unmapped list, a PRLI is issued and the
2109  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2110  * changed to UNMAPPED_NODE. If the completion indicates a mapped
2111  * node, the node is taken off the unmapped list. The binding list is checked
2112  * for a valid binding, or a binding is automatically assigned. If binding
2113  * assignment is unsuccessful, the node is left on the unmapped list. If
2114  * binding assignment is successful, the associated binding list entry (if
2115  * any) is removed, and the node is placed on the mapped list.
2116  */
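/*
 * In short, the typical progression for a discovered node, per the comment
 * above and the PRLI completion handling earlier in this file, is:
 *
 *   UNUSED_NODE -> PLOGI_ISSUE -> REG_LOGIN_ISSUE -> UNMAPPED_NODE
 *       -> PRLI_ISSUE -> MAPPED_NODE   (PRLI indicates an FCP target)
 *                     -> UNMAPPED_NODE (otherwise)
 */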
2117 /*
2118  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2119  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2120  * expire, all affected nodes will receive a DEVICE_RM event.
2121  */
2122 /*
2123  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2124  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2125  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2126  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2127  * we will first process the ADISC list.  32 entries are processed initially and
2128  * ADISC is initiated for each one.  Completions / Events for each node are
2129  * funneled through the state machine.  As each node finishes ADISC processing, it
2130  * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2131  * waiting, and the ADISC list count is identically 0, then we are done. For
2132  * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2133  * can issue a CLEAR_LA and re-enable Link Events. Next we will process the PLOGI
2134  * list.  32 entries are processed initially and PLOGI is initiated for each one.
2135  * Completions / Events for each node are funneled through the state machine.  As
2136  * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2137  * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2138  * identically 0, then we are done. We have now completed discovery / RSCN
2139  * handling. Upon completion, ALL nodes should be on either the mapped or
2140  * unmapped lists.
2141  */
2142 
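/*
 * The jump table below is laid out state-major: for each of the
 * NLP_STE_MAX_STATE node states there are NLP_EVT_MAX_EVENT consecutive
 * entries, one per event.  lpfc_disc_state_machine() below selects the
 * action routine with:
 *
 *   func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
 *
 * so adding a new state or event requires updating every group in lockstep.
 */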
2143 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2144      (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2145 	/* Action routine                  Event       Current State  */
2146 	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
2147 	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
2148 	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
2149 	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
2150 	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
2151 	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
2152 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2153 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2154 	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
2155 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2156 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2157 	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
2158 	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
2159 
2160 	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
2161 	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
2162 	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
2163 	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
2164 	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
2165 	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
2166 	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
2167 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2168 	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
2169 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2170 	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2171 	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
2172 	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
2173 
2174 	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
2175 	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
2176 	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
2177 	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
2178 	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
2179 	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
2180 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2181 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2182 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2183 	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
2184 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2185 	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
2186 	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
2187 
2188 	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
2189 	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
2190 	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
2191 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
2192 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
2193 	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
2194 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2195 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2196 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2197 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2198 	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
2199 	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
2200 	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2201 
2202 	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
2203 	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
2204 	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
2205 	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
2206 	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
2207 	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
2208 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2209 	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
2210 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2211 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2212 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2213 	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2214 	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2215 
2216 	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2217 	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2218 	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2219 	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2220 	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2221 	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2222 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2223 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2224 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2225 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2226 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2227 	lpfc_disc_illegal,		/* DEVICE_RM       */
2228 	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2229 
2230 	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2231 	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2232 	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2233 	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2234 	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2235 	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2236 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2237 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2238 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2239 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2240 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2241 	lpfc_disc_illegal,		/* DEVICE_RM       */
2242 	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2243 
2244 	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2245 	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2246 	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2247 	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2248 	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2249 	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2250 	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2251 	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2252 	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2253 	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2254 	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2255 	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2256 	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2257 };
2258 
2259 int
2260 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2261 			void *arg, uint32_t evt)
2262 {
2263 	uint32_t cur_state, rc;
2264 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2265 			 uint32_t);
2266 	uint32_t got_ndlp = 0;
2267 
2268 	if (lpfc_nlp_get(ndlp))
2269 		got_ndlp = 1;
2270 
2271 	cur_state = ndlp->nlp_state;
2272 
2273 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2274 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2275 			 "0211 DSM in event x%x on NPort x%x in "
2276 			 "state %d Data: x%x\n",
2277 			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2278 
2279 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2280 		 "DSM in:          evt:%d ste:%d did:x%x",
2281 		evt, cur_state, ndlp->nlp_DID);
2282 
2283 	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2284 	rc = (func) (vport, ndlp, arg, evt);
2285 
2286 	/* DSM out state <rc> on NPort <nlp_DID> */
2287 	if (got_ndlp) {
2288 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2289 			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2290 			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2291 
2292 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2293 			"DSM out:         ste:%d did:x%x flg:x%x",
2294 			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2295 		/* Decrement the ndlp reference count held for this function */
2296 		lpfc_nlp_put(ndlp);
2297 	} else {
2298 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2299 			"0213 DSM out state %d on NPort free\n", rc);
2300 
2301 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2302 			"DSM out:         ste:%d did:x%x flg:x%x",
2303 			rc, 0, 0);
2304 	}
2305 
2306 	return rc;
2307 }
2308
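/*
 * Usage sketch (illustrative only, not code from this file): ELS receive and
 * completion paths feed events into this machine.  A received PLOGI would
 * typically be dispatched as
 *
 *   lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
 *
 * where 'elsiocb' is a placeholder name for the struct lpfc_iocbq carrying
 * the incoming command and NLP_EVT_RCV_PLOGI is one of the discovery event
 * codes from lpfc_disc.h.  The return value is the node's resulting state,
 * or NLP_STE_FREED_NODE if the action routine released the node.
 */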