xref: /linux/drivers/scsi/lpfc/lpfc_hbadisc.c (revision 13abf8130139c2ccd4962a7e5a8902be5e6cb5a7)
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_hba *);

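/*
 * Process a nodev timeout event posted to the worker thread: if the
 * NLP_NODEV_TMO flag is still set, abort any outstanding FCP I/O to
 * the target, log the timeout, and run the node through the discovery
 * state machine with an NLP_EVT_DEVICE_RM event.
 */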
static void
lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	int warn_on = 0;

	spin_lock_irq(phba->host->host_lock);
	if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
		spin_unlock_irq(phba->host->host_lock);
		return;
	}

	ndlp->nlp_flag &= ~NLP_NODEV_TMO;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	spin_unlock_irq(phba->host->host_lock);

	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0203 Nodev timeout on NPort x%x "
				"Data: x%x x%x x%x\n",
				phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d:0204 Nodev timeout on NPort x%x "
				"Data: x%x x%x x%x\n",
				phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
	return;
}

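/*
 * Drain the HBA work queue: dequeue each pending event under the host
 * lock, drop the lock, and dispatch on the event type.  Nodev-timeout
 * and ELS-retry events are embedded in the nodelist entry and so are
 * not freed here; ONLINE/OFFLINE events are freed after completion.
 */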
static void
lpfc_work_list_done(struct lpfc_hba * phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;

	spin_lock_irq(phba->host->host_lock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(phba->host->host_lock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_NODEV_TMO:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_process_nodev_timeout(phba, ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ONLINE:
			*(int *)(evtp->evt_arg1) = lpfc_online(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			*(int *)(evtp->evt_arg1) = lpfc_offline(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);
}

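/*
 * Main body of the worker thread: handle any latched host attention
 * conditions (error, mailbox, link attention), run the timeout
 * handlers flagged in work_hba_events, service deferred slow-path
 * ring events and re-enable the ring interrupts, then complete the
 * queued work list.
 */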
static void
lpfc_work_done(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	int i;
	uint32_t ha_copy;
	uint32_t control;
	uint32_t work_hba_events;

	spin_lock_irq(phba->host->host_lock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	work_hba_events = phba->work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	if (work_hba_events & WORKER_DISC_TMO)
		lpfc_disc_timeout_handler(phba);

	if (work_hba_events & WORKER_ELS_TMO)
		lpfc_els_timeout_handler(phba);

	if (work_hba_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);

	if (work_hba_events & WORKER_FDMI_TMO)
		lpfc_fdmi_tmo_handler(phba);

	spin_lock_irq(phba->host->host_lock);
	phba->work_hba_events &= ~work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
		pring = &phba->sli.ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(ha_copy &
								 HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
			/*
			 * Turn on Ring interrupts
			 */
			spin_lock_irq(phba->host->host_lock);
			control = readl(phba->HCregaddr);
			control |= (HC_R0INT_ENA << i);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	lpfc_work_list_done(phba);
}

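/*
 * Wait condition for the worker thread: returns 1 when there is host
 * attention work, a pending hba event, a queued work-list entry, or a
 * stop request for the kthread.
 */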
static int
check_work_wait_done(struct lpfc_hba *phba)
{
	spin_lock_irq(phba->host->host_lock);
	if (phba->work_ha ||
	    phba->work_hba_events ||
	    (!list_empty(&phba->work_list)) ||
	    kthread_should_stop()) {
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	} else {
		spin_unlock_irq(phba->host->host_lock);
		return 0;
	}
}

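/*
 * Entry point for the lpfc worker kthread.  Registers its wait queue
 * with the hba, then loops calling lpfc_work_done() each time the
 * wait condition above is satisfied, until the thread is stopped.
 */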
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;

	while (1) {
		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);
	}
	phba->work_wait = NULL;
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	/* Queue the event and wake the worker under the host lock so the
	 * worker thread never sees a half-linked entry.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irq(phba->host->host_lock);

	return 1;
}

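/*
 * Handle a link-down event: mark the link down, unregister any
 * firmware default RPIs, flush outstanding RSCN and ELS activity,
 * and walk every nodelist so each node either gets a DEVICE_RECOVERY
 * event or, for fabric nodes other than Fabric_DID, is removed
 * outright.  Also resets the point-to-point state and cancels the
 * discovery timer.
 */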
int
lpfc_linkdown(struct lpfc_hba * phba)
{
	struct lpfc_sli       *psli;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	struct list_head *listp;
	struct list_head *node_list[7];
	LPFC_MBOXQ_t     *mb;
	int               rc, i;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_DOWN;
	spin_unlock_irq(phba->host->host_lock);

	/* Clean up any firmware default rpi's */
	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
		lpfc_unreg_did(phba, 0xffffffff, mb);
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(phba);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	/* Issue a LINK DOWN event to all nodes */
	node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_nlpunmap_list;
	node_list[3] = &phba->fc_prli_list;
	node_list[4] = &phba->fc_reglogin_list;
	node_list[5] = &phba->fc_adisc_list;
	node_list[6] = &phba->fc_plogi_list;
	for (i = 0; i < 7; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;

		list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
			/* Fabric nodes are not handled thru state machine for
			   link down */
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Remove ALL Fabric nodes except Fabric_DID */
				if (ndlp->nlp_DID != Fabric_DID) {
					/* Take it off current list and free */
					lpfc_nlp_list(phba, ndlp,
						NLP_NO_LIST);
				}
			} else {
				rc = lpfc_disc_state_machine(phba, ndlp, NULL,
						     NLP_EVT_DEVICE_RECOVERY);

				/* Check config parameter use-adisc or FCP-2 */
				if ((rc != NLP_STE_FREED_NODE) &&
					(phba->cfg_use_adisc == 0) &&
					!(ndlp->nlp_fcp_info &
						NLP_FCP_2_DEVICE)) {
					/* We know we will have to relogin, so
					 * unreglogin the rpi right now to fail
					 * any outstanding I/Os quickly.
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->fc_flag & FC_PT2PT) {
		phba->fc_myDID = 0;
		if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox
			    (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(phba->host->host_lock);
	}
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LBIT;
	spin_unlock_irq(phba->host->host_lock);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(phba);

	/* Must process IOCBs on all rings to handle ABORTed I/Os */
	return (0);
}

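/*
 * Handle a link-up event: mark the link up, reset the RSCN and
 * point-to-point state, flag discovery as active, and discard any
 * stale fabric logins plus everything left on the unused list.
 */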
static int
lpfc_linkup(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_UP;
	phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			   FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	phba->fc_flag |= FC_NDISC_ACTIVE;
	phba->fc_ns_retry = 0;
	spin_unlock_irq(phba->host->host_lock);

	/*
	 * Clean up old Fabric NLP_FABRIC logins.
	 */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
				nlp_listp) {
		if (ndlp->nlp_DID == Fabric_DID) {
			/* Take it off current list and free */
			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	uint32_t control;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->fc_flag & FC_ABORT_DISCOVERY)
		goto out;

	phba->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (phba->fc_npr_cnt) {
		lpfc_els_disc_plogi(phba);
	}

	if (!phba->num_disc_nodes) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
	}

	phba->hba_state = LPFC_HBA_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba,
			 KERN_INFO,
			 LOG_DISCOVERY,
			 "%d:0225 Device Discovery completes\n",
			 phba->brd_no);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_ABORT_DISCOVERY;
	if (phba->fc_flag & FC_ESTABLISH_LINK) {
		phba->fc_flag &= ~FC_ESTABLISH_LINK;
	}
	spin_unlock_irq(phba->host->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(phba);

	/* turn on Link Attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;
}

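/*
 * Completion handler for the CONFIG_LINK mailbox command.  On
 * success it either waits for FAN (public loop without the L bit)
 * or kicks off discovery with a FLOGI; on error it forces the link
 * down and falls back to issuing a CLEAR_LA.
 */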
static void
lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Check for error */
	if (mb->mbxStatus) {
		/* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0306 CONFIG_LINK mbxStatus error x%x "
				"HBA state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		lpfc_linkdown(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* If we are public loop and L bit was set */
			if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
			    !(phba->fc_flag & FC_LBIT)) {
				/* Need to wait for FAN - use discovery timer
				 * for timeout.  hba_state is identically
				 * LPFC_LOCAL_CFG_LINK while waiting for FAN
				 */
				lpfc_set_disctmo(phba);
				mempool_free(pmb, phba->mbox_mem_pool);
				return;
			}
		}

		/* Start discovery by sending a FLOGI. hba_state is identically
		 * LPFC_FLOGI while waiting for FLOGI cmpl
		 */
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}
	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

out:
	/* CONFIG_LINK bad hba state <hba_state> */
	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_DISCOVERY,
			"%d:0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, phba->hba_state);

	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->ip_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
				~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}

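/*
 * Completion handler for the READ_SPARAM mailbox command: copies the
 * service parameters (including node and port names) into the hba,
 * or on error takes the link down and issues a CLEAR_LA.
 */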
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		lpfc_linkdown(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	memcpy((uint8_t *) &phba->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	memcpy((uint8_t *) &phba->fc_nodename,
	       (uint8_t *) &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy((uint8_t *) &phba->fc_portname,
	       (uint8_t *) &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->ip_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}

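/*
 * Act on a link-up attention: record the link speed and topology,
 * derive our DID (and optionally dump the ALPA map) for loop
 * topologies, then transition to link-up and issue READ_SPARAM and
 * CONFIG_LINK mailbox commands.
 */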
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	int i;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(phba->host->host_lock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* Get Loop Map information */
		if (la->il)
			phba->fc_flag |= FC_LBIT;

		phba->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;

				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
						KERN_WARNING,
						LOG_LINK_EVENT,
						"%d:1304 Link Up Event "
						"ALPA map Data: x%x "
						"x%x x%x x%x\n",
						phba->brd_no,
						un.pa.wd1, un.pa.wd2,
						un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		phba->fc_myDID = phba->fc_pref_DID;
		phba->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox);
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		lpfc_sli_issue_mbox(phba, sparam_mbox,
						(MBX_NOWAIT | MBX_STOP_IOCB));
	}

	if (cfglink_mbox) {
		phba->hba_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
		lpfc_sli_issue_mbox(phba, cfglink_mbox,
						(MBX_NOWAIT | MBX_STOP_IOCB));
	}
}

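/*
 * Take the link down and re-enable Link Attention interrupts so the
 * next attention can be processed; no CLEAR_LA is needed here.
 */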
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no,
				mb->mbxStatus, phba->hba_state);
		lpfc_mbx_issue_link_down(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	     (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP) {
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
		}
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->hba_state, phba->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *ndlp_fdmi;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		mempool_free(ndlp, phba->nlp_mem_pool);

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	if (ndlp->nlp_rpi != 0)
		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
	ndlp->nlp_rpi = mb->un.varWords[0];
	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		/* This NPort has been assigned an NPort_ID by the fabric as a
		 * result of the completed fabric login.  Issue a State Change
		 * Registration (SCR) ELS request to the fabric controller
		 * (SCR_DID) so that this NPort gets RSCN events from the
		 * fabric.
		 */
		lpfc_issue_els_scr(phba, SCR_DID, 0);

		/* Allocate a new node instance.  If the pool is empty, just
		 * start the discovery process and skip the Nameserver login
		 * process.  This is attempted again later on.  Otherwise, issue
		 * a Port Login (PLOGI) to the NameServer
		 */
		if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
		    == NULL) {
			lpfc_disc_start(phba);
		} else {
			lpfc_nlp_init(phba, ndlp, NameServer_DID);
			ndlp->nlp_type |= NLP_FABRIC;
			ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
			lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
			lpfc_issue_els_plogi(phba, ndlp, 0);
			if (phba->cfg_fdmi_on) {
				if ((ndlp_fdmi = mempool_alloc(
						       phba->nlp_mem_pool,
						       GFP_KERNEL))) {
					lpfc_nlp_init(phba, ndlp_fdmi,
						FDMI_DID);
					ndlp_fdmi->nlp_type |= NLP_FABRIC;
					ndlp_fdmi->nlp_state =
					    NLP_STE_PLOGI_ISSUE;
					lpfc_issue_els_plogi(phba, ndlp_fdmi,
							     0);
				}
			}
		}
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);

		/* RegLogin failed, so just use loop map to make discovery
		   list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	if (ndlp->nlp_rpi != 0)
		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
	ndlp->nlp_rpi = mb->un.varWords[0];
	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
	ndlp->nlp_type |= NLP_FABRIC;
	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);

	if (phba->hba_state < LPFC_HBA_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
	}

	phba->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		lpfc_disc_start(phba);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

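/*
 * Register a remote port with the FC transport: build the rport
 * identifiers from the node's WWNN/WWPN and DID, add the rport, and
 * cache the assigned SCSI target id and back-pointer to the node.
 */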
static void
lpfc_register_remote_port(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	uint64_t wwn;

	/* Remote port has reappeared. Re-register w/ FC transport */
	memcpy(&wwn, &ndlp->nlp_nodename, sizeof(uint64_t));
	rport_ids.node_name = be64_to_cpu(wwn);
	memcpy(&wwn, &ndlp->nlp_portname, sizeof(uint64_t));
	rport_ids.port_name = be64_to_cpu(wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	if ((rport->scsi_target_id != -1) &&
		(rport->scsi_target_id < MAX_FCP_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	rdata = rport->dd_data;
	rdata->pnode = ndlp;

	return;
}

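/*
 * Move a nodelist entry onto the requested list (or just dequeue it
 * for NLP_JUST_DQ / NLP_NO_LIST), keeping the per-list counters and
 * timers consistent, and make any required FC transport calls (rport
 * block/unblock/add) only after the node has settled on its new list.
 */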
int
lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
{
	enum { none, unmapped, mapped } rport_add = none, rport_del = none;
	struct lpfc_sli      *psli;

	psli = &phba->sli;
	/* Sanity check to ensure we are not moving to / from the same list */
	if ((nlp->nlp_flag & NLP_LIST_MASK) == list) {
		if (list != NLP_NO_LIST)
			return (0);
	}

	switch (nlp->nlp_flag & NLP_LIST_MASK) {
	case NLP_NO_LIST: /* Not on any list */
		break;
	case NLP_UNUSED_LIST:
		phba->fc_unused_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PLOGI_LIST:
		phba->fc_plogi_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_ADISC_LIST:
		phba->fc_adisc_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_REGLOGIN_LIST:
		phba->fc_reglogin_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PRLI_LIST:
		phba->fc_prli_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_UNMAPPED_LIST:
		phba->fc_unmap_cnt--;
		list_del(&nlp->nlp_listp);
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		nlp->nlp_type &= ~NLP_FC_NODE;
		spin_unlock_irq(phba->host->host_lock);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = unmapped;
		break;
	case NLP_MAPPED_LIST:
		phba->fc_map_cnt--;
		list_del(&nlp->nlp_listp);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = mapped;
		break;
	case NLP_NPR_LIST:
		phba->fc_npr_cnt--;
		list_del(&nlp->nlp_listp);
		/* Stop delay tmo if taking node off NPR list */
		if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
		   (list != NLP_NPR_LIST)) {
			spin_lock_irq(phba->host->host_lock);
			nlp->nlp_flag &= ~NLP_DELAY_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_delayfunc);
			if (!list_empty(&nlp->els_retry_evt.evt_listp))
				list_del_init(&nlp->els_retry_evt.evt_listp);
		}
		break;
	}

	spin_lock_irq(phba->host->host_lock);
	nlp->nlp_flag &= ~NLP_LIST_MASK;
	spin_unlock_irq(phba->host->host_lock);

	/* Add NPort <did> to <num> list */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_NODE,
			"%d:0904 Add NPort x%x to %d list Data: x%x\n",
			phba->brd_no,
			nlp->nlp_DID, list, nlp->nlp_flag);

	switch (list) {
	case NLP_NO_LIST: /* No list, just remove it */
		lpfc_nlp_remove(phba, nlp);
		break;
	case NLP_UNUSED_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the unused list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
		phba->fc_unused_cnt++;
		break;
	case NLP_PLOGI_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the plogi list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
		phba->fc_plogi_cnt++;
		break;
	case NLP_ADISC_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the adisc list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
		phba->fc_adisc_cnt++;
		break;
	case NLP_REGLOGIN_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the reglogin list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
		phba->fc_reglogin_cnt++;
		break;
	case NLP_PRLI_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the prli list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
		phba->fc_prli_cnt++;
		break;
	case NLP_UNMAPPED_LIST:
		rport_add = unmapped;
		/* ensure all vestiges of "mapped" significance are gone */
		nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the unmap list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
		phba->fc_unmap_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			spin_lock_irq(phba->host->host_lock);
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_tmofunc);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.
						evt_listp);
		}
		nlp->nlp_type |= NLP_FC_NODE;
		break;
	case NLP_MAPPED_LIST:
		rport_add = mapped;
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the map list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
		phba->fc_map_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			spin_lock_irq(phba->host->host_lock);
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_tmofunc);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.
						evt_listp);
		}
		break;
	case NLP_NPR_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the npr list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
		phba->fc_npr_cnt++;

		/*
		 * Sanity check for Fabric entity.
		 * Set nodev_tmo for NPR state, for Fabric use 1 sec.
		 */
		if (nlp->nlp_type & NLP_FABRIC) {
			mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
		} else {
			mod_timer(&nlp->nlp_tmofunc,
			    jiffies + HZ * phba->cfg_nodev_tmo);
		}
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= NLP_NODEV_TMO;
		nlp->nlp_flag &= ~NLP_RCV_PLOGI;
		spin_unlock_irq(phba->host->host_lock);
		break;
	case NLP_JUST_DQ:
		break;
	}

	/*
	 * We make all the calls into the transport after we have
	 * moved the node between lists. This is so that we don't
	 * release the lock while in-between lists.
	 */

	/* Don't upcall midlayer if we're unloading */
	if (!(phba->fc_flag & FC_UNLOADING)) {
		/*
		 * We revalidate the rport pointer as the "add" function
		 * may have removed the remote port.
		 */
		if ((rport_del != none) && nlp->rport)
			fc_remote_port_block(nlp->rport);

		if (rport_add != none) {
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			if (!nlp->rport)
				lpfc_register_remote_port(phba, nlp);
			else
				fc_remote_port_unblock(nlp->rport);

			/*
			 * if we added to Mapped list, but the remote port
			 * registration failed or assigned a target id outside
			 * our presentable range - move the node to the
			 * Unmapped List
			 */
			if ((rport_add == mapped) &&
			    ((!nlp->rport) ||
			     (nlp->rport->scsi_target_id == -1) ||
			     (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
				nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
				spin_lock_irq(phba->host->host_lock);
				nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
				spin_unlock_irq(phba->host->host_lock);
				lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
			}
		}
	}
	return (0);
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_hba * phba)
{
	uint32_t tmo;

	tmo = ((phba->fc_ratov * 2) + 1);

	mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0247 Start Discovery Timer state x%x "
			"Data: x%x x%lx x%x x%x\n",
			phba->brd_no,
			phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_hba * phba)
{
	/* Turn off discovery timer if it's running */
	if (phba->fc_flag & FC_DISC_TMO) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&phba->fc_disctmo);
		phba->work_hba_events &= ~WORKER_DISC_TMO;
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return (0);
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return (1);
		case CMD_ELS_REQUEST64_CR:
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return (1);
		}
	} else if (pring->ringno == psli->ip_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		  (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return (0);
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return (1);
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return (0);
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(phba->host->host_lock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp
				     (phba, pring, iocb, ndlp))) {
					/* It matches, so dequeue and call compl
					   with an error */
					list_del(&iocb->list);
					pring->txq_cnt--;
					if (iocb->iocb_cmpl) {
						icmd = &iocb->iocb;
						icmd->ulpStatus =
						    IOSTAT_LOCAL_REJECT;
						icmd->un.ulpWord[4] =
						    IOERR_SLI_ABORTED;
						spin_unlock_irq(phba->host->
								host_lock);
						(iocb->iocb_cmpl) (phba,
								   iocb, iocb);
						spin_lock_irq(phba->host->
							      host_lock);
					} else {
						list_add_tail(&iocb->list,
							&phba->lpfc_iocb_list);
					}
				}
			}
			spin_unlock_irq(phba->host->host_lock);
		}
	}
	return (0);
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox
				    (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t       *mb;
	LPFC_MBOXQ_t       *nextmb;
	struct lpfc_dmabuf *mp;
	struct fc_rport *rport;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0900 Cleanup node for NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);

	/*
	 * if unloading the driver - just leave the remote port in place.
	 * The driver unload will force the attached devices to detach
	 * and flush caches w/o generating flush errors.
	 */
	if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
		rport = ndlp->rport;
		ndlp->rport = NULL;
		fc_remote_port_unblock(rport);
		fc_remote_port_delete(rport);
		ndlp->nlp_sid = NLP_NO_SID;
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	lpfc_els_abort(phba, ndlp, 0);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_TMO | NLP_DELAY_TMO);
	spin_unlock_irq(phba->host->host_lock);
	del_timer_sync(&ndlp->nlp_tmofunc);

	del_timer_sync(&ndlp->nlp_delayfunc);

	if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
		list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
		list_del_init(&ndlp->els_retry_evt.evt_listp);

	lpfc_unreg_rpi(phba, ndlp);

	return (0);
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
int
lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	if (ndlp->nlp_flag & NLP_NODEV_TMO) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag &= ~NLP_NODEV_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&ndlp->nlp_tmofunc);
		if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
			list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
	}

	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag &= ~NLP_DELAY_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&ndlp->nlp_delayfunc);
		if (!list_empty(&ndlp->els_retry_evt.evt_listp))
			list_del_init(&ndlp->els_retry_evt.evt_listp);
	}

	if (ndlp->nlp_disc_refcnt) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_REMOVE;
		spin_unlock_irq(phba->host->host_lock);
	} else {
		lpfc_freenode(phba, ndlp);
		mempool_free(ndlp, phba->nlp_mem_pool);
	}
	return (0);
}

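/*
 * Match a nodelist entry against a DID.  Besides a direct match, a
 * match is allowed when one side carries a zero area/domain (a
 * private loop address) but has the same ALPA as the other side
 * within our own domain and area.
 */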
static int
lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	if (did == Bcast_DID)
		return (0);

	if (ndlp->nlp_DID == 0) {
		return (0);
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return (1);

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return (0);
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return (1);
			}
			return (0);
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return (1);
			}
		}
	}
	return (0);
}

1646 /* Search for a nodelist entry on a specific list */
1647 struct lpfc_nodelist *
1648 lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1649 {
1650 	struct lpfc_nodelist *ndlp, *next_ndlp;
1651 	uint32_t data1;
1652 
1653 	if (order & NLP_SEARCH_UNMAPPED) {
1654 		list_for_each_entry_safe(ndlp, next_ndlp,
1655 					 &phba->fc_nlpunmap_list, nlp_listp) {
1656 			if (lpfc_matchdid(phba, ndlp, did)) {
1657 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
1658 					 ((uint32_t) ndlp->nlp_xri << 16) |
1659 					 ((uint32_t) ndlp->nlp_type << 8) |
1660 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
1661 				/* FIND node DID unmapped */
1662 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1663 						"%d:0929 FIND node DID unmapped"
1664 						" Data: x%p x%x x%x x%x\n",
1665 						phba->brd_no,
1666 						ndlp, ndlp->nlp_DID,
1667 						ndlp->nlp_flag, data1);
1668 				return (ndlp);
1669 			}
1670 		}
1671 	}
1672 
1673 	if (order & NLP_SEARCH_MAPPED) {
1674 		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1675 					nlp_listp) {
1676 			if (lpfc_matchdid(phba, ndlp, did)) {
1677 
1678 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
1679 					 ((uint32_t) ndlp->nlp_xri << 16) |
1680 					 ((uint32_t) ndlp->nlp_type << 8) |
1681 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
1682 				/* FIND node DID mapped */
1683 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1684 						"%d:0930 FIND node DID mapped "
1685 						"Data: x%p x%x x%x x%x\n",
1686 						phba->brd_no,
1687 						ndlp, ndlp->nlp_DID,
1688 						ndlp->nlp_flag, data1);
1689 				return (ndlp);
1690 			}
1691 		}
1692 	}
1693 
1694 	if (order & NLP_SEARCH_PLOGI) {
1695 		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1696 					nlp_listp) {
1697 			if (lpfc_matchdid(phba, ndlp, did)) {
1698 
1699 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
1700 					 ((uint32_t) ndlp->nlp_xri << 16) |
1701 					 ((uint32_t) ndlp->nlp_type << 8) |
1702 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
1703 				/* LOG change to PLOGI */
1704 				/* FIND node DID plogi */
1705 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1706 						"%d:0908 FIND node DID plogi "
1707 						"Data: x%p x%x x%x x%x\n",
1708 						phba->brd_no,
1709 						ndlp, ndlp->nlp_DID,
1710 						ndlp->nlp_flag, data1);
1711 				return (ndlp);
1712 			}
1713 		}
1714 	}
1715 
1716 	if (order & NLP_SEARCH_ADISC) {
1717 		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1718 					nlp_listp) {
1719 			if (lpfc_matchdid(phba, ndlp, did)) {
1720 
1721 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
1722 					 ((uint32_t) ndlp->nlp_xri << 16) |
1723 					 ((uint32_t) ndlp->nlp_type << 8) |
1724 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
1725 				/* LOG change to ADISC */
1726 				/* FIND node DID adisc */
1727 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1728 						"%d:0931 FIND node DID adisc "
1729 						"Data: x%p x%x x%x x%x\n",
1730 						phba->brd_no,
1731 						ndlp, ndlp->nlp_DID,
1732 						ndlp->nlp_flag, data1);
1733 				return (ndlp);
1734 			}
1735 		}
1736 	}
1737 
1738 	if (order & NLP_SEARCH_REGLOGIN) {
1739 		list_for_each_entry_safe(ndlp, next_ndlp,
1740 					 &phba->fc_reglogin_list, nlp_listp) {
1741 			if (lpfc_matchdid(phba, ndlp, did)) {
1742 
1743 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
1744 					 ((uint32_t) ndlp->nlp_xri << 16) |
1745 					 ((uint32_t) ndlp->nlp_type << 8) |
1746 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
1747 				/* LOG change to REGLOGIN */
1748 				/* FIND node DID reglogin */
1749 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1750 						"%d:0931 FIND node DID reglogin"
1751 						" Data: x%p x%x x%x x%x\n",
1752 						phba->brd_no,
1753 						ndlp, ndlp->nlp_DID,
1754 						ndlp->nlp_flag, data1);
1755 				return (ndlp);
1756 			}
1757 		}
1758 	}
1759 
1760 	if (order & NLP_SEARCH_PRLI) {
1761 		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1762 					nlp_listp) {
1763 			if (lpfc_matchdid(phba, ndlp, did)) {
1764 
1765 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
1766 					 ((uint32_t) ndlp->nlp_xri << 16) |
1767 					 ((uint32_t) ndlp->nlp_type << 8) |
1768 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
1769 				/* LOG change to PRLI */
1770 				/* FIND node DID prli */
1771 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1772 						"%d:0931 FIND node DID prli "
1773 						"Data: x%p x%x x%x x%x\n",
1774 						phba->brd_no,
1775 						ndlp, ndlp->nlp_DID,
1776 						ndlp->nlp_flag, data1);
1777 				return (ndlp);
1778 			}
1779 		}
1780 	}
1781 
1782 	if (order & NLP_SEARCH_NPR) {
1783 		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1784 					nlp_listp) {
1785 			if (lpfc_matchdid(phba, ndlp, did)) {
1786 
1787 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
1788 					 ((uint32_t) ndlp->nlp_xri << 16) |
1789 					 ((uint32_t) ndlp->nlp_type << 8) |
1790 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
1791 				/* LOG change to NPR */
1792 				/* FIND node DID npr */
1793 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1794 						"%d:0931 FIND node DID npr "
1795 						"Data: x%p x%x x%x x%x\n",
1796 						phba->brd_no,
1797 						ndlp, ndlp->nlp_DID,
1798 						ndlp->nlp_flag, data1);
1799 				return (ndlp);
1800 			}
1801 		}
1802 	}
1803 
1804 	if (order & NLP_SEARCH_UNUSED) {
1805 		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1806 					nlp_listp) {
1807 			if (lpfc_matchdid(phba, ndlp, did)) {
1808 
1809 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
1810 					 ((uint32_t) ndlp->nlp_xri << 16) |
1811 					 ((uint32_t) ndlp->nlp_type << 8) |
1812 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
1813 				/* LOG change to UNUSED */
1814 				/* FIND node DID unused */
1815 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1816 						"%d:0931 FIND node DID unused "
1817 						"Data: x%p x%x x%x x%x\n",
1818 						phba->brd_no,
1819 						ndlp, ndlp->nlp_DID,
1820 						ndlp->nlp_flag, data1);
1821 				return (ndlp);
1822 			}
1823 		}
1824 	}
1825 
1826 	/* FIND node did <did> NOT FOUND */
1827 	lpfc_printf_log(phba,
1828 			KERN_INFO,
1829 			LOG_NODE,
1830 			"%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1831 			phba->brd_no, did, order);
1832 
1833 	/* no match found */
1834 	return NULL;
1835 }
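
/*
 * Usage sketch for lpfc_findnode_did(): the search order is a bitmask,
 * so callers can restrict the lookup to a subset of the per-state
 * lists. A minimal, illustrative example (the DID value is made up):
 *
 *	struct lpfc_nodelist *ndlp;
 *
 *	ndlp = lpfc_findnode_did(phba,
 *				 NLP_SEARCH_MAPPED | NLP_SEARCH_UNMAPPED,
 *				 0x010200);
 *
 * The data1 word in the log messages above packs, from the top byte
 * down: nlp_state, nlp_xri, nlp_type and (nlp_rpi & 0xff).
 */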
1836 
1837 struct lpfc_nodelist *
1838 lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1839 {
1840 	struct lpfc_nodelist *ndlp;
1841 	uint32_t flg;
1842 
1843 	if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
1844 		if ((phba->hba_state == LPFC_HBA_READY) &&
1845 		   ((lpfc_rscn_payload_check(phba, did) == 0)))
1846 			return NULL;
1847 		ndlp = (struct lpfc_nodelist *)
1848 		     mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1849 		if (!ndlp)
1850 			return NULL;
1851 		lpfc_nlp_init(phba, ndlp, did);
1852 		ndlp->nlp_state = NLP_STE_NPR_NODE;
1853 		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1854 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1855 		return ndlp;
1856 	}
1857 	if ((phba->hba_state == LPFC_HBA_READY) &&
1858 	    (phba->fc_flag & FC_RSCN_MODE)) {
1859 		if (lpfc_rscn_payload_check(phba, did)) {
1860 			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1861 		}
1862 		else {
1863 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1864 			ndlp = NULL;
1865 		}
1866 	}
1867 	else {
1868 		flg = ndlp->nlp_flag & NLP_LIST_MASK;
1869 		if ((flg == NLP_ADISC_LIST) ||
1870 		    (flg == NLP_PLOGI_LIST)) {
1871 			return NULL;
1872 		}
1873 		ndlp->nlp_state = NLP_STE_NPR_NODE;
1874 		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1875 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1876 	}
1877 	return ndlp;
1878 }
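
/*
 * A compact summary of lpfc_setup_disc_node() outcomes, restated from
 * the code above (illustrative only):
 *
 *	not found, HBA ready, DID not in RSCN payload -> NULL
 *	not found otherwise     -> new node on NPR list, NLP_NPR_2B_DISC set
 *	found, RSCN mode, hit   -> node returned, NLP_NPR_2B_DISC set
 *	found, RSCN mode, miss  -> NULL, NLP_NPR_2B_DISC cleared
 *	found, on ADISC/PLOGI   -> NULL (discovery already in progress)
 *	found, any other list   -> node moved to NPR list, 2B_DISC set
 */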
1879 
1880 /* Build a list of nodes to discover based on the loopmap */
1881 void
1882 lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1883 {
1884 	int j;
1885 	uint32_t alpa, index;
1886 
1887 	if (phba->hba_state <= LPFC_LINK_DOWN) {
1888 		return;
1889 	}
1890 	if (phba->fc_topology != TOPOLOGY_LOOP) {
1891 		return;
1892 	}
1893 
1894 	/* Check for loop map present or not */
1895 	if (phba->alpa_map[0]) {
1896 		for (j = 1; j <= phba->alpa_map[0]; j++) {
1897 			alpa = phba->alpa_map[j];
1898 
1899 			if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1900 				continue;
1901 			}
1902 			lpfc_setup_disc_node(phba, alpa);
1903 		}
1904 	} else {
1905 		/* No ALPA map, so try all ALPAs */
1906 		for (j = 0; j < FC_MAXLOOP; j++) {
1907 			/* If cfg_scan_down is set, start from highest
1908 			 * ALPA (0xef) to lowest (0x1).
1909 			 */
1910 			if (phba->cfg_scan_down)
1911 				index = j;
1912 			else
1913 				index = FC_MAXLOOP - j - 1;
1914 			alpa = lpfcAlpaArray[index];
1915 			if ((phba->fc_myDID & 0xff) == alpa) {
1916 				continue;
1917 			}
1918 
1919 			lpfc_setup_disc_node(phba, alpa);
1920 		}
1921 	}
1922 	return;
1923 }
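
/*
 * The loop map consumed above stores its entry count in alpa_map[0],
 * with the ALPAs themselves in alpa_map[1..count]. A hypothetical
 * three-port loop might look like:
 *
 *	phba->alpa_map[0..3] = { 3, 0x01, 0x02, 0xEF };
 *
 * so the walk visits 0x01, 0x02 and 0xEF, skipping our own ALPA and
 * ALPA 0 (the fabric FL_Port).
 */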
1924 
1925 /* Start Link up / RSCN discovery on NPR list */
1926 void
1927 lpfc_disc_start(struct lpfc_hba * phba)
1928 {
1929 	struct lpfc_sli *psli;
1930 	LPFC_MBOXQ_t *mbox;
1931 	struct lpfc_nodelist *ndlp, *next_ndlp;
1932 	uint32_t did_changed, num_sent;
1933 	uint32_t clear_la_pending;
1934 	int rc;
1935 
1936 	psli = &phba->sli;
1937 
1938 	if (phba->hba_state <= LPFC_LINK_DOWN) {
1939 		return;
1940 	}
1941 	if (phba->hba_state == LPFC_CLEAR_LA)
1942 		clear_la_pending = 1;
1943 	else
1944 		clear_la_pending = 0;
1945 
1946 	if (phba->hba_state < LPFC_HBA_READY) {
1947 		phba->hba_state = LPFC_DISC_AUTH;
1948 	}
1949 	lpfc_set_disctmo(phba);
1950 
1951 	if (phba->fc_prevDID == phba->fc_myDID) {
1952 		did_changed = 0;
1953 	} else {
1954 		did_changed = 1;
1955 	}
1956 	phba->fc_prevDID = phba->fc_myDID;
1957 	phba->num_disc_nodes = 0;
1958 
1959 	/* Start Discovery state <hba_state> */
1960 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1961 			"%d:0202 Start Discovery hba state x%x "
1962 			"Data: x%x x%x x%x\n",
1963 			phba->brd_no, phba->hba_state, phba->fc_flag,
1964 			phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1965 
1966 	/* If our did changed, we MUST do PLOGI */
1967 	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1968 				nlp_listp) {
1969 		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1970 			if (did_changed) {
1971 				spin_lock_irq(phba->host->host_lock);
1972 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1973 				spin_unlock_irq(phba->host->host_lock);
1974 			}
1975 		}
1976 	}
1977 
1978 	/* First do ADISCs - if any */
1979 	num_sent = lpfc_els_disc_adisc(phba);
1980 
1981 	if (num_sent)
1982 		return;
1983 
1984 	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
1985 		/* If we get here, there is nothing to ADISC */
1986 		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1987 			phba->hba_state = LPFC_CLEAR_LA;
1988 			lpfc_clear_la(phba, mbox);
1989 			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
1990 			rc = lpfc_sli_issue_mbox(phba, mbox,
1991 						 (MBX_NOWAIT | MBX_STOP_IOCB));
1992 			if (rc == MBX_NOT_FINISHED) {
1993 				mempool_free(mbox, phba->mbox_mem_pool);
1994 				lpfc_disc_flush_list(phba);
1995 				psli->ring[(psli->ip_ring)].flag &=
1996 					~LPFC_STOP_IOCB_EVENT;
1997 				psli->ring[(psli->fcp_ring)].flag &=
1998 					~LPFC_STOP_IOCB_EVENT;
1999 				psli->ring[(psli->next_ring)].flag &=
2000 					~LPFC_STOP_IOCB_EVENT;
2001 				phba->hba_state = LPFC_HBA_READY;
2002 			}
2003 		}
2004 	} else {
2005 		/* Next do PLOGIs - if any */
2006 		num_sent = lpfc_els_disc_plogi(phba);
2007 
2008 		if (num_sent)
2009 			return;
2010 
2011 		if (phba->fc_flag & FC_RSCN_MODE) {
2012 			/* Check to see if more RSCNs came in while we
2013 			 * were processing this one.
2014 			 */
2015 			if ((phba->fc_rscn_id_cnt == 0) &&
2016 			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
2017 				spin_lock_irq(phba->host->host_lock);
2018 				phba->fc_flag &= ~FC_RSCN_MODE;
2019 				spin_unlock_irq(phba->host->host_lock);
2020 			}
2021 			else
2022 				lpfc_els_handle_rscn(phba);
2023 		}
2024 	}
2025 	return;
2026 }
2027 
2028 /*
2029  *  Ignore completion for all IOCBs on the tx and txcmpl queues for
2030  *  the ELS ring that match the specified nodelist.
2031  */
2032 static void
2033 lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
2034 {
2035 	struct lpfc_sli *psli;
2036 	IOCB_t     *icmd;
2037 	struct lpfc_iocbq    *iocb, *next_iocb;
2038 	struct lpfc_sli_ring *pring;
2039 	struct lpfc_dmabuf   *mp;
2040 
2041 	psli = &phba->sli;
2042 	pring = &psli->ring[LPFC_ELS_RING];
2043 
2044 	/* Clean up iocbs that match this ndlp on the txq and txcmplq.
2045 	 * First check the txq.
2046 	 */
2047 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2048 		if (iocb->context1 != ndlp) {
2049 			continue;
2050 		}
2051 		icmd = &iocb->iocb;
2052 		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2053 		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2054 
2055 			list_del(&iocb->list);
2056 			pring->txq_cnt--;
2057 			lpfc_els_free_iocb(phba, iocb);
2058 		}
2059 	}
2060 
2061 	/* Next check the txcmplq */
2062 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2063 		if (iocb->context1 != ndlp) {
2064 			continue;
2065 		}
2066 		icmd = &iocb->iocb;
2067 		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2068 		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2069 
2070 			iocb->iocb_cmpl = NULL;
2071 			/* context2 = cmd, context2->next = rsp, context3 =
2072 			   bpl */
2073 			if (iocb->context2) {
2074 				/* Free the response IOCB before handling the
2075 				   command. */
2076 
2077 				mp = (struct lpfc_dmabuf *) (iocb->context2);
2078 				mp = list_get_first(&mp->list,
2079 						    struct lpfc_dmabuf,
2080 						    list);
2081 				if (mp) {
2082 					/* Delay before releasing rsp buffer to
2083 					 * give UNREG mbox a chance to take
2084 					 * effect.
2085 					 */
2086 					list_add(&mp->list,
2087 						&phba->freebufList);
2088 				}
2089 				lpfc_mbuf_free(phba,
2090 					       ((struct lpfc_dmabuf *)
2091 						iocb->context2)->virt,
2092 					       ((struct lpfc_dmabuf *)
2093 						iocb->context2)->phys);
2094 				kfree(iocb->context2);
2095 			}
2096 
2097 			if (iocb->context3) {
2098 				lpfc_mbuf_free(phba,
2099 					       ((struct lpfc_dmabuf *)
2100 						iocb->context3)->virt,
2101 					       ((struct lpfc_dmabuf *)
2102 						iocb->context3)->phys);
2103 				kfree(iocb->context3);
2104 			}
2105 		}
2106 	}
2107 
2108 	return;
2109 }
2110 
2111 void
2112 lpfc_disc_flush_list(struct lpfc_hba * phba)
2113 {
2114 	struct lpfc_nodelist *ndlp, *next_ndlp;
2115 
2116 	if (phba->fc_plogi_cnt) {
2117 		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
2118 					nlp_listp) {
2119 			lpfc_free_tx(phba, ndlp);
2120 			lpfc_nlp_remove(phba, ndlp);
2121 		}
2122 	}
2123 	if (phba->fc_adisc_cnt) {
2124 		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2125 					nlp_listp) {
2126 			lpfc_free_tx(phba, ndlp);
2127 			lpfc_nlp_remove(phba, ndlp);
2128 		}
2129 	}
2130 	return;
2131 }
2132 
2133 /*****************************************************************************/
2134 /*
2135  * NAME:     lpfc_disc_timeout
2136  *
2137  * FUNCTION: Fibre Channel driver discovery timeout routine.
2138  *
2139  * EXECUTION ENVIRONMENT: interrupt only
2140  *
2141  * CALLED FROM:
2142  *      Timer function
2143  *
2144  * RETURNS:
2145  *      none
2146  */
2147 /*****************************************************************************/
2148 void
2149 lpfc_disc_timeout(unsigned long ptr)
2150 {
2151 	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2152 	unsigned long flags = 0;
2153 
2154 	if (unlikely(!phba))
2155 		return;
2156 
2157 	spin_lock_irqsave(phba->host->host_lock, flags);
2158 	if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
2159 		phba->work_hba_events |= WORKER_DISC_TMO;
2160 		if (phba->work_wait)
2161 			wake_up(phba->work_wait);
2162 	}
2163 	spin_unlock_irqrestore(phba->host->host_lock, flags);
2164 	return;
2165 }
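
/*
 * lpfc_disc_timeout() runs in timer context, so it only flags
 * WORKER_DISC_TMO and wakes the worker thread; the heavyweight
 * lpfc_disc_timeout_handler() below runs in process context. A
 * minimal sketch of how such a timer is armed (the real arming is
 * done by lpfc_set_disctmo(); the timeout value here is made up):
 *
 *	init_timer(&phba->fc_disctmo);
 *	phba->fc_disctmo.function = lpfc_disc_timeout;
 *	phba->fc_disctmo.data = (unsigned long)phba;
 *	mod_timer(&phba->fc_disctmo, jiffies + HZ * 30);
 */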
2166 
2167 static void
2168 lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2169 {
2170 	struct lpfc_sli *psli;
2171 	struct lpfc_nodelist *ndlp;
2172 	LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
2173 	int rc, clrlaerr = 0;
2174 
2175 	if (unlikely(!phba))
2176 		return;
2177 
2178 	if (!(phba->fc_flag & FC_DISC_TMO))
2179 		return;
2180 
2181 	psli = &phba->sli;
2182 
2183 	spin_lock_irq(phba->host->host_lock);
2184 	phba->fc_flag &= ~FC_DISC_TMO;
2185 	spin_unlock_irq(phba->host->host_lock);
2186 
2187 	switch (phba->hba_state) {
2188 
2189 	case LPFC_LOCAL_CFG_LINK:
2190 	/* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
2191 		/* FAN timeout */
2192 		lpfc_printf_log(phba,
2193 				 KERN_WARNING,
2194 				 LOG_DISCOVERY,
2195 				 "%d:0221 FAN timeout\n",
2196 				 phba->brd_no);
2197 
2198 		/* Forget about FAN, Start discovery by sending a FLOGI
2199 		 * hba_state is identically LPFC_FLOGI while waiting for FLOGI
2200 		 * cmpl
2201 		 */
2202 		phba->hba_state = LPFC_FLOGI;
2203 		lpfc_set_disctmo(phba);
2204 		lpfc_initial_flogi(phba);
2205 		break;
2206 
2207 	case LPFC_FLOGI:
2208 	/* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2209 		/* Initial FLOGI timeout */
2210 		lpfc_printf_log(phba,
2211 				 KERN_ERR,
2212 				 LOG_DISCOVERY,
2213 				 "%d:0222 Initial FLOGI timeout\n",
2214 				 phba->brd_no);
2215 
2216 		/* Assume no Fabric and go on with discovery.
2217 		 * Check for outstanding ELS FLOGI to abort.
2218 		 */
2219 
2220 		/* FLOGI failed, so just use loop map to make discovery list */
2221 		lpfc_disc_list_loopmap(phba);
2222 
2223 		/* Start discovery */
2224 		lpfc_disc_start(phba);
2225 		break;
2226 
2227 	case LPFC_FABRIC_CFG_LINK:
2228 	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2229 	   NameServer login */
2230 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2231 				"%d:0223 Timeout while waiting for NameServer "
2232 				"login\n", phba->brd_no);
2233 
2234 		/* Next look for NameServer ndlp */
2235 		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
2236 		if (ndlp)
2237 			lpfc_nlp_remove(phba, ndlp);
2238 		/* Start discovery */
2239 		lpfc_disc_start(phba);
2240 		break;
2241 
2242 	case LPFC_NS_QRY:
2243 	/* Check for wait for NameServer Rsp timeout */
2244 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2245 				"%d:0224 NameServer Query timeout "
2246 				"Data: x%x x%x\n",
2247 				phba->brd_no,
2248 				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2249 
2250 		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2251 								NameServer_DID);
2252 		if (ndlp) {
2253 			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2254 				/* Try it one more time */
2255 				rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
2256 				if (rc == 0)
2257 					break;
2258 			}
2259 			phba->fc_ns_retry = 0;
2260 		}
2261 
2262 		/* Nothing to authenticate, so CLEAR_LA right now */
2263 		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2264 		if (!clearlambox) {
2265 			clrlaerr = 1;
2266 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2267 					"%d:0226 Device Discovery "
2268 					"completion error\n",
2269 					phba->brd_no);
2270 			phba->hba_state = LPFC_HBA_ERROR;
2271 			break;
2272 		}
2273 
2274 		phba->hba_state = LPFC_CLEAR_LA;
2275 		lpfc_clear_la(phba, clearlambox);
2276 		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2277 		rc = lpfc_sli_issue_mbox(phba, clearlambox,
2278 					 (MBX_NOWAIT | MBX_STOP_IOCB));
2279 		if (rc == MBX_NOT_FINISHED) {
2280 			mempool_free(clearlambox, phba->mbox_mem_pool);
2281 			clrlaerr = 1;
2282 			break;
2283 		}
2284 
2285 		/* Setup and issue mailbox INITIALIZE LINK command */
2286 		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2287 		if (!initlinkmbox) {
2288 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2289 					"%d:0226 Device Discovery "
2290 					"completion error\n",
2291 					phba->brd_no);
2292 			phba->hba_state = LPFC_HBA_ERROR;
2293 			break;
2294 		}
2295 
2296 		lpfc_linkdown(phba);
2297 		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2298 			       phba->cfg_link_speed);
2299 		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2300 		rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2301 					 (MBX_NOWAIT | MBX_STOP_IOCB));
2302 		if (rc == MBX_NOT_FINISHED)
2303 			mempool_free(initlinkmbox, phba->mbox_mem_pool);
2304 
2305 		break;
2306 
2307 	case LPFC_DISC_AUTH:
2308 	/* Node Authentication timeout */
2309 		lpfc_printf_log(phba,
2310 				 KERN_ERR,
2311 				 LOG_DISCOVERY,
2312 				 "%d:0227 Node Authentication timeout\n",
2313 				 phba->brd_no);
2314 		lpfc_disc_flush_list(phba);
2315 		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2316 		if (!clearlambox) {
2317 			clrlaerr = 1;
2318 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2319 					"%d:0226 Device Discovery "
2320 					"completion error\n",
2321 					phba->brd_no);
2322 			phba->hba_state = LPFC_HBA_ERROR;
2323 			break;
2324 		}
2325 		phba->hba_state = LPFC_CLEAR_LA;
2326 		lpfc_clear_la(phba, clearlambox);
2327 		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2328 		rc = lpfc_sli_issue_mbox(phba, clearlambox,
2329 					 (MBX_NOWAIT | MBX_STOP_IOCB));
2330 		if (rc == MBX_NOT_FINISHED) {
2331 			mempool_free(clearlambox, phba->mbox_mem_pool);
2332 			clrlaerr = 1;
2333 		}
2334 		break;
2335 
2336 	case LPFC_CLEAR_LA:
2337 	/* CLEAR LA timeout */
2338 		lpfc_printf_log(phba,
2339 				 KERN_ERR,
2340 				 LOG_DISCOVERY,
2341 				 "%d:0228 CLEAR LA timeout\n",
2342 				 phba->brd_no);
2343 		clrlaerr = 1;
2344 		break;
2345 
2346 	case LPFC_HBA_READY:
2347 		if (phba->fc_flag & FC_RSCN_MODE) {
2348 			lpfc_printf_log(phba,
2349 					KERN_ERR,
2350 					LOG_DISCOVERY,
2351 					"%d:0231 RSCN timeout Data: x%x x%x\n",
2352 					phba->brd_no,
2353 					phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2354 
2355 			/* Cleanup any outstanding ELS commands */
2356 			lpfc_els_flush_cmd(phba);
2357 
2358 			lpfc_els_flush_rscn(phba);
2359 			lpfc_disc_flush_list(phba);
2360 		}
2361 		break;
2362 	}
2363 
2364 	if (clrlaerr) {
2365 		lpfc_disc_flush_list(phba);
2366 		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2367 		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2368 		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2369 		phba->hba_state = LPFC_HBA_READY;
2370 	}
2371 
2372 	return;
2373 }
2374 
2375 static void
2376 lpfc_nodev_timeout(unsigned long ptr)
2377 {
2378 	struct lpfc_hba *phba;
2379 	struct lpfc_nodelist *ndlp;
2380 	unsigned long iflag;
2381 	struct lpfc_work_evt  *evtp;
2382 
2383 	ndlp = (struct lpfc_nodelist *)ptr;
2384 	phba = ndlp->nlp_phba;
2385 	evtp = &ndlp->nodev_timeout_evt;
2386 	spin_lock_irqsave(phba->host->host_lock, iflag);
2387 
2388 	if (!list_empty(&evtp->evt_listp)) {
2389 		spin_unlock_irqrestore(phba->host->host_lock, iflag);
2390 		return;
2391 	}
2392 	evtp->evt_arg1  = ndlp;
2393 	evtp->evt       = LPFC_EVT_NODEV_TMO;
2394 	list_add_tail(&evtp->evt_listp, &phba->work_list);
2395 	if (phba->work_wait)
2396 		wake_up(phba->work_wait);
2397 
2398 	spin_unlock_irqrestore(phba->host->host_lock, iflag);
2399 	return;
2400 }
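
/*
 * lpfc_nodev_timeout() only queues an LPFC_EVT_NODEV_TMO event; a
 * sketch of how the worker thread might drain the queue, assuming the
 * usual list-plus-wakeup pattern (the real dispatch loop lives
 * elsewhere in the driver):
 *
 *	spin_lock_irq(phba->host->host_lock);
 *	while (!list_empty(&phba->work_list)) {
 *		evtp = list_entry(phba->work_list.next,
 *				  struct lpfc_work_evt, evt_listp);
 *		list_del_init(&evtp->evt_listp);
 *		spin_unlock_irq(phba->host->host_lock);
 *		if (evtp->evt == LPFC_EVT_NODEV_TMO)
 *			lpfc_process_nodev_timeout(phba, evtp->evt_arg1);
 *		spin_lock_irq(phba->host->host_lock);
 *	}
 *	spin_unlock_irq(phba->host->host_lock);
 */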
2401 
2402 
2403 /*
2404  * This routine handles processing an FDMI REG_LOGIN mailbox
2405  * command upon completion. It is set up in the LPFC_MBOXQ
2406  * as the completion routine when the command is
2407  * handed off to the SLI layer.
2408  */
2409 void
2410 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2411 {
2412 	struct lpfc_sli *psli;
2413 	MAILBOX_t *mb;
2414 	struct lpfc_dmabuf *mp;
2415 	struct lpfc_nodelist *ndlp;
2416 
2417 	psli = &phba->sli;
2418 	mb = &pmb->mb;
2419 
2420 	ndlp = (struct lpfc_nodelist *) pmb->context2;
2421 	mp = (struct lpfc_dmabuf *) (pmb->context1);
2422 
2423 	pmb->context1 = NULL;
2424 
2425 	if (ndlp->nlp_rpi != 0)
2426 		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
2427 	ndlp->nlp_rpi = mb->un.varWords[0];
2428 	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
2429 	ndlp->nlp_type |= NLP_FABRIC;
2430 	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2431 	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2432 
2433 	/* Start issuing Fabric-Device Management Interface (FDMI)
2434 	 * command to 0xfffffa (FDMI well known port)
2435 	 */
2436 	if (phba->cfg_fdmi_on == 1) {
2437 		lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
2438 	} else {
2439 		/*
2440 		 * Delay issuing FDMI command if fdmi-on=2
2441 		 * (supporting RPA/hostnmae)
2442 		 * (supporting RPA/hostname)
2443 		mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2444 	}
2445 
2446 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
2447 	kfree(mp);
2448 	mempool_free(pmb, phba->mbox_mem_pool);
2449 
2450 	return;
2451 }
2452 
2453 /*
2454  * This routine looks up the ndlp hash
2455  * table for the given RPI. If the RPI is found,
2456  * it returns the node list pointer; otherwise
2457  * it returns NULL.
2458  */
2459 struct lpfc_nodelist *
2460 lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
2461 {
2462 	struct lpfc_nodelist *ret;
2463 
2464 	ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
2465 	while ((ret != 0) && (ret->nlp_rpi != rpi)) {
2466 		ret = ret->nlp_rpi_hash_next;
2467 	}
2468 	return ret;
2469 }
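
/*
 * LPFC_RPI_HASH_FUNC() is defined in the driver headers; a simple mask
 * of the low RPI bits would behave like the lookups here, e.g. (an
 * assumed definition, for illustration only):
 *
 *	#define LPFC_RPI_HASH_FUNC(rpi)	((rpi) & 0x3f)
 *
 * Buckets are singly linked through nlp_rpi_hash_next, so a lookup is
 * a linear walk of one chain.
 */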
2470 
2471 /*
2472  * This routine looks up the ndlp hash table for the
2473  * given RPI. If the RPI is found, the entry is
2474  * deleted from the hash table and the node list
2475  * pointer is returned; otherwise NULL is returned.
2476  */
2477 struct lpfc_nodelist *
2478 lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
2479 {
2480 	struct lpfc_nodelist *ret, *temp;
2481 
2482 	ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
2483 	if (ret == 0)
2484 		return NULL;
2485 
2486 	if (ret->nlp_rpi == rpi) {
2487 		phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
2488 		    ret->nlp_rpi_hash_next;
2489 		ret->nlp_rpi_hash_next = NULL;
2490 		return ret;
2491 	}
2492 
2493 	while ((ret->nlp_rpi_hash_next != 0) &&
2494 	       (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
2495 		ret = ret->nlp_rpi_hash_next;
2496 	}
2497 
2498 	if (ret->nlp_rpi_hash_next != 0) {
2499 		temp = ret->nlp_rpi_hash_next;
2500 		ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
2501 		temp->nlp_rpi_hash_next = NULL;
2502 		return temp;
2503 	} else {
2504 		return NULL;
2505 	}
2506 }
2507 
2508 /*
2509  * This routine adds the node list entry to the
2510  * ndlp hash table.
2511  */
2512 void
2513 lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2514 		 uint16_t rpi)
2515 {
2516 
2517 	uint32_t index;
2518 
2519 	index = LPFC_RPI_HASH_FUNC(rpi);
2520 	ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
2521 	phba->fc_nlplookup[index] = ndlp;
2522 	return;
2523 }
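
/*
 * lpfc_addnode_rpi() pushes the node onto the head of its bucket and
 * does not check for duplicates, so callers that rebind a node must
 * remove the stale RPI first, as the REG_LOGIN completion above does:
 *
 *	if (ndlp->nlp_rpi != 0)
 *		lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
 *	ndlp->nlp_rpi = mb->un.varWords[0];
 *	lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
 */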
2524 
2525 void
2526 lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2527 		 uint32_t did)
2528 {
2529 	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2530 	INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2531 	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2532 	init_timer(&ndlp->nlp_tmofunc);
2533 	ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2534 	ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2535 	init_timer(&ndlp->nlp_delayfunc);
2536 	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2537 	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2538 	ndlp->nlp_DID = did;
2539 	ndlp->nlp_phba = phba;
2540 	ndlp->nlp_sid = NLP_NO_SID;
2541 	return;
2542 }
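
/*
 * lpfc_nlp_init() fully (re)initializes the node, including zeroing
 * it; the canonical allocation sequence, as used by
 * lpfc_setup_disc_node() above, is:
 *
 *	ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 *	if (ndlp) {
 *		lpfc_nlp_init(phba, ndlp, did);
 *		ndlp->nlp_state = NLP_STE_NPR_NODE;
 *		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
 *	}
 */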
2543