/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

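			/* Byte-swap the license key to big-endian words
			 * once; it is copied into the READ_NVPARM mailbox
			 * payload below.
			 */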
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the SLI feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

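	/* Pull the VPD region from the adapter one DUMP mailbox at a
	 * time until the dump reports no more data or the local buffer
	 * is full.
	 */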
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return zero when finished, or we got a
		 * mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, the internal async event support flag is
 * set to 1; otherwise, it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake-up parameters. When this command completes, the response contains
 * the option ROM version of the HBA. This function translates the version
 * number into a human-readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If CONFIG_PORT completed correctly, the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
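	/* Encode the 6 IEEE bytes of the WWNN as 12 characters, one per
	 * nibble: 0-9 map to '0'-'9', 10-15 map to 'a'-'f'.
	 */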
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology,
		phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		phba->link_state = LPFC_HBA_ERROR;
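		/* With MBX_NOWAIT, MBX_BUSY means the SLI layer still owns
		 * the mailbox and its completion handler will free it; in
		 * every other case (including polled mode) free it here.
		 */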
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure and can be used to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to, either in the
 * interrupt handler or by processing slow-ring or fast-ring events within
 * the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
 * resets the timer for the next timeout period. If the lpfc heart-beat
 * mailbox command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer is set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA is taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

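	/* A completion within the last heartbeat interval proves the HBA
	 * is responsive; just rearm the timer instead of issuing a
	 * heartbeat mailbox command.
	 */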
	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);
1014 
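	/* Free the accumulated ELS buffers if none were added or consumed
	 * since the last heartbeat pass (count unchanged).
	 */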
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If the heartbeat timeout fired with hb_outstanding
			 * set, we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli   *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring  *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That can cause I/Os to be
	 * dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq
	 * and let the SCSI layer retry them after re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host  *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That can cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6, so offline is not
		 * called twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli   *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
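		/* PCI VPD resource tags: 0x82 is an identifier string and
		 * 0x91 is read-write (VPD-W) data, both skipped here; 0x90
		 * is read-only (VPD-R) keyword data, which is parsed below;
		 * 0x78 is the end tag.
		 */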
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			} else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			} else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			} else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			} else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			} else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of 256
 * chars. It shall be returned with the model name, maximum speed, and the
 * host bus type. The @mdp passed into this function points to an array of
 * 80 chars. When the function returns, the @mdp will be filled with the
 * model name.
1648  **/
1649 static void
1650 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1651 {
1652 	lpfc_vpd_t *vp;
1653 	uint16_t dev_id = phba->pcidev->device;
1654 	int max_speed;
1655 	int GE = 0;
1656 	int oneConnect = 0; /* default is not a oneConnect */
1657 	struct {
1658 		char *name;
1659 		char *bus;
1660 		char *function;
1661 	} m = {"<Unknown>", "", ""};
1662 
	if (mdp && mdp[0] != '\0' &&
	    descp && descp[0] != '\0')
1665 		return;
1666 
1667 	if (phba->lmt & LMT_10Gb)
1668 		max_speed = 10;
1669 	else if (phba->lmt & LMT_8Gb)
1670 		max_speed = 8;
1671 	else if (phba->lmt & LMT_4Gb)
1672 		max_speed = 4;
1673 	else if (phba->lmt & LMT_2Gb)
1674 		max_speed = 2;
1675 	else
1676 		max_speed = 1;
1677 
1678 	vp = &phba->vpd;
1679 
1680 	switch (dev_id) {
1681 	case PCI_DEVICE_ID_FIREFLY:
1682 		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1683 		break;
1684 	case PCI_DEVICE_ID_SUPERFLY:
1685 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1686 			m = (typeof(m)){"LP7000", "PCI",
1687 					"Fibre Channel Adapter"};
1688 		else
1689 			m = (typeof(m)){"LP7000E", "PCI",
1690 					"Fibre Channel Adapter"};
1691 		break;
1692 	case PCI_DEVICE_ID_DRAGONFLY:
1693 		m = (typeof(m)){"LP8000", "PCI",
1694 				"Fibre Channel Adapter"};
1695 		break;
1696 	case PCI_DEVICE_ID_CENTAUR:
1697 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1698 			m = (typeof(m)){"LP9002", "PCI",
1699 					"Fibre Channel Adapter"};
1700 		else
1701 			m = (typeof(m)){"LP9000", "PCI",
1702 					"Fibre Channel Adapter"};
1703 		break;
1704 	case PCI_DEVICE_ID_RFLY:
1705 		m = (typeof(m)){"LP952", "PCI",
1706 				"Fibre Channel Adapter"};
1707 		break;
1708 	case PCI_DEVICE_ID_PEGASUS:
1709 		m = (typeof(m)){"LP9802", "PCI-X",
1710 				"Fibre Channel Adapter"};
1711 		break;
1712 	case PCI_DEVICE_ID_THOR:
1713 		m = (typeof(m)){"LP10000", "PCI-X",
1714 				"Fibre Channel Adapter"};
1715 		break;
1716 	case PCI_DEVICE_ID_VIPER:
1717 		m = (typeof(m)){"LPX1000",  "PCI-X",
1718 				"Fibre Channel Adapter"};
1719 		break;
1720 	case PCI_DEVICE_ID_PFLY:
1721 		m = (typeof(m)){"LP982", "PCI-X",
1722 				"Fibre Channel Adapter"};
1723 		break;
1724 	case PCI_DEVICE_ID_TFLY:
1725 		m = (typeof(m)){"LP1050", "PCI-X",
1726 				"Fibre Channel Adapter"};
1727 		break;
1728 	case PCI_DEVICE_ID_HELIOS:
1729 		m = (typeof(m)){"LP11000", "PCI-X2",
1730 				"Fibre Channel Adapter"};
1731 		break;
1732 	case PCI_DEVICE_ID_HELIOS_SCSP:
1733 		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1734 				"Fibre Channel Adapter"};
1735 		break;
1736 	case PCI_DEVICE_ID_HELIOS_DCSP:
1737 		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1738 				"Fibre Channel Adapter"};
1739 		break;
1740 	case PCI_DEVICE_ID_NEPTUNE:
1741 		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1742 		break;
1743 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1744 		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1745 		break;
1746 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1747 		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1748 		break;
1749 	case PCI_DEVICE_ID_BMID:
1750 		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1751 		break;
1752 	case PCI_DEVICE_ID_BSMB:
1753 		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1754 		break;
1755 	case PCI_DEVICE_ID_ZEPHYR:
1756 		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1757 		break;
1758 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1759 		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1760 		break;
1761 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1762 		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1763 		GE = 1;
1764 		break;
1765 	case PCI_DEVICE_ID_ZMID:
1766 		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1767 		break;
1768 	case PCI_DEVICE_ID_ZSMB:
1769 		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1770 		break;
1771 	case PCI_DEVICE_ID_LP101:
1772 		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1773 		break;
1774 	case PCI_DEVICE_ID_LP10000S:
1775 		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1776 		break;
1777 	case PCI_DEVICE_ID_LP11000S:
1778 		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1779 		break;
1780 	case PCI_DEVICE_ID_LPE11000S:
1781 		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1782 		break;
1783 	case PCI_DEVICE_ID_SAT:
1784 		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1785 		break;
1786 	case PCI_DEVICE_ID_SAT_MID:
1787 		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1788 		break;
1789 	case PCI_DEVICE_ID_SAT_SMB:
1790 		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1791 		break;
1792 	case PCI_DEVICE_ID_SAT_DCSP:
1793 		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1794 		break;
1795 	case PCI_DEVICE_ID_SAT_SCSP:
1796 		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1797 		break;
1798 	case PCI_DEVICE_ID_SAT_S:
1799 		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1800 		break;
1801 	case PCI_DEVICE_ID_HORNET:
1802 		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1803 		GE = 1;
1804 		break;
1805 	case PCI_DEVICE_ID_PROTEUS_VF:
1806 		m = (typeof(m)){"LPev12000", "PCIe IOV",
1807 				"Fibre Channel Adapter"};
1808 		break;
1809 	case PCI_DEVICE_ID_PROTEUS_PF:
1810 		m = (typeof(m)){"LPev12000", "PCIe IOV",
1811 				"Fibre Channel Adapter"};
1812 		break;
1813 	case PCI_DEVICE_ID_PROTEUS_S:
1814 		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1815 				"Fibre Channel Adapter"};
1816 		break;
1817 	case PCI_DEVICE_ID_TIGERSHARK:
1818 		oneConnect = 1;
1819 		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1820 		break;
1821 	case PCI_DEVICE_ID_TOMCAT:
1822 		oneConnect = 1;
1823 		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1824 		break;
1825 	case PCI_DEVICE_ID_FALCON:
1826 		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1827 				"EmulexSecure Fibre"};
1828 		break;
1829 	case PCI_DEVICE_ID_BALIUS:
1830 		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1831 				"Fibre Channel Adapter"};
1832 		break;
1833 	default:
1834 		m = (typeof(m)){"Unknown", "", ""};
1835 		break;
1836 	}
1837 
1838 	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* OneConnect HBAs require special processing; they are all
	 * initiators and we put the port number on the end.
	 */
1843 	if (descp && descp[0] == '\0') {
1844 		if (oneConnect)
1845 			snprintf(descp, 255,
1846 				"Emulex OneConnect %s, %s Initiator, Port %s",
1847 				m.name, m.function,
1848 				phba->Port);
1849 		else
1850 			snprintf(descp, 255,
1851 				"Emulex %s %d%s %s %s",
1852 				m.name, max_speed, (GE) ? "GE" : "Gb",
1853 				m.bus, m.function);
1854 	}
1855 }
1856 
1857 /**
1858  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1859  * @phba: pointer to lpfc hba data structure.
1860  * @pring: pointer to a IOCB ring.
1861  * @cnt: the number of IOCBs to be posted to the IOCB ring.
1862  *
1863  * This routine posts a given number of IOCBs with the associated DMA buffer
1864  * descriptors specified by the cnt argument to the given IOCB ring.
1865  *
1866  * Return codes
1867  *   The number of IOCBs NOT able to be posted to the IOCB ring.
 *   The number of IOCBs NOT able to be posted to the IOCB ring. Any
 *   shortfall is recorded in pring->missbufcnt and retried on the next call.
1869 int
1870 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1871 {
1872 	IOCB_t *icmd;
1873 	struct lpfc_iocbq *iocb;
1874 	struct lpfc_dmabuf *mp1, *mp2;
1875 
1876 	cnt += pring->missbufcnt;
1877 
1878 	/* While there are buffers to post */
1879 	while (cnt > 0) {
		/* Allocate buffer for command iocb */
1881 		iocb = lpfc_sli_get_iocbq(phba);
1882 		if (iocb == NULL) {
1883 			pring->missbufcnt = cnt;
1884 			return cnt;
1885 		}
1886 		icmd = &iocb->iocb;
1887 
1888 		/* 2 buffers can be posted per command */
1889 		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1893 		if (!mp1 || !mp1->virt) {
1894 			kfree(mp1);
1895 			lpfc_sli_release_iocbq(phba, iocb);
1896 			pring->missbufcnt = cnt;
1897 			return cnt;
1898 		}
1899 
1900 		INIT_LIST_HEAD(&mp1->list);
1901 		/* Allocate buffer to post */
1902 		if (cnt > 1) {
			mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1904 			if (mp2)
1905 				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1906 							    &mp2->phys);
1907 			if (!mp2 || !mp2->virt) {
1908 				kfree(mp2);
1909 				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1910 				kfree(mp1);
1911 				lpfc_sli_release_iocbq(phba, iocb);
1912 				pring->missbufcnt = cnt;
1913 				return cnt;
1914 			}
1915 
1916 			INIT_LIST_HEAD(&mp2->list);
1917 		} else {
1918 			mp2 = NULL;
1919 		}
1920 
1921 		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1922 		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1923 		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1924 		icmd->ulpBdeCount = 1;
1925 		cnt--;
1926 		if (mp2) {
1927 			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1928 			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1929 			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1930 			cnt--;
1931 			icmd->ulpBdeCount = 2;
1932 		}
1933 
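		/* Issue a QUE_RING_BUF64 IOCB describing the buffer(s) */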
1934 		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1935 		icmd->ulpLe = 1;
1936 
1937 		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1938 		    IOCB_ERROR) {
1939 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1940 			kfree(mp1);
1941 			cnt++;
1942 			if (mp2) {
1943 				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1944 				kfree(mp2);
1945 				cnt++;
1946 			}
1947 			lpfc_sli_release_iocbq(phba, iocb);
1948 			pring->missbufcnt = cnt;
1949 			return cnt;
1950 		}
1951 		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1952 		if (mp2)
1953 			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1954 	}
1955 	pring->missbufcnt = 0;
1956 	return 0;
1957 }
1958 
1959 /**
1960  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1961  * @phba: pointer to lpfc hba data structure.
1962  *
 * This routine posts the initial receive IOCB buffers to the ELS ring. The
 * number of initial IOCB buffers is specified by LPFC_BUF_RING0, currently
 * set to 64 IOCBs.
1966  *
1967  * Return codes
1968  *   0 - success (currently always success)
1969  **/
1970 static int
1971 lpfc_post_rcv_buf(struct lpfc_hba *phba)
1972 {
1973 	struct lpfc_sli *psli = &phba->sli;
1974 
1975 	/* Ring 0, ELS / CT buffers */
1976 	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1977 	/* Ring 2 - FCP no buffers needed */
1978 
1979 	return 0;
1980 }
1981 
1982 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1983 
1984 /**
1985  * lpfc_sha_init - Set up initial array of hash table entries
1986  * @HashResultPointer: pointer to an array as hash table.
1987  *
1988  * This routine sets up the initial values to the array of hash table entries
1989  * for the LC HBAs.
1990  **/
1991 static void
1992 lpfc_sha_init(uint32_t * HashResultPointer)
1993 {
1994 	HashResultPointer[0] = 0x67452301;
1995 	HashResultPointer[1] = 0xEFCDAB89;
1996 	HashResultPointer[2] = 0x98BADCFE;
1997 	HashResultPointer[3] = 0x10325476;
1998 	HashResultPointer[4] = 0xC3D2E1F0;
1999 }
2000 
2001 /**
2002  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table and returned through @HashResultPointer as the result hash table.
2010  **/
2011 static void
2012 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2013 {
2014 	int t;
2015 	uint32_t TEMP;
2016 	uint32_t A, B, C, D, E;
2017 	t = 16;
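	/* Expand the 16 input words into the 80-word message schedule;
	 * the S() macro is a 32-bit left rotate.
	 */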
2018 	do {
2019 		HashWorkingPointer[t] =
2020 		    S(1,
2021 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2022 								     8] ^
2023 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2024 	} while (++t <= 79);
2025 	t = 0;
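	/* Load the five working variables and run the 80 SHA-1 rounds */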
2026 	A = HashResultPointer[0];
2027 	B = HashResultPointer[1];
2028 	C = HashResultPointer[2];
2029 	D = HashResultPointer[3];
2030 	E = HashResultPointer[4];
2031 
2032 	do {
2033 		if (t < 20) {
2034 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2035 		} else if (t < 40) {
2036 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2037 		} else if (t < 60) {
2038 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2039 		} else {
2040 			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2041 		}
2042 		TEMP += S(5, A) + E + HashWorkingPointer[t];
2043 		E = D;
2044 		D = C;
2045 		C = S(30, B);
2046 		B = A;
2047 		A = TEMP;
2048 	} while (++t <= 79);
2049 
2050 	HashResultPointer[0] += A;
2051 	HashResultPointer[1] += B;
2052 	HashResultPointer[2] += C;
2053 	HashResultPointer[3] += D;
2054 	HashResultPointer[4] += E;
2055 
2056 }
2057 
2058 /**
2059  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2060  * @RandomChallenge: pointer to the entry of host challenge random number array.
2061  * @HashWorking: pointer to the entry of the working hash array.
2062  *
 * This routine XORs the challenge random number referred to by
 * @RandomChallenge into the working hash entry referred to by @HashWorking.
 * The result is returned by reference through @HashWorking.
2067  **/
2068 static void
2069 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2070 {
2071 	*HashWorking = (*RandomChallenge ^ *HashWorking);
2072 }
2073 
2074 /**
2075  * lpfc_hba_init - Perform special handling for LC HBA initialization
2076  * @phba: pointer to lpfc hba data structure.
2077  * @hbainit: pointer to an array of unsigned 32-bit integers.
2078  *
 * This routine performs the special handling for LC HBA initialization: it
 * seeds a working array with the adapter WWNN, folds in the host random
 * challenge data, and runs a SHA-1 style hash whose digest is returned
 * through @hbainit.
2080  **/
2081 void
2082 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2083 {
2084 	int t;
2085 	uint32_t *HashWorking;
2086 	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2087 
2088 	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2089 	if (!HashWorking)
2090 		return;
2091 
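	/* Seed the working array with the adapter's WWNN */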
2092 	HashWorking[0] = HashWorking[78] = *pwwnn++;
2093 	HashWorking[1] = HashWorking[79] = *pwwnn;
2094 
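	/* Fold the host's random challenge data into the first 7 words */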
2095 	for (t = 0; t < 7; t++)
2096 		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2097 
2098 	lpfc_sha_init(hbainit);
2099 	lpfc_sha_iterate(hbainit, HashWorking);
2100 	kfree(HashWorking);
2101 }
2102 
2103 /**
2104  * lpfc_cleanup - Performs vport cleanups before deleting a vport
2105  * @vport: pointer to a virtual N_Port data structure.
2106  *
2107  * This routine performs the necessary cleanups before deleting the @vport.
2108  * It invokes the discovery state machine to perform necessary state
2109  * transitions and to release the ndlps associated with the @vport. Note,
2110  * the physical port is treated as @vport 0.
2111  **/
2112 void
2113 lpfc_cleanup(struct lpfc_vport *vport)
2114 {
2115 	struct lpfc_hba   *phba = vport->phba;
2116 	struct lpfc_nodelist *ndlp, *next_ndlp;
2117 	int i = 0;
2118 
2119 	if (phba->link_state > LPFC_LINK_DOWN)
2120 		lpfc_port_link_failure(vport);
2121 
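	/* Walk the node list: free inactive nodes directly and push
	 * active nodes through the discovery state machine for removal.
	 */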
2122 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2123 		if (!NLP_CHK_NODE_ACT(ndlp)) {
2124 			ndlp = lpfc_enable_node(vport, ndlp,
2125 						NLP_STE_UNUSED_NODE);
2126 			if (!ndlp)
2127 				continue;
2128 			spin_lock_irq(&phba->ndlp_lock);
2129 			NLP_SET_FREE_REQ(ndlp);
2130 			spin_unlock_irq(&phba->ndlp_lock);
2131 			/* Trigger the release of the ndlp memory */
2132 			lpfc_nlp_put(ndlp);
2133 			continue;
2134 		}
2135 		spin_lock_irq(&phba->ndlp_lock);
2136 		if (NLP_CHK_FREE_REQ(ndlp)) {
2137 			/* The ndlp should not be in memory free mode already */
2138 			spin_unlock_irq(&phba->ndlp_lock);
2139 			continue;
2140 		} else
2141 			/* Indicate request for freeing ndlp memory */
2142 			NLP_SET_FREE_REQ(ndlp);
2143 		spin_unlock_irq(&phba->ndlp_lock);
2144 
2145 		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2146 		    ndlp->nlp_DID == Fabric_DID) {
2147 			/* Just free up ndlp with Fabric_DID for vports */
2148 			lpfc_nlp_put(ndlp);
2149 			continue;
2150 		}
2151 
2152 		if (ndlp->nlp_type & NLP_FABRIC)
2153 			lpfc_disc_state_machine(vport, ndlp, NULL,
2154 					NLP_EVT_DEVICE_RECOVERY);
2155 
2156 		lpfc_disc_state_machine(vport, ndlp, NULL,
2157 					     NLP_EVT_DEVICE_RM);
2158 
2159 	}
2160 
	/* At this point, ALL ndlps should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
2164 	 */
2165 	while (!list_empty(&vport->fc_nodes)) {
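		/* Give up after roughly 30 seconds (3000 polls of 10 ms) */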
2166 		if (i++ > 3000) {
2167 			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2168 				"0233 Nodelist not empty\n");
2169 			list_for_each_entry_safe(ndlp, next_ndlp,
2170 						&vport->fc_nodes, nlp_listp) {
2171 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2172 						LOG_NODE,
2173 						"0282 did:x%x ndlp:x%p "
2174 						"usgmap:x%x refcnt:%d\n",
2175 						ndlp->nlp_DID, (void *)ndlp,
2176 						ndlp->nlp_usg_map,
2177 						atomic_read(
2178 							&ndlp->kref.refcount));
2179 			}
2180 			break;
2181 		}
2182 
2183 		/* Wait for any activity on ndlps to settle */
2184 		msleep(10);
2185 	}
2186 }
2187 
2188 /**
2189  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2190  * @vport: pointer to a virtual N_Port data structure.
2191  *
2192  * This routine stops all the timers associated with a @vport. This function
2193  * is invoked before disabling or deleting a @vport. Note that the physical
2194  * port is treated as @vport 0.
2195  **/
2196 void
2197 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2198 {
2199 	del_timer_sync(&vport->els_tmofunc);
2200 	del_timer_sync(&vport->fc_fdmitmo);
2201 	lpfc_can_disctmo(vport);
2202 	return;
2203 }
2204 
2205 /**
2206  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2207  * @phba: pointer to lpfc hba data structure.
2208  *
2209  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the hbalock.
2211  **/
2212 void
2213 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2214 {
2215 	/* Clear pending FCF rediscovery wait and failover in progress flags */
2216 	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2217 				FCF_DEAD_DISC |
2218 				FCF_ACVL_DISC);
2219 	/* Now, try to stop the timer */
2220 	del_timer(&phba->fcf.redisc_wait);
2221 }
2222 
2223 /**
2224  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2225  * @phba: pointer to lpfc hba data structure.
2226  *
2227  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the hbalock
 * held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
2231  **/
2232 void
2233 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2234 {
2235 	spin_lock_irq(&phba->hbalock);
2236 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2237 		/* FCF rediscovery timer already fired or stopped */
2238 		spin_unlock_irq(&phba->hbalock);
2239 		return;
2240 	}
2241 	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2242 	spin_unlock_irq(&phba->hbalock);
2243 }
2244 
2245 /**
2246  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2247  * @phba: pointer to lpfc hba data structure.
2248  *
2249  * This routine stops all the timers associated with a HBA. This function is
2250  * invoked before either putting a HBA offline or unloading the driver.
2251  **/
2252 void
2253 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2254 {
2255 	lpfc_stop_vport_timers(phba->pport);
2256 	del_timer_sync(&phba->sli.mbox_tmo);
2257 	del_timer_sync(&phba->fabric_block_timer);
2258 	del_timer_sync(&phba->eratt_poll);
2259 	del_timer_sync(&phba->hb_tmofunc);
2260 	phba->hb_outstanding = 0;
2261 
2262 	switch (phba->pci_dev_grp) {
2263 	case LPFC_PCI_DEV_LP:
2264 		/* Stop any LightPulse device specific driver timers */
2265 		del_timer_sync(&phba->fcp_poll_timer);
2266 		break;
2267 	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
2269 		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2270 		break;
2271 	default:
2272 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2273 				"0297 Invalid device group (x%x)\n",
2274 				phba->pci_dev_grp);
2275 		break;
2276 	}
2277 	return;
2278 }
2279 
2280 /**
2281  * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2282  * @phba: pointer to lpfc hba data structure.
2283  *
2284  * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs interface or the libdfc interface, is blocked.
 * The HBA is set to block the management interface when the driver prepares
 * the HBA interface for online or offline.
2289  **/
2290 static void
2291 lpfc_block_mgmt_io(struct lpfc_hba * phba)
2292 {
2293 	unsigned long iflag;
2294 	uint8_t actcmd = MBX_HEARTBEAT;
2295 	unsigned long timeout;
2298 	spin_lock_irqsave(&phba->hbalock, iflag);
2299 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2300 	if (phba->sli.mbox_active)
2301 		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2302 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2303 	/* Determine how long we might wait for the active mailbox
2304 	 * command to be gracefully completed by firmware.
2305 	 */
2306 	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2307 			jiffies;
	/* Wait for the outstanding mailbox command to complete */
2309 	while (phba->sli.mbox_active) {
2310 		/* Check active mailbox complete status every 2ms */
2311 		msleep(2);
2312 		if (time_after(jiffies, timeout)) {
2313 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2314 				"2813 Mgmt IO is Blocked %x "
2315 				"- mbox cmd %x still active\n",
2316 				phba->sli.sli_flag, actcmd);
2317 			break;
2318 		}
2319 	}
2320 }
2321 
2322 /**
2323  * lpfc_online - Initialize and bring a HBA online
2324  * @phba: pointer to lpfc hba data structure.
2325  *
2326  * This routine initializes the HBA and brings a HBA online. During this
2327  * process, the management interface is blocked to prevent user space access
2328  * to the HBA interfering with the driver initialization.
2329  *
2330  * Return codes
2331  *   0 - successful
2332  *   1 - failed
2333  **/
2334 int
2335 lpfc_online(struct lpfc_hba *phba)
2336 {
2337 	struct lpfc_vport *vport;
2338 	struct lpfc_vport **vports;
2339 	int i;
2340 
2341 	if (!phba)
2342 		return 0;
2343 	vport = phba->pport;
2344 
2345 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2346 		return 0;
2347 
2348 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2349 			"0458 Bring Adapter online\n");
2350 
2351 	lpfc_block_mgmt_io(phba);
2352 
2353 	if (!lpfc_sli_queue_setup(phba)) {
2354 		lpfc_unblock_mgmt_io(phba);
2355 		return 1;
2356 	}
2357 
2358 	if (phba->sli_rev == LPFC_SLI_REV4) {
2359 		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2360 			lpfc_unblock_mgmt_io(phba);
2361 			return 1;
2362 		}
2363 	} else {
2364 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2365 			lpfc_unblock_mgmt_io(phba);
2366 			return 1;
2367 		}
2368 	}
2369 
2370 	vports = lpfc_create_vport_work_array(phba);
2371 	if (vports != NULL)
2372 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2373 			struct Scsi_Host *shost;
2374 			shost = lpfc_shost_from_vport(vports[i]);
2375 			spin_lock_irq(shost->host_lock);
2376 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2377 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2378 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2379 			if (phba->sli_rev == LPFC_SLI_REV4)
2380 				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2381 			spin_unlock_irq(shost->host_lock);
2382 		}
	lpfc_destroy_vport_work_array(phba, vports);
2384 
2385 	lpfc_unblock_mgmt_io(phba);
2386 	return 0;
2387 }
2388 
2389 /**
2390  * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2391  * @phba: pointer to lpfc hba data structure.
2392  *
2393  * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space
 * access to the HBA, whether from the sysfs interface or the libdfc
 * interface, is allowed. The HBA is set to block the management interface
 * when the driver prepares the HBA interface for online or offline and is
 * then set to unblock the management interface afterwards.
2399  **/
2400 void
2401 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2402 {
2403 	unsigned long iflag;
2404 
2405 	spin_lock_irqsave(&phba->hbalock, iflag);
2406 	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2407 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2408 }
2409 
2410 /**
2411  * lpfc_offline_prep - Prepare a HBA to be brought offline
2412  * @phba: pointer to lpfc hba data structure.
2413  *
 * This routine is invoked to prepare a HBA to be brought offline. It issues
 * an unreg_login to all the nodes on all vports and flushes the mailbox
2416  * queue to make it ready to be brought offline.
2417  **/
2418 void
2419 lpfc_offline_prep(struct lpfc_hba * phba)
2420 {
2421 	struct lpfc_vport *vport = phba->pport;
2422 	struct lpfc_nodelist  *ndlp, *next_ndlp;
2423 	struct lpfc_vport **vports;
2424 	struct Scsi_Host *shost;
2425 	int i;
2426 
2427 	if (vport->fc_flag & FC_OFFLINE_MODE)
2428 		return;
2429 
2430 	lpfc_block_mgmt_io(phba);
2431 
2432 	lpfc_linkdown(phba);
2433 
2434 	/* Issue an unreg_login to all nodes on all vports */
2435 	vports = lpfc_create_vport_work_array(phba);
2436 	if (vports != NULL) {
2437 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2438 			if (vports[i]->load_flag & FC_UNLOADING)
2439 				continue;
2440 			shost = lpfc_shost_from_vport(vports[i]);
2441 			spin_lock_irq(shost->host_lock);
2442 			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2443 			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2444 			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2445 			spin_unlock_irq(shost->host_lock);
2446 
2448 			list_for_each_entry_safe(ndlp, next_ndlp,
2449 						 &vports[i]->fc_nodes,
2450 						 nlp_listp) {
2451 				if (!NLP_CHK_NODE_ACT(ndlp))
2452 					continue;
2453 				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2454 					continue;
2455 				if (ndlp->nlp_type & NLP_FABRIC) {
2456 					lpfc_disc_state_machine(vports[i], ndlp,
2457 						NULL, NLP_EVT_DEVICE_RECOVERY);
2458 					lpfc_disc_state_machine(vports[i], ndlp,
2459 						NULL, NLP_EVT_DEVICE_RM);
2460 				}
2461 				spin_lock_irq(shost->host_lock);
2462 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2463 				spin_unlock_irq(shost->host_lock);
2464 				lpfc_unreg_rpi(vports[i], ndlp);
2465 			}
2466 		}
2467 	}
2468 	lpfc_destroy_vport_work_array(phba, vports);
2469 
2470 	lpfc_sli_mbox_sys_shutdown(phba);
2471 }
2472 
2473 /**
2474  * lpfc_offline - Bring a HBA offline
2475  * @phba: pointer to lpfc hba data structure.
2476  *
2477  * This routine actually brings a HBA offline. It stops all the timers
2478  * associated with the HBA, brings down the SLI layer, and eventually
2479  * marks the HBA as in offline state for the upper layer protocol.
2480  **/
2481 void
2482 lpfc_offline(struct lpfc_hba *phba)
2483 {
2484 	struct Scsi_Host  *shost;
2485 	struct lpfc_vport **vports;
2486 	int i;
2487 
2488 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2489 		return;
2490 
2491 	/* stop port and all timers associated with this hba */
2492 	lpfc_stop_port(phba);
2493 	vports = lpfc_create_vport_work_array(phba);
2494 	if (vports != NULL)
2495 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2496 			lpfc_stop_vport_timers(vports[i]);
2497 	lpfc_destroy_vport_work_array(phba, vports);
2498 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2499 			"0460 Bring Adapter offline\n");
2500 	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2501 	   now.  */
2502 	lpfc_sli_hba_down(phba);
2503 	spin_lock_irq(&phba->hbalock);
2504 	phba->work_ha = 0;
2505 	spin_unlock_irq(&phba->hbalock);
2506 	vports = lpfc_create_vport_work_array(phba);
2507 	if (vports != NULL)
2508 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2509 			shost = lpfc_shost_from_vport(vports[i]);
2510 			spin_lock_irq(shost->host_lock);
2511 			vports[i]->work_port_events = 0;
2512 			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2513 			spin_unlock_irq(shost->host_lock);
2514 		}
2515 	lpfc_destroy_vport_work_array(phba, vports);
2516 }
2517 
2518 /**
2519  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2520  * @phba: pointer to lpfc hba data structure.
2521  *
2522  * This routine is to free all the SCSI buffers and IOCBs from the driver
 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2524  * the internal resources before the device is removed from the system.
2525  *
2526  * Return codes
2527  *   0 - successful (for now, it always returns 0)
2528  **/
2529 static int
2530 lpfc_scsi_free(struct lpfc_hba *phba)
2531 {
2532 	struct lpfc_scsi_buf *sb, *sb_next;
2533 	struct lpfc_iocbq *io, *io_next;
2534 
2535 	spin_lock_irq(&phba->hbalock);
2536 	/* Release all the lpfc_scsi_bufs maintained by this host. */
2537 	spin_lock(&phba->scsi_buf_list_lock);
2538 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2539 		list_del(&sb->list);
2540 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2541 			      sb->dma_handle);
2542 		kfree(sb);
2543 		phba->total_scsi_bufs--;
2544 	}
2545 	spin_unlock(&phba->scsi_buf_list_lock);
2546 
2547 	/* Release all the lpfc_iocbq entries maintained by this host. */
2548 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2549 		list_del(&io->list);
2550 		kfree(io);
2551 		phba->total_iocbq_bufs--;
2552 	}
2553 	spin_unlock_irq(&phba->hbalock);
2554 	return 0;
2555 }
2556 
2557 /**
2558  * lpfc_create_port - Create an FC port
2559  * @phba: pointer to lpfc hba data structure.
2560  * @instance: a unique integer ID to this FC port.
2561  * @dev: pointer to the device data structure.
2562  *
2563  * This routine creates a FC port for the upper layer protocol. The FC port
2564  * can be created on top of either a physical port or a virtual port provided
2565  * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates it with the newly created FC port before adding the shost
 * to the SCSI layer.
2568  *
2569  * Return codes
2570  *   @vport - pointer to the virtual N_Port data structure.
2571  *   NULL - port create failed.
2572  **/
2573 struct lpfc_vport *
2574 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2575 {
2576 	struct lpfc_vport *vport;
2577 	struct Scsi_Host  *shost;
2578 	int error = 0;
2579 
2580 	if (dev != &phba->pcidev->dev)
2581 		shost = scsi_host_alloc(&lpfc_vport_template,
2582 					sizeof(struct lpfc_vport));
2583 	else
2584 		shost = scsi_host_alloc(&lpfc_template,
2585 					sizeof(struct lpfc_vport));
2586 	if (!shost)
2587 		goto out;
2588 
2589 	vport = (struct lpfc_vport *) shost->hostdata;
2590 	vport->phba = phba;
2591 	vport->load_flag |= FC_LOADING;
2592 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2593 	vport->fc_rscn_flush = 0;
2594 
2595 	lpfc_get_vport_cfgparam(vport);
2596 	shost->unique_id = instance;
2597 	shost->max_id = LPFC_MAX_TARGET;
2598 	shost->max_lun = vport->cfg_max_luns;
2599 	shost->this_id = -1;
2600 	shost->max_cmd_len = 16;
2601 	if (phba->sli_rev == LPFC_SLI_REV4) {
2602 		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len - 1;
2604 		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2605 	}
2606 
2607 	/*
2608 	 * Set initial can_queue value since 0 is no longer supported and
2609 	 * scsi_add_host will fail. This will be adjusted later based on the
2610 	 * max xri value determined in hba setup.
2611 	 */
2612 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2613 	if (dev != &phba->pcidev->dev) {
2614 		shost->transportt = lpfc_vport_transport_template;
2615 		vport->port_type = LPFC_NPIV_PORT;
2616 	} else {
2617 		shost->transportt = lpfc_transport_template;
2618 		vport->port_type = LPFC_PHYSICAL_PORT;
2619 	}
2620 
2621 	/* Initialize all internally managed lists. */
2622 	INIT_LIST_HEAD(&vport->fc_nodes);
2623 	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2624 	spin_lock_init(&vport->work_port_lock);
2625 
2626 	init_timer(&vport->fc_disctmo);
2627 	vport->fc_disctmo.function = lpfc_disc_timeout;
2628 	vport->fc_disctmo.data = (unsigned long)vport;
2629 
2630 	init_timer(&vport->fc_fdmitmo);
2631 	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2632 	vport->fc_fdmitmo.data = (unsigned long)vport;
2633 
2634 	init_timer(&vport->els_tmofunc);
2635 	vport->els_tmofunc.function = lpfc_els_timeout;
2636 	vport->els_tmofunc.data = (unsigned long)vport;
2637 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2638 	if (error)
2639 		goto out_put_shost;
2640 
2641 	spin_lock_irq(&phba->hbalock);
2642 	list_add_tail(&vport->listentry, &phba->port_list);
2643 	spin_unlock_irq(&phba->hbalock);
2644 	return vport;
2645 
2646 out_put_shost:
2647 	scsi_host_put(shost);
2648 out:
2649 	return NULL;
2650 }
2651 
2652 /**
2653  * destroy_port -  destroy an FC port
2654  * @vport: pointer to an lpfc virtual N_Port data structure.
2655  *
2656  * This routine destroys a FC port from the upper layer protocol. All the
2657  * resources associated with the port are released.
2658  **/
2659 void
2660 destroy_port(struct lpfc_vport *vport)
2661 {
2662 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2663 	struct lpfc_hba  *phba = vport->phba;
2664 
2665 	lpfc_debugfs_terminate(vport);
2666 	fc_remove_host(shost);
2667 	scsi_remove_host(shost);
2668 
2669 	spin_lock_irq(&phba->hbalock);
2670 	list_del_init(&vport->listentry);
2671 	spin_unlock_irq(&phba->hbalock);
2672 
2673 	lpfc_cleanup(vport);
2674 	return;
2675 }
2676 
2677 /**
2678  * lpfc_get_instance - Get a unique integer ID
2679  *
2680  * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2681  * uses the kernel idr facility to perform the task.
2682  *
2683  * Return codes:
2684  *   instance - a unique integer ID allocated as the new instance.
2685  *   -1 - lpfc get instance failed.
2686  **/
2687 int
2688 lpfc_get_instance(void)
2689 {
2690 	int instance = 0;
2691 
2692 	/* Assign an unused number */
2693 	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2694 		return -1;
2695 	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2696 		return -1;
2697 	return instance;
2698 }
2699 
2700 /**
2701  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2702  * @shost: pointer to SCSI host data structure.
2703  * @time: elapsed time of the scan in jiffies.
2704  *
2705  * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the host scan is finished.
2707  *
2708  * Note: there is no scan_start function as adapter initialization will have
2709  * asynchronously kicked off the link initialization.
2710  *
2711  * Return codes
2712  *   0 - SCSI host scan is not over yet.
2713  *   1 - SCSI host scan is over.
2714  **/
2715 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2716 {
2717 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2718 	struct lpfc_hba   *phba = vport->phba;
2719 	int stat = 0;
2720 
2721 	spin_lock_irq(shost->host_lock);
2722 
2723 	if (vport->load_flag & FC_UNLOADING) {
2724 		stat = 1;
2725 		goto finished;
2726 	}
2727 	if (time >= 30 * HZ) {
2728 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2729 				"0461 Scanning longer than 30 "
2730 				"seconds.  Continuing initialization\n");
2731 		stat = 1;
2732 		goto finished;
2733 	}
2734 	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2735 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2736 				"0465 Link down longer than 15 "
2737 				"seconds.  Continuing initialization\n");
2738 		stat = 1;
2739 		goto finished;
2740 	}
2741 
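	/* Only declare the scan complete when the port is ready,
	 * discovery has quiesced, and no mailbox command is active.
	 */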
2742 	if (vport->port_state != LPFC_VPORT_READY)
2743 		goto finished;
2744 	if (vport->num_disc_nodes || vport->fc_prli_sent)
2745 		goto finished;
2746 	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2747 		goto finished;
2748 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2749 		goto finished;
2750 
2751 	stat = 1;
2752 
2753 finished:
2754 	spin_unlock_irq(shost->host_lock);
2755 	return stat;
2756 }
2757 
2758 /**
2759  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2760  * @shost: pointer to SCSI host data structure.
2761  *
 * This routine initializes a given SCSI host's attributes on a FC port. The
2763  * SCSI host can be either on top of a physical port or a virtual port.
2764  **/
2765 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2766 {
2767 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2768 	struct lpfc_hba   *phba = vport->phba;
2769 	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2771 	 */
2772 
2773 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2774 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2775 	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2776 
2777 	memset(fc_host_supported_fc4s(shost), 0,
2778 	       sizeof(fc_host_supported_fc4s(shost)));
2779 	fc_host_supported_fc4s(shost)[2] = 1;
2780 	fc_host_supported_fc4s(shost)[7] = 1;
2781 
2782 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2783 				 sizeof fc_host_symbolic_name(shost));
2784 
2785 	fc_host_supported_speeds(shost) = 0;
2786 	if (phba->lmt & LMT_10Gb)
2787 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2788 	if (phba->lmt & LMT_8Gb)
2789 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2790 	if (phba->lmt & LMT_4Gb)
2791 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2792 	if (phba->lmt & LMT_2Gb)
2793 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2794 	if (phba->lmt & LMT_1Gb)
2795 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2796 
2797 	fc_host_maxframe_size(shost) =
2798 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2799 		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2800 
2801 	/* This value is also unchanging */
2802 	memset(fc_host_active_fc4s(shost), 0,
2803 	       sizeof(fc_host_active_fc4s(shost)));
2804 	fc_host_active_fc4s(shost)[2] = 1;
2805 	fc_host_active_fc4s(shost)[7] = 1;
2806 
2807 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2808 	spin_lock_irq(shost->host_lock);
2809 	vport->load_flag &= ~FC_LOADING;
2810 	spin_unlock_irq(shost->host_lock);
2811 }
2812 
2813 /**
2814  * lpfc_stop_port_s3 - Stop SLI3 device port
2815  * @phba: pointer to lpfc hba data structure.
2816  *
 * This routine is invoked to stop an SLI3 device port. It stops the device
2818  * from generating interrupts and stops the device driver's timers for the
2819  * device.
2820  **/
2821 static void
2822 lpfc_stop_port_s3(struct lpfc_hba *phba)
2823 {
2824 	/* Clear all interrupt enable conditions */
2825 	writel(0, phba->HCregaddr);
2826 	readl(phba->HCregaddr); /* flush */
2827 	/* Clear all pending interrupts */
2828 	writel(0xffffffff, phba->HAregaddr);
2829 	readl(phba->HAregaddr); /* flush */
2830 
2831 	/* Reset some HBA SLI setup states */
2832 	lpfc_stop_hba_timers(phba);
2833 	phba->pport->work_port_events = 0;
2834 }
2835 
2836 /**
2837  * lpfc_stop_port_s4 - Stop SLI4 device port
2838  * @phba: pointer to lpfc hba data structure.
2839  *
 * This routine is invoked to stop an SLI4 device port. It stops the device
2841  * from generating interrupts and stops the device driver's timers for the
2842  * device.
2843  **/
2844 static void
2845 lpfc_stop_port_s4(struct lpfc_hba *phba)
2846 {
2847 	/* Reset some HBA SLI4 setup states */
2848 	lpfc_stop_hba_timers(phba);
2849 	phba->pport->work_port_events = 0;
2850 	phba->sli4_hba.intr_enable = 0;
2851 }
2852 
2853 /**
2854  * lpfc_stop_port - Wrapper function for stopping hba port
2855  * @phba: Pointer to HBA context object.
2856  *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine through
 * the API jump table function pointer from the lpfc_hba struct.
2859  **/
2860 void
2861 lpfc_stop_port(struct lpfc_hba *phba)
2862 {
2863 	phba->lpfc_stop_port(phba);
2864 }
2865 
2866 /**
 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2868  * @phba: pointer to lpfc hba data structure.
2869  *
2870  * This routine is invoked to remove the driver default fcf record from
2871  * the port.  This routine currently acts on FCF Index 0.
2872  *
2873  **/
2874 void
2875 lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2876 {
2877 	int rc = 0;
2878 	LPFC_MBOXQ_t *mboxq;
2879 	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2880 	uint32_t mbox_tmo, req_len;
2881 	uint32_t shdr_status, shdr_add_status;
2882 
2883 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2884 	if (!mboxq) {
2885 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2887 		return;
2888 	}
2889 
2890 	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2891 		  sizeof(struct lpfc_sli4_cfg_mhdr);
2892 	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2893 			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2894 			      req_len, LPFC_SLI4_MBX_EMBED);
2895 	/*
	 * In phase 1, there is a single FCF index, 0. In phase 2, the driver
2897 	 * supports multiple FCF indices.
2898 	 */
2899 	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2900 	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2901 	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2902 	       phba->fcf.current_rec.fcf_indx);
2903 
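	/* Issue the mailbox synchronously: poll when interrupts are not
	 * yet enabled, otherwise wait for the completion.
	 */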
2904 	if (!phba->sli4_hba.intr_enable)
2905 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2906 	else {
2907 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2908 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2909 	}
2910 	/* The IOCTL status is embedded in the mailbox subheader. */
2911 	shdr_status = bf_get(lpfc_mbox_hdr_status,
2912 			     &del_fcf_record->header.cfg_shdr.response);
2913 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2914 				 &del_fcf_record->header.cfg_shdr.response);
2915 	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2916 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2917 				"2516 DEL FCF of default FCF Index failed "
2918 				"mbx status x%x, status x%x add_status x%x\n",
2919 				rc, shdr_status, shdr_add_status);
2920 	}
2921 	if (rc != MBX_TIMEOUT)
2922 		mempool_free(mboxq, phba->mbox_mem_pool);
2923 }
2924 
2925 /**
2926  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2927  * @phba: Pointer to hba for which this call is being executed.
2928  *
2929  * This routine starts the timer waiting for the FCF rediscovery to complete.
2930  **/
2931 void
2932 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2933 {
2934 	unsigned long fcf_redisc_wait_tmo =
2935 		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2936 	/* Start fcf rediscovery wait period timer */
2937 	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2938 	spin_lock_irq(&phba->hbalock);
2939 	/* Allow action to new fcf asynchronous event */
2940 	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2941 	/* Mark the FCF rediscovery pending state */
2942 	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2943 	spin_unlock_irq(&phba->hbalock);
2944 }
2945 
2946 /**
2947  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: pointer to the lpfc hba data structure, cast to an unsigned long.
 *
 * This routine is invoked when the wait for FCF table rediscovery times
 * out. If new FCF records have been discovered during the wait period, a
 * new FCF event is added to the FCoE async event list and the worker
 * thread is woken up to process it in the worker thread context.
2955  **/
2956 void
2957 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2958 {
2959 	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2960 
2961 	/* Don't send FCF rediscovery event if timer cancelled */
2962 	spin_lock_irq(&phba->hbalock);
2963 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2964 		spin_unlock_irq(&phba->hbalock);
2965 		return;
2966 	}
2967 	/* Clear FCF rediscovery timer pending flag */
2968 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2969 	/* FCF rediscovery event to worker thread */
2970 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2971 	spin_unlock_irq(&phba->hbalock);
2972 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2973 			"2776 FCF rediscover wait timer expired, post "
2974 			"a worker thread event for FCF table scan\n");
2975 	/* wake up worker thread */
2976 	lpfc_worker_wake_up(phba);
2977 }
2978 
2979 /**
2980  * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2981  * @phba: pointer to lpfc hba data structure.
2982  *
2983  * This function uses the QUERY_FW_CFG mailbox command to determine if the
2984  * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2985  * was successful and the firmware supports FCoE. Any other return indicates
 * an error. It is assumed that this function will be called before interrupts
2987  * are enabled.
2988  **/
2989 static int
2990 lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2991 {
2992 	int rc = 0;
2993 	LPFC_MBOXQ_t *mboxq;
2994 	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2995 	uint32_t length;
2996 	uint32_t shdr_status, shdr_add_status;
2997 
2998 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2999 	if (!mboxq) {
3000 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3001 				"2621 Failed to allocate mbox for "
3002 				"query firmware config cmd\n");
3003 		return -ENOMEM;
3004 	}
3005 	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
3006 	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
3007 		  sizeof(struct lpfc_sli4_cfg_mhdr));
3008 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
3009 			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
3010 			 length, LPFC_SLI4_MBX_EMBED);
3011 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3012 	/* The IOCTL status is embedded in the mailbox subheader. */
3013 	shdr_status = bf_get(lpfc_mbox_hdr_status,
3014 			     &query_fw_cfg->header.cfg_shdr.response);
3015 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
3016 				 &query_fw_cfg->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2622 Query Firmware Config failed "
				"mbx status x%x, status x%x add_status x%x\n",
				rc, shdr_status, shdr_add_status);
		/* Do not leak the mailbox; keep it only if the command
		 * timed out and firmware may still own it.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}
	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2623 FCoE Function not supported by firmware. "
				"Function mode = %08x\n",
				query_fw_cfg->function_mode);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EINVAL;
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return 0;
3034 }
3035 
3036 /**
3037  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3038  * @phba: pointer to lpfc hba data structure.
3039  * @acqe_link: pointer to the async link completion queue entry.
3040  *
3041  * This routine is to parse the SLI4 link-attention link fault code and
3042  * translate it into the base driver's read link attention mailbox command
3043  * status.
3044  *
3045  * Return: Link-attention status in terms of base driver's coding.
3046  **/
3047 static uint16_t
3048 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3049 			   struct lpfc_acqe_link *acqe_link)
3050 {
3051 	uint16_t latt_fault;
3052 
3053 	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3054 	case LPFC_ASYNC_LINK_FAULT_NONE:
3055 	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3056 	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3057 		latt_fault = 0;
3058 		break;
3059 	default:
3060 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3061 				"0398 Invalid link fault code: x%x\n",
3062 				bf_get(lpfc_acqe_link_fault, acqe_link));
3063 		latt_fault = MBXERR_ERROR;
3064 		break;
3065 	}
3066 	return latt_fault;
3067 }
3068 
3069 /**
3070  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3071  * @phba: pointer to lpfc hba data structure.
3072  * @acqe_link: pointer to the async link completion queue entry.
3073  *
3074  * This routine is to parse the SLI4 link attention type and translate it
3075  * into the base driver's link attention type coding.
3076  *
3077  * Return: Link attention type in terms of base driver's coding.
3078  **/
3079 static uint8_t
3080 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3081 			  struct lpfc_acqe_link *acqe_link)
3082 {
3083 	uint8_t att_type;
3084 
3085 	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3086 	case LPFC_ASYNC_LINK_STATUS_DOWN:
3087 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3088 		att_type = AT_LINK_DOWN;
3089 		break;
3090 	case LPFC_ASYNC_LINK_STATUS_UP:
3091 		/* Ignore physical link up events - wait for logical link up */
3092 		att_type = AT_RESERVED;
3093 		break;
3094 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3095 		att_type = AT_LINK_UP;
3096 		break;
3097 	default:
3098 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3099 				"0399 Invalid link attention type: x%x\n",
3100 				bf_get(lpfc_acqe_link_status, acqe_link));
3101 		att_type = AT_RESERVED;
3102 		break;
3103 	}
3104 	return att_type;
3105 }
3106 
3107 /**
3108  * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3109  * @phba: pointer to lpfc hba data structure.
3110  * @acqe_link: pointer to the async link completion queue entry.
3111  *
3112  * This routine is to parse the SLI4 link-attention link speed and translate
3113  * it into the base driver's link-attention link speed coding.
3114  *
3115  * Return: Link-attention link speed in terms of base driver's coding.
3116  **/
3117 static uint8_t
3118 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3119 				struct lpfc_acqe_link *acqe_link)
3120 {
3121 	uint8_t link_speed;
3122 
3123 	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
	case LPFC_ASYNC_LINK_SPEED_ZERO:
	case LPFC_ASYNC_LINK_SPEED_10MBPS:
	case LPFC_ASYNC_LINK_SPEED_100MBPS:
		link_speed = LA_UNKNW_LINK;
		break;
3133 	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3134 		link_speed = LA_1GHZ_LINK;
3135 		break;
3136 	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3137 		link_speed = LA_10GHZ_LINK;
3138 		break;
3139 	default:
3140 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3141 				"0483 Invalid link-attention link speed: x%x\n",
3142 				bf_get(lpfc_acqe_link_speed, acqe_link));
3143 		link_speed = LA_UNKNW_LINK;
3144 		break;
3145 	}
3146 	return link_speed;
3147 }
3148 
3149 /**
3150  * lpfc_sli4_async_link_evt - Process the asynchronous link event
3151  * @phba: pointer to lpfc hba data structure.
3152  * @acqe_link: pointer to the async link completion queue entry.
3153  *
3154  * This routine is to handle the SLI4 asynchronous link event.
3155  **/
3156 static void
3157 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3158 			 struct lpfc_acqe_link *acqe_link)
3159 {
3160 	struct lpfc_dmabuf *mp;
3161 	LPFC_MBOXQ_t *pmb;
3162 	MAILBOX_t *mb;
3163 	READ_LA_VAR *la;
3164 	uint8_t att_type;
3165 
3166 	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3167 	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3168 		return;
3169 	phba->fcoe_eventtag = acqe_link->event_tag;
3170 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3171 	if (!pmb) {
3172 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3173 				"0395 The mboxq allocation failed\n");
3174 		return;
3175 	}
3176 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3177 	if (!mp) {
3178 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3179 				"0396 The lpfc_dmabuf allocation failed\n");
3180 		goto out_free_pmb;
3181 	}
3182 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3183 	if (!mp->virt) {
3184 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3185 				"0397 The mbuf allocation failed\n");
3186 		goto out_free_dmabuf;
3187 	}
3188 
3189 	/* Cleanup any outstanding ELS commands */
3190 	lpfc_els_flush_all_cmd(phba);
3191 
	/* Block ELS IOCBs until we have processed the link event */
3193 	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3194 
3195 	/* Update link event statistics */
3196 	phba->sli.slistat.link_event++;
3197 
3198 	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3199 	lpfc_read_la(phba, pmb, mp);
3200 	pmb->vport = phba->pport;
3201 
3202 	/* Parse and translate status field */
3203 	mb = &pmb->u.mb;
3204 	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3205 
3206 	/* Parse and translate link attention fields */
3207 	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3208 	la->eventTag = acqe_link->event_tag;
3209 	la->attType = att_type;
3210 	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3211 
	/* Fake the following irrelevant fields */
3213 	la->topology = TOPOLOGY_PT_PT;
3214 	la->granted_AL_PA = 0;
3215 	la->il = 0;
3216 	la->pb = 0;
3217 	la->fa = 0;
3218 	la->mm = 0;
3219 
3220 	/* Keep the link status for extra SLI4 state machine reference */
3221 	phba->sli4_hba.link_state.speed =
3222 				bf_get(lpfc_acqe_link_speed, acqe_link);
3223 	phba->sli4_hba.link_state.duplex =
3224 				bf_get(lpfc_acqe_link_duplex, acqe_link);
3225 	phba->sli4_hba.link_state.status =
3226 				bf_get(lpfc_acqe_link_status, acqe_link);
3227 	phba->sli4_hba.link_state.physical =
3228 				bf_get(lpfc_acqe_link_physical, acqe_link);
3229 	phba->sli4_hba.link_state.fault =
3230 				bf_get(lpfc_acqe_link_fault, acqe_link);
3231 	phba->sli4_hba.link_state.logical_speed =
3232 				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3233 
3234 	/* Invoke the lpfc_handle_latt mailbox command callback function */
3235 	lpfc_mbx_cmpl_read_la(phba, pmb);
3236 
3237 	return;
3238 
3239 out_free_dmabuf:
3240 	kfree(mp);
3241 out_free_pmb:
3242 	mempool_free(pmb, phba->mbox_mem_pool);
3243 }
3244 
3245 /**
3246  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3247  * @vport: pointer to vport data structure.
3248  *
3249  * This routine is to perform Clear Virtual Link (CVL) on a vport in
3250  * response to a CVL event.
3251  *
3252  * Return the pointer to the ndlp with the vport if successful, otherwise
3253  * return NULL.
3254  **/
3255 static struct lpfc_nodelist *
3256 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3257 {
3258 	struct lpfc_nodelist *ndlp;
3259 	struct Scsi_Host *shost;
3260 	struct lpfc_hba *phba;
3261 
3262 	if (!vport)
3263 		return NULL;
3264 	phba = vport->phba;
3265 	if (!phba)
3266 		return NULL;
3267 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3268 	if (!ndlp) {
3269 		/* Cannot find existing Fabric ndlp, so allocate a new one */
3270 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3271 		if (!ndlp)
			return NULL;
3273 		lpfc_nlp_init(vport, ndlp, Fabric_DID);
3274 		/* Set the node type */
3275 		ndlp->nlp_type |= NLP_FABRIC;
3276 		/* Put ndlp onto node list */
3277 		lpfc_enqueue_node(vport, ndlp);
3278 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3279 		/* re-setup ndlp without removing from node list */
3280 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3281 		if (!ndlp)
			return NULL;
3283 	}
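	/* Ignore the CVL if the physical port has not progressed past FLOGI */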
3284 	if (phba->pport->port_state <= LPFC_FLOGI)
3285 		return NULL;
	/* If the virtual link is not yet instantiated, ignore the CVL */
3287 	if (vport->port_state <= LPFC_FDISC)
3288 		return NULL;
3289 	shost = lpfc_shost_from_vport(vport);
3290 	if (!shost)
3291 		return NULL;
3292 	lpfc_linkdown_port(vport);
3293 	lpfc_cleanup_pending_mbox(vport);
3294 	spin_lock_irq(shost->host_lock);
3295 	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3296 	spin_unlock_irq(shost->host_lock);
3297 
3298 	return ndlp;
3299 }
3300 
3301 /**
3302  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
3304  *
3305  * This routine is to perform Clear Virtual Link (CVL) on all vports in
3306  * response to a FCF dead event.
3307  **/
3308 static void
3309 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3310 {
3311 	struct lpfc_vport **vports;
3312 	int i;
3313 
3314 	vports = lpfc_create_vport_work_array(phba);
3315 	if (vports)
3316 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3317 			lpfc_sli4_perform_vport_cvl(vports[i]);
3318 	lpfc_destroy_vport_work_array(phba, vports);
3319 }
3320 
3321 /**
3322  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3323  * @phba: pointer to lpfc hba data structure.
3324  * @acqe_fcoe: pointer to the async fcoe completion queue entry.
3325  *
3326  * This routine is to handle the SLI4 asynchronous fcoe event.
3327  **/
3328 static void
3329 lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3330 			 struct lpfc_acqe_fcoe *acqe_fcoe)
3331 {
3332 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3333 	int rc;
3334 	struct lpfc_vport *vport;
3335 	struct lpfc_nodelist *ndlp;
3336 	struct Scsi_Host  *shost;
3337 	int active_vlink_present;
3338 	struct lpfc_vport **vports;
3339 	int i;
3340 
3341 	phba->fc_eventTag = acqe_fcoe->event_tag;
3342 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3343 	switch (event_type) {
3344 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3345 	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3346 		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3347 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3348 					LOG_DISCOVERY,
3349 					"2546 New FCF found event: "
3350 					"evt_tag:x%x, fcf_index:x%x\n",
3351 					acqe_fcoe->event_tag,
3352 					acqe_fcoe->index);
3353 		else
3354 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3355 					LOG_DISCOVERY,
3356 					"2788 FCF parameter modified event: "
3357 					"evt_tag:x%x, fcf_index:x%x\n",
3358 					acqe_fcoe->event_tag,
3359 					acqe_fcoe->index);
3360 		/* If the FCF discovery is in progress, do nothing. */
3361 		spin_lock_irq(&phba->hbalock);
3362 		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
3363 			spin_unlock_irq(&phba->hbalock);
3364 			break;
3365 		}
3366 		/* If fast FCF failover rescan event is pending, do nothing */
3367 		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3368 			spin_unlock_irq(&phba->hbalock);
3369 			break;
3370 		}
3371 		spin_unlock_irq(&phba->hbalock);
3372 
3373 		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3374 		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3375 			/*
3376 			 * During period of FCF discovery, read the FCF
3377 			 * table record indexed by the event to update
3378 			 * FCF round robin failover eligible FCF bmask.
3379 			 */
3380 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3381 					LOG_DISCOVERY,
3382 					"2779 Read new FCF record with "
3383 					"fcf_index:x%x for updating FCF "
3384 					"round robin failover bmask\n",
3385 					acqe_fcoe->index);
3386 			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3387 		}
3388 		/* If the FCF is already in discovered state, do nothing. */
3389 		spin_lock_irq(&phba->hbalock);
3390 		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3391 			spin_unlock_irq(&phba->hbalock);
3392 			break;
3393 		}
3394 		spin_unlock_irq(&phba->hbalock);
3395 		/* Otherwise, scan the entire FCF table and re-discover SAN */
3396 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3397 				"2770 Start FCF table scan due to new FCF "
3398 				"event: evt_tag:x%x, fcf_index:x%x\n",
3399 				acqe_fcoe->event_tag, acqe_fcoe->index);
3400 		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3401 						     LPFC_FCOE_FCF_GET_FIRST);
3402 		if (rc)
3403 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3404 					"2547 Issue FCF scan read FCF mailbox "
3405 					"command failed 0x%x\n", rc);
3406 		break;
3407 
3408 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3409 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3410 			"2548 FCF Table full count 0x%x tag 0x%x\n",
3411 			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3412 			acqe_fcoe->event_tag);
3413 		break;
3414 
3415 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3416 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3417 			"2549 FCF disconnected from network index 0x%x"
3418 			" tag 0x%x\n", acqe_fcoe->index,
3419 			acqe_fcoe->event_tag);
3420 		/* If the event is not for the currently used fcf, do nothing */
3421 		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3422 			break;
3423 		/* If we are not already in the middle of the FCF failover
3424 		 * process, request the port to rediscover the entire FCF
3425 		 * table for fast recovery from the case that the current
3426 		 * FCF record is no longer valid.
3427 		 */
3428 		spin_lock_irq(&phba->hbalock);
3429 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3430 			spin_unlock_irq(&phba->hbalock);
3431 			/* Update FLOGI FCF failover eligible FCF bmask */
3432 			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3433 			break;
3434 		}
3435 		/* Mark the fast failover process in progress */
3436 		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3437 		spin_unlock_irq(&phba->hbalock);
3438 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3439 				"2771 Start FCF fast failover process due to "
3440 				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3441 				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3442 		rc = lpfc_sli4_redisc_fcf_table(phba);
3443 		if (rc) {
3444 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3445 					LOG_DISCOVERY,
3446 					"2772 Issue FCF rediscover mabilbox "
3447 					"command failed, fail through to FCF "
3448 					"dead event\n");
3449 			spin_lock_irq(&phba->hbalock);
3450 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3451 			spin_unlock_irq(&phba->hbalock);
3452 			/*
3453 			 * Last resort will fail over by treating this
3454 			 * as a link down to FCF registration.
3455 			 */
3456 			lpfc_sli4_fcf_dead_failthrough(phba);
3457 		} else
3458 			/* Handling fast FCF failover to a DEAD FCF event
3459 			 * is considered equivalent to receiving CVL to all
3460 			 * vports.
3461 			 */
3462 			lpfc_sli4_perform_all_vport_cvl(phba);
3463 		break;
3464 	case LPFC_FCOE_EVENT_TYPE_CVL:
3465 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3466 			"2718 Clear Virtual Link Received for VPI 0x%x"
3467 			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3468 		vport = lpfc_find_vport_by_vpid(phba,
3469 				acqe_fcoe->index - phba->vpi_base);
3470 		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3471 		if (!ndlp)
3472 			break;
3473 		active_vlink_present = 0;
3474 
3475 		vports = lpfc_create_vport_work_array(phba);
3476 		if (vports) {
3477 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3478 					i++) {
3479 				if ((!(vports[i]->fc_flag &
3480 					FC_VPORT_CVL_RCVD)) &&
3481 					(vports[i]->port_state > LPFC_FDISC)) {
3482 					active_vlink_present = 1;
3483 					break;
3484 				}
3485 			}
3486 			lpfc_destroy_vport_work_array(phba, vports);
3487 		}
3488 
3489 		if (active_vlink_present) {
3490 			/*
3491 			 * If there are other active VLinks present,
3492 			 * re-instantiate the Vlink using FDISC.
3493 			 */
3494 			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3495 			shost = lpfc_shost_from_vport(vport);
3496 			spin_lock_irq(shost->host_lock);
3497 			ndlp->nlp_flag |= NLP_DELAY_TMO;
3498 			spin_unlock_irq(shost->host_lock);
3499 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3500 			vport->port_state = LPFC_FDISC;
3501 		} else {
3502 			/*
3503 			 * Otherwise, we request port to rediscover
3504 			 * the entire FCF table for a fast recovery
3505 			 * from possible case that the current FCF
3506 			 * is no longer valid if we are not already
3507 			 * in the FCF failover process.
3508 			 */
3509 			spin_lock_irq(&phba->hbalock);
3510 			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3511 				spin_unlock_irq(&phba->hbalock);
3512 				break;
3513 			}
3514 			/* Mark the fast failover process in progress */
3515 			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3516 			spin_unlock_irq(&phba->hbalock);
3517 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3518 					LOG_DISCOVERY,
3519 					"2773 Start FCF fast failover due "
3520 					"to CVL event: evt_tag:x%x\n",
3521 					acqe_fcoe->event_tag);
3522 			rc = lpfc_sli4_redisc_fcf_table(phba);
3523 			if (rc) {
3524 				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3525 						LOG_DISCOVERY,
3526 						"2774 Issue FCF rediscover "
3527 						"mabilbox command failed, "
3528 						"through to CVL event\n");
3529 				spin_lock_irq(&phba->hbalock);
3530 				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3531 				spin_unlock_irq(&phba->hbalock);
3532 				/*
3533 				 * Last resort will be to re-try on
3534 				 * the current registered FCF entry.
3535 				 */
3536 				lpfc_retry_pport_discovery(phba);
3537 			}
3538 		}
3539 		break;
3540 	default:
3541 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3542 			"0288 Unknown FCoE event type 0x%x event tag "
3543 			"0x%x\n", event_type, acqe_fcoe->event_tag);
3544 		break;
3545 	}
3546 }
3547 
3548 /**
3549  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3550  * @phba: pointer to lpfc hba data structure.
3551  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3552  *
3553  * This routine is to handle the SLI4 asynchronous dcbx event.
3554  **/
3555 static void
3556 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3557 			 struct lpfc_acqe_dcbx *acqe_dcbx)
3558 {
3559 	phba->fc_eventTag = acqe_dcbx->event_tag;
3560 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3561 			"0290 The SLI4 DCBX asynchronous event is not "
3562 			"handled yet\n");
3563 }
3564 
3565 /**
3566  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3567  * @phba: pointer to lpfc hba data structure.
3568  * @acqe_grp5: pointer to the async grp5 completion queue entry.
3569  *
3570  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3571  * is an asynchronous notification of a logical link speed change.  The Port
3572  * reports the logical link speed in units of 10Mbps.
3573  **/
3574 static void
3575 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3576 			 struct lpfc_acqe_grp5 *acqe_grp5)
3577 {
3578 	uint16_t prev_ll_spd;
3579 
3580 	phba->fc_eventTag = acqe_grp5->event_tag;
3581 	phba->fcoe_eventtag = acqe_grp5->event_tag;
3582 	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3583 	phba->sli4_hba.link_state.logical_speed =
3584 		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3585 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3586 			"2789 GRP5 Async Event: Updating logical link speed "
3587 			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3588 			(phba->sli4_hba.link_state.logical_speed*10));
3589 }
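
/*
 * Worked example of the unit conversion above (illustrative only): the
 * GRP5 ACQE reports logical link speed in 10 Mbps units, so a raw value
 * of 1000 corresponds to 1000 * 10 = 10000 Mbps (10 Gbps). A minimal
 * sketch of the same conversion as a helper; the helper name is
 * hypothetical and not part of this driver:
 *
 *	static inline uint32_t lpfc_ll_spd_to_mbps(uint16_t raw_spd)
 *	{
 *		return (uint32_t)raw_spd * 10;
 *	}
 */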
3590 
3591 /**
3592  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3593  * @phba: pointer to lpfc hba data structure.
3594  *
3595  * This routine is invoked by the worker thread to process all the pending
3596  * SLI4 asynchronous events.
3597  **/
3598 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3599 {
3600 	struct lpfc_cq_event *cq_event;
3601 
3602 	/* First, declare the async event has been handled */
3603 	spin_lock_irq(&phba->hbalock);
3604 	phba->hba_flag &= ~ASYNC_EVENT;
3605 	spin_unlock_irq(&phba->hbalock);
3606 	/* Now, handle all the async events */
3607 	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3608 		/* Get the first event from the head of the event queue */
3609 		spin_lock_irq(&phba->hbalock);
3610 		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3611 				 cq_event, struct lpfc_cq_event, list);
3612 		spin_unlock_irq(&phba->hbalock);
3613 		/* Process the asynchronous event */
3614 		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3615 		case LPFC_TRAILER_CODE_LINK:
3616 			lpfc_sli4_async_link_evt(phba,
3617 						 &cq_event->cqe.acqe_link);
3618 			break;
3619 		case LPFC_TRAILER_CODE_FCOE:
3620 			lpfc_sli4_async_fcoe_evt(phba,
3621 						 &cq_event->cqe.acqe_fcoe);
3622 			break;
3623 		case LPFC_TRAILER_CODE_DCBX:
3624 			lpfc_sli4_async_dcbx_evt(phba,
3625 						 &cq_event->cqe.acqe_dcbx);
3626 			break;
3627 		case LPFC_TRAILER_CODE_GRP5:
3628 			lpfc_sli4_async_grp5_evt(phba,
3629 						 &cq_event->cqe.acqe_grp5);
3630 			break;
3631 		default:
3632 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3633 					"1804 Invalid asynchrous event code: "
3634 					"x%x\n", bf_get(lpfc_trailer_code,
3635 					&cq_event->cqe.mcqe_cmpl));
3636 			break;
3637 		}
3638 		/* Free the completion event processed to the free pool */
3639 		lpfc_sli4_cq_event_release(phba, cq_event);
3640 	}
3641 }
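
/*
 * The loop above follows a common "drain under lock" idiom: hold the
 * lock only long enough to detach one element, then process it with the
 * lock dropped so the event handlers may block or take other locks. A
 * minimal sketch of the pattern with hypothetical names (my_list,
 * my_lock, struct my_event and process() are not part of this driver):
 *
 *	struct my_event *evt;
 *
 *	while (!list_empty(&my_list)) {
 *		spin_lock_irq(&my_lock);
 *		list_remove_head(&my_list, evt, struct my_event, list);
 *		spin_unlock_irq(&my_lock);
 *		process(evt);
 *	}
 */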
3642 
3643 /**
3644  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3645  * @phba: pointer to lpfc hba data structure.
3646  *
3647  * This routine is invoked by the worker thread to process FCF table
3648  * rediscovery pending completion event.
3649  **/
3650 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3651 {
3652 	int rc;
3653 
3654 	spin_lock_irq(&phba->hbalock);
3655 	/* Clear FCF rediscovery timeout event */
3656 	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3657 	/* Clear driver fast failover FCF record flag */
3658 	phba->fcf.failover_rec.flag = 0;
3659 	/* Set state for FCF fast failover */
3660 	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3661 	spin_unlock_irq(&phba->hbalock);
3662 
3663 	/* Scan FCF table from the first entry to re-discover SAN */
3664 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3665 			"2777 Start FCF table scan after FCF "
3666 			"rediscovery quiescent period over\n");
3667 	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3668 	if (rc)
3669 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3670 				"2747 Issue FCF scan read FCF mailbox "
3671 				"command failed 0x%x\n", rc);
3672 }
3673 
3674 /**
3675  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3676  * @phba: pointer to lpfc hba data structure.
3677  * @dev_grp: The HBA PCI-Device group number.
3678  *
3679  * This routine is invoked to set up the per HBA PCI-Device group function
3680  * API jump table entries.
3681  *
3682  * Return: 0 if success, otherwise -ENODEV
3683  **/
3684 int
3685 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3686 {
3687 	int rc;
3688 
3689 	/* Set up lpfc PCI-device group */
3690 	phba->pci_dev_grp = dev_grp;
3691 
3692 	/* The LPFC_PCI_DEV_OC uses SLI4 */
3693 	if (dev_grp == LPFC_PCI_DEV_OC)
3694 		phba->sli_rev = LPFC_SLI_REV4;
3695 
3696 	/* Set up device INIT API function jump table */
3697 	rc = lpfc_init_api_table_setup(phba, dev_grp);
3698 	if (rc)
3699 		return -ENODEV;
3700 	/* Set up SCSI API function jump table */
3701 	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3702 	if (rc)
3703 		return -ENODEV;
3704 	/* Set up SLI API function jump table */
3705 	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3706 	if (rc)
3707 		return -ENODEV;
3708 	/* Set up MBOX API function jump table */
3709 	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3710 	if (rc)
3711 		return -ENODEV;
3712 
3713 	return 0;
3714 }
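
/*
 * Hedged usage sketch: a probe path would typically derive the device
 * group from the PCI device and make one call to populate all four jump
 * tables; any single table failure fails the whole setup. The dev_grp
 * value below is only an example:
 *
 *	if (lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 */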
3715 
3716 /**
3717  * lpfc_log_intr_mode - Log the active interrupt mode
3718  * @phba: pointer to lpfc hba data structure.
3719  * @intr_mode: active interrupt mode adopted.
3720  *
3721  * This routine is invoked to log the currently used active interrupt mode
3722  * to the device.
3723  **/
3724 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3725 {
3726 	switch (intr_mode) {
3727 	case 0:
3728 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3729 				"0470 Enable INTx interrupt mode.\n");
3730 		break;
3731 	case 1:
3732 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3733 				"0481 Enabled MSI interrupt mode.\n");
3734 		break;
3735 	case 2:
3736 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3737 				"0480 Enabled MSI-X interrupt mode.\n");
3738 		break;
3739 	default:
3740 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3741 				"0482 Illegal interrupt mode.\n");
3742 		break;
3743 	}
3744 	return;
3745 }
3746 
3747 /**
3748  * lpfc_enable_pci_dev - Enable a generic PCI device.
3749  * @phba: pointer to lpfc hba data structure.
3750  *
3751  * This routine is invoked to enable the PCI device that is common to all
3752  * PCI devices.
3753  *
3754  * Return codes
3755  * 	0 - successful
3756  * 	other values - error
3757  **/
3758 static int
3759 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3760 {
3761 	struct pci_dev *pdev;
3762 	int bars;
3763 
3764 	/* Obtain PCI device reference */
3765 	if (!phba->pcidev)
3766 		goto out_error;
3767 	else
3768 		pdev = phba->pcidev;
3769 	/* Select PCI BARs */
3770 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3771 	/* Enable PCI device */
3772 	if (pci_enable_device_mem(pdev))
3773 		goto out_error;
3774 	/* Request PCI resource for the device */
3775 	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3776 		goto out_disable_device;
3777 	/* Set up device as PCI master and save state for EEH */
3778 	pci_set_master(pdev);
3779 	pci_try_set_mwi(pdev);
3780 	pci_save_state(pdev);
3781 
3782 	return 0;
3783 
3784 out_disable_device:
3785 	pci_disable_device(pdev);
3786 out_error:
3787 	return -ENODEV;
3788 }
3789 
3790 /**
3791  * lpfc_disable_pci_dev - Disable a generic PCI device.
3792  * @phba: pointer to lpfc hba data structure.
3793  *
3794  * This routine is invoked to disable the PCI device that is common to all
3795  * PCI devices.
3796  **/
3797 static void
3798 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3799 {
3800 	struct pci_dev *pdev;
3801 	int bars;
3802 
3803 	/* Obtain PCI device reference */
3804 	if (!phba->pcidev)
3805 		return;
3806 	else
3807 		pdev = phba->pcidev;
3808 	/* Select PCI BARs */
3809 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3810 	/* Release PCI resource and disable PCI device */
3811 	pci_release_selected_regions(pdev, bars);
3812 	pci_disable_device(pdev);
3813 	/* Null out PCI private reference to driver */
3814 	pci_set_drvdata(pdev, NULL);
3815 
3816 	return;
3817 }
3818 
3819 /**
3820  * lpfc_reset_hba - Reset a hba
3821  * @phba: pointer to lpfc hba data structure.
3822  *
3823  * This routine is invoked to reset a hba device. It brings the HBA
3824  * offline, performs a board restart, and then brings the board back
3825  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3826  * outstanding mailbox commands.
3827  **/
3828 void
3829 lpfc_reset_hba(struct lpfc_hba *phba)
3830 {
3831 	/* If resets are disabled then set error state and return. */
3832 	if (!phba->cfg_enable_hba_reset) {
3833 		phba->link_state = LPFC_HBA_ERROR;
3834 		return;
3835 	}
3836 	lpfc_offline_prep(phba);
3837 	lpfc_offline(phba);
3838 	lpfc_sli_brdrestart(phba);
3839 	lpfc_online(phba);
3840 	lpfc_unblock_mgmt_io(phba);
3841 }
3842 
3843 /**
3844  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3845  * @phba: pointer to lpfc hba data structure.
3846  *
3847  * This routine is invoked to set up the driver internal resources specific to
3848  * support the SLI-3 HBA device it is attached to.
3849  *
3850  * Return codes
3851  * 	0 - successful
3852  * 	other values - error
3853  **/
3854 static int
3855 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3856 {
3857 	struct lpfc_sli *psli;
3858 
3859 	/*
3860 	 * Initialize timers used by driver
3861 	 */
3862 
3863 	/* Heartbeat timer */
3864 	init_timer(&phba->hb_tmofunc);
3865 	phba->hb_tmofunc.function = lpfc_hb_timeout;
3866 	phba->hb_tmofunc.data = (unsigned long)phba;
3867 
3868 	psli = &phba->sli;
3869 	/* MBOX heartbeat timer */
3870 	init_timer(&psli->mbox_tmo);
3871 	psli->mbox_tmo.function = lpfc_mbox_timeout;
3872 	psli->mbox_tmo.data = (unsigned long) phba;
3873 	/* FCP polling mode timer */
3874 	init_timer(&phba->fcp_poll_timer);
3875 	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3876 	phba->fcp_poll_timer.data = (unsigned long) phba;
3877 	/* Fabric block timer */
3878 	init_timer(&phba->fabric_block_timer);
3879 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3880 	phba->fabric_block_timer.data = (unsigned long) phba;
3881 	/* EA polling mode timer */
3882 	init_timer(&phba->eratt_poll);
3883 	phba->eratt_poll.function = lpfc_poll_eratt;
3884 	phba->eratt_poll.data = (unsigned long) phba;
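
	/*
	 * Aside (illustrative): each init_timer()/function/data triple
	 * above could equivalently be written with the setup_timer()
	 * helper, e.g.:
	 *
	 *	setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout,
	 *		    (unsigned long)phba);
	 */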
3885 
3886 	/* Host attention work mask setup */
3887 	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3888 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3889 
3890 	/* Get all the module params for configuring this host */
3891 	lpfc_get_cfgparam(phba);
3892 	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
3893 		phba->menlo_flag |= HBA_MENLO_SUPPORT;
3894 		/* check for menlo minimum sg count */
3895 		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
3896 			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
3897 	}
3898 
3899 	/*
3900 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3901 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3902 	 * 2 segments are added since the IOCB needs a command and response bde.
3903 	 */
3904 	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3905 		sizeof(struct fcp_rsp) +
3906 			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3907 
3908 	if (phba->cfg_enable_bg) {
3909 		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3910 		phba->cfg_sg_dma_buf_size +=
3911 			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3912 	}
3913 
3914 	/* Also reinitialize the host templates with new values. */
3915 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3916 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3917 
3918 	phba->max_vpi = LPFC_MAX_VPI;
3919 	/* This will be set to correct value after config_port mbox */
3920 	phba->max_vports = 0;
3921 
3922 	/*
3923 	 * Initialize the SLI Layer to run with lpfc HBAs.
3924 	 */
3925 	lpfc_sli_setup(phba);
3926 	lpfc_sli_queue_setup(phba);
3927 
3928 	/* Allocate device driver memory */
3929 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3930 		return -ENOMEM;
3931 
3932 	return 0;
3933 }
3934 
3935 /**
3936  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3937  * @phba: pointer to lpfc hba data structure.
3938  *
3939  * This routine is invoked to unset the driver internal resources set up
3940  * specific for supporting the SLI-3 HBA device it is attached to.
3941  **/
3942 static void
3943 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3944 {
3945 	/* Free device driver memory allocated */
3946 	lpfc_mem_free_all(phba);
3947 
3948 	return;
3949 }
3950 
3951 /**
3952  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3953  * @phba: pointer to lpfc hba data structure.
3954  *
3955  * This routine is invoked to set up the driver internal resources specific to
3956  * support the SLI-4 HBA device it is attached to.
3957  *
3958  * Return codes
3959  * 	0 - successful
3960  * 	other values - error
3961  **/
3962 static int
3963 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3964 {
3965 	struct lpfc_sli *psli;
3966 	LPFC_MBOXQ_t *mboxq;
3967 	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3968 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3969 	struct lpfc_mqe *mqe;
3970 	int longs;
3971 
3972 	/* Before proceed, wait for POST done and device ready */
3973 	rc = lpfc_sli4_post_status_check(phba);
3974 	if (rc)
3975 		return -ENODEV;
3976 
3977 	/*
3978 	 * Initialize timers used by driver
3979 	 */
3980 
3981 	/* Heartbeat timer */
3982 	init_timer(&phba->hb_tmofunc);
3983 	phba->hb_tmofunc.function = lpfc_hb_timeout;
3984 	phba->hb_tmofunc.data = (unsigned long)phba;
3985 
3986 	psli = &phba->sli;
3987 	/* MBOX heartbeat timer */
3988 	init_timer(&psli->mbox_tmo);
3989 	psli->mbox_tmo.function = lpfc_mbox_timeout;
3990 	psli->mbox_tmo.data = (unsigned long) phba;
3991 	/* Fabric block timer */
3992 	init_timer(&phba->fabric_block_timer);
3993 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3994 	phba->fabric_block_timer.data = (unsigned long) phba;
3995 	/* EA polling mode timer */
3996 	init_timer(&phba->eratt_poll);
3997 	phba->eratt_poll.function = lpfc_poll_eratt;
3998 	phba->eratt_poll.data = (unsigned long) phba;
3999 	/* FCF rediscover timer */
4000 	init_timer(&phba->fcf.redisc_wait);
4001 	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4002 	phba->fcf.redisc_wait.data = (unsigned long)phba;
4003 
4004 	/*
4005 	 * We need to do a READ_CONFIG mailbox command here before
4006 	 * calling lpfc_get_cfgparam. For VFs this will report the
4007 	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. All
4008 	 * of the resources allocated for this Port are tied to these
4009 	 * values.
4010 	 */
4011 	/* Get all the module params for configuring this host */
4012 	lpfc_get_cfgparam(phba);
4013 	phba->max_vpi = LPFC_MAX_VPI;
4014 	/* This will be set to correct value after the read_config mbox */
4015 	phba->max_vports = 0;
4016 
4017 	/* Program the default value of vlan_id and fc_map */
4018 	phba->valid_vlan = 0;
4019 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4020 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4021 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4022 
4023 	/*
4024 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4025 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
4026 	 * 2 segments are added since the IOCB needs a command and response bde.
4027 	 * To ensure that the scsi sgl does not cross a 4k page boundary,
4028 	 * only sgl sizes that are a power of 2 are used.
4029 	 */
4030 	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4031 		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4032 	/* Feature Level 1 hardware is limited to 2 pages */
4033 	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
4034 	     LPFC_SLI_INTF_FEATURELEVEL1_1))
4035 		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4036 	else
4037 		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4038 	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4039 	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4040 	     dma_buf_size = dma_buf_size << 1)
4041 		;
4042 	if (dma_buf_size == max_buf_size)
4043 		phba->cfg_sg_seg_cnt = (dma_buf_size -
4044 			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4045 			(2 * sizeof(struct sli4_sge))) /
4046 				sizeof(struct sli4_sge);
4047 	phba->cfg_sg_dma_buf_size = dma_buf_size;
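
	/*
	 * Worked example of the rounding above (struct sizes assumed
	 * only for the arithmetic): with a 32-byte fcp_cmnd, a 96-byte
	 * fcp_rsp and 64 + 2 sges of 16 bytes each, buf_size would be
	 * 32 + 96 + 66 * 16 = 1184, and the loop doubles dma_buf_size
	 * up to 2048, the first power of two >= 1184, keeping each SGL
	 * inside a single 4K page.
	 */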
4048 
4049 	/* Initialize buffer queue management fields */
4050 	hbq_count = lpfc_sli_hbq_count();
4051 	for (i = 0; i < hbq_count; ++i)
4052 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4053 	INIT_LIST_HEAD(&phba->rb_pend_list);
4054 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4055 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4056 
4057 	/*
4058 	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4059 	 */
4060 	/* Initialize the Abort scsi buffer list used by driver */
4061 	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4062 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4063 	/* This abort list used by worker thread */
4064 	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4065 
4066 	/*
4067 	 * Initialize driver internal slow-path work queues
4068 	 */
4069 
4070 	/* Driver internal slow-path CQ Event pool */
4071 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4072 	/* Response IOCB work queue list */
4073 	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4074 	/* Asynchronous event CQ Event work queue list */
4075 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4076 	/* Fast-path XRI aborted CQ Event work queue list */
4077 	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4078 	/* Slow-path XRI aborted CQ Event work queue list */
4079 	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4080 	/* Receive queue CQ Event work queue list */
4081 	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4082 
4083 	/* Initialize the driver internal SLI layer lists. */
4084 	lpfc_sli_setup(phba);
4085 	lpfc_sli_queue_setup(phba);
4086 
4087 	/* Allocate device driver memory */
4088 	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4089 	if (rc)
4090 		return -ENOMEM;
4091 
4092 	/* Create the bootstrap mailbox command */
4093 	rc = lpfc_create_bootstrap_mbox(phba);
4094 	if (unlikely(rc))
4095 		goto out_free_mem;
4096 
4097 	/* Set up the host's endian order with the device. */
4098 	rc = lpfc_setup_endian_order(phba);
4099 	if (unlikely(rc))
4100 		goto out_free_bsmbx;
4101 
4102 	rc = lpfc_sli4_fw_cfg_check(phba);
4103 	if (unlikely(rc))
4104 		goto out_free_bsmbx;
4105 
4106 	/* Set up the hba's configuration parameters. */
4107 	rc = lpfc_sli4_read_config(phba);
4108 	if (unlikely(rc))
4109 		goto out_free_bsmbx;
4110 
4111 	/* Perform a function reset */
4112 	rc = lpfc_pci_function_reset(phba);
4113 	if (unlikely(rc))
4114 		goto out_free_bsmbx;
4115 
4116 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4117 						       GFP_KERNEL);
4118 	if (!mboxq) {
4119 		rc = -ENOMEM;
4120 		goto out_free_bsmbx;
4121 	}
4122 
4123 	/* Get the Supported Pages. It is always available. */
4124 	lpfc_supported_pages(mboxq);
4125 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4126 	if (unlikely(rc)) {
4127 		rc = -EIO;
4128 		mempool_free(mboxq, phba->mbox_mem_pool);
4129 		goto out_free_bsmbx;
4130 	}
4131 
4132 	mqe = &mboxq->u.mqe;
4133 	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4134 	       LPFC_MAX_SUPPORTED_PAGES);
4135 	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4136 		switch (pn_page[i]) {
4137 		case LPFC_SLI4_PARAMETERS:
4138 			phba->sli4_hba.pc_sli4_params.supported = 1;
4139 			break;
4140 		default:
4141 			break;
4142 		}
4143 	}
4144 
4145 	/* Read the port's SLI4 Parameters capabilities if supported. */
4146 	if (phba->sli4_hba.pc_sli4_params.supported)
4147 		rc = lpfc_pc_sli4_params_get(phba, mboxq);
4148 	mempool_free(mboxq, phba->mbox_mem_pool);
4149 	if (rc) {
4150 		rc = -EIO;
4151 		goto out_free_bsmbx;
4152 	}
4153 	/* Create all the SLI4 queues */
4154 	rc = lpfc_sli4_queue_create(phba);
4155 	if (rc)
4156 		goto out_free_bsmbx;
4157 
4158 	/* Create driver internal CQE event pool */
4159 	rc = lpfc_sli4_cq_event_pool_create(phba);
4160 	if (rc)
4161 		goto out_destroy_queue;
4162 
4163 	/* Initialize and populate the iocb list per host */
4164 	rc = lpfc_init_sgl_list(phba);
4165 	if (rc) {
4166 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4167 				"1400 Failed to initialize sgl list.\n");
4168 		goto out_destroy_cq_event_pool;
4169 	}
4170 	rc = lpfc_init_active_sgl_array(phba);
4171 	if (rc) {
4172 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4173 				"1430 Failed to initialize sgl list.\n");
4174 		goto out_free_sgl_list;
4175 	}
4176 
4177 	rc = lpfc_sli4_init_rpi_hdrs(phba);
4178 	if (rc) {
4179 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4180 				"1432 Failed to initialize rpi headers.\n");
4181 		goto out_free_active_sgl;
4182 	}
4183 
4184 	/* Allocate eligible FCF bmask memory for FCF round robin failover */
4185 	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4186 	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4187 					 GFP_KERNEL);
4188 	if (!phba->fcf.fcf_rr_bmask) {
4189 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4190 				"2759 Failed allocate memory for FCF round "
4191 				"robin failover bmask\n");
4192 		goto out_remove_rpi_hdrs;
4193 	}
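
	/*
	 * Aside (illustrative): the "longs" computation above is the
	 * standard round-up-to-longs idiom and could equivalently use
	 * the kernel helper:
	 *
	 *	longs = BITS_TO_LONGS(LPFC_SLI4_FCF_TBL_INDX_MAX);
	 */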
4194 
4195 	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4196 				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4197 	if (!phba->sli4_hba.fcp_eq_hdl) {
4198 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4199 				"2572 Failed allocate memory for fast-path "
4200 				"per-EQ handle array\n");
4201 		goto out_free_fcf_rr_bmask;
4202 	}
4203 
4204 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4205 				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4206 	if (!phba->sli4_hba.msix_entries) {
4207 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4208 				"2573 Failed allocate memory for msi-x "
4209 				"interrupt vector entries\n");
4210 		goto out_free_fcp_eq_hdl;
4211 	}
4212 
4213 	return rc;
4214 
4215 out_free_fcp_eq_hdl:
4216 	kfree(phba->sli4_hba.fcp_eq_hdl);
4217 out_free_fcf_rr_bmask:
4218 	kfree(phba->fcf.fcf_rr_bmask);
4219 out_remove_rpi_hdrs:
4220 	lpfc_sli4_remove_rpi_hdrs(phba);
4221 out_free_active_sgl:
4222 	lpfc_free_active_sgl(phba);
4223 out_free_sgl_list:
4224 	lpfc_free_sgl_list(phba);
4225 out_destroy_cq_event_pool:
4226 	lpfc_sli4_cq_event_pool_destroy(phba);
4227 out_destroy_queue:
4228 	lpfc_sli4_queue_destroy(phba);
4229 out_free_bsmbx:
4230 	lpfc_destroy_bootstrap_mbox(phba);
4231 out_free_mem:
4232 	lpfc_mem_free(phba);
4233 	return rc;
4234 }
4235 
4236 /**
4237  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4238  * @phba: pointer to lpfc hba data structure.
4239  *
4240  * This routine is invoked to unset the driver internal resources set up
4241  * specific for supporting the SLI-4 HBA device it attached to.
4242  **/
4243 static void
4244 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4245 {
4246 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4247 
4248 	/* unregister default FCFI from the HBA */
4249 	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4250 
4251 	/* Free the default FCR table */
4252 	lpfc_sli_remove_dflt_fcf(phba);
4253 
4254 	/* Free memory allocated for msi-x interrupt vector entries */
4255 	kfree(phba->sli4_hba.msix_entries);
4256 
4257 	/* Free memory allocated for fast-path work queue handles */
4258 	kfree(phba->sli4_hba.fcp_eq_hdl);
4259 
4260 	/* Free the allocated rpi headers. */
4261 	lpfc_sli4_remove_rpi_hdrs(phba);
4262 	lpfc_sli4_remove_rpis(phba);
4263 
4264 	/* Free eligible FCF index bmask */
4265 	kfree(phba->fcf.fcf_rr_bmask);
4266 
4267 	/* Free the ELS sgl list */
4268 	lpfc_free_active_sgl(phba);
4269 	lpfc_free_sgl_list(phba);
4270 
4271 	/* Free the SCSI sgl management array */
4272 	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4273 
4274 	/* Free the SLI4 queues */
4275 	lpfc_sli4_queue_destroy(phba);
4276 
4277 	/* Free the completion queue EQ event pool */
4278 	lpfc_sli4_cq_event_release_all(phba);
4279 	lpfc_sli4_cq_event_pool_destroy(phba);
4280 
4281 	/* Reset SLI4 HBA FCoE function */
4282 	lpfc_pci_function_reset(phba);
4283 
4284 	/* Free the bsmbx region. */
4285 	lpfc_destroy_bootstrap_mbox(phba);
4286 
4287 	/* Free the SLI Layer memory with SLI4 HBAs */
4288 	lpfc_mem_free_all(phba);
4289 
4290 	/* Free the current connect table */
4291 	list_for_each_entry_safe(conn_entry, next_conn_entry,
4292 		&phba->fcf_conn_rec_list, list) {
4293 		list_del_init(&conn_entry->list);
4294 		kfree(conn_entry);
4295 	}
4296 
4297 	return;
4298 }
4299 
4300 /**
4301  * lpfc_init_api_table_setup - Set up init api function jump table
4302  * @phba: The hba struct for which this call is being executed.
4303  * @dev_grp: The HBA PCI-Device group number.
4304  *
4305  * This routine sets up the device INIT interface API function jump table
4306  * in @phba struct.
4307  *
4308  * Returns: 0 - success, -ENODEV - failure.
4309  **/
4310 int
4311 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4312 {
4313 	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4314 	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4315 	switch (dev_grp) {
4316 	case LPFC_PCI_DEV_LP:
4317 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4318 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4319 		phba->lpfc_stop_port = lpfc_stop_port_s3;
4320 		break;
4321 	case LPFC_PCI_DEV_OC:
4322 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4323 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4324 		phba->lpfc_stop_port = lpfc_stop_port_s4;
4325 		break;
4326 	default:
4327 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4328 				"1431 Invalid HBA PCI-device group: 0x%x\n",
4329 				dev_grp);
4330 		return -ENODEV;
4331 		break;
4332 	}
4333 	return 0;
4334 }
4335 
4336 /**
4337  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4338  * @phba: pointer to lpfc hba data structure.
4339  *
4340  * This routine is invoked to set up the driver internal resources before the
4341  * device specific resource setup to support the HBA device it is attached to.
4342  *
4343  * Return codes
4344  *	0 - successful
4345  *	other values - error
4346  **/
4347 static int
4348 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4349 {
4350 	/*
4351 	 * Driver resources common to all SLI revisions
4352 	 */
4353 	atomic_set(&phba->fast_event_count, 0);
4354 	spin_lock_init(&phba->hbalock);
4355 
4356 	/* Initialize ndlp management spinlock */
4357 	spin_lock_init(&phba->ndlp_lock);
4358 
4359 	INIT_LIST_HEAD(&phba->port_list);
4360 	INIT_LIST_HEAD(&phba->work_list);
4361 	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4362 
4363 	/* Initialize the wait queue head for the kernel thread */
4364 	init_waitqueue_head(&phba->work_waitq);
4365 
4366 	/* Initialize the scsi buffer list used by driver for scsi IO */
4367 	spin_lock_init(&phba->scsi_buf_list_lock);
4368 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4369 
4370 	/* Initialize the fabric iocb list */
4371 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4372 
4373 	/* Initialize list to save ELS buffers */
4374 	INIT_LIST_HEAD(&phba->elsbuf);
4375 
4376 	/* Initialize FCF connection rec list */
4377 	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4378 
4379 	return 0;
4380 }
4381 
4382 /**
4383  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4384  * @phba: pointer to lpfc hba data structure.
4385  *
4386  * This routine is invoked to set up the driver internal resources after the
4387  * device specific resource setup to support the HBA device it is attached to.
4388  *
4389  * Return codes
4390  * 	0 - successful
4391  * 	other values - error
4392  **/
4393 static int
4394 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4395 {
4396 	int error;
4397 
4398 	/* Startup the kernel thread for this host adapter. */
4399 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4400 					  "lpfc_worker_%d", phba->brd_no);
4401 	if (IS_ERR(phba->worker_thread)) {
4402 		error = PTR_ERR(phba->worker_thread);
4403 		return error;
4404 	}
4405 
4406 	return 0;
4407 }
4408 
4409 /**
4410  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4411  * @phba: pointer to lpfc hba data structure.
4412  *
4413  * This routine is invoked to unset the driver internal resources set up after
4414  * the device specific resource setup for supporting the HBA device it is
4415  * attached to.
4416  **/
4417 static void
4418 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4419 {
4420 	/* Stop kernel worker thread */
4421 	kthread_stop(phba->worker_thread);
4422 }
4423 
4424 /**
4425  * lpfc_free_iocb_list - Free iocb list.
4426  * @phba: pointer to lpfc hba data structure.
4427  *
4428  * This routine is invoked to free the driver's IOCB list and memory.
4429  **/
4430 static void
4431 lpfc_free_iocb_list(struct lpfc_hba *phba)
4432 {
4433 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4434 
4435 	spin_lock_irq(&phba->hbalock);
4436 	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4437 				 &phba->lpfc_iocb_list, list) {
4438 		list_del(&iocbq_entry->list);
4439 		kfree(iocbq_entry);
4440 		phba->total_iocbq_bufs--;
4441 	}
4442 	spin_unlock_irq(&phba->hbalock);
4443 
4444 	return;
4445 }
4446 
4447 /**
4448  * lpfc_init_iocb_list - Allocate and initialize iocb list.
4449  * @phba: pointer to lpfc hba data structure.
4450  *
4451  * This routine is invoked to allocate and initialize the driver's IOCB
4452  * list and set up the IOCB tag array accordingly.
4453  *
4454  * Return codes
4455  *	0 - successful
4456  *	other values - error
4457  **/
4458 static int
4459 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4460 {
4461 	struct lpfc_iocbq *iocbq_entry = NULL;
4462 	uint16_t iotag;
4463 	int i;
4464 
4465 	/* Initialize and populate the iocb list per host.  */
4466 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4467 	for (i = 0; i < iocb_count; i++) {
4468 		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4469 		if (iocbq_entry == NULL) {
4470 			printk(KERN_ERR "%s: only allocated %d iocbs of "
4471 				"expected %d count. Unloading driver.\n",
4472 				__func__, i, LPFC_IOCB_LIST_CNT);
4473 			goto out_free_iocbq;
4474 		}
4475 
4476 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4477 		if (iotag == 0) {
4478 			kfree(iocbq_entry);
4479 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4480 				"Unloading driver.\n", __func__);
4481 			goto out_free_iocbq;
4482 		}
4483 		iocbq_entry->sli4_xritag = NO_XRI;
4484 
4485 		spin_lock_irq(&phba->hbalock);
4486 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4487 		phba->total_iocbq_bufs++;
4488 		spin_unlock_irq(&phba->hbalock);
4489 	}
4490 
4491 	return 0;
4492 
4493 out_free_iocbq:
4494 	lpfc_free_iocb_list(phba);
4495 
4496 	return -ENOMEM;
4497 }
4498 
4499 /**
4500  * lpfc_free_sgl_list - Free sgl list.
4501  * @phba: pointer to lpfc hba data structure.
4502  *
4503  * This routine is invoked to free the driver's sgl list and memory.
4504  **/
4505 static void
4506 lpfc_free_sgl_list(struct lpfc_hba *phba)
4507 {
4508 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4509 	LIST_HEAD(sglq_list);
4510 	int rc = 0;
4511 
4512 	spin_lock_irq(&phba->hbalock);
4513 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4514 	spin_unlock_irq(&phba->hbalock);
4515 
4516 	list_for_each_entry_safe(sglq_entry, sglq_next,
4517 				 &sglq_list, list) {
4518 		list_del(&sglq_entry->list);
4519 		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4520 		kfree(sglq_entry);
4521 		phba->sli4_hba.total_sglq_bufs--;
4522 	}
4523 	rc = lpfc_sli4_remove_all_sgl_pages(phba);
4524 	if (rc) {
4525 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4526 			"2005 Unable to deregister pages from HBA: %x\n", rc);
4527 	}
4528 	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4529 }
4530 
4531 /**
4532  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4533  * @phba: pointer to lpfc hba data structure.
4534  *
4535  * This routine is invoked to allocate the driver's active sgl memory.
4536  * This array will hold the sglq_entry's for active IOs.
4537  **/
4538 static int
4539 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4540 {
4541 	int size;
4542 	size = sizeof(struct lpfc_sglq *);
4543 	size *= phba->sli4_hba.max_cfg_param.max_xri;
4544 
4545 	phba->sli4_hba.lpfc_sglq_active_list =
4546 		kzalloc(size, GFP_KERNEL);
4547 	if (!phba->sli4_hba.lpfc_sglq_active_list)
4548 		return -ENOMEM;
4549 	return 0;
4550 }
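
/*
 * Aside (illustrative): the size * count computation above could
 * equivalently use kcalloc(), which also guards the multiplication
 * against overflow:
 *
 *	phba->sli4_hba.lpfc_sglq_active_list =
 *		kcalloc(phba->sli4_hba.max_cfg_param.max_xri,
 *			sizeof(struct lpfc_sglq *), GFP_KERNEL);
 */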
4551 
4552 /**
4553  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4554  * @phba: pointer to lpfc hba data structure.
4555  *
4556  * This routine is invoked to walk through the array of active sglq entries
4557  * and free all of the resources.
4558  * This is just a placeholder for now.
4559  **/
4560 static void
4561 lpfc_free_active_sgl(struct lpfc_hba *phba)
4562 {
4563 	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4564 }
4565 
4566 /**
4567  * lpfc_init_sgl_list - Allocate and initialize sgl list.
4568  * @phba: pointer to lpfc hba data structure.
4569  *
4570  * This routine is invoked to allocate and initialize the driver's sgl
4571  * list and set up the sgl xritag tag array accordingly.
4572  *
4573  * Return codes
4574  *	0 - successful
4575  *	other values - error
4576  **/
4577 static int
4578 lpfc_init_sgl_list(struct lpfc_hba *phba)
4579 {
4580 	struct lpfc_sglq *sglq_entry = NULL;
4581 	int i;
4582 	int els_xri_cnt;
4583 
4584 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4585 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4586 				"2400 lpfc_init_sgl_list els %d.\n",
4587 				els_xri_cnt);
4588 	/* Initialize and populate the sglq list per host/VF. */
4589 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4590 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4591 
4592 	/* Sanity check on XRI management */
4593 	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4594 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4595 				"2562 No room left for SCSI XRI allocation: "
4596 				"max_xri=%d, els_xri=%d\n",
4597 				phba->sli4_hba.max_cfg_param.max_xri,
4598 				els_xri_cnt);
4599 		return -ENOMEM;
4600 	}
4601 
4602 	/* Allocate memory for the ELS XRI management array */
4603 	phba->sli4_hba.lpfc_els_sgl_array =
4604 			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4605 			GFP_KERNEL);
4606 
4607 	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4608 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4609 				"2401 Failed to allocate memory for ELS "
4610 				"XRI management array of size %d.\n",
4611 				els_xri_cnt);
4612 		return -ENOMEM;
4613 	}
4614 
4615 	/* Keep the SCSI XRI into the XRI management array */
4616 	phba->sli4_hba.scsi_xri_max =
4617 			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4618 	phba->sli4_hba.scsi_xri_cnt = 0;
4619 
4620 	phba->sli4_hba.lpfc_scsi_psb_array =
4621 			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4622 			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4623 
4624 	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4625 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4626 				"2563 Failed to allocate memory for SCSI "
4627 				"XRI management array of size %d.\n",
4628 				phba->sli4_hba.scsi_xri_max);
4629 		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4630 		return -ENOMEM;
4631 	}
4632 
4633 	for (i = 0; i < els_xri_cnt; i++) {
4634 		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4635 		if (sglq_entry == NULL) {
4636 			printk(KERN_ERR "%s: only allocated %d sgls of "
4637 				"expected %d count. Unloading driver.\n",
4638 				__func__, i, els_xri_cnt);
4639 			goto out_free_mem;
4640 		}
4641 
4642 		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4643 		if (sglq_entry->sli4_xritag == NO_XRI) {
4644 			kfree(sglq_entry);
4645 			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4646 				"Unloading driver.\n", __func__);
4647 			goto out_free_mem;
4648 		}
4649 		sglq_entry->buff_type = GEN_BUFF_TYPE;
4650 		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4651 		if (sglq_entry->virt == NULL) {
4652 			kfree(sglq_entry);
4653 			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4654 				"Unloading driver.\n", __func__);
4655 			goto out_free_mem;
4656 		}
4657 		sglq_entry->sgl = sglq_entry->virt;
4658 		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4659 
4660 		/* The list order is used by later block SGL registration */
4661 		spin_lock_irq(&phba->hbalock);
4662 		sglq_entry->state = SGL_FREED;
4663 		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4664 		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4665 		phba->sli4_hba.total_sglq_bufs++;
4666 		spin_unlock_irq(&phba->hbalock);
4667 	}
4668 	return 0;
4669 
4670 out_free_mem:
4671 	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4672 	lpfc_free_sgl_list(phba);
4673 	return -ENOMEM;
4674 }
4675 
4676 /**
4677  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4678  * @phba: pointer to lpfc hba data structure.
4679  *
4680  * This routine is invoked to post rpi header templates to the
4681  * HBA consistent with the SLI-4 interface spec.  This routine
4682  * posts a PAGE_SIZE memory region to the port to hold up to
4683  * 64 rpi context headers.
4684  * No locks are held here because this is an initialization routine
4685  * called only from probe or lpfc_online when interrupts are not
4686  * enabled and the driver is reinitializing the device.
4687  *
4688  * Return codes
4689  * 	0 - successful
4690  * 	ENOMEM - No available memory
4691  *      EIO - The mailbox failed to complete successfully.
4692  **/
4693 int
4694 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4695 {
4696 	int rc = 0;
4697 	int longs;
4698 	uint16_t rpi_count;
4699 	struct lpfc_rpi_hdr *rpi_hdr;
4700 
4701 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4702 
4703 	/*
4704 	 * Provision an rpi bitmask range for discovery. The total count
4705 	 * is the difference between max and base + 1.
4706 	 */
4707 	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4708 		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4709 
4710 	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4711 	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4712 					   GFP_KERNEL);
4713 	if (!phba->sli4_hba.rpi_bmask)
4714 		return -ENOMEM;
4715 
4716 	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4717 	if (!rpi_hdr) {
4718 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4719 				"0391 Error during rpi post operation\n");
4720 		lpfc_sli4_remove_rpis(phba);
4721 		rc = -ENODEV;
4722 	}
4723 
4724 	return rc;
4725 }
4726 
4727 /**
4728  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4729  * @phba: pointer to lpfc hba data structure.
4730  *
4731  * This routine is invoked to allocate a single 4KB memory region to
4732  * support rpis and store it in the phba.  This single region
4733  * provides support for up to 64 rpis.  The region is used globally
4734  * by the device.
4735  *
4736  * Returns:
4737  *   A valid rpi hdr on success.
4738  *   A NULL pointer on any failure.
4739  **/
4740 struct lpfc_rpi_hdr *
4741 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4742 {
4743 	uint16_t rpi_limit, curr_rpi_range;
4744 	struct lpfc_dmabuf *dmabuf;
4745 	struct lpfc_rpi_hdr *rpi_hdr;
4746 
4747 	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4748 		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4749 
4750 	spin_lock_irq(&phba->hbalock);
4751 	curr_rpi_range = phba->sli4_hba.next_rpi;
4752 	spin_unlock_irq(&phba->hbalock);
4753 
4754 	/*
4755 	 * The port has a limited number of rpis. The increment here
4756 	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4757 	 * and to allow the full max_rpi range per port.
4758 	 */
4759 	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4760 		return NULL;
4761 
4762 	/*
4763 	 * First allocate the protocol header region for the port.  The
4764 	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4765 	 */
4766 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4767 	if (!dmabuf)
4768 		return NULL;
4769 
4770 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4771 					  LPFC_HDR_TEMPLATE_SIZE,
4772 					  &dmabuf->phys,
4773 					  GFP_KERNEL);
4774 	if (!dmabuf->virt) {
4775 		rpi_hdr = NULL;
4776 		goto err_free_dmabuf;
4777 	}
4778 
4779 	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4780 	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4781 		rpi_hdr = NULL;
4782 		goto err_free_coherent;
4783 	}
4784 
4785 	/* Save the rpi header data for cleanup later. */
4786 	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4787 	if (!rpi_hdr)
4788 		goto err_free_coherent;
4789 
4790 	rpi_hdr->dmabuf = dmabuf;
4791 	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4792 	rpi_hdr->page_count = 1;
4793 	spin_lock_irq(&phba->hbalock);
4794 	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4795 	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4796 
4797 	/*
4798 	 * The next_rpi stores the next multiple-of-64 rpi value to post
4799 	 * in any subsequent rpi memory region postings.
4800 	 */
4801 	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4802 	spin_unlock_irq(&phba->hbalock);
4803 	return rpi_hdr;
4804 
4805  err_free_coherent:
4806 	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4807 			  dmabuf->virt, dmabuf->phys);
4808  err_free_dmabuf:
4809 	kfree(dmabuf);
4810 	return NULL;
4811 }
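
/*
 * Hedged sketch of the allocate-then-verify-alignment pattern used
 * above, with hypothetical names (buf and pdev are not part of this
 * routine): the port requires the region to be naturally aligned, so
 * the physical address returned by dma_alloc_coherent() is checked
 * and the region released if the alignment requirement is not met.
 *
 *	buf->virt = dma_alloc_coherent(&pdev->dev, LPFC_HDR_TEMPLATE_SIZE,
 *				       &buf->phys, GFP_KERNEL);
 *	if (buf->virt && !IS_ALIGNED(buf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
 *		dma_free_coherent(&pdev->dev, LPFC_HDR_TEMPLATE_SIZE,
 *				  buf->virt, buf->phys);
 *		buf->virt = NULL;
 *	}
 */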
4812 
4813 /**
4814  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4815  * @phba: pointer to lpfc hba data structure.
4816  *
4817  * This routine is invoked to remove all memory resources allocated
4818  * to support rpis. This routine presumes the caller has released all
4819  * rpis consumed by fabric or port logins and is prepared to have
4820  * the header pages removed.
4821  **/
4822 void
4823 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4824 {
4825 	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4826 
4827 	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4828 				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4829 		list_del(&rpi_hdr->list);
4830 		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4831 				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4832 		kfree(rpi_hdr->dmabuf);
4833 		kfree(rpi_hdr);
4834 	}
4835 
4836 	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4837 	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4838 }
4839 
4840 /**
4841  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4842  * @pdev: pointer to pci device data structure.
4843  *
4844  * This routine is invoked to allocate the driver hba data structure for an
4845  * HBA device. If the allocation is successful, the phba reference to the
4846  * PCI device data structure is set.
4847  *
4848  * Return codes
4849  *      pointer to @phba - successful
4850  *      NULL - error
4851  **/
4852 static struct lpfc_hba *
4853 lpfc_hba_alloc(struct pci_dev *pdev)
4854 {
4855 	struct lpfc_hba *phba;
4856 
4857 	/* Allocate memory for HBA structure */
4858 	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4859 	if (!phba) {
4860 		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4861 		return NULL;
4862 	}
4863 
4864 	/* Set reference to PCI device in HBA structure */
4865 	phba->pcidev = pdev;
4866 
4867 	/* Assign an unused board number */
4868 	phba->brd_no = lpfc_get_instance();
4869 	if (phba->brd_no < 0) {
4870 		kfree(phba);
4871 		return NULL;
4872 	}
4873 
4874 	spin_lock_init(&phba->ct_ev_lock);
4875 	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4876 
4877 	return phba;
4878 }
4879 
4880 /**
4881  * lpfc_hba_free - Free driver hba data structure with a device.
4882  * @phba: pointer to lpfc hba data structure.
4883  *
4884  * This routine is invoked to free the driver hba data structure with an
4885  * HBA device.
4886  **/
4887 static void
4888 lpfc_hba_free(struct lpfc_hba *phba)
4889 {
4890 	/* Release the driver assigned board number */
4891 	idr_remove(&lpfc_hba_index, phba->brd_no);
4892 
4893 	kfree(phba);
4894 	return;
4895 }
4896 
4897 /**
4898  * lpfc_create_shost - Create hba physical port with associated scsi host.
4899  * @phba: pointer to lpfc hba data structure.
4900  *
4901  * This routine is invoked to create HBA physical port and associate a SCSI
4902  * host with it.
4903  *
4904  * Return codes
4905  *      0 - successful
4906  *      other values - error
4907  **/
4908 static int
4909 lpfc_create_shost(struct lpfc_hba *phba)
4910 {
4911 	struct lpfc_vport *vport;
4912 	struct Scsi_Host  *shost;
4913 
4914 	/* Initialize HBA FC structure */
4915 	phba->fc_edtov = FF_DEF_EDTOV;
4916 	phba->fc_ratov = FF_DEF_RATOV;
4917 	phba->fc_altov = FF_DEF_ALTOV;
4918 	phba->fc_arbtov = FF_DEF_ARBTOV;
4919 
4920 	atomic_set(&phba->sdev_cnt, 0);
4921 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4922 	if (!vport)
4923 		return -ENODEV;
4924 
4925 	shost = lpfc_shost_from_vport(vport);
4926 	phba->pport = vport;
4927 	lpfc_debugfs_initialize(vport);
4928 	/* Put reference to SCSI host to driver's device private data */
4929 	pci_set_drvdata(phba->pcidev, shost);
4930 
4931 	return 0;
4932 }
4933 
4934 /**
4935  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4936  * @phba: pointer to lpfc hba data structure.
4937  *
4938  * This routine is invoked to destroy HBA physical port and the associated
4939  * SCSI host.
4940  **/
4941 static void
4942 lpfc_destroy_shost(struct lpfc_hba *phba)
4943 {
4944 	struct lpfc_vport *vport = phba->pport;
4945 
4946 	/* Destroy physical port that associated with the SCSI host */
4947 	destroy_port(vport);
4948 
4949 	return;
4950 }
4951 
4952 /**
4953  * lpfc_setup_bg - Setup Block guard structures and debug areas.
4954  * @phba: pointer to lpfc hba data structure.
4955  * @shost: the shost to be used to detect Block guard settings.
4956  *
4957  * This routine sets up the local Block guard protocol settings for @shost.
4958  * This routine also allocates memory for debugging bg buffers.
4959  **/
4960 static void
4961 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4962 {
4963 	int pagecnt = 10;
4964 	if (lpfc_prot_mask && lpfc_prot_guard) {
4965 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4966 				"1478 Registering BlockGuard with the "
4967 				"SCSI layer\n");
4968 		scsi_host_set_prot(shost, lpfc_prot_mask);
4969 		scsi_host_set_guard(shost, lpfc_prot_guard);
4970 	}
4971 	if (!_dump_buf_data) {
4972 		while (pagecnt) {
4973 			spin_lock_init(&_dump_buf_lock);
4974 			_dump_buf_data =
4975 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4976 			if (_dump_buf_data) {
4977 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4978 					"9043 BLKGRD: allocated %d pages for "
4979 				       "_dump_buf_data at 0x%p\n",
4980 				       (1 << pagecnt), _dump_buf_data);
4981 				_dump_buf_data_order = pagecnt;
4982 				memset(_dump_buf_data, 0,
4983 				       ((1 << PAGE_SHIFT) << pagecnt));
4984 				break;
4985 			} else
4986 				--pagecnt;
4987 		}
4988 		if (!_dump_buf_data_order)
4989 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4990 				"9044 BLKGRD: ERROR unable to allocate "
4991 			       "memory for hexdump\n");
4992 	} else
4993 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4994 			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4995 		       "\n", _dump_buf_data);
4996 	if (!_dump_buf_dif) {
4997 		while (pagecnt) {
4998 			_dump_buf_dif =
4999 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
5000 			if (_dump_buf_dif) {
5001 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5002 					"9046 BLKGRD: allocated %d pages for "
5003 				       "_dump_buf_dif at 0x%p\n",
5004 				       (1 << pagecnt), _dump_buf_dif);
5005 				_dump_buf_dif_order = pagecnt;
5006 				memset(_dump_buf_dif, 0,
5007 				       ((1 << PAGE_SHIFT) << pagecnt));
5008 				break;
5009 			} else
5010 				--pagecnt;
5011 		}
5012 		if (!_dump_buf_dif_order)
5013 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5014 			"9047 BLKGRD: ERROR unable to allocate "
5015 			       "memory for hexdump\n");
5016 	} else
5017 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5018 			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5019 		       _dump_buf_dif);
5020 }
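
/*
 * The dump-buffer allocation above uses a common fallback pattern: ask
 * __get_free_pages() for the largest order first, then retry with smaller
 * orders until an allocation succeeds.  A minimal sketch of the same idea,
 * with a hypothetical helper name (not part of this driver):
 */
#if 0	/* illustrative sketch only */
static char *alloc_largest_order(unsigned long *order_out, int max_order)
{
	int order;
	char *buf;

	for (order = max_order; order > 0; order--) {
		/* 2^order contiguous pages, or NULL on failure */
		buf = (char *) __get_free_pages(GFP_KERNEL, order);
		if (buf) {
			*order_out = order;
			return buf;
		}
	}
	return NULL;
}
#endif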
5021 
5022 /**
5023  * lpfc_post_init_setup - Perform necessary device post initialization setup.
5024  * @phba: pointer to lpfc hba data structure.
5025  *
5026  * This routine is invoked to perform all the necessary post initialization
5027  * setup for the device.
5028  **/
5029 static void
5030 lpfc_post_init_setup(struct lpfc_hba *phba)
5031 {
5032 	struct Scsi_Host  *shost;
5033 	struct lpfc_adapter_event_header adapter_event;
5034 
5035 	/* Get the default values for Model Name and Description */
5036 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5037 
5038 	/*
5039 	 * hba setup may have changed the hba_queue_depth so we need to
5040 	 * adjust the value of can_queue.
5041 	 */
5042 	shost = pci_get_drvdata(phba->pcidev);
5043 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
5044 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5045 		lpfc_setup_bg(phba, shost);
5046 
5047 	lpfc_host_attrib_init(shost);
5048 
5049 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5050 		spin_lock_irq(shost->host_lock);
5051 		lpfc_poll_start_timer(phba);
5052 		spin_unlock_irq(shost->host_lock);
5053 	}
5054 
5055 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5056 			"0428 Perform SCSI scan\n");
5057 	/* Send board arrival event to upper layer */
5058 	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5059 	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5060 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5061 				  sizeof(adapter_event),
5062 				  (char *) &adapter_event,
5063 				  LPFC_NL_VENDOR_ID);
5064 	return;
5065 }
5066 
5067 /**
5068  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5069  * @phba: pointer to lpfc hba data structure.
5070  *
5071  * This routine is invoked to set up the PCI device memory space for device
5072  * with SLI-3 interface spec.
5073  *
5074  * Return codes
5075  * 	0 - successful
5076  * 	other values - error
5077  **/
5078 static int
5079 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5080 {
5081 	struct pci_dev *pdev;
5082 	unsigned long bar0map_len, bar2map_len;
5083 	int i, hbq_count;
5084 	void *ptr;
5085 	int error = -ENODEV;
5086 
5087 	/* Obtain PCI device reference */
5088 	if (!phba->pcidev)
5089 		return error;
5090 	else
5091 		pdev = phba->pcidev;
5092 
5093 	/* Set the device DMA mask size */
5094 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5095 	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5096 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5097 		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5098 			return error;
5099 		}
5100 	}
5101 
5102 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5103 	 * required by each mapping.
5104 	 */
5105 	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5106 	bar0map_len = pci_resource_len(pdev, 0);
5107 
5108 	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5109 	bar2map_len = pci_resource_len(pdev, 2);
5110 
5111 	/* Map HBA SLIM to a kernel virtual address. */
5112 	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5113 	if (!phba->slim_memmap_p) {
5114 		dev_printk(KERN_ERR, &pdev->dev,
5115 			   "ioremap failed for SLIM memory.\n");
5116 		goto out;
5117 	}
5118 
5119 	/* Map HBA Control Registers to a kernel virtual address. */
5120 	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5121 	if (!phba->ctrl_regs_memmap_p) {
5122 		dev_printk(KERN_ERR, &pdev->dev,
5123 			   "ioremap failed for HBA control registers.\n");
5124 		goto out_iounmap_slim;
5125 	}
5126 
5127 	/* Allocate memory for SLI-2 structures */
5128 	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5129 					       SLI2_SLIM_SIZE,
5130 					       &phba->slim2p.phys,
5131 					       GFP_KERNEL);
5132 	if (!phba->slim2p.virt)
5133 		goto out_iounmap;
5134 
5135 	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5136 	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5137 	phba->mbox_ext = (phba->slim2p.virt +
5138 		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5139 	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5140 	phba->IOCBs = (phba->slim2p.virt +
5141 		       offsetof(struct lpfc_sli2_slim, IOCBs));
5142 
5143 	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5144 						 lpfc_sli_hbq_size(),
5145 						 &phba->hbqslimp.phys,
5146 						 GFP_KERNEL);
5147 	if (!phba->hbqslimp.virt)
5148 		goto out_free_slim;
5149 
5150 	hbq_count = lpfc_sli_hbq_count();
5151 	ptr = phba->hbqslimp.virt;
5152 	for (i = 0; i < hbq_count; ++i) {
5153 		phba->hbqs[i].hbq_virt = ptr;
5154 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5155 		ptr += (lpfc_hbq_defs[i]->entry_count *
5156 			sizeof(struct lpfc_hbq_entry));
5157 	}
5158 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5159 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5160 
5161 	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5162 
5163 	INIT_LIST_HEAD(&phba->rb_pend_list);
5164 
5165 	phba->MBslimaddr = phba->slim_memmap_p;
5166 	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5167 	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5168 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5169 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5170 
5171 	return 0;
5172 
5173 out_free_slim:
5174 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5175 			  phba->slim2p.virt, phba->slim2p.phys);
5176 out_iounmap:
5177 	iounmap(phba->ctrl_regs_memmap_p);
5178 out_iounmap_slim:
5179 	iounmap(phba->slim_memmap_p);
5180 out:
5181 	return error;
5182 }
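
/*
 * The DMA-mask setup at the top of this routine is the standard 64-bit
 * first, 32-bit fallback negotiation.  Condensed into a helper (a sketch
 * using the same pci_set_dma_mask()/pci_set_consistent_dma_mask() calls;
 * the function name is hypothetical):
 */
#if 0	/* illustrative sketch only */
static int lpfc_example_set_dma_masks(struct pci_dev *pdev)
{
	/* Prefer full 64-bit addressing for streaming and coherent DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
		return 0;
	/* Fall back to 32-bit addressing if the platform cannot do 64-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;
	return -ENODEV;
}
#endif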
5183 
5184 /**
5185  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5186  * @phba: pointer to lpfc hba data structure.
5187  *
5188  * This routine is invoked to unset the PCI device memory space for device
5189  * with SLI-3 interface spec.
5190  **/
5191 static void
5192 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5193 {
5194 	struct pci_dev *pdev;
5195 
5196 	/* Obtain PCI device reference */
5197 	if (!phba->pcidev)
5198 		return;
5199 	else
5200 		pdev = phba->pcidev;
5201 
5202 	/* Free coherent DMA memory allocated */
5203 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5204 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5205 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5206 			  phba->slim2p.virt, phba->slim2p.phys);
5207 
5208 	/* I/O memory unmap */
5209 	iounmap(phba->ctrl_regs_memmap_p);
5210 	iounmap(phba->slim_memmap_p);
5211 
5212 	return;
5213 }
5214 
5215 /**
5216  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5217  * @phba: pointer to lpfc hba data structure.
5218  *
5219  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5220  * done and check status.
5221  *
5222  * Return 0 if successful, otherwise -ENODEV.
5223  **/
5224 int
5225 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5226 {
5227 	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5228 	int i, port_error = -ENODEV;
5229 
5230 	if (!phba->sli4_hba.STAregaddr)
5231 		return -ENODEV;
5232 
5233 	/* Wait up to 30 seconds for the SLI Port POST done and ready */
5234 	for (i = 0; i < 3000; i++) {
5235 		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5236 		/* Encounter fatal POST error, break out */
5237 		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5238 			port_error = -ENODEV;
5239 			break;
5240 		}
5241 		if (LPFC_POST_STAGE_ARMFW_READY ==
5242 		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5243 			port_error = 0;
5244 			break;
5245 		}
5246 		msleep(10);
5247 	}
5248 
5249 	if (port_error)
5250 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5251 			"1408 Failure HBA POST Status: sta_reg=0x%x, "
5252 			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5253 			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
5254 			bf_get(lpfc_hst_state_perr, &sta_reg),
5255 			bf_get(lpfc_hst_state_sfi, &sta_reg),
5256 			bf_get(lpfc_hst_state_nip, &sta_reg),
5257 			bf_get(lpfc_hst_state_ipc, &sta_reg),
5258 			bf_get(lpfc_hst_state_xrom, &sta_reg),
5259 			bf_get(lpfc_hst_state_dl, &sta_reg),
5260 			bf_get(lpfc_hst_state_port_status, &sta_reg));
5261 
5262 	/* Log device information */
5263 	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5264 	if (bf_get(lpfc_sli_intf_valid,
5265 		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5266 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5267 				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5268 				"FeatureL1=0x%x, FeatureL2=0x%x\n",
5269 				bf_get(lpfc_sli_intf_sli_family,
5270 				       &phba->sli4_hba.sli_intf),
5271 				bf_get(lpfc_sli_intf_slirev,
5272 				       &phba->sli4_hba.sli_intf),
5273 				bf_get(lpfc_sli_intf_featurelevel1,
5274 				       &phba->sli4_hba.sli_intf),
5275 				bf_get(lpfc_sli_intf_featurelevel2,
5276 				       &phba->sli4_hba.sli_intf));
5277 	}
5278 	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5279 	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5280 	/* With unrecoverable error, log the error message and return error */
5281 	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5282 	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5283 	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5284 	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5285 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5286 				"1422 HBA Unrecoverable error: "
5287 				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5288 				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5289 				uerrlo_reg.word0, uerrhi_reg.word0,
5290 				phba->sli4_hba.ue_mask_lo,
5291 				phba->sli4_hba.ue_mask_hi);
5292 		return -ENODEV;
5293 	}
5294 
5295 	return port_error;
5296 }
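
/*
 * The POST wait above is a bounded register poll: 3000 iterations with a
 * 10 ms sleep between reads gives the 30 second budget.  The idiom in
 * isolation (hypothetical helper, generic ready value):
 */
#if 0	/* illustrative sketch only */
static int lpfc_example_poll_ready(void __iomem *reg, uint32_t ready_val,
				   int timeout_ms)
{
	int waited;

	for (waited = 0; waited < timeout_ms; waited += 10) {
		if (readl(reg) == ready_val)
			return 0;	/* device reached the ready state */
		msleep(10);
	}
	return -ETIMEDOUT;		/* budget exhausted */
}
#endif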
5297 
5298 /**
5299  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5300  * @phba: pointer to lpfc hba data structure.
5301  *
5302  * This routine is invoked to set up SLI4 BAR0 PCI config space register
5303  * memory map.
5304  **/
5305 static void
5306 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5307 {
5308 	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5309 					LPFC_UERR_STATUS_LO;
5310 	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5311 					LPFC_UERR_STATUS_HI;
5312 	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5313 					LPFC_UE_MASK_LO;
5314 	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5315 					LPFC_UE_MASK_HI;
5316 	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5317 					LPFC_SLI_INTF;
5318 }
5319 
5320 /**
5321  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5322  * @phba: pointer to lpfc hba data structure.
5323  *
5324  * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5325  * memory map.
5326  **/
5327 static void
5328 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5329 {
5330 
5331 	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5332 				    LPFC_HST_STATE;
5333 	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5334 				    LPFC_HST_ISR0;
5335 	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5336 				    LPFC_HST_IMR0;
5337 	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5338 				     LPFC_HST_ISCR0;
5339 	return;
5340 }
5341 
5342 /**
5343  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5344  * @phba: pointer to lpfc hba data structure.
5345  * @vf: virtual function number
5346  *
5347  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5348  * based on the given virtual function number, @vf.
5349  *
5350  * Return 0 if successful, otherwise -ENODEV.
5351  **/
5352 static int
5353 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5354 {
5355 	if (vf > LPFC_VIR_FUNC_MAX)
5356 		return -ENODEV;
5357 
5358 	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5359 				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5360 	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5361 				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5362 	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5363 				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5364 	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5365 				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5366 	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5367 				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5368 	return 0;
5369 }
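
/*
 * Each virtual function owns one LPFC_VFR_PAGE_SIZE page of doorbells in
 * BAR2, so every address computed above is simply base + vf * page_size +
 * fixed register offset.  The computation in isolation (hypothetical
 * helper name):
 */
#if 0	/* illustrative sketch only */
static void __iomem *lpfc_example_vf_doorbell(void __iomem *bar2_base,
					      uint32_t vf,
					      unsigned long reg_offset)
{
	/* VF doorbell pages are laid out back to back from BAR2 */
	return bar2_base + vf * LPFC_VFR_PAGE_SIZE + reg_offset;
}
#endif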
5370 
5371 /**
5372  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5373  * @phba: pointer to lpfc hba data structure.
5374  *
5375  * This routine is invoked to create the bootstrap mailbox
5376  * region consistent with the SLI-4 interface spec.  This
5377  * routine allocates all memory necessary to communicate
5378  * mailbox commands to the port and sets up all alignment
5379  * needs.  No locks are expected to be held when calling
5380  * this routine.
5381  *
5382  * Return codes
5383  * 	0 - successful
5384  * 	-ENOMEM - could not allocate memory.
5385  **/
5386 static int
5387 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5388 {
5389 	uint32_t bmbx_size;
5390 	struct lpfc_dmabuf *dmabuf;
5391 	struct dma_address *dma_address;
5392 	uint32_t pa_addr;
5393 	uint64_t phys_addr;
5394 
5395 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5396 	if (!dmabuf)
5397 		return -ENOMEM;
5398 
5399 	/*
5400 	 * The bootstrap mailbox region consists of 2 parts
5401 	 * plus an alignment restriction of 16 bytes.
5402 	 */
5403 	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5404 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5405 					  bmbx_size,
5406 					  &dmabuf->phys,
5407 					  GFP_KERNEL);
5408 	if (!dmabuf->virt) {
5409 		kfree(dmabuf);
5410 		return -ENOMEM;
5411 	}
5412 	memset(dmabuf->virt, 0, bmbx_size);
5413 
5414 	/*
5415 	 * Initialize the bootstrap mailbox pointers now so that the register
5416 	 * operations are simple later.  The mailbox dma address is required
5417 	 * to be 16-byte aligned.  Also align the virtual memory as each
5418 	 * mailbox is copied into the bmbx mailbox region before issuing the
5419 	 * command to the port.
5420 	 */
5421 	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5422 	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5423 
5424 	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5425 					      LPFC_ALIGN_16_BYTE);
5426 	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5427 					      LPFC_ALIGN_16_BYTE);
5428 
5429 	/*
5430 	 * Set the high and low physical addresses now.  The SLI4 alignment
5431 	 * requirement is 16 bytes and the mailbox is posted to the port
5432 	 * as two 30-bit addresses.  The other data is a bit marking whether
5433 	 * the 30-bit address is the high or low address.
5434 	 * Upcast bmbx aphys to 64bits so shift instruction compiles
5435 	 * clean on 32 bit machines.
5436 	 */
5437 	dma_address = &phba->sli4_hba.bmbx.dma_address;
5438 	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5439 	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5440 	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5441 					   LPFC_BMBX_BIT1_ADDR_HI);
5442 
5443 	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5444 	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5445 					   LPFC_BMBX_BIT1_ADDR_LO);
5446 	return 0;
5447 }
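
/*
 * Two details above are worth isolating: the buffer is over-allocated by
 * LPFC_ALIGN_16_BYTE - 1 bytes so that a 16-byte-aligned block always
 * fits, and the aligned physical address is posted as two 30-bit halves
 * tagged with a hi/lo marker in the low bits.  The split as a sketch
 * (hypothetical helper name):
 */
#if 0	/* illustrative sketch only */
static void lpfc_example_split_bmbx_addr(uint64_t aligned_phys,
					 uint32_t *hi, uint32_t *lo)
{
	/* bits 63..34 -> high word, bits 33..4 -> low word (16B aligned) */
	*hi = (uint32_t)(((aligned_phys >> 34) & 0x3fffffff) << 2) |
	      LPFC_BMBX_BIT1_ADDR_HI;
	*lo = (uint32_t)(((aligned_phys >> 4) & 0x3fffffff) << 2) |
	      LPFC_BMBX_BIT1_ADDR_LO;
}
#endif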
5448 
5449 /**
5450  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5451  * @phba: pointer to lpfc hba data structure.
5452  *
5453  * This routine is invoked to teardown the bootstrap mailbox
5454  * region and release all host resources. This routine requires
5455  * the caller to ensure all mailbox commands have been recovered, no
5456  * additional mailbox commands are sent, and interrupts are disabled
5457  * before calling this routine.
5458  *
5459  **/
5460 static void
5461 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5462 {
5463 	dma_free_coherent(&phba->pcidev->dev,
5464 			  phba->sli4_hba.bmbx.bmbx_size,
5465 			  phba->sli4_hba.bmbx.dmabuf->virt,
5466 			  phba->sli4_hba.bmbx.dmabuf->phys);
5467 
5468 	kfree(phba->sli4_hba.bmbx.dmabuf);
5469 	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5470 }
5471 
5472 /**
5473  * lpfc_sli4_read_config - Get the config parameters.
5474  * @phba: pointer to lpfc hba data structure.
5475  *
5476  * This routine is invoked to read the configuration parameters from the HBA.
5477  * The configuration parameters are used to set the base and maximum values
5478  * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5479  * allocation for the port.
5480  *
5481  * Return codes
5482  * 	0 - successful
5483  * 	-ENOMEM - No available memory
5484  *      -EIO - The mailbox failed to complete successfully.
5485  **/
5486 static int
5487 lpfc_sli4_read_config(struct lpfc_hba *phba)
5488 {
5489 	LPFC_MBOXQ_t *pmb;
5490 	struct lpfc_mbx_read_config *rd_config;
5491 	uint32_t rc = 0;
5492 
5493 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5494 	if (!pmb) {
5495 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5496 				"2011 Unable to allocate memory for issuing "
5497 				"SLI_CONFIG_SPECIAL mailbox command\n");
5498 		return -ENOMEM;
5499 	}
5500 
5501 	lpfc_read_config(phba, pmb);
5502 
5503 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5504 	if (rc != MBX_SUCCESS) {
5505 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5506 			"2012 Mailbox failed, mbxCmd x%x "
5507 			"READ_CONFIG, mbxStatus x%x\n",
5508 			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5509 			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5510 		rc = -EIO;
5511 	} else {
5512 		rd_config = &pmb->u.mqe.un.rd_config;
5513 		phba->sli4_hba.max_cfg_param.max_xri =
5514 			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5515 		phba->sli4_hba.max_cfg_param.xri_base =
5516 			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5517 		phba->sli4_hba.max_cfg_param.max_vpi =
5518 			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5519 		phba->sli4_hba.max_cfg_param.vpi_base =
5520 			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5521 		phba->sli4_hba.max_cfg_param.max_rpi =
5522 			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5523 		phba->sli4_hba.max_cfg_param.rpi_base =
5524 			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5525 		phba->sli4_hba.max_cfg_param.max_vfi =
5526 			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5527 		phba->sli4_hba.max_cfg_param.vfi_base =
5528 			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5529 		phba->sli4_hba.max_cfg_param.max_fcfi =
5530 			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5531 		phba->sli4_hba.max_cfg_param.fcfi_base =
5532 			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5533 		phba->sli4_hba.max_cfg_param.max_eq =
5534 			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5535 		phba->sli4_hba.max_cfg_param.max_rq =
5536 			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5537 		phba->sli4_hba.max_cfg_param.max_wq =
5538 			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5539 		phba->sli4_hba.max_cfg_param.max_cq =
5540 			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5541 		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5542 		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5543 		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5544 		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5545 		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5546 		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5547 				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5548 		phba->max_vports = phba->max_vpi;
5549 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5550 				"2003 cfg params XRI(B:%d M:%d), "
5551 				"VPI(B:%d M:%d) "
5552 				"VFI(B:%d M:%d) "
5553 				"RPI(B:%d M:%d) "
5554 				"FCFI(B:%d M:%d)\n",
5555 				phba->sli4_hba.max_cfg_param.xri_base,
5556 				phba->sli4_hba.max_cfg_param.max_xri,
5557 				phba->sli4_hba.max_cfg_param.vpi_base,
5558 				phba->sli4_hba.max_cfg_param.max_vpi,
5559 				phba->sli4_hba.max_cfg_param.vfi_base,
5560 				phba->sli4_hba.max_cfg_param.max_vfi,
5561 				phba->sli4_hba.max_cfg_param.rpi_base,
5562 				phba->sli4_hba.max_cfg_param.max_rpi,
5563 				phba->sli4_hba.max_cfg_param.fcfi_base,
5564 				phba->sli4_hba.max_cfg_param.max_fcfi);
5565 	}
5566 	mempool_free(pmb, phba->mbox_mem_pool);
5567 
5568 	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
5569 	if (phba->cfg_hba_queue_depth >
5570 		(phba->sli4_hba.max_cfg_param.max_xri -
5571 			lpfc_sli4_get_els_iocb_cnt(phba)))
5572 		phba->cfg_hba_queue_depth =
5573 			phba->sli4_hba.max_cfg_param.max_xri -
5574 				lpfc_sli4_get_els_iocb_cnt(phba);
5575 	return rc;
5576 }
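
/*
 * The queue-depth adjustment at the end of this routine is a clamp: the
 * HBA may never queue more SCSI commands than it has XRIs left once the
 * ELS IOCBs are reserved.  The same clamp written with the kernel's
 * min_t() helper (a sketch, assuming cfg_hba_queue_depth is 32-bit):
 */
#if 0	/* illustrative sketch only */
	phba->cfg_hba_queue_depth =
		min_t(uint32_t, phba->cfg_hba_queue_depth,
		      phba->sli4_hba.max_cfg_param.max_xri -
		      lpfc_sli4_get_els_iocb_cnt(phba));
#endif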
5577 
5578 /**
5579  * lpfc_setup_endian_order - Notify the port of the host's endian order.
5580  * @phba: pointer to lpfc hba data structure.
5581  *
5582  * This routine is invoked to setup the host-side endian order to the
5583  * HBA consistent with the SLI-4 interface spec.
5584  *
5585  * Return codes
5586  * 	0 - successful
5587  * 	-ENOMEM - No available memory
5588  *      -EIO - The mailbox failed to complete successfully.
5589  **/
5590 static int
5591 lpfc_setup_endian_order(struct lpfc_hba *phba)
5592 {
5593 	LPFC_MBOXQ_t *mboxq;
5594 	uint32_t rc = 0;
5595 	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5596 				      HOST_ENDIAN_HIGH_WORD1};
5597 
5598 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5599 	if (!mboxq) {
5600 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5601 				"0492 Unable to allocate memory for issuing "
5602 				"SLI_CONFIG_SPECIAL mailbox command\n");
5603 		return -ENOMEM;
5604 	}
5605 
5606 	/*
5607 	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5608 	 * words to contain special data values and no other data.
5609 	 */
5610 	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5611 	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5612 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5613 	if (rc != MBX_SUCCESS) {
5614 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5615 				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
5616 				"status x%x\n",
5617 				rc);
5618 		rc = -EIO;
5619 	}
5620 
5621 	mempool_free(mboxq, phba->mbox_mem_pool);
5622 	return rc;
5623 }
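
/*
 * The two magic words written above let the port deduce the host byte
 * order from the bit pattern it receives.  Host-side, the same question
 * can be answered with a one-word probe (sketch, hypothetical helper):
 */
#if 0	/* illustrative sketch only */
static int lpfc_example_host_is_big_endian(void)
{
	uint32_t probe = 0x01020304;

	/* On a big-endian host the first byte in memory is 0x01 */
	return *(uint8_t *)&probe == 0x01;
}
#endif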
5624 
5625 /**
5626  * lpfc_sli4_queue_create - Create all the SLI4 queues
5627  * @phba: pointer to lpfc hba data structure.
5628  *
5629  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5630  * operation. For each SLI4 queue type, the parameters such as queue entry
5631  * count (queue depth) shall be taken from the module parameter. For now,
5632  * we just use some constant number as a placeholder.
5633  *
5634  * Return codes
5635  *      0 - successful
5636  *      -ENOMEM - No available memory
5638  **/
5639 static int
5640 lpfc_sli4_queue_create(struct lpfc_hba *phba)
5641 {
5642 	struct lpfc_queue *qdesc;
5643 	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5644 	int cfg_fcp_wq_count;
5645 	int cfg_fcp_eq_count;
5646 
5647 	/*
5648 	 * Sanity check for configured queue parameters against the run-time
5649 	 * device parameters
5650 	 */
5651 
5652 	/* Sanity check on FCP fast-path WQ parameters */
5653 	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5654 	if (cfg_fcp_wq_count >
5655 	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5656 		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5657 				   LPFC_SP_WQN_DEF;
5658 		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5659 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5660 					"2581 Not enough WQs (%d) from "
5661 					"the pci function for supporting "
5662 					"FCP WQs (%d)\n",
5663 					phba->sli4_hba.max_cfg_param.max_wq,
5664 					phba->cfg_fcp_wq_count);
5665 			goto out_error;
5666 		}
5667 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5668 				"2582 Not enough WQs (%d) from the pci "
5669 				"function for supporting the requested "
5670 				"FCP WQs (%d), the actual FCP WQs can "
5671 				"be supported: %d\n",
5672 				phba->sli4_hba.max_cfg_param.max_wq,
5673 				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5674 	}
5675 	/* The actual number of FCP work queues adopted */
5676 	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5677 
5678 	/* Sanity check on FCP fast-path EQ parameters */
5679 	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5680 	if (cfg_fcp_eq_count >
5681 	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5682 		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5683 				   LPFC_SP_EQN_DEF;
5684 		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5685 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5686 					"2574 Not enough EQs (%d) from the "
5687 					"pci function for supporting FCP "
5688 					"EQs (%d)\n",
5689 					phba->sli4_hba.max_cfg_param.max_eq,
5690 					phba->cfg_fcp_eq_count);
5691 			goto out_error;
5692 		}
5693 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5694 				"2575 Not enough EQs (%d) from the pci "
5695 				"function for supporting the requested "
5696 				"FCP EQs (%d), the actual FCP EQs can "
5697 				"be supported: %d\n",
5698 				phba->sli4_hba.max_cfg_param.max_eq,
5699 				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5700 	}
5701 	/* It does not make sense to have more EQs than WQs */
5702 	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5703 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5704 				"2593 The FCP EQ count(%d) cannot be greater "
5705 				"than the FCP WQ count(%d), limiting the "
5706 				"FCP EQ count to %d\n", cfg_fcp_eq_count,
5707 				phba->cfg_fcp_wq_count,
5708 				phba->cfg_fcp_wq_count);
5709 		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5710 	}
5711 	/* The actual number of FCP event queues adopted */
5712 	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5713 	/* The overall number of event queues used */
5714 	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5715 
5716 	/*
5717 	 * Create Event Queues (EQs)
5718 	 */
5719 
5720 	/* Get EQ depth from module parameter, fake the default for now */
5721 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5722 	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5723 
5724 	/* Create slow path event queue */
5725 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5726 				      phba->sli4_hba.eq_ecount);
5727 	if (!qdesc) {
5728 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5729 				"0496 Failed allocate slow-path EQ\n");
5730 		goto out_error;
5731 	}
5732 	phba->sli4_hba.sp_eq = qdesc;
5733 
5734 	/* Create fast-path FCP Event Queue(s) */
5735 	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5736 			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5737 	if (!phba->sli4_hba.fp_eq) {
5738 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5739 				"2576 Failed allocate memory for fast-path "
5740 				"EQ record array\n");
5741 		goto out_free_sp_eq;
5742 	}
5743 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5744 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5745 					      phba->sli4_hba.eq_ecount);
5746 		if (!qdesc) {
5747 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5748 					"0497 Failed allocate fast-path EQ\n");
5749 			goto out_free_fp_eq;
5750 		}
5751 		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5752 	}
5753 
5754 	/*
5755 	 * Create Complete Queues (CQs)
5756 	 */
5757 
5758 	/* Get CQ depth from module parameter, fake the default for now */
5759 	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5760 	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5761 
5762 	/* Create slow-path Mailbox Command Complete Queue */
5763 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5764 				      phba->sli4_hba.cq_ecount);
5765 	if (!qdesc) {
5766 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5767 				"0500 Failed allocate slow-path mailbox CQ\n");
5768 		goto out_free_fp_eq;
5769 	}
5770 	phba->sli4_hba.mbx_cq = qdesc;
5771 
5772 	/* Create slow-path ELS Complete Queue */
5773 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5774 				      phba->sli4_hba.cq_ecount);
5775 	if (!qdesc) {
5776 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5777 				"0501 Failed allocate slow-path ELS CQ\n");
5778 		goto out_free_mbx_cq;
5779 	}
5780 	phba->sli4_hba.els_cq = qdesc;
5781 
5783 	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5784 	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5785 				phba->cfg_fcp_eq_count), GFP_KERNEL);
5786 	if (!phba->sli4_hba.fcp_cq) {
5787 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5788 				"2577 Failed allocate memory for fast-path "
5789 				"CQ record array\n");
5790 		goto out_free_els_cq;
5791 	}
5792 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5793 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5794 					      phba->sli4_hba.cq_ecount);
5795 		if (!qdesc) {
5796 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5797 					"0499 Failed allocate fast-path FCP "
5798 					"CQ (%d)\n", fcp_cqidx);
5799 			goto out_free_fcp_cq;
5800 		}
5801 		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5802 	}
5803 
5804 	/* Create Mailbox Command Queue */
5805 	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5806 	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5807 
5808 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5809 				      phba->sli4_hba.mq_ecount);
5810 	if (!qdesc) {
5811 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5812 				"0505 Failed allocate slow-path MQ\n");
5813 		goto out_free_fcp_cq;
5814 	}
5815 	phba->sli4_hba.mbx_wq = qdesc;
5816 
5817 	/*
5818 	 * Create all the Work Queues (WQs)
5819 	 */
5820 	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5821 	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5822 
5823 	/* Create slow-path ELS Work Queue */
5824 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5825 				      phba->sli4_hba.wq_ecount);
5826 	if (!qdesc) {
5827 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5828 				"0504 Failed allocate slow-path ELS WQ\n");
5829 		goto out_free_mbx_wq;
5830 	}
5831 	phba->sli4_hba.els_wq = qdesc;
5832 
5833 	/* Create fast-path FCP Work Queue(s) */
5834 	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5835 				phba->cfg_fcp_wq_count), GFP_KERNEL);
5836 	if (!phba->sli4_hba.fcp_wq) {
5837 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5838 				"2578 Failed allocate memory for fast-path "
5839 				"WQ record array\n");
5840 		goto out_free_els_wq;
5841 	}
5842 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5843 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5844 					      phba->sli4_hba.wq_ecount);
5845 		if (!qdesc) {
5846 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5847 					"0503 Failed allocate fast-path FCP "
5848 					"WQ (%d)\n", fcp_wqidx);
5849 			goto out_free_fcp_wq;
5850 		}
5851 		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5852 	}
5853 
5854 	/*
5855 	 * Create Receive Queue (RQ)
5856 	 */
5857 	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5858 	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5859 
5860 	/* Create Receive Queue for header */
5861 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5862 				      phba->sli4_hba.rq_ecount);
5863 	if (!qdesc) {
5864 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5865 				"0506 Failed allocate receive HRQ\n");
5866 		goto out_free_fcp_wq;
5867 	}
5868 	phba->sli4_hba.hdr_rq = qdesc;
5869 
5870 	/* Create Receive Queue for data */
5871 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5872 				      phba->sli4_hba.rq_ecount);
5873 	if (!qdesc) {
5874 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5875 				"0507 Failed allocate receive DRQ\n");
5876 		goto out_free_hdr_rq;
5877 	}
5878 	phba->sli4_hba.dat_rq = qdesc;
5879 
5880 	return 0;
5881 
5882 out_free_hdr_rq:
5883 	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5884 	phba->sli4_hba.hdr_rq = NULL;
5885 out_free_fcp_wq:
5886 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5887 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5888 		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5889 	}
5890 	kfree(phba->sli4_hba.fcp_wq);
5891 out_free_els_wq:
5892 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5893 	phba->sli4_hba.els_wq = NULL;
5894 out_free_mbx_wq:
5895 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5896 	phba->sli4_hba.mbx_wq = NULL;
5897 out_free_fcp_cq:
5898 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5899 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5900 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5901 	}
5902 	kfree(phba->sli4_hba.fcp_cq);
5903 out_free_els_cq:
5904 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5905 	phba->sli4_hba.els_cq = NULL;
5906 out_free_mbx_cq:
5907 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5908 	phba->sli4_hba.mbx_cq = NULL;
5909 out_free_fp_eq:
5910 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5911 		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5912 		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5913 	}
5914 	kfree(phba->sli4_hba.fp_eq);
5915 out_free_sp_eq:
5916 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5917 	phba->sli4_hba.sp_eq = NULL;
5918 out_error:
5919 	return -ENOMEM;
5920 }
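
/*
 * The error path above is the canonical kernel unwind ladder: each
 * allocation gets its own label, and a failure jumps to the label that
 * releases everything allocated so far, in reverse order.  Skeleton of
 * the pattern (hypothetical resources):
 */
#if 0	/* illustrative sketch only */
static int lpfc_example_create_resources(void)
{
	void *a, *b;

	a = kzalloc(32, GFP_KERNEL);
	if (!a)
		goto out_error;
	b = kzalloc(32, GFP_KERNEL);
	if (!b)
		goto out_free_a;	/* undo only what succeeded */
	return 0;

out_free_a:
	kfree(a);
out_error:
	return -ENOMEM;
}
#endif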
5921 
5922 /**
5923  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5924  * @phba: pointer to lpfc hba data structure.
5925  *
5926  * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5927  * This routine is invoked to release all the SLI4 queues used by the FCoE
5928  * HBA operation.
5934 static void
5935 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5936 {
5937 	int fcp_qidx;
5938 
5939 	/* Release mailbox command work queue */
5940 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5941 	phba->sli4_hba.mbx_wq = NULL;
5942 
5943 	/* Release ELS work queue */
5944 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5945 	phba->sli4_hba.els_wq = NULL;
5946 
5947 	/* Release FCP work queue */
5948 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5949 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5950 	kfree(phba->sli4_hba.fcp_wq);
5951 	phba->sli4_hba.fcp_wq = NULL;
5952 
5953 	/* Release unsolicited receive queue */
5954 	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5955 	phba->sli4_hba.hdr_rq = NULL;
5956 	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5957 	phba->sli4_hba.dat_rq = NULL;
5958 
5959 	/* Release ELS complete queue */
5960 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5961 	phba->sli4_hba.els_cq = NULL;
5962 
5963 	/* Release mailbox command complete queue */
5964 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5965 	phba->sli4_hba.mbx_cq = NULL;
5966 
5967 	/* Release FCP response complete queue */
5968 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5969 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5970 	kfree(phba->sli4_hba.fcp_cq);
5971 	phba->sli4_hba.fcp_cq = NULL;
5972 
5973 	/* Release fast-path event queue */
5974 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5975 		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5976 	kfree(phba->sli4_hba.fp_eq);
5977 	phba->sli4_hba.fp_eq = NULL;
5978 
5979 	/* Release slow-path event queue */
5980 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5981 	phba->sli4_hba.sp_eq = NULL;
5982 
5983 	return;
5984 }
5985 
5986 /**
5987  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5988  * @phba: pointer to lpfc hba data structure.
5989  *
5990  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5991  * operation.
5992  *
5993  * Return codes
5994  *      0 - successful
5995  *      -ENOMEM - No available memory
5996  *      -EIO - The mailbox failed to complete successfully.
5997  **/
5998 int
5999 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6000 {
6001 	int rc = -ENOMEM;
6002 	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6003 	int fcp_cq_index = 0;
6004 
6005 	/*
6006 	 * Set up Event Queues (EQs)
6007 	 */
6008 
6009 	/* Set up slow-path event queue */
6010 	if (!phba->sli4_hba.sp_eq) {
6011 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6012 				"0520 Slow-path EQ not allocated\n");
6013 		goto out_error;
6014 	}
6015 	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6016 			    LPFC_SP_DEF_IMAX);
6017 	if (rc) {
6018 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6019 				"0521 Failed setup of slow-path EQ: "
6020 				"rc = 0x%x\n", rc);
6021 		goto out_error;
6022 	}
6023 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6024 			"2583 Slow-path EQ setup: queue-id=%d\n",
6025 			phba->sli4_hba.sp_eq->queue_id);
6026 
6027 	/* Set up fast-path event queue */
6028 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6029 		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6030 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6031 					"0522 Fast-path EQ (%d) not "
6032 					"allocated\n", fcp_eqidx);
6033 			goto out_destroy_fp_eq;
6034 		}
6035 		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6036 				    phba->cfg_fcp_imax);
6037 		if (rc) {
6038 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6039 					"0523 Failed setup of fast-path EQ "
6040 					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
6041 			goto out_destroy_fp_eq;
6042 		}
6043 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6044 				"2584 Fast-path EQ setup: "
6045 				"queue[%d]-id=%d\n", fcp_eqidx,
6046 				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6047 	}
6048 
6049 	/*
6050 	 * Set up Complete Queues (CQs)
6051 	 */
6052 
6053 	/* Set up slow-path MBOX Complete Queue as the first CQ */
6054 	if (!phba->sli4_hba.mbx_cq) {
6055 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6056 				"0528 Mailbox CQ not allocated\n");
6057 		goto out_destroy_fp_eq;
6058 	}
6059 	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6060 			    LPFC_MCQ, LPFC_MBOX);
6061 	if (rc) {
6062 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6063 				"0529 Failed setup of slow-path mailbox CQ: "
6064 				"rc = 0x%x\n", rc);
6065 		goto out_destroy_fp_eq;
6066 	}
6067 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6068 			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6069 			phba->sli4_hba.mbx_cq->queue_id,
6070 			phba->sli4_hba.sp_eq->queue_id);
6071 
6072 	/* Set up slow-path ELS Complete Queue */
6073 	if (!phba->sli4_hba.els_cq) {
6074 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6075 				"0530 ELS CQ not allocated\n");
6076 		goto out_destroy_mbx_cq;
6077 	}
6078 	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6079 			    LPFC_WCQ, LPFC_ELS);
6080 	if (rc) {
6081 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6082 				"0531 Failed setup of slow-path ELS CQ: "
6083 				"rc = 0x%x\n", rc);
6084 		goto out_destroy_mbx_cq;
6085 	}
6086 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6087 			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6088 			phba->sli4_hba.els_cq->queue_id,
6089 			phba->sli4_hba.sp_eq->queue_id);
6090 
6091 	/* Set up fast-path FCP Response Complete Queue */
6092 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6093 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6094 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6095 					"0526 Fast-path FCP CQ (%d) not "
6096 					"allocated\n", fcp_cqidx);
6097 			goto out_destroy_fcp_cq;
6098 		}
6099 		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6100 				    phba->sli4_hba.fp_eq[fcp_cqidx],
6101 				    LPFC_WCQ, LPFC_FCP);
6102 		if (rc) {
6103 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6104 					"0527 Failed setup of fast-path FCP "
6105 					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6106 			goto out_destroy_fcp_cq;
6107 		}
6108 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6109 				"2588 FCP CQ setup: cq[%d]-id=%d, "
6110 				"parent eq[%d]-id=%d\n",
6111 				fcp_cqidx,
6112 				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6113 				fcp_cqidx,
6114 				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6115 	}
6116 
6117 	/*
6118 	 * Set up all the Work Queues (WQs)
6119 	 */
6120 
6121 	/* Set up Mailbox Command Queue */
6122 	if (!phba->sli4_hba.mbx_wq) {
6123 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6124 				"0538 Slow-path MQ not allocated\n");
6125 		goto out_destroy_fcp_cq;
6126 	}
6127 	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6128 			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6129 	if (rc) {
6130 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6131 				"0539 Failed setup of slow-path MQ: "
6132 				"rc = 0x%x\n", rc);
6133 		goto out_destroy_fcp_cq;
6134 	}
6135 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6136 			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6137 			phba->sli4_hba.mbx_wq->queue_id,
6138 			phba->sli4_hba.mbx_cq->queue_id);
6139 
6140 	/* Set up slow-path ELS Work Queue */
6141 	if (!phba->sli4_hba.els_wq) {
6142 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6143 				"0536 Slow-path ELS WQ not allocated\n");
6144 		goto out_destroy_mbx_wq;
6145 	}
6146 	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6147 			    phba->sli4_hba.els_cq, LPFC_ELS);
6148 	if (rc) {
6149 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6150 				"0537 Failed setup of slow-path ELS WQ: "
6151 				"rc = 0x%x\n", rc);
6152 		goto out_destroy_mbx_wq;
6153 	}
6154 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6155 			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6156 			phba->sli4_hba.els_wq->queue_id,
6157 			phba->sli4_hba.els_cq->queue_id);
6158 
6159 	/* Set up fast-path FCP Work Queue */
6160 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6161 		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6162 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6163 					"0534 Fast-path FCP WQ (%d) not "
6164 					"allocated\n", fcp_wqidx);
6165 			goto out_destroy_fcp_wq;
6166 		}
6167 		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6168 				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6169 				    LPFC_FCP);
6170 		if (rc) {
6171 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6172 					"0535 Failed setup of fast-path FCP "
6173 					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6174 			goto out_destroy_fcp_wq;
6175 		}
6176 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6177 				"2591 FCP WQ setup: wq[%d]-id=%d, "
6178 				"parent cq[%d]-id=%d\n",
6179 				fcp_wqidx,
6180 				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6181 				fcp_cq_index,
6182 				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6183 		/* Round-robin assignment of FCP WQs to Completion Queues */
6184 		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6185 	}
6186 
6187 	/*
6188 	 * Create Receive Queue (RQ)
6189 	 */
6190 	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6191 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6192 				"0540 Receive Queue not allocated\n");
6193 		goto out_destroy_fcp_wq;
6194 	}
6195 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6196 			    phba->sli4_hba.els_cq, LPFC_USOL);
6197 	if (rc) {
6198 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6199 				"0541 Failed setup of Receive Queue: "
6200 				"rc = 0x%x\n", rc);
6201 		goto out_destroy_fcp_wq;
6202 	}
6203 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6204 			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6205 			"parent cq-id=%d\n",
6206 			phba->sli4_hba.hdr_rq->queue_id,
6207 			phba->sli4_hba.dat_rq->queue_id,
6208 			phba->sli4_hba.els_cq->queue_id);
6209 	return 0;
6210 
6211 out_destroy_fcp_wq:
6212 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6213 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6214 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6215 out_destroy_mbx_wq:
6216 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6217 out_destroy_fcp_cq:
6218 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6219 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6220 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6221 out_destroy_mbx_cq:
6222 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6223 out_destroy_fp_eq:
6224 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6225 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6226 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6227 out_error:
6228 	return rc;
6229 }
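
/*
 * Note the WQ-to-CQ association at the end of the FCP WQ loop above: when
 * there are more work queues than completion queues, the modulo increment
 * spreads the WQs evenly across the available CQs.  In isolation
 * (hypothetical names):
 */
#if 0	/* illustrative sketch only */
	/* wq[i] completes on cq[i % num_cqs] */
	for (wq_idx = 0, cq_idx = 0; wq_idx < num_wqs; wq_idx++) {
		bind_wq_to_cq(wq[wq_idx], cq[cq_idx]);
		cq_idx = (cq_idx + 1) % num_cqs;
	}
#endif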
6230 
6231 /**
6232  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6233  * @phba: pointer to lpfc hba data structure.
6234  *
6235  * This routine is invoked to unset all the SLI4 queues used by the FCoE
6236  * HBA operation.
6242  **/
6243 void
6244 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6245 {
6246 	int fcp_qidx;
6247 
6248 	/* Unset mailbox command work queue */
6249 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6250 	/* Unset ELS work queue */
6251 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6252 	/* Unset unsolicited receive queue */
6253 	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6254 	/* Unset FCP work queue */
6255 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6256 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6257 	/* Unset mailbox command complete queue */
6258 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6259 	/* Unset ELS complete queue */
6260 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6261 	/* Unset FCP response complete queue */
6262 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6263 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6264 	/* Unset fast-path event queue */
6265 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6266 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6267 	/* Unset slow-path event queue */
6268 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6269 }
6270 
6271 /**
6272  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6273  * @phba: pointer to lpfc hba data structure.
6274  *
6275  * This routine is invoked to allocate and set up a pool of completion queue
6276  * events. The body of the completion queue event is a completion queue entry
6277  * events. The body of a completion queue event is a completion queue entry
6278  * (CQE). For now, this pool is used for the interrupt service routine to queue
6279  *   - Mailbox asynchronous events
6280  *   - Receive queue completion unsolicited events
6281  * Later, this can be used for all the slow-path events.
6282  *
6283  * Return codes
6284  *      0 - successful
6285  *      -ENOMEM - No available memory
6286  **/
6287 static int
6288 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6289 {
6290 	struct lpfc_cq_event *cq_event;
6291 	int i;
6292 
6293 	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6294 		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6295 		if (!cq_event)
6296 			goto out_pool_create_fail;
6297 		list_add_tail(&cq_event->list,
6298 			      &phba->sli4_hba.sp_cqe_event_pool);
6299 	}
6300 	return 0;
6301 
6302 out_pool_create_fail:
6303 	lpfc_sli4_cq_event_pool_destroy(phba);
6304 	return -ENOMEM;
6305 }
6306 
6307 /**
6308  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6309  * @phba: pointer to lpfc hba data structure.
6310  *
6311  * This routine is invoked to free the pool of completion queue events at
6312  * driver unload time. Note that, it is the responsibility of the driver
6313  * driver unload time. Note that it is the responsibility of the driver
6314  * cleanup routine to release all outstanding completion-queue events
6315  * to destroy the pool.
6316  **/
6317 static void
6318 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6319 {
6320 	struct lpfc_cq_event *cq_event, *next_cq_event;
6321 
6322 	list_for_each_entry_safe(cq_event, next_cq_event,
6323 				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6324 		list_del(&cq_event->list);
6325 		kfree(cq_event);
6326 	}
6327 }
6328 
6329 /**
6330  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6331  * @phba: pointer to lpfc hba data structure.
6332  *
6333  * This routine is the lock-free version of the API invoked to allocate a
6334  * completion-queue event from the free pool; the caller must hold hbalock.
6335  *
6336  * Return: Pointer to the newly allocated completion-queue event if successful
6337  *         NULL otherwise.
6338  **/
6339 struct lpfc_cq_event *
6340 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6341 {
6342 	struct lpfc_cq_event *cq_event = NULL;
6343 
6344 	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6345 			 struct lpfc_cq_event, list);
6346 	return cq_event;
6347 }
6348 
6349 /**
6350  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6351  * @phba: pointer to lpfc hba data structure.
6352  *
6353  * This routine is the locked version of the API invoked to allocate a
6354  * completion-queue event from the free pool.
6355  *
6356  * Return: Pointer to the newly allocated completion-queue event if successful
6357  *         NULL otherwise.
6358  **/
6359 struct lpfc_cq_event *
6360 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6361 {
6362 	struct lpfc_cq_event *cq_event;
6363 	unsigned long iflags;
6364 
6365 	spin_lock_irqsave(&phba->hbalock, iflags);
6366 	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6367 	spin_unlock_irqrestore(&phba->hbalock, iflags);
6368 	return cq_event;
6369 }
6370 
6371 /**
6372  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6373  * @phba: pointer to lpfc hba data structure.
6374  * @cq_event: pointer to the completion queue event to be freed.
6375  *
6376  * This routine is the lock-free version of the API invoked to release a
6377  * completion-queue event back into the free pool; the caller must hold hbalock.
6378  **/
6379 void
6380 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6381 			     struct lpfc_cq_event *cq_event)
6382 {
6383 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6384 }
6385 
6386 /**
6387  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6388  * @phba: pointer to lpfc hba data structure.
6389  * @cq_event: pointer to the completion queue event to be freed.
6390  *
6391  * This routine is the locked version of the API invoked to release a
6392  * completion-queue event back into the free pool.
6393  **/
6394 void
6395 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6396 			   struct lpfc_cq_event *cq_event)
6397 {
6398 	unsigned long iflags;
6399 	spin_lock_irqsave(&phba->hbalock, iflags);
6400 	__lpfc_sli4_cq_event_release(phba, cq_event);
6401 	spin_unlock_irqrestore(&phba->hbalock, iflags);
6402 }
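
/*
 * The __lpfc_/lpfc_ pairs above follow the usual locked-wrapper
 * convention: the underscored variant assumes the caller already holds
 * phba->hbalock, and the plain variant takes the lock around it.  The
 * generic shape (hypothetical context structure):
 */
#if 0	/* illustrative sketch only */
static void __example_op(struct example_ctx *c)	/* caller holds c->lock */
{
	list_add_tail(&c->item->list, &c->pool);
}

static void example_op(struct example_ctx *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	__example_op(c);
	spin_unlock_irqrestore(&c->lock, flags);
}
#endif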
6403 
6404 /**
6405  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6406  * @phba: pointer to lpfc hba data structure.
6407  *
6408  * This routine releases all pending completion-queue events back into the
6409  * free pool in preparation for a device reset.
6410  **/
6411 static void
6412 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6413 {
6414 	LIST_HEAD(cqelist);
6415 	struct lpfc_cq_event *cqe;
6416 	unsigned long iflags;
6417 
6418 	/* Retrieve all the pending WCQEs from pending WCQE lists */
6419 	spin_lock_irqsave(&phba->hbalock, iflags);
6420 	/* Pending FCP XRI abort events */
6421 	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6422 			 &cqelist);
6423 	/* Pending ELS XRI abort events */
6424 	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6425 			 &cqelist);
6426 	/* Pending async events */
6427 	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6428 			 &cqelist);
6429 	spin_unlock_irqrestore(&phba->hbalock, iflags);
6430 
6431 	while (!list_empty(&cqelist)) {
6432 		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6433 		lpfc_sli4_cq_event_release(phba, cqe);
6434 	}
6435 }
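
/*
 * The drain above uses the splice-then-process idiom: all pending lists
 * are moved onto a private list while holding the lock, then walked with
 * the lock dropped so the release path can re-take it.  The shape of the
 * idiom (hypothetical names):
 */
#if 0	/* illustrative sketch only */
	LIST_HEAD(tmp);
	unsigned long flags;

	spin_lock_irqsave(&some_lock, flags);
	list_splice_init(&pending_list, &tmp);	/* pending_list is now empty */
	spin_unlock_irqrestore(&some_lock, flags);

	while (!list_empty(&tmp)) {
		/* pop and process entries without holding the lock */
	}
#endif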
6436 
6437 /**
6438  * lpfc_pci_function_reset - Reset pci function.
6439  * @phba: pointer to lpfc hba data structure.
6440  *
6441  * This routine is invoked to request a PCI function reset. It destroys
6442  * all resources assigned to the PCI function that originates this request.
6443  *
6444  * Return codes
6445  *      0 - successful
6446  *      -ENOMEM - No available memory
6447  *      -ENXIO - The mailbox failed to complete successfully.
6448  **/
6449 int
6450 lpfc_pci_function_reset(struct lpfc_hba *phba)
6451 {
6452 	LPFC_MBOXQ_t *mboxq;
6453 	uint32_t rc = 0;
6454 	uint32_t shdr_status, shdr_add_status;
6455 	union lpfc_sli4_cfg_shdr *shdr;
6456 
6457 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6458 	if (!mboxq) {
6459 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6460 				"0494 Unable to allocate memory for issuing "
6461 				"SLI_FUNCTION_RESET mailbox command\n");
6462 		return -ENOMEM;
6463 	}
6464 
6465 	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6466 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6467 			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6468 			 LPFC_SLI4_MBX_EMBED);
6469 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6470 	shdr = (union lpfc_sli4_cfg_shdr *)
6471 		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6472 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6473 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6474 	if (rc != MBX_TIMEOUT)
6475 		mempool_free(mboxq, phba->mbox_mem_pool);
6476 	if (shdr_status || shdr_add_status || rc) {
6477 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6478 				"0495 SLI_FUNCTION_RESET mailbox failed with "
6479 				"status x%x add_status x%x, mbx status x%x\n",
6480 				shdr_status, shdr_add_status, rc);
6481 		rc = -ENXIO;
6482 	}
6483 	return rc;
6484 }
6485 
6486 /**
6487  * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6488  * @phba: pointer to lpfc hba data structure.
6489  * @cnt: number of nop mailbox commands to send.
6490  *
6491  * This routine is invoked to send @cnt NOP mailbox commands and to
6492  * wait for each command to complete.
6493  *
6494  * Return: the number of NOP mailbox commands completed.
6495  **/
6496 static int
6497 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6498 {
6499 	LPFC_MBOXQ_t *mboxq;
6500 	int length, cmdsent;
6501 	uint32_t mbox_tmo;
6502 	uint32_t rc = 0;
6503 	uint32_t shdr_status, shdr_add_status;
6504 	union lpfc_sli4_cfg_shdr *shdr;
6505 
6506 	if (cnt == 0) {
6507 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6508 				"2518 Requested to send 0 NOP mailbox cmd\n");
6509 		return cnt;
6510 	}
6511 
6512 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6513 	if (!mboxq) {
6514 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6515 				"2519 Unable to allocate memory for issuing "
6516 				"NOP mailbox command\n");
6517 		return 0;
6518 	}
6519 
6520 	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6521 	length = (sizeof(struct lpfc_mbx_nop) -
6522 		  sizeof(struct lpfc_sli4_cfg_mhdr));
6523 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6524 			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6525 
6526 	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6527 	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6528 		if (!phba->sli4_hba.intr_enable)
6529 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6530 		else
6531 			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6532 		if (rc == MBX_TIMEOUT)
6533 			break;
6534 		/* Check return status */
6535 		shdr = (union lpfc_sli4_cfg_shdr *)
6536 			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6537 		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6538 		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6539 					 &shdr->response);
6540 		if (shdr_status || shdr_add_status || rc) {
6541 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6542 					"2520 NOP mailbox command failed "
6543 					"status x%x add_status x%x mbx "
6544 					"status x%x\n", shdr_status,
6545 					shdr_add_status, rc);
6546 			break;
6547 		}
6548 	}
6549 
6550 	if (rc != MBX_TIMEOUT)
6551 		mempool_free(mboxq, phba->mbox_mem_pool);
6552 
6553 	return cmdsent;
6554 }
6555 
6556 /**
6557  * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
6558  * @phba: pointer to lpfc hba data structure.
6559  * @fcfi: fcf index.
6560  *
6561  * This routine is invoked to unregister an FCFI from the device.
6562  **/
6563 void
6564 lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6565 {
6566 	LPFC_MBOXQ_t *mbox;
6567 	uint32_t mbox_tmo;
6568 	int rc;
6569 	unsigned long flags;
6570 
6571 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6572 
6573 	if (!mbox)
6574 		return;
6575 
6576 	lpfc_unreg_fcfi(mbox, fcfi);
6577 
6578 	if (!phba->sli4_hba.intr_enable)
6579 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6580 	else {
6581 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6582 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6583 	}
6584 	if (rc != MBX_TIMEOUT)
6585 		mempool_free(mbox, phba->mbox_mem_pool);
6586 	if (rc != MBX_SUCCESS)
6587 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6588 				"2517 Unregister FCFI command failed "
6589 				"status %d, mbxStatus x%x\n", rc,
6590 				bf_get(lpfc_mqe_status, &mbox->u.mqe));
6591 	else {
6592 		spin_lock_irqsave(&phba->hbalock, flags);
6593 		/* Mark the FCFI as no longer registered */
6594 		phba->fcf.fcf_flag &=
6595 			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6596 		spin_unlock_irqrestore(&phba->hbalock, flags);
6597 	}
6598 }
6599 
6600 /**
6601  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6602  * @phba: pointer to lpfc hba data structure.
6603  *
6604  * This routine is invoked to set up the PCI device memory space for device
6605  * with SLI-4 interface spec.
6606  *
6607  * Return codes
6608  * 	0 - successful
6609  * 	other values - error
6610  **/
6611 static int
6612 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6613 {
6614 	struct pci_dev *pdev;
6615 	unsigned long bar0map_len, bar1map_len, bar2map_len;
6616 	int error = -ENODEV;
6617 
6618 	/* Obtain PCI device reference */
6619 	if (!phba->pcidev)
6620 		return error;
6621 	else
6622 		pdev = phba->pcidev;
6623 
6624 	/* Set the device DMA mask: prefer 64-bit, fall back to 32-bit */
6625 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6626 	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6627 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6628 		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6629 			return error;
6630 		}
6631 	}
6632 
6633 	/* Get the bus addresses of the SLI4 device BAR0, BAR1, and BAR2 and
6634 	 * the number of bytes required by each mapping. They actually map to
6635 	 * PCI BAR regions 0 (or 1), 2, and 4 of the SLI4 device.
6636 	 */
6637 	if (pci_resource_start(pdev, 0)) {
6638 		phba->pci_bar0_map = pci_resource_start(pdev, 0);
6639 		bar0map_len = pci_resource_len(pdev, 0);
6640 	} else {
6641 		phba->pci_bar0_map = pci_resource_start(pdev, 1);
6642 		bar0map_len = pci_resource_len(pdev, 1);
6643 	}
6644 	phba->pci_bar1_map = pci_resource_start(pdev, 2);
6645 	bar1map_len = pci_resource_len(pdev, 2);
6646 
6647 	phba->pci_bar2_map = pci_resource_start(pdev, 4);
6648 	bar2map_len = pci_resource_len(pdev, 4);
6649 
6650 	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6651 	phba->sli4_hba.conf_regs_memmap_p =
6652 				ioremap(phba->pci_bar0_map, bar0map_len);
6653 	if (!phba->sli4_hba.conf_regs_memmap_p) {
6654 		dev_printk(KERN_ERR, &pdev->dev,
6655 			   "ioremap failed for SLI4 PCI config registers.\n");
6656 		goto out;
6657 	}
6658 
6659 	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6660 	phba->sli4_hba.ctrl_regs_memmap_p =
6661 				ioremap(phba->pci_bar1_map, bar1map_len);
6662 	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6663 		dev_printk(KERN_ERR, &pdev->dev,
6664 			   "ioremap failed for SLI4 HBA control registers.\n");
6665 		goto out_iounmap_conf;
6666 	}
6667 
6668 	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6669 	phba->sli4_hba.drbl_regs_memmap_p =
6670 				ioremap(phba->pci_bar2_map, bar2map_len);
6671 	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6672 		dev_printk(KERN_ERR, &pdev->dev,
6673 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6674 		goto out_iounmap_ctrl;
6675 	}
6676 
6677 	/* Set up BAR0 PCI config space register memory map */
6678 	lpfc_sli4_bar0_register_memmap(phba);
6679 
6680 	/* Set up BAR1 register memory map */
6681 	lpfc_sli4_bar1_register_memmap(phba);
6682 
6683 	/* Set up BAR2 register memory map */
6684 	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6685 	if (error)
6686 		goto out_iounmap_all;
6687 
6688 	return 0;
6689 
6690 out_iounmap_all:
6691 	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6692 out_iounmap_ctrl:
6693 	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6694 out_iounmap_conf:
6695 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6696 out:
6697 	return error;
6698 }
6699 
6700 /**
6701  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6702  * @phba: pointer to lpfc hba data structure.
6703  *
6704  * This routine is invoked to unset the PCI device memory space for device
6705  * with SLI-4 interface spec.
6706  **/
6707 static void
6708 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6709 {
6710 	struct pci_dev *pdev;
6711 
6712 	/* Obtain PCI device reference */
6713 	if (!phba->pcidev)
6714 		return;
6715 	else
6716 		pdev = phba->pcidev;
6717 
6720 	/* Unmap I/O memory space */
6721 	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6722 	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6723 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6724 
6725 	return;
6726 }
6727 
6728 /**
6729  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6730  * @phba: pointer to lpfc hba data structure.
6731  *
6732  * This routine is invoked to enable the MSI-X interrupt vectors to device
6733  * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6734  * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6735  * invoked, enables either all or nothing, depending on the current
6736  * availability of PCI vector resources. The device driver is responsible
6737  * for calling the individual request_irq() to register each MSI-X vector
6738  * with an interrupt handler, which is done in this function. Note that
6739  * later, when the device is unloading, the driver should always call
6740  * free_irq() on all MSI-X vectors it has done request_irq() on before
6741  * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
6742  * the device will be left with MSI-X enabled and its vectors will leak.
6743  *
6744  * Return codes
6745  *   0 - successful
6746  *   other values - error
6747  **/
6748 static int
6749 lpfc_sli_enable_msix(struct lpfc_hba *phba)
6750 {
6751 	int rc, i;
6752 	LPFC_MBOXQ_t *pmb;
6753 
6754 	/* Set up MSI-X multi-message vectors */
6755 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6756 		phba->msix_entries[i].entry = i;
6757 
6758 	/* Configure MSI-X capability structure */
6759 	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6760 				ARRAY_SIZE(phba->msix_entries));
6761 	if (rc) {
6762 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6763 				"0420 PCI enable MSI-X failed (%d)\n", rc);
6764 		goto msi_fail_out;
6765 	}
6766 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6767 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6768 				"0477 MSI-X entry[%d]: vector=x%x "
6769 				"message=%d\n", i,
6770 				phba->msix_entries[i].vector,
6771 				phba->msix_entries[i].entry);
6772 	/*
6773 	 * Assign MSI-X vectors to interrupt handlers
6774 	 */
6775 
6776 	/* vector-0 is associated to slow-path handler */
6777 	rc = request_irq(phba->msix_entries[0].vector,
6778 			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6779 			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6780 	if (rc) {
6781 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6782 				"0421 MSI-X slow-path request_irq failed "
6783 				"(%d)\n", rc);
6784 		goto msi_fail_out;
6785 	}
6786 
6787 	/* vector-1 is associated to fast-path handler */
6788 	rc = request_irq(phba->msix_entries[1].vector,
6789 			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6790 			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6791 
6792 	if (rc) {
6793 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6794 				"0429 MSI-X fast-path request_irq failed "
6795 				"(%d)\n", rc);
6796 		goto irq_fail_out;
6797 	}
6798 
6799 	/*
6800 	 * Configure HBA MSI-X attention conditions to messages
6801 	 */
6802 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6803 
6804 	if (!pmb) {
6805 		rc = -ENOMEM;
6806 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6807 				"0474 Unable to allocate memory for issuing "
6808 				"MBOX_CONFIG_MSI command\n");
6809 		goto mem_fail_out;
6810 	}
6811 	rc = lpfc_config_msi(phba, pmb);
6812 	if (rc)
6813 		goto mbx_fail_out;
6814 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6815 	if (rc != MBX_SUCCESS) {
6816 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6817 				"0351 Config MSI mailbox command failed, "
6818 				"mbxCmd x%x, mbxStatus x%x\n",
6819 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6820 		goto mbx_fail_out;
6821 	}
6822 
6823 	/* Free memory allocated for mailbox command */
6824 	mempool_free(pmb, phba->mbox_mem_pool);
6825 	return rc;
6826 
6827 mbx_fail_out:
6828 	/* Free memory allocated for mailbox command */
6829 	mempool_free(pmb, phba->mbox_mem_pool);
6830 
6831 mem_fail_out:
6832 	/* free the irq already requested */
6833 	free_irq(phba->msix_entries[1].vector, phba);
6834 
6835 irq_fail_out:
6836 	/* free the irq already requested */
6837 	free_irq(phba->msix_entries[0].vector, phba);
6838 
6839 msi_fail_out:
6840 	/* Unconfigure MSI-X capability structure */
6841 	pci_disable_msix(phba->pcidev);
6842 	return rc;
6843 }
6844 
6845 /**
6846  * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6847  * @phba: pointer to lpfc hba data structure.
6848  *
6849  * This routine is invoked to release the MSI-X vectors and then disable the
6850  * MSI-X interrupt mode to device with SLI-3 interface spec.
6851  **/
6852 static void
6853 lpfc_sli_disable_msix(struct lpfc_hba *phba)
6854 {
6855 	int i;
6856 
6857 	/* Free up MSI-X multi-message vectors */
6858 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6859 		free_irq(phba->msix_entries[i].vector, phba);
6860 	/* Disable MSI-X */
6861 	pci_disable_msix(phba->pcidev);
6862 
6863 	return;
6864 }
6865 
6866 /**
6867  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6868  * @phba: pointer to lpfc hba data structure.
6869  *
6870  * This routine is invoked to enable the MSI interrupt mode to device with
6871  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6872  * enable the MSI vector. The device driver is responsible for calling
6873  * request_irq() to register the MSI vector with an interrupt handler,
6874  * which is done in this function.
6875  *
6876  * Return codes
6877  * 	0 - successful
6878  * 	other values - error
6879  */
6880 static int
6881 lpfc_sli_enable_msi(struct lpfc_hba *phba)
6882 {
6883 	int rc;
6884 
6885 	rc = pci_enable_msi(phba->pcidev);
6886 	if (!rc)
6887 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6888 				"0462 PCI enable MSI mode success.\n");
6889 	else {
6890 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6891 				"0471 PCI enable MSI mode failed (%d)\n", rc);
6892 		return rc;
6893 	}
6894 
6895 	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6896 			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6897 	if (rc) {
6898 		pci_disable_msi(phba->pcidev);
6899 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6900 				"0478 MSI request_irq failed (%d)\n", rc);
6901 	}
6902 	return rc;
6903 }
6904 
6905 /**
6906  * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6907  * @phba: pointer to lpfc hba data structure.
6908  *
6909  * This routine is invoked to disable the MSI interrupt mode to device with
6910  * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6911  * done request_irq() on before calling pci_disable_msi(). Failure to do so
6912  * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6913  * its vector.
6914  */
6915 static void
6916 lpfc_sli_disable_msi(struct lpfc_hba *phba)
6917 {
6918 	free_irq(phba->pcidev->irq, phba);
6919 	pci_disable_msi(phba->pcidev);
6920 	return;
6921 }
6922 
6923 /**
6924  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6925  * @phba: pointer to lpfc hba data structure.
6926  *
6927  * This routine is invoked to enable device interrupt and associate driver's
6928  * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6929  * spec. Depending on the interrupt mode configured for the driver, the
6930  * driver will try to fall back from the configured interrupt mode to an
6931  * interrupt mode which is supported by the platform, kernel, and device,
6932  * in the order:
6933  * MSI-X -> MSI -> IRQ.
6934  *
6935  * Return codes
6936  *   0 - successful
6937  *   other values - error
6938  **/
6939 static uint32_t
6940 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6941 {
6942 	uint32_t intr_mode = LPFC_INTR_ERROR;
6943 	int retval;
6944 
6945 	if (cfg_mode == 2) {
6946 		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6947 		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6948 		if (!retval) {
6949 			/* Now, try to enable MSI-X interrupt mode */
6950 			retval = lpfc_sli_enable_msix(phba);
6951 			if (!retval) {
6952 				/* Indicate initialization to MSI-X mode */
6953 				phba->intr_type = MSIX;
6954 				intr_mode = 2;
6955 			}
6956 		}
6957 	}
6958 
6959 	/* Fallback to MSI if MSI-X initialization failed */
6960 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6961 		retval = lpfc_sli_enable_msi(phba);
6962 		if (!retval) {
6963 			/* Indicate initialization to MSI mode */
6964 			phba->intr_type = MSI;
6965 			intr_mode = 1;
6966 		}
6967 	}
6968 
6969 	/* Fallback to INTx if both MSI-X/MSI initialization failed */
6970 	if (phba->intr_type == NONE) {
6971 		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6972 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6973 		if (!retval) {
6974 			/* Indicate initialization to INTx mode */
6975 			phba->intr_type = INTx;
6976 			intr_mode = 0;
6977 		}
6978 	}
6979 	return intr_mode;
6980 }
6981 
6982 /**
6983  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6984  * @phba: pointer to lpfc hba data structure.
6985  *
6986  * This routine is invoked to disable device interrupt and disassociate the
6987  * driver's interrupt handler(s) from interrupt vector(s) to device with
6988  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6989  * release the interrupt vector(s) for the message signaled interrupt.
6990  **/
6991 static void
6992 lpfc_sli_disable_intr(struct lpfc_hba *phba)
6993 {
6994 	/* Disable the currently initialized interrupt mode */
6995 	if (phba->intr_type == MSIX)
6996 		lpfc_sli_disable_msix(phba);
6997 	else if (phba->intr_type == MSI)
6998 		lpfc_sli_disable_msi(phba);
6999 	else if (phba->intr_type == INTx)
7000 		free_irq(phba->pcidev->irq, phba);
7001 
7002 	/* Reset interrupt management states */
7003 	phba->intr_type = NONE;
7004 	phba->sli.slistat.sli_intr = 0;
7005 
7006 	return;
7007 }
7008 
7009 /**
7010  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7011  * @phba: pointer to lpfc hba data structure.
7012  *
7013  * This routine is invoked to enable the MSI-X interrupt vectors to device
7014  * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7015  * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7016  * enables either all or nothing, depending on the current availability of
7017  * PCI vector resources. The device driver is responsible for calling the
7018  * individual request_irq() to register each MSI-X vector with an interrupt
7019  * handler, which is done in this function. Note that later, when the device
7020  * is unloading, the driver should always call free_irq() on all MSI-X
7021  * vectors it has done request_irq() on before calling pci_disable_msix().
7022  * Failure to do so results in a BUG_ON() and the device will be left with
7023  * MSI-X enabled and its vectors will leak.
7024  *
7025  * Return codes
7026  * 0 - successful
7027  * other values - error
7028  **/
7029 static int
7030 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7031 {
7032 	int vectors, rc, index;
7033 
7034 	/* Set up MSI-X multi-message vectors */
7035 	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7036 		phba->sli4_hba.msix_entries[index].entry = index;
7037 
7038 	/* Configure MSI-X capability structure */
7039 	vectors = phba->sli4_hba.cfg_eqn;
7040 enable_msix_vectors:
7041 	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7042 			     vectors);
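	/* A positive return value is the number of MSI-X vectors actually
	 * available; retry the allocation with that reduced count.
	 */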
7043 	if (rc > 1) {
7044 		vectors = rc;
7045 		goto enable_msix_vectors;
7046 	} else if (rc) {
7047 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7048 				"0484 PCI enable MSI-X failed (%d)\n", rc);
7049 		goto msi_fail_out;
7050 	}
7051 
7052 	/* Log MSI-X vector assignment */
7053 	for (index = 0; index < vectors; index++)
7054 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7055 				"0489 MSI-X entry[%d]: vector=x%x "
7056 				"message=%d\n", index,
7057 				phba->sli4_hba.msix_entries[index].vector,
7058 				phba->sli4_hba.msix_entries[index].entry);
7059 	/*
7060 	 * Assign MSI-X vectors to interrupt handlers
7061 	 */
7062 
7063 	/* The first vector must be associated with the slow-path handler for MQ */
7064 	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7065 			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7066 			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7067 	if (rc) {
7068 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7069 				"0485 MSI-X slow-path request_irq failed "
7070 				"(%d)\n", rc);
7071 		goto msi_fail_out;
7072 	}
7073 
7074 	/* The rest of the vector(s) are associated to fast-path handler(s) */
7075 	for (index = 1; index < vectors; index++) {
7076 		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7077 		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7078 		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7079 				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7080 				 LPFC_FP_DRIVER_HANDLER_NAME,
7081 				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7082 		if (rc) {
7083 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7084 					"0486 MSI-X fast-path (%d) "
7085 					"request_irq failed (%d)\n", index, rc);
7086 			goto cfg_fail_out;
7087 		}
7088 	}
7089 	phba->sli4_hba.msix_vec_nr = vectors;
7090 
7091 	return rc;
7092 
7093 cfg_fail_out:
7094 	/* free the irq already requested */
7095 	for (--index; index >= 1; index--)
7096 		free_irq(phba->sli4_hba.msix_entries[index].vector,
7097 			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7098 
7099 	/* free the irq already requested */
7100 	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7101 
7102 msi_fail_out:
7103 	/* Unconfigure MSI-X capability structure */
7104 	pci_disable_msix(phba->pcidev);
7105 	return rc;
7106 }
7107 
7108 /**
7109  * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7110  * @phba: pointer to lpfc hba data structure.
7111  *
7112  * This routine is invoked to release the MSI-X vectors and then disable the
7113  * MSI-X interrupt mode to device with SLI-4 interface spec.
7114  **/
7115 static void
7116 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7117 {
7118 	int index;
7119 
7120 	/* Free up MSI-X multi-message vectors */
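	/* (vector 0 was registered with phba as its dev_id; the remaining
	 *  vectors were registered with their per-EQ fcp_eq_hdl entries)
	 */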
7121 	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7122 
7123 	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7124 		free_irq(phba->sli4_hba.msix_entries[index].vector,
7125 			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7126 
7127 	/* Disable MSI-X */
7128 	pci_disable_msix(phba->pcidev);
7129 
7130 	return;
7131 }
7132 
7133 /**
7134  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7135  * @phba: pointer to lpfc hba data structure.
7136  *
7137  * This routine is invoked to enable the MSI interrupt mode to device with
7138  * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7139  * to enable the MSI vector. The device driver is responsible for calling
7140  * request_irq() to register the MSI vector with an interrupt handler,
7141  * which is done in this function.
7142  *
7143  * Return codes
7144  * 	0 - successful
7145  * 	other values - error
7146  **/
7147 static int
7148 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7149 {
7150 	int rc, index;
7151 
7152 	rc = pci_enable_msi(phba->pcidev);
7153 	if (!rc)
7154 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7155 				"0487 PCI enable MSI mode success.\n");
7156 	else {
7157 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7158 				"0488 PCI enable MSI mode failed (%d)\n", rc);
7159 		return rc;
7160 	}
7161 
7162 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7163 			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7164 	if (rc) {
7165 		pci_disable_msi(phba->pcidev);
7166 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7167 				"0490 MSI request_irq failed (%d)\n", rc);
7168 		return rc;
7169 	}
7170 
7171 	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
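	/* With a single MSI vector, all fast-path EQ handles share the same
	 * interrupt; just record their indices and owning HBA.
	 */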
7172 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7173 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7174 	}
7175 
7176 	return 0;
7177 }
7178 
7179 /**
7180  * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7181  * @phba: pointer to lpfc hba data structure.
7182  *
7183  * This routine is invoked to disable the MSI interrupt mode to device with
7184  * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
7185  * done request_irq() on before calling pci_disable_msi(). Failure to do so
7186  * results in a BUG_ON() and a device will be left with MSI enabled and leaks
7187  * its vector.
7188  **/
7189 static void
7190 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7191 {
7192 	free_irq(phba->pcidev->irq, phba);
7193 	pci_disable_msi(phba->pcidev);
7194 	return;
7195 }
7196 
7197 /**
7198  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7199  * @phba: pointer to lpfc hba data structure.
7200  *
7201  * This routine is invoked to enable device interrupt and associate driver's
7202  * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7203  * interface spec. Depending on the interrupt mode configured for the
7204  * driver, the driver will try to fall back from the configured interrupt
7205  * mode to an interrupt mode which is supported by the platform, kernel,
7206  * and device, in the order:
7207  * MSI-X -> MSI -> IRQ.
7208  *
7209  * Return codes
7210  * 	0 - successful
7211  * 	other values - error
7212  **/
7213 static uint32_t
7214 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7215 {
7216 	uint32_t intr_mode = LPFC_INTR_ERROR;
7217 	int retval, index;
7218 
7219 	if (cfg_mode == 2) {
7220 		/* No conf_port preparation is needed before MSI-X on SLI-4 */
7221 		retval = 0;
7222 		if (!retval) {
7223 			/* Now, try to enable MSI-X interrupt mode */
7224 			retval = lpfc_sli4_enable_msix(phba);
7225 			if (!retval) {
7226 				/* Indicate initialization to MSI-X mode */
7227 				phba->intr_type = MSIX;
7228 				intr_mode = 2;
7229 			}
7230 		}
7231 	}
7232 
7233 	/* Fallback to MSI if MSI-X initialization failed */
7234 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7235 		retval = lpfc_sli4_enable_msi(phba);
7236 		if (!retval) {
7237 			/* Indicate initialization to MSI mode */
7238 			phba->intr_type = MSI;
7239 			intr_mode = 1;
7240 		}
7241 	}
7242 
7243 	/* Fallback to INTx if both MSI-X/MSI initialization failed */
7244 	if (phba->intr_type == NONE) {
7245 		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7246 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7247 		if (!retval) {
7248 			/* Indicate initialization to INTx mode */
7249 			phba->intr_type = INTx;
7250 			intr_mode = 0;
7251 			for (index = 0; index < phba->cfg_fcp_eq_count;
7252 			     index++) {
7253 				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7254 				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7255 			}
7256 		}
7257 	}
7258 	return intr_mode;
7259 }
7260 
7261 /**
7262  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7263  * @phba: pointer to lpfc hba data structure.
7264  *
7265  * This routine is invoked to disable device interrupt and disassociate
7266  * the driver's interrupt handler(s) from interrupt vector(s) to device
7267  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7268  * will release the interrupt vector(s) for the message signaled interrupt.
7269  **/
7270 static void
7271 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7272 {
7273 	/* Disable the currently initialized interrupt mode */
7274 	if (phba->intr_type == MSIX)
7275 		lpfc_sli4_disable_msix(phba);
7276 	else if (phba->intr_type == MSI)
7277 		lpfc_sli4_disable_msi(phba);
7278 	else if (phba->intr_type == INTx)
7279 		free_irq(phba->pcidev->irq, phba);
7280 
7281 	/* Reset interrupt management states */
7282 	phba->intr_type = NONE;
7283 	phba->sli.slistat.sli_intr = 0;
7284 
7285 	return;
7286 }
7287 
7288 /**
7289  * lpfc_unset_hba - Unset SLI3 hba device initialization
7290  * @phba: pointer to lpfc hba data structure.
7291  *
7292  * This routine is invoked to unset the HBA device initialization steps to
7293  * a device with SLI-3 interface spec.
7294  **/
7295 static void
7296 lpfc_unset_hba(struct lpfc_hba *phba)
7297 {
7298 	struct lpfc_vport *vport = phba->pport;
7299 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7300 
7301 	spin_lock_irq(shost->host_lock);
7302 	vport->load_flag |= FC_UNLOADING;
7303 	spin_unlock_irq(shost->host_lock);
7304 
7305 	lpfc_stop_hba_timers(phba);
7306 
7307 	phba->pport->work_port_events = 0;
7308 
7309 	lpfc_sli_hba_down(phba);
7310 
7311 	lpfc_sli_brdrestart(phba);
7312 
7313 	lpfc_sli_disable_intr(phba);
7314 
7315 	return;
7316 }
7317 
7318 /**
7319  * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7320  * @phba: pointer to lpfc hba data structure.
7321  *
7322  * This routine is invoked to unset the HBA device initialization steps to
7323  * a device with SLI-4 interface spec.
7324  **/
7325 static void
7326 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7327 {
7328 	struct lpfc_vport *vport = phba->pport;
7329 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7330 
7331 	spin_lock_irq(shost->host_lock);
7332 	vport->load_flag |= FC_UNLOADING;
7333 	spin_unlock_irq(shost->host_lock);
7334 
7335 	phba->pport->work_port_events = 0;
7336 
7337 	lpfc_sli4_hba_down(phba);
7338 
7339 	lpfc_sli4_disable_intr(phba);
7340 
7341 	return;
7342 }
7343 
7344 /**
7345  * lpfc_sli4_hba_unset - Unset the fcoe hba
7346  * @phba: Pointer to HBA context object.
7347  *
7348  * This function is called in the SLI4 code path to reset the HBA's FCoE
7349  * function. The caller is not required to hold any lock. This routine
7350  * issues PCI function reset mailbox command to reset the FCoE function.
7351  * At the end of the function, it calls lpfc_hba_down_post function to
7352  * free any pending commands.
7353  **/
7354 static void
7355 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7356 {
7357 	int wait_cnt = 0;
7358 	LPFC_MBOXQ_t *mboxq;
7359 
7360 	lpfc_stop_hba_timers(phba);
7361 	phba->sli4_hba.intr_enable = 0;
7362 
7363 	/*
7364 	 * Gracefully wait out any currently outstanding asynchronous
7365 	 * mailbox command.
7366 	 */
7367 
7368 	/* First, block any pending async mailbox command from being posted */
7369 	spin_lock_irq(&phba->hbalock);
7370 	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7371 	spin_unlock_irq(&phba->hbalock);
7372 	/* Now, try to wait it out if we can */
7373 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7374 		msleep(10);
7375 		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7376 			break;
7377 	}
7378 	/* Forcefully release the outstanding mailbox command if timed out */
7379 	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7380 		spin_lock_irq(&phba->hbalock);
7381 		mboxq = phba->sli.mbox_active;
7382 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7383 		__lpfc_mbox_cmpl_put(phba, mboxq);
7384 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7385 		phba->sli.mbox_active = NULL;
7386 		spin_unlock_irq(&phba->hbalock);
7387 	}
7388 
7389 	/* Tear down the queues in the HBA */
7390 	lpfc_sli4_queue_unset(phba);
7391 
7392 	/* Disable PCI subsystem interrupt */
7393 	lpfc_sli4_disable_intr(phba);
7394 
7395 	/* Stop the kthread; the stop signal triggers work_done one more time */
7396 	kthread_stop(phba->worker_thread);
7397 
7398 	/* Stop the SLI4 device port */
7399 	phba->pport->work_port_events = 0;
7400 }
7401 
7402 /**
7403  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7404  * @phba: Pointer to HBA context object.
7405  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7406  *
7407  * This function is called in the SLI4 code path to read the port's
7408  * sli4 capabilities.
7409  *
7410  * This function may be called from any context that can block-wait
7411  * for the completion.  The expectation is that this routine is called
7412  * typically from probe_one or from the online routine.
7413  **/
7414 int
7415 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7416 {
7417 	int rc;
7418 	struct lpfc_mqe *mqe;
7419 	struct lpfc_pc_sli4_params *sli4_params;
7420 	uint32_t mbox_tmo;
7421 
7422 	rc = 0;
7423 	mqe = &mboxq->u.mqe;
7424 
7425 	/* Read the port's SLI4 Parameters port capabilities */
7426 	lpfc_sli4_params(mboxq);
7427 	if (!phba->sli4_hba.intr_enable)
7428 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7429 	else {
7430 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7431 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7432 	}
7433 
7434 	if (unlikely(rc))
7435 		return 1;
7436 
7437 	sli4_params = &phba->sli4_hba.pc_sli4_params;
7438 	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7439 	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7440 	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7441 	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7442 					     &mqe->un.sli4_params);
7443 	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7444 					     &mqe->un.sli4_params);
7445 	sli4_params->proto_types = mqe->un.sli4_params.word3;
7446 	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7447 	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7448 	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7449 	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7450 	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7451 	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7452 	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7453 	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7454 	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7455 	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7456 	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7457 	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7458 	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7459 	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7460 	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7461 	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7462 	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7463 	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7464 	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7465 	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7466 	return rc;
7467 }
7468 
7469 /**
7470  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7471  * @pdev: pointer to PCI device
7472  * @pid: pointer to PCI device identifier
7473  *
7474  * This routine is to be called to attach a device with SLI-3 interface spec
7475  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7476  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7477  * information of the device and driver to see if the driver states that it can
7478  * support this kind of device. If the match is successful, the driver core
7479  * invokes this routine. If this routine determines it can claim the HBA, it
7480  * does all the initialization that it needs to do to handle the HBA properly.
7481  *
7482  * Return code
7483  * 	0 - driver can claim the device
7484  * 	negative value - driver can not claim the device
7485  **/
7486 static int __devinit
7487 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7488 {
7489 	struct lpfc_hba   *phba;
7490 	struct lpfc_vport *vport = NULL;
7491 	struct Scsi_Host  *shost = NULL;
7492 	int error;
7493 	uint32_t cfg_mode, intr_mode;
7494 
7495 	/* Allocate memory for HBA structure */
7496 	phba = lpfc_hba_alloc(pdev);
7497 	if (!phba)
7498 		return -ENOMEM;
7499 
7500 	/* Perform generic PCI device enabling operation */
7501 	error = lpfc_enable_pci_dev(phba);
7502 	if (error) {
7503 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7504 				"1401 Failed to enable pci device.\n");
7505 		goto out_free_phba;
7506 	}
7507 
7508 	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
7509 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7510 	if (error)
7511 		goto out_disable_pci_dev;
7512 
7513 	/* Set up SLI-3 specific device PCI memory space */
7514 	error = lpfc_sli_pci_mem_setup(phba);
7515 	if (error) {
7516 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7517 				"1402 Failed to set up pci memory space.\n");
7518 		goto out_disable_pci_dev;
7519 	}
7520 
7521 	/* Set up phase-1 common device driver resources */
7522 	error = lpfc_setup_driver_resource_phase1(phba);
7523 	if (error) {
7524 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7525 				"1403 Failed to set up driver resource.\n");
7526 		goto out_unset_pci_mem_s3;
7527 	}
7528 
7529 	/* Set up SLI-3 specific device driver resources */
7530 	error = lpfc_sli_driver_resource_setup(phba);
7531 	if (error) {
7532 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7533 				"1404 Failed to set up driver resource.\n");
7534 		goto out_unset_pci_mem_s3;
7535 	}
7536 
7537 	/* Initialize and populate the iocb list per host */
7538 	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7539 	if (error) {
7540 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7541 				"1405 Failed to initialize iocb list.\n");
7542 		goto out_unset_driver_resource_s3;
7543 	}
7544 
7545 	/* Set up common device driver resources */
7546 	error = lpfc_setup_driver_resource_phase2(phba);
7547 	if (error) {
7548 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7549 				"1406 Failed to set up driver resource.\n");
7550 		goto out_free_iocb_list;
7551 	}
7552 
7553 	/* Create SCSI host to the physical port */
7554 	error = lpfc_create_shost(phba);
7555 	if (error) {
7556 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7557 				"1407 Failed to create scsi host.\n");
7558 		goto out_unset_driver_resource;
7559 	}
7560 
7561 	/* Configure sysfs attributes */
7562 	vport = phba->pport;
7563 	error = lpfc_alloc_sysfs_attr(vport);
7564 	if (error) {
7565 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7566 				"1476 Failed to allocate sysfs attr\n");
7567 		goto out_destroy_shost;
7568 	}
7569 
7570 	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7571 	/* Now, try to enable interrupt and bring up the device */
7572 	cfg_mode = phba->cfg_use_msi;
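	/* Interrupt mode encoding: 2 = MSI-X, 1 = MSI, 0 = INTx */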
7573 	while (true) {
7574 		/* Put device to a known state before enabling interrupt */
7575 		lpfc_stop_port(phba);
7576 		/* Configure and enable interrupt */
7577 		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7578 		if (intr_mode == LPFC_INTR_ERROR) {
7579 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7580 					"0431 Failed to enable interrupt.\n");
7581 			error = -ENODEV;
7582 			goto out_free_sysfs_attr;
7583 		}
7584 		/* SLI-3 HBA setup */
7585 		if (lpfc_sli_hba_setup(phba)) {
7586 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7587 					"1477 Failed to set up hba\n");
7588 			error = -ENODEV;
7589 			goto out_remove_device;
7590 		}
7591 
7592 		/* Wait 50ms for the interrupts of previous mailbox commands */
7593 		msleep(50);
7594 		/* Check active interrupts on message signaled interrupts */
7595 		if (intr_mode == 0 ||
7596 		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7597 			/* Log the current active interrupt mode */
7598 			phba->intr_mode = intr_mode;
7599 			lpfc_log_intr_mode(phba, intr_mode);
7600 			break;
7601 		} else {
7602 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7603 					"0447 Configure interrupt mode (%d) "
7604 					"failed active interrupt test.\n",
7605 					intr_mode);
7606 			/* Disable the current interrupt mode */
7607 			lpfc_sli_disable_intr(phba);
7608 			/* Try next level of interrupt mode */
7609 			cfg_mode = --intr_mode;
7610 		}
7611 	}
7612 
7613 	/* Perform post initialization setup */
7614 	lpfc_post_init_setup(phba);
7615 
7616 	/* Check if there are static vports to be created. */
7617 	lpfc_create_static_vport(phba);
7618 
7619 	return 0;
7620 
7621 out_remove_device:
7622 	lpfc_unset_hba(phba);
7623 out_free_sysfs_attr:
7624 	lpfc_free_sysfs_attr(vport);
7625 out_destroy_shost:
7626 	lpfc_destroy_shost(phba);
7627 out_unset_driver_resource:
7628 	lpfc_unset_driver_resource_phase2(phba);
7629 out_free_iocb_list:
7630 	lpfc_free_iocb_list(phba);
7631 out_unset_driver_resource_s3:
7632 	lpfc_sli_driver_resource_unset(phba);
7633 out_unset_pci_mem_s3:
7634 	lpfc_sli_pci_mem_unset(phba);
7635 out_disable_pci_dev:
7636 	lpfc_disable_pci_dev(phba);
7637 	if (shost)
7638 		scsi_host_put(shost);
7639 out_free_phba:
7640 	lpfc_hba_free(phba);
7641 	return error;
7642 }
7643 
7644 /**
7645  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7646  * @pdev: pointer to PCI device
7647  *
7648  * This routine is to be called to detach a device with SLI-3 interface
7649  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7650  * removed from PCI bus, it performs all the necessary cleanup for the HBA
7651  * device to be removed from the PCI subsystem properly.
7652  **/
7653 static void __devexit
7654 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7655 {
7656 	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
7657 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7658 	struct lpfc_vport **vports;
7659 	struct lpfc_hba   *phba = vport->phba;
7660 	int i;
7661 	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7662 
7663 	spin_lock_irq(&phba->hbalock);
7664 	vport->load_flag |= FC_UNLOADING;
7665 	spin_unlock_irq(&phba->hbalock);
7666 
7667 	lpfc_free_sysfs_attr(vport);
7668 
7669 	/* Release all the vports against this physical port */
7670 	vports = lpfc_create_vport_work_array(phba);
7671 	if (vports != NULL)
7672 		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7673 			fc_vport_terminate(vports[i]->fc_vport);
7674 	lpfc_destroy_vport_work_array(phba, vports);
7675 
7676 	/* Remove FC host and then SCSI host with the physical port */
7677 	fc_remove_host(shost);
7678 	scsi_remove_host(shost);
7679 	lpfc_cleanup(vport);
7680 
7681 	/*
7682 	 * Bring down the SLI Layer. This step disables all interrupts,
7683 	 * clears the rings, discards all mailbox commands, and resets
7684 	 * the HBA.
7685 	 */
7686 
7687 	/* HBA interrupt will be disabled after this call */
7688 	lpfc_sli_hba_down(phba);
7689 	/* Stop the kthread; the stop signal triggers work_done one more time */
7690 	kthread_stop(phba->worker_thread);
7691 	/* Final cleanup of txcmplq and reset the HBA */
7692 	lpfc_sli_brdrestart(phba);
7693 
7694 	lpfc_stop_hba_timers(phba);
7695 	spin_lock_irq(&phba->hbalock);
7696 	list_del_init(&vport->listentry);
7697 	spin_unlock_irq(&phba->hbalock);
7698 
7699 	lpfc_debugfs_terminate(vport);
7700 
7701 	/* Disable interrupt */
7702 	lpfc_sli_disable_intr(phba);
7703 
7704 	pci_set_drvdata(pdev, NULL);
7705 	scsi_host_put(shost);
7706 
7707 	/*
7708 	 * Call scsi_free before mem_free since scsi bufs are released to their
7709 	 * corresponding pools here.
7710 	 */
7711 	lpfc_scsi_free(phba);
7712 	lpfc_mem_free_all(phba);
7713 
7714 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7715 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
7716 
7717 	/* Free resources associated with SLI2 interface */
7718 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7719 			  phba->slim2p.virt, phba->slim2p.phys);
7720 
7721 	/* unmap adapter SLIM and Control Registers */
7722 	iounmap(phba->ctrl_regs_memmap_p);
7723 	iounmap(phba->slim_memmap_p);
7724 
7725 	lpfc_hba_free(phba);
7726 
7727 	pci_release_selected_regions(pdev, bars);
7728 	pci_disable_device(pdev);
7729 }
7730 
7731 /**
7732  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7733  * @pdev: pointer to PCI device
7734  * @msg: power management message
7735  *
7736  * This routine is to be called from the kernel's PCI subsystem to support
7737  * system Power Management (PM) to device with SLI-3 interface spec. When
7738  * PM invokes this method, it quiesces the device by stopping the driver's
7739  * worker thread for the device, turning off the device's interrupt and
7740  * DMA, and bringing the device offline. Note that the driver implements
7741  * only the minimum PM requirements for a power-aware driver: all possible
7742  * PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method
7743  * are treated as SUSPEND, and the driver fully reinitializes its device
7744  * during the resume() method call. Therefore, the driver sets the device
7745  * to the PCI_D3hot state in PCI config space instead of setting it
7746  * according to the @msg provided by the PM.
7747  *
7748  * Return code
7749  * 	0 - driver suspended the device
7750  * 	Error otherwise
7751  **/
7752 static int
7753 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7754 {
7755 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7756 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7757 
7758 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7759 			"0473 PCI device Power Management suspend.\n");
7760 
7761 	/* Bring down the device */
7762 	lpfc_offline_prep(phba);
7763 	lpfc_offline(phba);
7764 	kthread_stop(phba->worker_thread);
7765 
7766 	/* Disable interrupt from device */
7767 	lpfc_sli_disable_intr(phba);
7768 
7769 	/* Save device state to PCI config space */
7770 	pci_save_state(pdev);
7771 	pci_set_power_state(pdev, PCI_D3hot);
7772 
7773 	return 0;
7774 }
7775 
7776 /**
7777  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7778  * @pdev: pointer to PCI device
7779  *
7780  * This routine is to be called from the kernel's PCI subsystem to support
7781  * system Power Management (PM) to device with SLI-3 interface spec. When PM
7782  * invokes this method, it restores the device's PCI config space state and
7783  * fully reinitializes the device and brings it online. Note that the
7784  * driver implements only the minimum PM requirements for a power-aware
7785  * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
7786  * the suspend() method are treated as SUSPEND, and the driver fully
7787  * reinitializes its device during the resume() method call. Therefore,
7788  * the device is set to PCI_D0 directly in PCI config space before
7789  * restoring the state.
7790  *
7791  * Return code
7792  * 	0 - driver resumed the device
7793  * 	Error otherwise
7794  **/
7795 static int
7796 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7797 {
7798 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7799 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7800 	uint32_t intr_mode;
7801 	int error;
7802 
7803 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7804 			"0452 PCI device Power Management resume.\n");
7805 
7806 	/* Restore device state from PCI config space */
7807 	pci_set_power_state(pdev, PCI_D0);
7808 	pci_restore_state(pdev);
7809 
7810 	/*
7811 	 * The new kernel behavior of pci_restore_state() clears the device's
7812 	 * saved_state flag, so the restored state needs to be saved again.
7813 	 */
7814 	pci_save_state(pdev);
7815 
7816 	if (pdev->is_busmaster)
7817 		pci_set_master(pdev);
7818 
7819 	/* Startup the kernel thread for this host adapter. */
7820 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7821 					"lpfc_worker_%d", phba->brd_no);
7822 	if (IS_ERR(phba->worker_thread)) {
7823 		error = PTR_ERR(phba->worker_thread);
7824 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7825 				"0434 PM resume failed to start worker "
7826 				"thread: error=x%x.\n", error);
7827 		return error;
7828 	}
7829 
7830 	/* Configure and enable interrupt */
7831 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7832 	if (intr_mode == LPFC_INTR_ERROR) {
7833 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7834 				"0430 PM resume Failed to enable interrupt\n");
7835 		return -EIO;
7836 	} else
7837 		phba->intr_mode = intr_mode;
7838 
7839 	/* Restart HBA and bring it online */
7840 	lpfc_sli_brdrestart(phba);
7841 	lpfc_online(phba);
7842 
7843 	/* Log the current active interrupt mode */
7844 	lpfc_log_intr_mode(phba, phba->intr_mode);
7845 
7846 	return 0;
7847 }
7848 
7849 /**
7850  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7851  * @phba: pointer to lpfc hba data structure.
7852  *
7853  * This routine is called to prepare the SLI3 device for PCI slot recover. It
7854  * aborts all the outstanding SCSI I/Os to the pci device.
7855  **/
7856 static void
7857 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7858 {
7859 	struct lpfc_sli *psli = &phba->sli;
7860 	struct lpfc_sli_ring  *pring;
7861 
7862 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7863 			"2723 PCI channel I/O abort preparing for recovery\n");
7864 
7865 	/*
7866 	 * There may be errored I/Os through the HBA; abort all I/Os on the
7867 	 * txcmplq and let the SCSI mid-layer retry them to recover.
7868 	 */
7869 	pring = &psli->ring[psli->fcp_ring];
7870 	lpfc_sli_abort_iocb_ring(phba, pring);
7871 }
7872 
7873 /**
7874  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7875  * @phba: pointer to lpfc hba data structure.
7876  *
7877  * This routine is called to prepare the SLI3 device for PCI slot reset. It
7878  * disables the device interrupt and pci device, and aborts the internal FCP
7879  * pending I/Os.
7880  **/
7881 static void
7882 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7883 {
7884 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7885 			"2710 PCI channel disable preparing for reset\n");
7886 
7887 	/* Block any management I/Os to the device */
7888 	lpfc_block_mgmt_io(phba);
7889 
7890 	/* Block all SCSI devices' I/Os on the host */
7891 	lpfc_scsi_dev_block(phba);
7892 
7893 	/* stop all timers */
7894 	lpfc_stop_hba_timers(phba);
7895 
7896 	/* Disable interrupt and pci device */
7897 	lpfc_sli_disable_intr(phba);
7898 	pci_disable_device(phba->pcidev);
7899 
7900 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
7901 	lpfc_sli_flush_fcp_rings(phba);
7902 }
7903 
7904 /**
7905  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7906  * @phba: pointer to lpfc hba data structure.
7907  *
7908  * This routine is called to prepare the SLI3 device for PCI slot permanently
7909  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7910  * pending I/Os.
7911  **/
7912 static void
7913 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7914 {
7915 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7916 			"2711 PCI channel permanent disable for failure\n");
7917 	/* Block all SCSI devices' I/Os on the host */
7918 	lpfc_scsi_dev_block(phba);
7919 
7920 	/* stop all timers */
7921 	lpfc_stop_hba_timers(phba);
7922 
7923 	/* Clean up all driver's outstanding SCSI I/Os */
7924 	lpfc_sli_flush_fcp_rings(phba);
7925 }
7926 
7927 /**
7928  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7929  * @pdev: pointer to PCI device.
7930  * @state: the current PCI connection state.
7931  *
7932  * This routine is called from the PCI subsystem for I/O error handling to
7933  * device with SLI-3 interface spec. This function is called by the PCI
7934  * subsystem after a PCI bus error affecting this device has been detected.
7935  * When this function is invoked, it will need to stop all the I/Os and
7936  * interrupt(s) to the device. Once that is done, it will return
7937  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7938  * as desired.
7939  *
7940  * Return codes
7941  * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7942  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7943  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7944  **/
7945 static pci_ers_result_t
7946 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7947 {
7948 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7949 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7950 
7951 	switch (state) {
7952 	case pci_channel_io_normal:
7953 		/* Non-fatal error, prepare for recovery */
7954 		lpfc_sli_prep_dev_for_recover(phba);
7955 		return PCI_ERS_RESULT_CAN_RECOVER;
7956 	case pci_channel_io_frozen:
7957 		/* Fatal error, prepare for slot reset */
7958 		lpfc_sli_prep_dev_for_reset(phba);
7959 		return PCI_ERS_RESULT_NEED_RESET;
7960 	case pci_channel_io_perm_failure:
7961 		/* Permanent failure, prepare for device down */
7962 		lpfc_sli_prep_dev_for_perm_failure(phba);
7963 		return PCI_ERS_RESULT_DISCONNECT;
7964 	default:
7965 		/* Unknown state, prepare and request slot reset */
7966 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7967 				"0472 Unknown PCI error state: x%x\n", state);
7968 		lpfc_sli_prep_dev_for_reset(phba);
7969 		return PCI_ERS_RESULT_NEED_RESET;
7970 	}
7971 }
7972 
7973 /**
7974  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7975  * @pdev: pointer to PCI device.
7976  *
7977  * This routine is called from the PCI subsystem for error handling to
7978  * device with SLI-3 interface spec. This is called after PCI bus has been
7979  * reset to restart the PCI card from scratch, as if from a cold-boot.
7980  * During the PCI subsystem error recovery, after driver returns
7981  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7982  * recovery and then call this routine before calling the .resume method
7983  * to recover the device. This function will initialize the HBA device,
7984  * enable the interrupt, but it will just put the HBA to offline state
7985  * without passing any I/O traffic.
7986  *
7987  * Return codes
7988  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7989  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7990  */
7991 static pci_ers_result_t
7992 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7993 {
7994 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7995 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7996 	struct lpfc_sli *psli = &phba->sli;
7997 	uint32_t intr_mode;
7998 
7999 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8000 	if (pci_enable_device_mem(pdev)) {
8001 		printk(KERN_ERR "lpfc: Cannot re-enable "
8002 			"PCI device after reset.\n");
8003 		return PCI_ERS_RESULT_DISCONNECT;
8004 	}
8005 
8006 	pci_restore_state(pdev);
8007 
8008 	/*
8009 	 * The new kernel behavior of pci_restore_state() clears the device's
8010 	 * saved_state flag, so the restored state needs to be saved again.
8011 	 */
8012 	pci_save_state(pdev);
8013 
8014 	if (pdev->is_busmaster)
8015 		pci_set_master(pdev);
8016 
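	/* Mark the SLI layer as not active so that it will be fully
	 * reinitialized when the HBA is restarted below.
	 */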
8017 	spin_lock_irq(&phba->hbalock);
8018 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8019 	spin_unlock_irq(&phba->hbalock);
8020 
8021 	/* Configure and enable interrupt */
8022 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8023 	if (intr_mode == LPFC_INTR_ERROR) {
8024 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8025 				"0427 Cannot re-enable interrupt after "
8026 				"slot reset.\n");
8027 		return PCI_ERS_RESULT_DISCONNECT;
8028 	} else
8029 		phba->intr_mode = intr_mode;
8030 
8031 	/* Take device offline, it will perform cleanup */
8032 	lpfc_offline_prep(phba);
8033 	lpfc_offline(phba);
8034 	lpfc_sli_brdrestart(phba);
8035 
8036 	/* Log the current active interrupt mode */
8037 	lpfc_log_intr_mode(phba, phba->intr_mode);
8038 
8039 	return PCI_ERS_RESULT_RECOVERED;
8040 }
8041 
8042 /**
8043  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8044  * @pdev: pointer to PCI device
8045  *
8046  * This routine is called from the PCI subsystem for error handling to device
8047  * with SLI-3 interface spec. It is called when kernel error recovery tells
8048  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8049  * error recovery. After this call, traffic can start to flow from this device
8050  * again.
8051  */
8052 static void
8053 lpfc_io_resume_s3(struct pci_dev *pdev)
8054 {
8055 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8056 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8057 
8058 	/* Bring device online, it will be no-op for non-fatal error resume */
8059 	lpfc_online(phba);
8060 
8061 	/* Clean up Advanced Error Reporting (AER) if needed */
8062 	if (phba->hba_flag & HBA_AER_ENABLED)
8063 		pci_cleanup_aer_uncorrect_error_status(pdev);
8064 }
8065 
8066 /**
8067  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8068  * @phba: pointer to lpfc hba data structure.
8069  *
8070  * Returns the number of ELS/CT IOCBs to reserve.
8071  **/
8072 int
8073 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8074 {
8075 	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8076 
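	/* Scale the ELS/CT IOCB reservation with the number of XRIs
	 * configured: roughly ten percent, capped at 150.
	 */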
8077 	if (phba->sli_rev == LPFC_SLI_REV4) {
8078 		if (max_xri <= 100)
8079 			return 10;
8080 		else if (max_xri <= 256)
8081 			return 25;
8082 		else if (max_xri <= 512)
8083 			return 50;
8084 		else if (max_xri <= 1024)
8085 			return 100;
8086 		else
8087 			return 150;
8088 	} else
8089 		return 0;
8090 }
8091 
8092 /**
8093  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8094  * @pdev: pointer to PCI device
8095  * @pid: pointer to PCI device identifier
8096  *
8097  * This routine is to be called to attach a device with SLI-4 interface
8098  * spec to the PCI subsystem. When an Emulex HBA with SLI-4 interface spec is
8099  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8100  * information of the device and driver to see if the driver states that it
8101  * can support this kind of device. If the match is successful, the driver
8102  * core invokes this routine. If this routine determines it can claim the HBA,
8103  * it does all the initialization that it needs to do to handle the HBA
8104  * properly.
8105  *
8106  * Return code
8107  * 	0 - driver can claim the device
8108  * 	negative value - driver can not claim the device
8109  **/
8110 static int __devinit
8111 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8112 {
8113 	struct lpfc_hba   *phba;
8114 	struct lpfc_vport *vport = NULL;
8115 	struct Scsi_Host  *shost = NULL;
8116 	int error;
8117 	uint32_t cfg_mode, intr_mode;
8118 	int mcnt;
8119 
8120 	/* Allocate memory for HBA structure */
8121 	phba = lpfc_hba_alloc(pdev);
8122 	if (!phba)
8123 		return -ENOMEM;
8124 
8125 	/* Perform generic PCI device enabling operation */
8126 	error = lpfc_enable_pci_dev(phba);
8127 	if (error) {
8128 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8129 				"1409 Failed to enable pci device.\n");
8130 		goto out_free_phba;
8131 	}
8132 
8133 	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
8134 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8135 	if (error)
8136 		goto out_disable_pci_dev;
8137 
8138 	/* Set up SLI-4 specific device PCI memory space */
8139 	error = lpfc_sli4_pci_mem_setup(phba);
8140 	if (error) {
8141 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8142 				"1410 Failed to set up pci memory space.\n");
8143 		goto out_disable_pci_dev;
8144 	}
8145 
8146 	/* Set up phase-1 common device driver resources */
8147 	error = lpfc_setup_driver_resource_phase1(phba);
8148 	if (error) {
8149 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8150 				"1411 Failed to set up driver resource.\n");
8151 		goto out_unset_pci_mem_s4;
8152 	}
8153 
8154 	/* Set up SLI-4 Specific device driver resources */
8155 	error = lpfc_sli4_driver_resource_setup(phba);
8156 	if (error) {
8157 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8158 				"1412 Failed to set up driver resource.\n");
8159 		goto out_unset_pci_mem_s4;
8160 	}
8161 
8162 	/* Initialize and populate the iocb list per host */
8163 
8164 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8165 			"2821 initialize iocb list %d.\n",
8166 			phba->cfg_iocb_cnt*1024);
8167 	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
8168 
8169 	if (error) {
8170 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8171 				"1413 Failed to initialize iocb list.\n");
8172 		goto out_unset_driver_resource_s4;
8173 	}
8174 
8175 	/* Set up common device driver resources */
8176 	error = lpfc_setup_driver_resource_phase2(phba);
8177 	if (error) {
8178 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8179 				"1414 Failed to set up driver resource.\n");
8180 		goto out_free_iocb_list;
8181 	}
8182 
8183 	/* Create SCSI host to the physical port */
8184 	error = lpfc_create_shost(phba);
8185 	if (error) {
8186 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8187 				"1415 Failed to create scsi host.\n");
8188 		goto out_unset_driver_resource;
8189 	}
8190 
8191 	/* Configure sysfs attributes */
8192 	vport = phba->pport;
8193 	error = lpfc_alloc_sysfs_attr(vport);
8194 	if (error) {
8195 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8196 				"1416 Failed to allocate sysfs attr\n");
8197 		goto out_destroy_shost;
8198 	}
8199 
8200 	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8201 	/* Now try to enable the interrupt and bring up the device */
8202 	cfg_mode = phba->cfg_use_msi;
8203 	while (true) {
8204 		/* Put device to a known state before enabling interrupt */
8205 		lpfc_stop_port(phba);
8206 		/* Configure and enable interrupt */
8207 		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8208 		if (intr_mode == LPFC_INTR_ERROR) {
8209 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8210 					"0426 Failed to enable interrupt.\n");
8211 			error = -ENODEV;
8212 			goto out_free_sysfs_attr;
8213 		}
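		/*
		 * Note: with MSI-X, one vector is assumed to be dedicated to
		 * the slow path, so only msix_vec_nr - 1 vectors remain for
		 * the fast-path FCP event queues; the other interrupt modes
		 * share a single vector, hence a single FCP EQ.
		 */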
8214 		/* Default to single FCP EQ for non-MSI-X */
8215 		if (phba->intr_type != MSIX)
8216 			phba->cfg_fcp_eq_count = 1;
8217 		else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
8218 			phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
8219 		/* Set up SLI-4 HBA */
8220 		if (lpfc_sli4_hba_setup(phba)) {
8221 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8222 					"1421 Failed to set up hba\n");
8223 			error = -ENODEV;
8224 			goto out_disable_intr;
8225 		}
8226 
8227 		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
8228 		if (intr_mode != 0)
8229 			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
8230 							    LPFC_ACT_INTR_CNT);
8231 
8232 		/* Check active interrupts received only for MSI/MSI-X */
8233 		if (intr_mode == 0 ||
8234 		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
8235 			/* Log the current active interrupt mode */
8236 			phba->intr_mode = intr_mode;
8237 			lpfc_log_intr_mode(phba, intr_mode);
8238 			break;
8239 		}
8240 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8241 				"0451 Configure interrupt mode (%d) "
8242 				"failed active interrupt test.\n",
8243 				intr_mode);
8244 		/* Unset the previous SLI-4 HBA setup */
8245 		lpfc_sli4_unset_hba(phba);
8246 		/* Try next level of interrupt mode */
8247 		cfg_mode = --intr_mode;
8248 	}
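	/*
	 * Note: the loop above walks the interrupt modes from most to least
	 * capable (MSI-X -> MSI -> INTx). INTx (mode 0) is accepted without
	 * the NOP-mailbox active-interrupt test, so the loop always
	 * terminates.
	 */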
8249 
8250 	/* Perform post initialization setup */
8251 	lpfc_post_init_setup(phba);
8252 
8253 	/* Check if there are static vports to be created. */
8254 	lpfc_create_static_vport(phba);
8255 
8256 	return 0;
8257 
8258 out_disable_intr:
8259 	lpfc_sli4_disable_intr(phba);
8260 out_free_sysfs_attr:
8261 	lpfc_free_sysfs_attr(vport);
8262 out_destroy_shost:
8263 	lpfc_destroy_shost(phba);
8264 out_unset_driver_resource:
8265 	lpfc_unset_driver_resource_phase2(phba);
8266 out_free_iocb_list:
8267 	lpfc_free_iocb_list(phba);
8268 out_unset_driver_resource_s4:
8269 	lpfc_sli4_driver_resource_unset(phba);
8270 out_unset_pci_mem_s4:
8271 	lpfc_sli4_pci_mem_unset(phba);
8272 out_disable_pci_dev:
8273 	lpfc_disable_pci_dev(phba);
8274 	if (shost)
8275 		scsi_host_put(shost);
8276 out_free_phba:
8277 	lpfc_hba_free(phba);
8278 	return error;
8279 }
8280 
8281 /**
8282  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8283  * @pdev: pointer to PCI device
8284  *
8285  * This routine is called from the kernel's PCI subsystem for a device with
8286  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface spec
8287  * is removed from the PCI bus, it performs all the necessary cleanup for the
8288  * HBA device to be removed from the PCI subsystem properly.
8289  **/
8290 static void __devexit
8291 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8292 {
8293 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8294 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8295 	struct lpfc_vport **vports;
8296 	struct lpfc_hba *phba = vport->phba;
8297 	int i;
8298 
8299 	/* Mark the device unloading flag */
8300 	spin_lock_irq(&phba->hbalock);
8301 	vport->load_flag |= FC_UNLOADING;
8302 	spin_unlock_irq(&phba->hbalock);
8303 
8304 	/* Free the HBA sysfs attributes */
8305 	lpfc_free_sysfs_attr(vport);
8306 
8307 	/* Release all the vports against this physical port */
8308 	vports = lpfc_create_vport_work_array(phba);
8309 	if (vports != NULL)
8310 		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8311 			fc_vport_terminate(vports[i]->fc_vport);
8312 	lpfc_destroy_vport_work_array(phba, vports);
8313 
8314 	/* Remove FC host and then SCSI host with the physical port */
8315 	fc_remove_host(shost);
8316 	scsi_remove_host(shost);
8317 
8318 	/* Perform cleanup on the physical port */
8319 	lpfc_cleanup(vport);
8320 
8321 	/*
8322 	 * Bring down the SLI Layer. This step disables all interrupts,
8323 	 * clears the rings, discards all mailbox commands, and resets
8324 	 * the HBA FCoE function.
8325 	 */
8326 	lpfc_debugfs_terminate(vport);
8327 	lpfc_sli4_hba_unset(phba);
8328 
8329 	spin_lock_irq(&phba->hbalock);
8330 	list_del_init(&vport->listentry);
8331 	spin_unlock_irq(&phba->hbalock);
8332 
8333 	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
8334 	 * buffers are released to their corresponding pools here.
8335 	 */
8336 	lpfc_scsi_free(phba);
8337 	lpfc_sli4_driver_resource_unset(phba);
8338 
8339 	/* Unmap adapter Control and Doorbell registers */
8340 	lpfc_sli4_pci_mem_unset(phba);
8341 
8342 	/* Release PCI resources and disable device's PCI function */
8343 	scsi_host_put(shost);
8344 	lpfc_disable_pci_dev(phba);
8345 
8346 	/* Finally, free the driver's device data structure */
8347 	lpfc_hba_free(phba);
8348 
8349 	return;
8350 }
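/*
 * Note that the teardown above mirrors lpfc_pci_probe_one_s4() in reverse:
 * sysfs attributes, vports, the FC/SCSI hosts, the SLI layer, driver
 * resources, and finally the PCI resources, so no stage references a
 * resource that a later stage has already released.
 */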
8351 
8352 /**
8353  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmt
8354  * @pdev: pointer to PCI device
8355  * @msg: power management message
8356  *
8357  * This routine is called from the kernel's PCI subsystem to support system
8358  * Power Management (PM) for a device with the SLI-4 interface spec. When PM
8359  * invokes this method, it quiesces the device by stopping the driver's
8360  * worker thread for the device, turning off the device's interrupts and DMA,
8361  * and bringing the device offline. Note that the driver implements only the
8362  * minimum PM requirements of a power-aware driver for suspend/resume: all
8363  * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method
8364  * call are treated as SUSPEND, and the driver fully reinitializes its device
8365  * during the resume() method call. The driver therefore sets the device to
8366  * the PCI_D3hot state in PCI config space instead of setting it according to
8367  * the @msg provided by the PM.
8368  *
8369  * Return code
8370  * 	0 - driver suspended the device
8371  * 	Error otherwise
8372  **/
8373 static int
8374 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8375 {
8376 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8377 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8378 
8379 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8380 			"2843 PCI device Power Management suspend.\n");
8381 
8382 	/* Bring down the device */
8383 	lpfc_offline_prep(phba);
8384 	lpfc_offline(phba);
8385 	kthread_stop(phba->worker_thread);
8386 
8387 	/* Disable interrupt from device */
8388 	lpfc_sli4_disable_intr(phba);
8389 
8390 	/* Save device state to PCI config space */
8391 	pci_save_state(pdev);
8392 	pci_set_power_state(pdev, PCI_D3hot);
8393 
8394 	return 0;
8395 }
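/*
 * lpfc_pci_resume_one_s4() below performs the inverse sequence: restore the
 * PCI state, restart the worker thread, re-enable the interrupt, and bring
 * the HBA back online.
 */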
8396 
8397 /**
8398  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmt
8399  * @pdev: pointer to PCI device
8400  *
8401  * This routine is called from the kernel's PCI subsystem to support system
8402  * Power Management (PM) for a device with the SLI-4 interface spec. When PM
8403  * invokes this method, it restores the device's PCI config space state,
8404  * fully reinitializes the device, and brings it online. Note that the driver
8405  * implements only the minimum PM requirements of a power-aware driver for
8406  * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to
8407  * the suspend() method call are treated as SUSPEND, and the driver fully
8408  * reinitializes its device during the resume() method call. The device is
8409  * therefore set to PCI_D0 directly in PCI config space before restoring the
8410  * state.
8411  *
8412  * Return code
8413  * 	0 - driver resumed the device
8414  * 	Error otherwise
8415  **/
8416 static int
8417 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8418 {
8419 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8420 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8421 	uint32_t intr_mode;
8422 	int error;
8423 
8424 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8425 			"0292 PCI device Power Management resume.\n");
8426 
8427 	/* Restore device state from PCI config space */
8428 	pci_set_power_state(pdev, PCI_D0);
8429 	pci_restore_state(pdev);
8430 
8431 	/*
8432 	 * As the new kernel behavior of pci_restore_state() API call clears
8433 	 * device saved_state flag, need to save the restored state again.
8434 	 */
8435 	pci_save_state(pdev);
8436 
8437 	if (pdev->is_busmaster)
8438 		pci_set_master(pdev);
8439 
8440 	/* Start the kernel thread for this host adapter. */
8441 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8442 					"lpfc_worker_%d", phba->brd_no);
8443 	if (IS_ERR(phba->worker_thread)) {
8444 		error = PTR_ERR(phba->worker_thread);
8445 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8446 				"0293 PM resume failed to start worker "
8447 				"thread: error=x%x.\n", error);
8448 		return error;
8449 	}
8450 
8451 	/* Configure and enable interrupt */
8452 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8453 	if (intr_mode == LPFC_INTR_ERROR) {
8454 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8455 				"0294 PM resume Failed to enable interrupt\n");
8456 		return -EIO;
8457 	} else
8458 		phba->intr_mode = intr_mode;
8459 
8460 	/* Restart HBA and bring it online */
8461 	lpfc_sli_brdrestart(phba);
8462 	lpfc_online(phba);
8463 
8464 	/* Log the current active interrupt mode */
8465 	lpfc_log_intr_mode(phba, phba->intr_mode);
8466 
8467 	return 0;
8468 }
8469 
8470 /**
8471  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for PCI slot recovery
8472  * @phba: pointer to lpfc hba data structure.
8473  *
8474  * This routine is called to prepare the SLI4 device for PCI slot recovery.
8475  * It aborts all the outstanding SCSI I/Os to the PCI device.
8476  **/
8477 static void
8478 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
8479 {
8480 	struct lpfc_sli *psli = &phba->sli;
8481 	struct lpfc_sli_ring  *pring;
8482 
8483 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8484 			"2828 PCI channel I/O abort preparing for recovery\n");
8485 	/*
8486 	 * There may be errored I/Os pending in the HBA; abort all I/Os on the
8487 	 * txcmplq and let the SCSI mid-layer retry them to recover.
8488 	 */
8489 	pring = &psli->ring[psli->fcp_ring];
8490 	lpfc_sli_abort_iocb_ring(phba, pring);
8491 }
8492 
8493 /**
8494  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for PCI slot reset
8495  * @phba: pointer to lpfc hba data structure.
8496  *
8497  * This routine is called to prepare the SLI4 device for PCI slot reset. It
8498  * disables the device interrupt and the PCI device, and aborts the pending
8499  * internal FCP I/Os.
8500  **/
8501 static void
8502 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
8503 {
8504 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8505 			"2826 PCI channel disable preparing for reset\n");
8506 
8507 	/* Block any management I/Os to the device */
8508 	lpfc_block_mgmt_io(phba);
8509 
8510 	/* Block all SCSI devices' I/Os on the host */
8511 	lpfc_scsi_dev_block(phba);
8512 
8513 	/* stop all timers */
8514 	lpfc_stop_hba_timers(phba);
8515 
8516 	/* Disable interrupt and pci device */
8517 	lpfc_sli4_disable_intr(phba);
8518 	pci_disable_device(phba->pcidev);
8519 
8520 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
8521 	lpfc_sli_flush_fcp_rings(phba);
8522 }
8523 
8524 /**
8525  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for PCI slot disable
8526  * @phba: pointer to lpfc hba data structure.
8527  *
8528  * This routine is called to prepare the SLI4 device for permanent PCI slot
8529  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8530  * pending I/Os.
8531  **/
8532 static void
8533 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8534 {
8535 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8536 			"2827 PCI channel permanent disable for failure\n");
8537 
8538 	/* Block all SCSI devices' I/Os on the host */
8539 	lpfc_scsi_dev_block(phba);
8540 
8541 	/* stop all timers */
8542 	lpfc_stop_hba_timers(phba);
8543 
8544 	/* Clean up all driver's outstanding SCSI I/Os */
8545 	lpfc_sli_flush_fcp_rings(phba);
8546 }
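/*
 * The three prep routines above form a severity ladder: recover only aborts
 * the outstanding I/Os; reset additionally blocks management I/O, stops the
 * timers, and disables the interrupt and the PCI device; perm_failure skips
 * the reset-specific steps because the device will not be coming back.
 */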
8547 
8548 /**
8549  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8550  * @pdev: pointer to PCI device.
8551  * @state: the current PCI connection state.
8552  *
8553  * This routine is called from the PCI subsystem for error handling on a
8554  * device with the SLI-4 interface spec. It is called by the PCI subsystem
8555  * after a PCI bus error affecting this device has been detected. When this
8556  * function is invoked, it will need to stop all the I/Os and interrupt(s)
8557  * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
8558  * for the PCI subsystem to perform proper recovery as desired.
8559  *
8560  * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - recoverable without a slot reset
8561  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8562  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8563  **/
8564 static pci_ers_result_t
8565 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8566 {
8567 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8568 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8569 
8570 	switch (state) {
8571 	case pci_channel_io_normal:
8572 		/* Non-fatal error, prepare for recovery */
8573 		lpfc_sli4_prep_dev_for_recover(phba);
8574 		return PCI_ERS_RESULT_CAN_RECOVER;
8575 	case pci_channel_io_frozen:
8576 		/* Fatal error, prepare for slot reset */
8577 		lpfc_sli4_prep_dev_for_reset(phba);
8578 		return PCI_ERS_RESULT_NEED_RESET;
8579 	case pci_channel_io_perm_failure:
8580 		/* Permanent failure, prepare for device down */
8581 		lpfc_sli4_prep_dev_for_perm_failure(phba);
8582 		return PCI_ERS_RESULT_DISCONNECT;
8583 	default:
8584 		/* Unknown state, prepare and request slot reset */
8585 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8586 				"2825 Unknown PCI error state: x%x\n", state);
8587 		lpfc_sli4_prep_dev_for_reset(phba);
8588 		return PCI_ERS_RESULT_NEED_RESET;
8589 	}
8590 }
8591 
8592 /**
8593  * lpfc_io_slot_reset_s4 - Method for restarting a PCI SLI-4 device from scratch
8594  * @pdev: pointer to PCI device.
8595  *
8596  * This routine is called from the PCI subsystem for error handling on a
8597  * device with the SLI-4 interface spec. It is called after the PCI bus has
8598  * been reset to restart the PCI card from scratch, as if from a cold boot.
8599  * During the PCI subsystem error recovery, after the driver returns
8600  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform the proper error
8601  * recovery and then call this routine before calling the .resume method to
8602  * recover the device. This function initializes the HBA device and enables
8603  * its interrupt, but it just puts the HBA into the offline state without
8604  * passing any I/O traffic.
8605  *
8606  * Return codes
8607  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8608  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8609  **/
8610 static pci_ers_result_t
8611 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8612 {
8613 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8614 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8615 	struct lpfc_sli *psli = &phba->sli;
8616 	uint32_t intr_mode;
8617 
8618 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8619 	if (pci_enable_device_mem(pdev)) {
8620 		printk(KERN_ERR "lpfc: Cannot re-enable "
8621 			"PCI device after reset.\n");
8622 		return PCI_ERS_RESULT_DISCONNECT;
8623 	}
8624 
8625 	pci_restore_state(pdev);
8626 	if (pdev->is_busmaster)
8627 		pci_set_master(pdev);
8628 
8629 	spin_lock_irq(&phba->hbalock);
8630 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8631 	spin_unlock_irq(&phba->hbalock);
8632 
8633 	/* Configure and enable interrupt */
8634 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8635 	if (intr_mode == LPFC_INTR_ERROR) {
8636 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8637 				"2824 Cannot re-enable interrupt after "
8638 				"slot reset.\n");
8639 		return PCI_ERS_RESULT_DISCONNECT;
8640 	} else
8641 		phba->intr_mode = intr_mode;
8642 
8643 	/* Log the current active interrupt mode */
8644 	lpfc_log_intr_mode(phba, phba->intr_mode);
8645 
8646 	return PCI_ERS_RESULT_RECOVERED;
8647 }
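/*
 * Clearing LPFC_SLI_ACTIVE above is what makes lpfc_io_resume_s4() below
 * perform the full offline/restart/online cycle: the mailbox-based function
 * reset needs DMA, so it has to wait until the io_resume phase.
 */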
8648 
8649 /**
8650  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
8651  * @pdev: pointer to PCI device
8652  *
8653  * This routine is called from the PCI subsystem for error handling on a
8654  * device with the SLI-4 interface spec. It is called when kernel error
8655  * recovery tells the lpfc driver that it is OK to resume normal PCI operation
8656  * after PCI bus error recovery. After this call, traffic can start to flow
8657  * from this device again.
8658  **/
8659 static void
8660 lpfc_io_resume_s4(struct pci_dev *pdev)
8661 {
8662 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8663 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8664 
8665 	/*
8666 	 * In case of slot reset, as function reset is performed through
8667 	 * mailbox command which needs DMA to be enabled, this operation
8668 	 * has to be moved to the io resume phase. Taking device offline
8669 	 * will perform the necessary cleanup.
8670 	 */
8671 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
8672 		/* Perform device reset */
8673 		lpfc_offline_prep(phba);
8674 		lpfc_offline(phba);
8675 		lpfc_sli_brdrestart(phba);
8676 		/* Bring the device back online */
8677 		lpfc_online(phba);
8678 	}
8679 
8680 	/* Clean up Advanced Error Reporting (AER) if needed */
8681 	if (phba->hba_flag & HBA_AER_ENABLED)
8682 		pci_cleanup_aer_uncorrect_error_status(pdev);
8683 }
8684 
8685 /**
8686  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
8687  * @pdev: pointer to PCI device
8688  * @pid: pointer to PCI device identifier
8689  *
8690  * This routine is to be registered to the kernel's PCI subsystem. When an
8691  * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
8692  * looks at the PCI device-specific information of the device and the driver
8693  * to see if the driver can support this kind of device. If the match is
8694  * successful, the driver core invokes this routine. This routine dispatches
8695  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
8696  * do all the initialization that it needs to do to handle the HBA device
8697  * properly.
8698  *
8699  * Return code
8700  * 	0 - driver can claim the device
8701  * 	negative value - driver cannot claim the device
8702  **/
8703 static int __devinit
8704 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
8705 {
8706 	int rc;
8707 	struct lpfc_sli_intf intf;
8708 
8709 	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
8710 		return -ENODEV;
8711 
8712 	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
8713 	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
8714 		rc = lpfc_pci_probe_one_s4(pdev, pid);
8715 	else
8716 		rc = lpfc_pci_probe_one_s3(pdev, pid);
8717 
8718 	return rc;
8719 }
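/*
 * The SLI_INTF config-space read above is the only generation check: a valid
 * SLI-4 interface register selects the SLI-4 probe (which records
 * phba->pci_dev_grp as LPFC_PCI_DEV_OC via lpfc_api_table_setup()), and
 * everything else takes the SLI-3 path (LPFC_PCI_DEV_LP). All the wrapper
 * routines below dispatch on that saved group.
 */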
8720 
8721 /**
8722  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
8723  * @pdev: pointer to PCI device
8724  *
8725  * This routine is to be registered to the kernel's PCI subsystem. When an
8726  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
8727  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
8728  * remove routine, which will perform all the necessary cleanup for the
8729  * device to be removed from the PCI subsystem properly.
8730  **/
8731 static void __devexit
8732 lpfc_pci_remove_one(struct pci_dev *pdev)
8733 {
8734 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8735 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8736 
8737 	switch (phba->pci_dev_grp) {
8738 	case LPFC_PCI_DEV_LP:
8739 		lpfc_pci_remove_one_s3(pdev);
8740 		break;
8741 	case LPFC_PCI_DEV_OC:
8742 		lpfc_pci_remove_one_s4(pdev);
8743 		break;
8744 	default:
8745 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8746 				"1424 Invalid PCI device group: 0x%x\n",
8747 				phba->pci_dev_grp);
8748 		break;
8749 	}
8750 	return;
8751 }
8752 
8753 /**
8754  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
8755  * @pdev: pointer to PCI device
8756  * @msg: power management message
8757  *
8758  * This routine is to be registered to the kernel's PCI subsystem to support
8759  * system Power Management (PM). When PM invokes this method, it dispatches
8760  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
8761  * suspend the device.
8762  *
8763  * Return code
8764  * 	0 - driver suspended the device
8765  * 	Error otherwise
8766  **/
8767 static int
8768 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
8769 {
8770 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8771 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8772 	int rc = -ENODEV;
8773 
8774 	switch (phba->pci_dev_grp) {
8775 	case LPFC_PCI_DEV_LP:
8776 		rc = lpfc_pci_suspend_one_s3(pdev, msg);
8777 		break;
8778 	case LPFC_PCI_DEV_OC:
8779 		rc = lpfc_pci_suspend_one_s4(pdev, msg);
8780 		break;
8781 	default:
8782 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8783 				"1425 Invalid PCI device group: 0x%x\n",
8784 				phba->pci_dev_grp);
8785 		break;
8786 	}
8787 	return rc;
8788 }
8789 
8790 /**
8791  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
8792  * @pdev: pointer to PCI device
8793  *
8794  * This routine is to be registered to the kernel's PCI subsystem to support
8795  * system Power Management (PM). When PM invokes this method, it dispatches
8796  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
8797  * resume the device.
8798  *
8799  * Return code
8800  * 	0 - driver resumed the device
8801  * 	Error otherwise
8802  **/
8803 static int
8804 lpfc_pci_resume_one(struct pci_dev *pdev)
8805 {
8806 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8807 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8808 	int rc = -ENODEV;
8809 
8810 	switch (phba->pci_dev_grp) {
8811 	case LPFC_PCI_DEV_LP:
8812 		rc = lpfc_pci_resume_one_s3(pdev);
8813 		break;
8814 	case LPFC_PCI_DEV_OC:
8815 		rc = lpfc_pci_resume_one_s4(pdev);
8816 		break;
8817 	default:
8818 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8819 				"1426 Invalid PCI device group: 0x%x\n",
8820 				phba->pci_dev_grp);
8821 		break;
8822 	}
8823 	return rc;
8824 }
8825 
8826 /**
8827  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
8828  * @pdev: pointer to PCI device.
8829  * @state: the current PCI connection state.
8830  *
8831  * This routine is registered to the PCI subsystem for error handling. This
8832  * function is called by the PCI subsystem after a PCI bus error affecting
8833  * this device has been detected. When this routine is invoked, it dispatches
8834  * the action to the proper SLI-3 or SLI-4 device error detected handling
8835  * routine, which will perform the proper error detected operation.
8836  *
8837  * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - recoverable without a slot reset
8838  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8839  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8840  **/
8841 static pci_ers_result_t
8842 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8843 {
8844 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8845 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8846 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8847 
8848 	switch (phba->pci_dev_grp) {
8849 	case LPFC_PCI_DEV_LP:
8850 		rc = lpfc_io_error_detected_s3(pdev, state);
8851 		break;
8852 	case LPFC_PCI_DEV_OC:
8853 		rc = lpfc_io_error_detected_s4(pdev, state);
8854 		break;
8855 	default:
8856 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8857 				"1427 Invalid PCI device group: 0x%x\n",
8858 				phba->pci_dev_grp);
8859 		break;
8860 	}
8861 	return rc;
8862 }
8863 
8864 /**
8865  * lpfc_io_slot_reset - lpfc method for restarting a PCI dev from scratch
8866  * @pdev: pointer to PCI device.
8867  *
8868  * This routine is registered to the PCI subsystem for error handling. This
8869  * function is called after PCI bus has been reset to restart the PCI card
8870  * from scratch, as if from a cold-boot. When this routine is invoked, it
8871  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
8872  * routine, which will perform the proper device reset.
8873  *
8874  * Return codes
8875  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8876  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8877  **/
8878 static pci_ers_result_t
8879 lpfc_io_slot_reset(struct pci_dev *pdev)
8880 {
8881 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8882 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8883 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8884 
8885 	switch (phba->pci_dev_grp) {
8886 	case LPFC_PCI_DEV_LP:
8887 		rc = lpfc_io_slot_reset_s3(pdev);
8888 		break;
8889 	case LPFC_PCI_DEV_OC:
8890 		rc = lpfc_io_slot_reset_s4(pdev);
8891 		break;
8892 	default:
8893 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8894 				"1428 Invalid PCI device group: 0x%x\n",
8895 				phba->pci_dev_grp);
8896 		break;
8897 	}
8898 	return rc;
8899 }
8900 
8901 /**
8902  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
8903  * @pdev: pointer to PCI device
8904  *
8905  * This routine is registered to the PCI subsystem for error handling. It
8906  * is called when kernel error recovery tells the lpfc driver that it is
8907  * OK to resume normal PCI operation after PCI bus error recovery. When
8908  * this routine is invoked, it dispatches the action to the proper SLI-3
8909  * or SLI-4 device io_resume routine, which will resume the device operation.
8910  **/
8911 static void
8912 lpfc_io_resume(struct pci_dev *pdev)
8913 {
8914 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8915 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8916 
8917 	switch (phba->pci_dev_grp) {
8918 	case LPFC_PCI_DEV_LP:
8919 		lpfc_io_resume_s3(pdev);
8920 		break;
8921 	case LPFC_PCI_DEV_OC:
8922 		lpfc_io_resume_s4(pdev);
8923 		break;
8924 	default:
8925 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8926 				"1429 Invalid PCI device group: 0x%x\n",
8927 				phba->pci_dev_grp);
8928 		break;
8929 	}
8930 	return;
8931 }
8932 
8933 static struct pci_device_id lpfc_id_table[] = {
8934 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
8935 		PCI_ANY_ID, PCI_ANY_ID, },
8936 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
8937 		PCI_ANY_ID, PCI_ANY_ID, },
8938 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
8939 		PCI_ANY_ID, PCI_ANY_ID, },
8940 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
8941 		PCI_ANY_ID, PCI_ANY_ID, },
8942 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
8943 		PCI_ANY_ID, PCI_ANY_ID, },
8944 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
8945 		PCI_ANY_ID, PCI_ANY_ID, },
8946 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
8947 		PCI_ANY_ID, PCI_ANY_ID, },
8948 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
8949 		PCI_ANY_ID, PCI_ANY_ID, },
8950 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
8951 		PCI_ANY_ID, PCI_ANY_ID, },
8952 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
8953 		PCI_ANY_ID, PCI_ANY_ID, },
8954 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
8955 		PCI_ANY_ID, PCI_ANY_ID, },
8956 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
8957 		PCI_ANY_ID, PCI_ANY_ID, },
8958 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
8959 		PCI_ANY_ID, PCI_ANY_ID, },
8960 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
8961 		PCI_ANY_ID, PCI_ANY_ID, },
8962 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
8963 		PCI_ANY_ID, PCI_ANY_ID, },
8964 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
8965 		PCI_ANY_ID, PCI_ANY_ID, },
8966 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
8967 		PCI_ANY_ID, PCI_ANY_ID, },
8968 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
8969 		PCI_ANY_ID, PCI_ANY_ID, },
8970 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
8971 		PCI_ANY_ID, PCI_ANY_ID, },
8972 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
8973 		PCI_ANY_ID, PCI_ANY_ID, },
8974 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
8975 		PCI_ANY_ID, PCI_ANY_ID, },
8976 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
8977 		PCI_ANY_ID, PCI_ANY_ID, },
8978 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
8979 		PCI_ANY_ID, PCI_ANY_ID, },
8980 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
8981 		PCI_ANY_ID, PCI_ANY_ID, },
8982 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
8983 		PCI_ANY_ID, PCI_ANY_ID, },
8984 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
8985 		PCI_ANY_ID, PCI_ANY_ID, },
8986 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
8987 		PCI_ANY_ID, PCI_ANY_ID, },
8988 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
8989 		PCI_ANY_ID, PCI_ANY_ID, },
8990 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
8991 		PCI_ANY_ID, PCI_ANY_ID, },
8992 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
8993 		PCI_ANY_ID, PCI_ANY_ID, },
8994 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
8995 		PCI_ANY_ID, PCI_ANY_ID, },
8996 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
8997 		PCI_ANY_ID, PCI_ANY_ID, },
8998 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
8999 		PCI_ANY_ID, PCI_ANY_ID, },
9000 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
9001 		PCI_ANY_ID, PCI_ANY_ID, },
9002 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
9003 		PCI_ANY_ID, PCI_ANY_ID, },
9004 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
9005 		PCI_ANY_ID, PCI_ANY_ID, },
9006 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
9007 		PCI_ANY_ID, PCI_ANY_ID, },
9008 	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
9009 		PCI_ANY_ID, PCI_ANY_ID, },
9010 	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
9011 		PCI_ANY_ID, PCI_ANY_ID, },
9012 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
9013 		PCI_ANY_ID, PCI_ANY_ID, },
9014 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
9015 		PCI_ANY_ID, PCI_ANY_ID, },
9016 	{ 0 }
9017 };
9018 
9019 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
9020 
9021 static struct pci_error_handlers lpfc_err_handler = {
9022 	.error_detected = lpfc_io_error_detected,
9023 	.slot_reset = lpfc_io_slot_reset,
9024 	.resume = lpfc_io_resume,
9025 };
9026 
9027 static struct pci_driver lpfc_driver = {
9028 	.name		= LPFC_DRIVER_NAME,
9029 	.id_table	= lpfc_id_table,
9030 	.probe		= lpfc_pci_probe_one,
9031 	.remove		= __devexit_p(lpfc_pci_remove_one),
9032 	.suspend        = lpfc_pci_suspend_one,
9033 	.resume		= lpfc_pci_resume_one,
9034 	.err_handler    = &lpfc_err_handler,
9035 };
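/*
 * Note: a single pci_driver instance covers both SLI-3 and SLI-4 parts; the
 * probe, remove, PM, and error-handler callbacks registered above are the
 * dispatch wrappers that route to the _s3 or _s4 variants based on
 * phba->pci_dev_grp.
 */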
9036 
9037 /**
9038  * lpfc_init - lpfc module initialization routine
9039  *
9040  * This routine is to be invoked when the lpfc module is loaded into the
9041  * kernel. The special kernel macro module_init() is used to indicate the
9042  * role of this routine to the kernel as lpfc module entry point.
9043  *
9044  * Return codes
9045  *   0 - successful
9046  *   -ENOMEM - FC attach transport failed
9047  *   all others - failed
9048  */
9049 static int __init
9050 lpfc_init(void)
9051 {
9052 	int error = 0;
9053 
9054 	printk(LPFC_MODULE_DESC "\n");
9055 	printk(LPFC_COPYRIGHT "\n");
9056 
9057 	if (lpfc_enable_npiv) {
9058 		lpfc_transport_functions.vport_create = lpfc_vport_create;
9059 		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
9060 	}
9061 	lpfc_transport_template =
9062 				fc_attach_transport(&lpfc_transport_functions);
9063 	if (lpfc_transport_template == NULL)
9064 		return -ENOMEM;
9065 	if (lpfc_enable_npiv) {
9066 		lpfc_vport_transport_template =
9067 			fc_attach_transport(&lpfc_vport_transport_functions);
9068 		if (lpfc_vport_transport_template == NULL) {
9069 			fc_release_transport(lpfc_transport_template);
9070 			return -ENOMEM;
9071 		}
9072 	}
9073 	error = pci_register_driver(&lpfc_driver);
9074 	if (error) {
9075 		fc_release_transport(lpfc_transport_template);
9076 		if (lpfc_enable_npiv)
9077 			fc_release_transport(lpfc_vport_transport_template);
9078 	}
9079 
9080 	return error;
9081 }
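/*
 * Note the unwind ordering in lpfc_init() above: if pci_register_driver()
 * fails, both transport templates are released before returning, so a failed
 * module load leaves no dangling FC transport registrations.
 */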
9082 
9083 /**
9084  * lpfc_exit - lpfc module removal routine
9085  *
9086  * This routine is invoked when the lpfc module is removed from the kernel.
9087  * The special kernel macro module_exit() is used to indicate the role of
9088  * this routine to the kernel as lpfc module exit point.
9089  */
9090 static void __exit
9091 lpfc_exit(void)
9092 {
9093 	pci_unregister_driver(&lpfc_driver);
9094 	fc_release_transport(lpfc_transport_template);
9095 	if (lpfc_enable_npiv)
9096 		fc_release_transport(lpfc_vport_transport_template);
9097 	if (_dump_buf_data) {
9098 		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
9099 				"_dump_buf_data at 0x%p\n",
9100 				(1L << _dump_buf_data_order), _dump_buf_data);
9101 		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
9102 	}
9103 
9104 	if (_dump_buf_dif) {
9105 		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
9106 				"_dump_buf_dif at 0x%p\n",
9107 				(1L << _dump_buf_dif_order), _dump_buf_dif);
9108 		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
9109 	}
9110 }
9111 
9112 module_init(lpfc_init);
9113 module_exit(lpfc_exit);
9114 MODULE_LICENSE("GPL");
9115 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
9116 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
9117 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
9118