1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
31 #include <linux/aer.h>
32 #include <linux/slab.h>
33 
34 #include <scsi/scsi.h>
35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_transport_fc.h>
38 
39 #include "lpfc_hw4.h"
40 #include "lpfc_hw.h"
41 #include "lpfc_sli.h"
42 #include "lpfc_sli4.h"
43 #include "lpfc_nl.h"
44 #include "lpfc_disc.h"
45 #include "lpfc_scsi.h"
46 #include "lpfc.h"
47 #include "lpfc_logmsg.h"
48 #include "lpfc_crtn.h"
49 #include "lpfc_vport.h"
50 #include "lpfc_version.h"
51 
52 char *_dump_buf_data;
53 unsigned long _dump_buf_data_order;
54 char *_dump_buf_dif;
55 unsigned long _dump_buf_dif_order;
56 spinlock_t _dump_buf_lock;
57 
58 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
59 static int lpfc_post_rcv_buf(struct lpfc_hba *);
60 static int lpfc_sli4_queue_create(struct lpfc_hba *);
61 static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
62 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
63 static int lpfc_setup_endian_order(struct lpfc_hba *);
64 static int lpfc_sli4_read_config(struct lpfc_hba *);
65 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
66 static void lpfc_free_sgl_list(struct lpfc_hba *);
67 static int lpfc_init_sgl_list(struct lpfc_hba *);
68 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
69 static void lpfc_free_active_sgl(struct lpfc_hba *);
70 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
71 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
72 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
73 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
74 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
75 
76 static struct scsi_transport_template *lpfc_transport_template = NULL;
77 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
78 static DEFINE_IDR(lpfc_hba_index);
79 
80 /**
81  * lpfc_config_port_prep - Perform lpfc initialization prior to config port
82  * @phba: pointer to lpfc hba data structure.
83  *
84  * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
85  * mailbox command. It retrieves the revision information from the HBA and
86  * collects the Vital Product Data (VPD) about the HBA for preparing the
87  * configuration of the HBA.
88  *
89  * Return codes:
90  *   0 - success.
91  *   -ERESTART - requests the SLI layer to reset the HBA and try again.
92  *   Any other value - indicates an error.
93  **/
94 int
95 lpfc_config_port_prep(struct lpfc_hba *phba)
96 {
97 	lpfc_vpd_t *vp = &phba->vpd;
98 	int i = 0, rc;
99 	LPFC_MBOXQ_t *pmb;
100 	MAILBOX_t *mb;
101 	char *lpfc_vpd_data = NULL;
102 	uint16_t offset = 0;
103 	static char licensed[56] =
104 		    "key unlock for use with gnu public licensed code only\0";
105 	static int init_key = 1;
106 
107 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
108 	if (!pmb) {
109 		phba->link_state = LPFC_HBA_ERROR;
110 		return -ENOMEM;
111 	}
112 
113 	mb = &pmb->u.mb;
114 	phba->link_state = LPFC_INIT_MBX_CMDS;
115 
116 	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
117 		if (init_key) {
118 			uint32_t *ptext = (uint32_t *) licensed;
119 
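			/*
			 * The mailbox payload is taken as big-endian 32-bit
			 * words, so convert the unlock string in place once.
			 */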
120 			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
121 				*ptext = cpu_to_be32(*ptext);
122 			init_key = 0;
123 		}
124 
125 		lpfc_read_nv(phba, pmb);
126 		memset((char*)mb->un.varRDnvp.rsvd3, 0,
127 			sizeof (mb->un.varRDnvp.rsvd3));
128 		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
129 			 sizeof (licensed));
130 
131 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
132 
133 		if (rc != MBX_SUCCESS) {
134 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
135 					"0324 Config Port initialization "
136 					"error, mbxCmd x%x READ_NVPARM, "
137 					"mbxStatus x%x\n",
138 					mb->mbxCommand, mb->mbxStatus);
139 			mempool_free(pmb, phba->mbox_mem_pool);
140 			return -ERESTART;
141 		}
142 		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
143 		       sizeof(phba->wwnn));
144 		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
145 		       sizeof(phba->wwpn));
146 	}
147 
148 	phba->sli3_options = 0x0;
149 
150 	/* Setup and issue mailbox READ REV command */
151 	lpfc_read_rev(phba, pmb);
152 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
153 	if (rc != MBX_SUCCESS) {
154 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
155 				"0439 Adapter failed to init, mbxCmd x%x "
156 				"READ_REV, mbxStatus x%x\n",
157 				mb->mbxCommand, mb->mbxStatus);
158 		mempool_free(pmb, phba->mbox_mem_pool);
159 		return -ERESTART;
160 	}
161 
162 
163 	/*
164 	 * The value of rr must be 1 since the driver set the cv field to 1.
165 	 * This setting requires the FW to set all revision fields.
166 	 */
167 	if (mb->un.varRdRev.rr == 0) {
168 		vp->rev.rBit = 0;
169 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
170 				"0440 Adapter failed to init, READ_REV has "
171 				"missing revision information.\n");
172 		mempool_free(pmb, phba->mbox_mem_pool);
173 		return -ERESTART;
174 	}
175 
176 	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
177 		mempool_free(pmb, phba->mbox_mem_pool);
178 		return -EINVAL;
179 	}
180 
181 	/* Save information as VPD data */
182 	vp->rev.rBit = 1;
183 	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
184 	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
185 	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
186 	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
187 	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
188 	vp->rev.biuRev = mb->un.varRdRev.biuRev;
189 	vp->rev.smRev = mb->un.varRdRev.smRev;
190 	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
191 	vp->rev.endecRev = mb->un.varRdRev.endecRev;
192 	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
193 	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
194 	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
195 	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
196 	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
197 	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
198 
199 	/* If the sli feature level is less than 9, we must
200 	 * tear down all RPIs and VPIs on link down if NPIV
201 	 * is enabled.
202 	 */
203 	if (vp->rev.feaLevelHigh < 9)
204 		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
205 
206 	if (lpfc_is_LC_HBA(phba->pcidev->device))
207 		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
208 						sizeof (phba->RandomData));
209 
210 	/* Get adapter VPD information */
211 	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
212 	if (!lpfc_vpd_data)
213 		goto out_free_mbox;
214 
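	/*
	 * The VPD region is read in chunks: each DUMP_MEMORY mailbox returns
	 * data at DMP_RSP_OFFSET in the mailbox area and reports the amount
	 * in word_cnt; offset accumulates until the region is exhausted or
	 * DMP_VPD_SIZE is reached.
	 */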
215 	do {
216 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
218 
219 		if (rc != MBX_SUCCESS) {
220 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
221 					"0441 VPD not present on adapter, "
222 					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
223 					mb->mbxCommand, mb->mbxStatus);
224 			mb->un.varDmp.word_cnt = 0;
225 		}
226 		/* dump mem may return zero when finished, or we may have hit a
227 		 * mailbox error; either way we are done.
228 		 */
229 		if (mb->un.varDmp.word_cnt == 0)
230 			break;
231 		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
232 			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
233 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
234 				      lpfc_vpd_data + offset,
235 				      mb->un.varDmp.word_cnt);
236 		offset += mb->un.varDmp.word_cnt;
237 	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
238 	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
239 
240 	kfree(lpfc_vpd_data);
241 out_free_mbox:
242 	mempool_free(pmb, phba->mbox_mem_pool);
243 	return 0;
244 }
245 
246 /**
247  * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
248  * @phba: pointer to lpfc hba data structure.
249  * @pmboxq: pointer to the driver internal queue element for mailbox command.
250  *
251  * This is the completion handler for the driver's mailbox command that
252  * configures asynchronous events on the device. If the mailbox command
253  * returns successfully, it will set the internal async event support flag
254  * to 1; otherwise, it will set the flag to 0.
255  **/
256 static void
257 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
258 {
259 	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
260 		phba->temp_sensor_support = 1;
261 	else
262 		phba->temp_sensor_support = 0;
263 	mempool_free(pmboxq, phba->mbox_mem_pool);
264 	return;
265 }
266 
267 /**
268  * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
269  * @phba: pointer to lpfc hba data structure.
270  * @pmboxq: pointer to the driver internal queue element for mailbox command.
271  *
272  * This is the completion handler for the dump mailbox command that gets
273  * the wake-up parameters. When the command completes, the response contains
274  * the Option ROM version of the HBA. This function translates the version
275  * number into a human-readable string and stores it in OptionROMVersion.
276  **/
277 static void
278 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
279 {
280 	struct prog_id *prg;
281 	uint32_t prog_id_word;
282 	char dist = ' ';
283 	/* character array used for decoding dist type. */
284 	char dist_char[] = "nabx";
285 
286 	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
287 		mempool_free(pmboxq, phba->mbox_mem_pool);
288 		return;
289 	}
290 
291 	prg = (struct prog_id *) &prog_id_word;
292 
293 	/* word 7 contains the Option ROM version */
294 	prog_id_word = pmboxq->u.mb.un.varWords[7];
295 
296 	/* Decode the Option rom version word to a readable string */
297 	if (prg->dist < 4)
298 		dist = dist_char[prg->dist];
299 
300 	if ((prg->dist == 3) && (prg->num == 0))
301 		sprintf(phba->OptionROMVersion, "%d.%d%d",
302 			prg->ver, prg->rev, prg->lev);
303 	else
304 		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
305 			prg->ver, prg->rev, prg->lev,
306 			dist, prg->num);
307 	mempool_free(pmboxq, phba->mbox_mem_pool);
308 	return;
309 }
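
/*
 * Worked example (hypothetical values): with ver=1, rev=0, lev=2, num=3 and
 * dist=1 ('a' from dist_char[]), the "%d.%d%d%c%d" format above produces
 * "1.02a3"; the (dist == 3 && num == 0) case drops the suffix and produces
 * "1.02".
 */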
310 
311 /**
312  * lpfc_config_port_post - Perform lpfc initialization after config port
313  * @phba: pointer to lpfc hba data structure.
314  *
315  * This routine will do LPFC initialization after the CONFIG_PORT mailbox
316  * command call. It performs all internal resource and state setups on the
317  * port: posting IOCB buffers, enabling the appropriate host interrupt
318  * attentions, setting up ELS ring timers, etc.
319  *
320  * Return codes
321  *   0 - success.
322  *   Any other value - error.
323  **/
324 int
325 lpfc_config_port_post(struct lpfc_hba *phba)
326 {
327 	struct lpfc_vport *vport = phba->pport;
328 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
329 	LPFC_MBOXQ_t *pmb;
330 	MAILBOX_t *mb;
331 	struct lpfc_dmabuf *mp;
332 	struct lpfc_sli *psli = &phba->sli;
333 	uint32_t status, timeout;
334 	int i, j;
335 	int rc;
336 
337 	spin_lock_irq(&phba->hbalock);
338 	/*
339 	 * If the Config port completed correctly the HBA is no
340 	 * longer overheated.
341 	 */
342 	if (phba->over_temp_state == HBA_OVER_TEMP)
343 		phba->over_temp_state = HBA_NORMAL_TEMP;
344 	spin_unlock_irq(&phba->hbalock);
345 
346 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
347 	if (!pmb) {
348 		phba->link_state = LPFC_HBA_ERROR;
349 		return -ENOMEM;
350 	}
351 	mb = &pmb->u.mb;
352 
353 	/* Get login parameters for NID.  */
354 	rc = lpfc_read_sparam(phba, pmb, 0);
355 	if (rc) {
356 		mempool_free(pmb, phba->mbox_mem_pool);
357 		return -ENOMEM;
358 	}
359 
360 	pmb->vport = vport;
361 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
362 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
363 				"0448 Adapter failed init, mbxCmd x%x "
364 				"READ_SPARM mbxStatus x%x\n",
365 				mb->mbxCommand, mb->mbxStatus);
366 		phba->link_state = LPFC_HBA_ERROR;
367 		mp = (struct lpfc_dmabuf *) pmb->context1;
368 		mempool_free(pmb, phba->mbox_mem_pool);
369 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
370 		kfree(mp);
371 		return -EIO;
372 	}
373 
374 	mp = (struct lpfc_dmabuf *) pmb->context1;
375 
376 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
377 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
378 	kfree(mp);
379 	pmb->context1 = NULL;
380 
381 	if (phba->cfg_soft_wwnn)
382 		u64_to_wwn(phba->cfg_soft_wwnn,
383 			   vport->fc_sparam.nodeName.u.wwn);
384 	if (phba->cfg_soft_wwpn)
385 		u64_to_wwn(phba->cfg_soft_wwpn,
386 			   vport->fc_sparam.portName.u.wwn);
387 	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
388 	       sizeof (struct lpfc_name));
389 	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390 	       sizeof (struct lpfc_name));
391 
392 	/* Update the fc_host data structures with new wwn. */
393 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
394 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
395 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
396 
397 	/* If no serial number in VPD data, use low 6 bytes of WWNN */
398 	/* This should be consolidated into parse_vpd ? - mr */
399 	if (phba->SerialNumber[0] == 0) {
400 		uint8_t *outptr;
401 
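		/*
		 * The low 6 bytes of the WWNN yield 12 serial-number
		 * characters: each nibble maps 0-9 to '0'-'9' (0x30 + j)
		 * and 10-15 to 'a'-'f' (0x61 + j - 10).
		 */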
402 		outptr = &vport->fc_nodename.u.s.IEEE[0];
403 		for (i = 0; i < 12; i++) {
404 			status = *outptr++;
405 			j = ((status & 0xf0) >> 4);
406 			if (j <= 9)
407 				phba->SerialNumber[i] =
408 				    (char)((uint8_t) 0x30 + (uint8_t) j);
409 			else
410 				phba->SerialNumber[i] =
411 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
412 			i++;
413 			j = (status & 0xf);
414 			if (j <= 9)
415 				phba->SerialNumber[i] =
416 				    (char)((uint8_t) 0x30 + (uint8_t) j);
417 			else
418 				phba->SerialNumber[i] =
419 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
420 		}
421 	}
422 
423 	lpfc_read_config(phba, pmb);
424 	pmb->vport = vport;
425 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
426 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
427 				"0453 Adapter failed to init, mbxCmd x%x "
428 				"READ_CONFIG, mbxStatus x%x\n",
429 				mb->mbxCommand, mb->mbxStatus);
430 		phba->link_state = LPFC_HBA_ERROR;
431 		mempool_free(pmb, phba->mbox_mem_pool);
432 		return -EIO;
433 	}
434 
435 	/* Check if the port is disabled */
436 	lpfc_sli_read_link_ste(phba);
437 
438 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
439 	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
440 		phba->cfg_hba_queue_depth =
441 			(mb->un.varRdConfig.max_xri + 1) -
442 					lpfc_sli4_get_els_iocb_cnt(phba);
443 
444 	phba->lmt = mb->un.varRdConfig.lmt;
445 
446 	/* Get the default values for Model Name and Description */
447 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
448 
449 	if ((phba->cfg_link_speed > LINK_SPEED_10G)
450 	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
451 		&& !(phba->lmt & LMT_1Gb))
452 	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
453 		&& !(phba->lmt & LMT_2Gb))
454 	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
455 		&& !(phba->lmt & LMT_4Gb))
456 	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
457 		&& !(phba->lmt & LMT_8Gb))
458 	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
459 		&& !(phba->lmt & LMT_10Gb))) {
460 		/* Reset link speed to auto */
461 		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
462 			"1302 Invalid speed for this board: "
463 			"Reset link speed to auto: x%x\n",
464 			phba->cfg_link_speed);
465 		phba->cfg_link_speed = LINK_SPEED_AUTO;
466 	}
467 
468 	phba->link_state = LPFC_LINK_DOWN;
469 
470 	/* Only process IOCBs on ELS ring until hba_state is READY */
471 	if (psli->ring[psli->extra_ring].cmdringaddr)
472 		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
473 	if (psli->ring[psli->fcp_ring].cmdringaddr)
474 		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
475 	if (psli->ring[psli->next_ring].cmdringaddr)
476 		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
477 
478 	/* Post receive buffers for desired rings */
479 	if (phba->sli_rev != 3)
480 		lpfc_post_rcv_buf(phba);
481 
482 	/*
483 	 * Configure HBA MSI-X attention conditions to messages if in MSI-X mode
484 	 */
485 	if (phba->intr_type == MSIX) {
486 		rc = lpfc_config_msi(phba, pmb);
487 		if (rc) {
488 			mempool_free(pmb, phba->mbox_mem_pool);
489 			return -EIO;
490 		}
491 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
492 		if (rc != MBX_SUCCESS) {
493 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
494 					"0352 Config MSI mailbox command "
495 					"failed, mbxCmd x%x, mbxStatus x%x\n",
496 					pmb->u.mb.mbxCommand,
497 					pmb->u.mb.mbxStatus);
498 			mempool_free(pmb, phba->mbox_mem_pool);
499 			return -EIO;
500 		}
501 	}
502 
503 	spin_lock_irq(&phba->hbalock);
504 	/* Initialize ERATT handling flag */
505 	phba->hba_flag &= ~HBA_ERATT_HANDLED;
506 
507 	/* Enable appropriate host interrupts */
508 	status = readl(phba->HCregaddr);
509 	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
510 	if (psli->num_rings > 0)
511 		status |= HC_R0INT_ENA;
512 	if (psli->num_rings > 1)
513 		status |= HC_R1INT_ENA;
514 	if (psli->num_rings > 2)
515 		status |= HC_R2INT_ENA;
516 	if (psli->num_rings > 3)
517 		status |= HC_R3INT_ENA;
518 
519 	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
520 	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
521 		status &= ~(HC_R0INT_ENA);
522 
523 	writel(status, phba->HCregaddr);
524 	readl(phba->HCregaddr); /* flush */
525 	spin_unlock_irq(&phba->hbalock);
526 
527 	/* Set up ring-0 (ELS) timer */
528 	timeout = phba->fc_ratov * 2;
529 	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
530 	/* Set up heart beat (HB) timer */
531 	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
532 	phba->hb_outstanding = 0;
533 	phba->last_completion_time = jiffies;
534 	/* Set up error attention (ERATT) polling timer */
535 	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
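	/*
	 * Timer arithmetic note: jiffies counts clock ticks and HZ is the
	 * number of ticks per second, so "jiffies + HZ * n" above expires
	 * n seconds from now.
	 */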
536 
537 	if (phba->hba_flag & LINK_DISABLED) {
538 		lpfc_printf_log(phba,
539 			KERN_ERR, LOG_INIT,
540 			"2598 Adapter Link is disabled.\n");
541 		lpfc_down_link(phba, pmb);
542 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
543 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
544 		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
545 			lpfc_printf_log(phba,
546 			KERN_ERR, LOG_INIT,
547 			"2599 Adapter failed to issue DOWN_LINK"
548 			" mbox command rc 0x%x\n", rc);
549 
550 			mempool_free(pmb, phba->mbox_mem_pool);
551 			return -EIO;
552 		}
553 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
554 		lpfc_init_link(phba, pmb, phba->cfg_topology,
555 			phba->cfg_link_speed);
556 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
557 		lpfc_set_loopback_flag(phba);
558 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
559 		if (rc != MBX_SUCCESS) {
560 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
561 				"0454 Adapter failed to init, mbxCmd x%x "
562 				"INIT_LINK, mbxStatus x%x\n",
563 				mb->mbxCommand, mb->mbxStatus);
564 
565 			/* Clear all interrupt enable conditions */
566 			writel(0, phba->HCregaddr);
567 			readl(phba->HCregaddr); /* flush */
568 			/* Clear all pending interrupts */
569 			writel(0xffffffff, phba->HAregaddr);
570 			readl(phba->HAregaddr); /* flush */
571 
572 			phba->link_state = LPFC_HBA_ERROR;
573 			if (rc != MBX_BUSY)
574 				mempool_free(pmb, phba->mbox_mem_pool);
575 			return -EIO;
576 		}
577 	}
578 	/* MBOX buffer will be freed in mbox compl */
579 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
580 	if (!pmb) {
581 		phba->link_state = LPFC_HBA_ERROR;
582 		return -ENOMEM;
583 	}
584 
585 	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
586 	pmb->mbox_cmpl = lpfc_config_async_cmpl;
587 	pmb->vport = phba->pport;
588 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
589 
590 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
591 		lpfc_printf_log(phba,
592 				KERN_ERR,
593 				LOG_INIT,
594 				"0456 Adapter failed to issue "
595 				"ASYNCEVT_ENABLE mbox status x%x\n",
596 				rc);
597 		mempool_free(pmb, phba->mbox_mem_pool);
598 	}
599 
600 	/* Get Option rom version */
601 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
602 	if (!pmb) {
603 		phba->link_state = LPFC_HBA_ERROR;
604 		return -ENOMEM;
605 	}
606 
607 	lpfc_dump_wakeup_param(phba, pmb);
608 	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
609 	pmb->vport = phba->pport;
610 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
611 
612 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
613 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
614 				"to get Option ROM version status x%x\n", rc);
615 		mempool_free(pmb, phba->mbox_mem_pool);
616 	}
617 
618 	return 0;
619 }
620 
621 /**
622  * lpfc_hba_init_link - Initialize the FC link
623  * @phba: pointer to lpfc hba data structure.
624  *
625  * This routine will issue the INIT_LINK mailbox command call.
626  * It is available to other drivers through the lpfc_hba data
627  * structure for use as a delayed link up mechanism with the
628  * module parameter lpfc_suppress_link_up.
629  *
630  * Return code
631  *		0 - success
632  *		Any other value - error
633  **/
634 int
635 lpfc_hba_init_link(struct lpfc_hba *phba)
636 {
637 	struct lpfc_vport *vport = phba->pport;
638 	LPFC_MBOXQ_t *pmb;
639 	MAILBOX_t *mb;
640 	int rc;
641 
642 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
643 	if (!pmb) {
644 		phba->link_state = LPFC_HBA_ERROR;
645 		return -ENOMEM;
646 	}
647 	mb = &pmb->u.mb;
648 	pmb->vport = vport;
649 
650 	lpfc_init_link(phba, pmb, phba->cfg_topology,
651 		phba->cfg_link_speed);
652 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
653 	lpfc_set_loopback_flag(phba);
654 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
655 	if (rc != MBX_SUCCESS) {
656 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
657 			"0498 Adapter failed to init, mbxCmd x%x "
658 			"INIT_LINK, mbxStatus x%x\n",
659 			mb->mbxCommand, mb->mbxStatus);
660 		/* Clear all interrupt enable conditions */
661 		writel(0, phba->HCregaddr);
662 		readl(phba->HCregaddr); /* flush */
663 		/* Clear all pending interrupts */
664 		writel(0xffffffff, phba->HAregaddr);
665 		readl(phba->HAregaddr); /* flush */
666 		phba->link_state = LPFC_HBA_ERROR;
667 		if (rc != MBX_BUSY)
668 			mempool_free(pmb, phba->mbox_mem_pool);
669 		return -EIO;
670 	}
671 	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
672 
673 	return 0;
674 }
675 
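/*
 * Illustrative (hypothetical) call sequence for the delayed link-up path
 * handled by lpfc_hba_init_link(); the actual call site depends on the
 * rest of the driver:
 *
 *	if (phba->cfg_suppress_link_up != LPFC_INITIALIZE_LINK)
 *		rc = lpfc_hba_init_link(phba);
 *
 * A nonzero return means the INIT_LINK mailbox could not be issued and the
 * port was left in LPFC_HBA_ERROR, as in the error path above.
 */
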
676 /**
677  * lpfc_hba_down_link - this routine downs the FC link
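 * @phba: pointer to lpfc hba data structure.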
678  *
679  * This routine will issue the DOWN_LINK mailbox command call.
680  * It is available to other drivers through the lpfc_hba data
681  * structure for use to stop the link.
682  *
683  * Return code
684  *		0 - success
685  *		Any other value - error
686  **/
687 int
688 lpfc_hba_down_link(struct lpfc_hba *phba)
689 {
690 	LPFC_MBOXQ_t *pmb;
691 	int rc;
692 
693 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
694 	if (!pmb) {
695 		phba->link_state = LPFC_HBA_ERROR;
696 		return -ENOMEM;
697 	}
698 
699 	lpfc_printf_log(phba,
700 		KERN_ERR, LOG_INIT,
701 		"0491 Adapter Link is disabled.\n");
702 	lpfc_down_link(phba, pmb);
703 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
704 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
705 	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
706 		lpfc_printf_log(phba,
707 		KERN_ERR, LOG_INIT,
708 		"2522 Adapter failed to issue DOWN_LINK"
709 		" mbox command rc 0x%x\n", rc);
710 
711 		mempool_free(pmb, phba->mbox_mem_pool);
712 		return -EIO;
713 	}
714 	return 0;
715 }
716 
717 /**
718  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
719  * @phba: pointer to lpfc HBA data structure.
720  *
721  * This routine will do LPFC uninitialization before the HBA is reset when
722  * bringing down the SLI Layer.
723  *
724  * Return codes
725  *   0 - success.
726  *   Any other value - error.
727  **/
728 int
729 lpfc_hba_down_prep(struct lpfc_hba *phba)
730 {
731 	struct lpfc_vport **vports;
732 	int i;
733 
734 	if (phba->sli_rev <= LPFC_SLI_REV3) {
735 		/* Disable interrupts */
736 		writel(0, phba->HCregaddr);
737 		readl(phba->HCregaddr); /* flush */
738 	}
739 
740 	if (phba->pport->load_flag & FC_UNLOADING)
741 		lpfc_cleanup_discovery_resources(phba->pport);
742 	else {
743 		vports = lpfc_create_vport_work_array(phba);
744 		if (vports != NULL)
745 			for (i = 0; i <= phba->max_vports &&
746 				vports[i] != NULL; i++)
747 				lpfc_cleanup_discovery_resources(vports[i]);
748 		lpfc_destroy_vport_work_array(phba, vports);
749 	}
750 	return 0;
751 }
752 
753 /**
754  * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
755  * @phba: pointer to lpfc HBA data structure.
756  *
757  * This routine will do uninitialization after the HBA is reset when bringing
758  * down the SLI Layer.
759  *
760  * Return codes
761  *   0 - success.
762  *   Any other value - error.
763  **/
764 static int
765 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
766 {
767 	struct lpfc_sli *psli = &phba->sli;
768 	struct lpfc_sli_ring *pring;
769 	struct lpfc_dmabuf *mp, *next_mp;
770 	LIST_HEAD(completions);
771 	int i;
772 
773 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
774 		lpfc_sli_hbqbuf_free_all(phba);
775 	else {
776 		/* Cleanup preposted buffers on the ELS ring */
777 		pring = &psli->ring[LPFC_ELS_RING];
778 		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
779 			list_del(&mp->list);
780 			pring->postbufq_cnt--;
781 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
782 			kfree(mp);
783 		}
784 	}
785 
786 	spin_lock_irq(&phba->hbalock);
787 	for (i = 0; i < psli->num_rings; i++) {
788 		pring = &psli->ring[i];
789 
790 		/* At this point in time the HBA is either reset or DOA. Either
791 		 * way, nothing should be on txcmplq as it will NEVER complete.
792 		 */
793 		list_splice_init(&pring->txcmplq, &completions);
794 		pring->txcmplq_cnt = 0;
795 		spin_unlock_irq(&phba->hbalock);
796 
797 		/* Cancel all the IOCBs from the completions list */
798 		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
799 				      IOERR_SLI_ABORTED);
800 
801 		lpfc_sli_abort_iocb_ring(phba, pring);
802 		spin_lock_irq(&phba->hbalock);
803 	}
804 	spin_unlock_irq(&phba->hbalock);
805 
806 	return 0;
807 }
808 /**
809  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
810  * @phba: pointer to lpfc HBA data structure.
811  *
812  * This routine will do uninitialization after the HBA is reset when bringing
813  * down the SLI Layer.
814  *
815  * Return codes
816  *   0 - success.
817  *   Any other value - error.
818  **/
819 static int
820 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
821 {
822 	struct lpfc_scsi_buf *psb, *psb_next;
823 	LIST_HEAD(aborts);
824 	int ret;
825 	unsigned long iflag = 0;
826 	struct lpfc_sglq *sglq_entry = NULL;
827 
828 	ret = lpfc_hba_down_post_s3(phba);
829 	if (ret)
830 		return ret;
831 	/* At this point in time the HBA is either reset or DOA. Either
832 	 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
833 	 * on the lpfc_sgl_list so that it can either be freed if the
834 	 * driver is unloading or reposted if the driver is restarting
835 	 * the port.
836 	 */
837 	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
838 					/* scsi_buf_list */
839 	/* abts_sgl_list_lock required because worker thread uses this
840 	 * list.
841 	 */
842 	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
843 	list_for_each_entry(sglq_entry,
844 		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
845 		sglq_entry->state = SGL_FREED;
846 
847 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
848 			&phba->sli4_hba.lpfc_sgl_list);
849 	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
850 	/* abts_scsi_buf_list_lock required because worker thread uses this
851 	 * list.
852 	 */
853 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
854 	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
855 			&aborts);
856 	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
857 	spin_unlock_irq(&phba->hbalock);
858 
859 	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
860 		psb->pCmd = NULL;
861 		psb->status = IOSTAT_SUCCESS;
862 	}
863 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
864 	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
865 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
866 	return 0;
867 }
868 
869 /**
870  * lpfc_hba_down_post - Wrapper func for hba down post routine
871  * @phba: pointer to lpfc HBA data structure.
872  *
873  * This routine wraps the actual SLI3 or SLI4 routine for performing
874  * uninitialization after the HBA is reset when bringing down the SLI Layer.
875  *
876  * Return codes
877  *   0 - success.
878  *   Any other value - error.
879  **/
880 int
881 lpfc_hba_down_post(struct lpfc_hba *phba)
882 {
883 	return (*phba->lpfc_hba_down_post)(phba);
884 }
885 
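/*
 * Note: the lpfc_hba_down_post (and similar) function pointers dispatched
 * by these wrappers are filled in with the _s3 or _s4 variants when the
 * driver sets up its per-SLI-revision API jump table at attach time.
 */
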
886 /**
887  * lpfc_hb_timeout - The HBA-timer timeout handler
888  * @ptr: unsigned long holds the pointer to lpfc hba data structure.
889  *
890  * This is the HBA-timer timeout handler registered to the lpfc driver. When
891  * this timer fires, a HBA timeout event shall be posted to the lpfc driver
892  * work-port-events bitmap and the worker thread is notified. This timeout
893  * event will be used by the worker thread to invoke the actual timeout
894  * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
895  * be performed in the timeout handler and the HBA timeout event bit shall
896  * be cleared by the worker thread after it has taken the event bitmap out.
897  **/
898 static void
899 lpfc_hb_timeout(unsigned long ptr)
900 {
901 	struct lpfc_hba *phba;
902 	uint32_t tmo_posted;
903 	unsigned long iflag;
904 
905 	phba = (struct lpfc_hba *)ptr;
906 
907 	/* Check for heart beat timeout conditions */
908 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
909 	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
910 	if (!tmo_posted)
911 		phba->pport->work_port_events |= WORKER_HB_TMO;
912 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
913 
914 	/* Tell the worker thread there is work to do */
915 	if (!tmo_posted)
916 		lpfc_worker_wake_up(phba);
917 	return;
918 }
919 
920 /**
921  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
922  * @phba: pointer to lpfc hba data structure.
923  * @pmboxq: pointer to the driver internal queue element for mailbox command.
924  *
925  * This is the callback function to the lpfc heart-beat mailbox command.
926  * If configured, the lpfc driver issues the heart-beat mailbox command to
927  * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
928  * heart-beat mailbox command is issued, the driver shall set up heart-beat
929  * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
930  * heart-beat outstanding state. Once the mailbox command comes back and
931  * no error conditions detected, the heart-beat mailbox command timer is
932  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
933  * state is cleared for the next heart-beat. If the timer expired with the
934  * heart-beat outstanding state set, the driver will put the HBA offline.
935  **/
936 static void
937 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
938 {
939 	unsigned long drvr_flag;
940 
941 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
942 	phba->hb_outstanding = 0;
943 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
944 
945 	/* Check and reset heart-beat timer if necessary */
946 	mempool_free(pmboxq, phba->mbox_mem_pool);
947 	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
948 		!(phba->link_state == LPFC_HBA_ERROR) &&
949 		!(phba->pport->load_flag & FC_UNLOADING))
950 		mod_timer(&phba->hb_tmofunc,
951 			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
952 	return;
953 }
954 
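/*
 * Heart-beat protocol summary: every LPFC_HB_MBOX_INTERVAL seconds the
 * timeout handler below issues a heart-beat mailbox and re-arms the timer
 * for LPFC_HB_MBOX_TIMEOUT seconds; the completion handler above clears
 * hb_outstanding and re-arms the interval timer. A timeout that fires with
 * hb_outstanding still set means the heart-beat never completed and the
 * port is taken offline.
 */
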
955 /**
956  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
957  * @phba: pointer to lpfc hba data structure.
958  *
959  * This is the actual HBA-timer timeout handler to be invoked by the worker
960  * thread whenever the HBA timer fires and an HBA-timeout event is posted.
961  * This handler performs any periodic operations needed for the device. If
962  * such a periodic event has already been attended to either in the interrupt
963  * handler or by processing slow-ring or fast-ring events within the HBA-timer
964  * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
965  * the timer for the next timeout period. If the lpfc heart-beat mailbox
966  * command is configured and there is no heart-beat mailbox command
967  * outstanding, a heart-beat mailbox is issued and the timer is set properly.
968  * Otherwise, if a heart-beat mailbox command has been outstanding, the HBA
969  * shall be taken offline.
970  **/
971 void
972 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
973 {
974 	struct lpfc_vport **vports;
975 	LPFC_MBOXQ_t *pmboxq;
976 	struct lpfc_dmabuf *buf_ptr;
977 	int retval, i;
978 	struct lpfc_sli *psli = &phba->sli;
979 	LIST_HEAD(completions);
980 
981 	vports = lpfc_create_vport_work_array(phba);
982 	if (vports != NULL)
983 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
984 			lpfc_rcv_seq_check_edtov(vports[i]);
985 	lpfc_destroy_vport_work_array(phba, vports);
986 
987 	if ((phba->link_state == LPFC_HBA_ERROR) ||
988 		(phba->pport->load_flag & FC_UNLOADING) ||
989 		(phba->pport->fc_flag & FC_OFFLINE_MODE))
990 		return;
991 
992 	spin_lock_irq(&phba->pport->work_port_lock);
993 
994 	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
995 		jiffies)) {
996 		spin_unlock_irq(&phba->pport->work_port_lock);
997 		if (!phba->hb_outstanding)
998 			mod_timer(&phba->hb_tmofunc,
999 				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1000 		else
1001 			mod_timer(&phba->hb_tmofunc,
1002 				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1003 		return;
1004 	}
1005 	spin_unlock_irq(&phba->pport->work_port_lock);
1006 
1007 	if (phba->elsbuf_cnt &&
1008 		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1009 		spin_lock_irq(&phba->hbalock);
1010 		list_splice_init(&phba->elsbuf, &completions);
1011 		phba->elsbuf_cnt = 0;
1012 		phba->elsbuf_prev_cnt = 0;
1013 		spin_unlock_irq(&phba->hbalock);
1014 
1015 		while (!list_empty(&completions)) {
1016 			list_remove_head(&completions, buf_ptr,
1017 				struct lpfc_dmabuf, list);
1018 			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1019 			kfree(buf_ptr);
1020 		}
1021 	}
1022 	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1023 
1024 	/* If there is no heart beat outstanding, issue a heartbeat command */
1025 	if (phba->cfg_enable_hba_heartbeat) {
1026 		if (!phba->hb_outstanding) {
1027 			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
1028 			if (!pmboxq) {
1029 				mod_timer(&phba->hb_tmofunc,
1030 					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1031 				return;
1032 			}
1033 
1034 			lpfc_heart_beat(phba, pmboxq);
1035 			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1036 			pmboxq->vport = phba->pport;
1037 			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1038 
1039 			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1040 				mempool_free(pmboxq, phba->mbox_mem_pool);
1041 				mod_timer(&phba->hb_tmofunc,
1042 					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1043 				return;
1044 			}
1045 			mod_timer(&phba->hb_tmofunc,
1046 				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1047 			phba->hb_outstanding = 1;
1048 			return;
1049 		} else {
1050 			/*
1051 			 * If the heart-beat timeout fires with hb_outstanding
1052 			 * set, we need to take the HBA offline.
1053 			 */
1054 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1055 					"0459 Adapter heartbeat failure, "
1056 					"taking this port offline.\n");
1057 
1058 			spin_lock_irq(&phba->hbalock);
1059 			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1060 			spin_unlock_irq(&phba->hbalock);
1061 
1062 			lpfc_offline_prep(phba);
1063 			lpfc_offline(phba);
1064 			lpfc_unblock_mgmt_io(phba);
1065 			phba->link_state = LPFC_HBA_ERROR;
1066 			lpfc_hba_down_post(phba);
1067 		}
1068 	}
1069 }
1070 
1071 /**
1072  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1073  * @phba: pointer to lpfc hba data structure.
1074  *
1075  * This routine is called to bring the HBA offline when an HBA hardware
1076  * error other than Port Error 6 has been detected.
1077  **/
1078 static void
1079 lpfc_offline_eratt(struct lpfc_hba *phba)
1080 {
1081 	struct lpfc_sli   *psli = &phba->sli;
1082 
1083 	spin_lock_irq(&phba->hbalock);
1084 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1085 	spin_unlock_irq(&phba->hbalock);
1086 	lpfc_offline_prep(phba);
1087 
1088 	lpfc_offline(phba);
1089 	lpfc_reset_barrier(phba);
1090 	spin_lock_irq(&phba->hbalock);
1091 	lpfc_sli_brdreset(phba);
1092 	spin_unlock_irq(&phba->hbalock);
1093 	lpfc_hba_down_post(phba);
1094 	lpfc_sli_brdready(phba, HS_MBRDY);
1095 	lpfc_unblock_mgmt_io(phba);
1096 	phba->link_state = LPFC_HBA_ERROR;
1097 	return;
1098 }
1099 
1100 /**
1101  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1102  * @phba: pointer to lpfc hba data structure.
1103  *
1104  * This routine is called to bring a SLI4 HBA offline when an HBA hardware
1105  * error other than Port Error 6 has been detected.
1106  **/
1107 static void
1108 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1109 {
1110 	lpfc_offline_prep(phba);
1111 	lpfc_offline(phba);
1112 	lpfc_sli4_brdreset(phba);
1113 	lpfc_hba_down_post(phba);
1114 	lpfc_sli4_post_status_check(phba);
1115 	lpfc_unblock_mgmt_io(phba);
1116 	phba->link_state = LPFC_HBA_ERROR;
1117 }
1118 
1119 /**
1120  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1121  * @phba: pointer to lpfc hba data structure.
1122  *
1123  * This routine is invoked to handle the deferred HBA hardware error
1124  * conditions. This type of error is indicated by the HBA setting ER1
1125  * and another ER bit in the host status register. The driver will
1126  * wait until the ER1 bit clears before handling the error condition.
1127  **/
1128 static void
1129 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1130 {
1131 	uint32_t old_host_status = phba->work_hs;
1132 	struct lpfc_sli_ring  *pring;
1133 	struct lpfc_sli *psli = &phba->sli;
1134 
1135 	/* If the pci channel is offline, ignore possible errors,
1136 	 * since we cannot communicate with the pci card anyway.
1137 	 */
1138 	if (pci_channel_offline(phba->pcidev)) {
1139 		spin_lock_irq(&phba->hbalock);
1140 		phba->hba_flag &= ~DEFER_ERATT;
1141 		spin_unlock_irq(&phba->hbalock);
1142 		return;
1143 	}
1144 
1145 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1146 		"0479 Deferred Adapter Hardware Error "
1147 		"Data: x%x x%x x%x\n",
1148 		phba->work_hs,
1149 		phba->work_status[0], phba->work_status[1]);
1150 
1151 	spin_lock_irq(&phba->hbalock);
1152 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1153 	spin_unlock_irq(&phba->hbalock);
1154 
1155 
1156 	/*
1157 	 * Firmware stops when it triggers the error attention. That could cause
1158 	 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os) on the
1159 	 * txcmplq and let the SCSI layer retry them after the link is re-established.
1160 	 */
1161 	pring = &psli->ring[psli->fcp_ring];
1162 	lpfc_sli_abort_iocb_ring(phba, pring);
1163 
1164 	/*
1165 	 * There was a firmware error. Take the hba offline and then
1166 	 * attempt to restart it.
1167 	 */
1168 	lpfc_offline_prep(phba);
1169 	lpfc_offline(phba);
1170 
1171 	/* Wait for the ER1 bit to clear.*/
1172 	while (phba->work_hs & HS_FFER1) {
1173 		msleep(100);
1174 		phba->work_hs = readl(phba->HSregaddr);
1175 		/* If driver is unloading let the worker thread continue */
1176 		if (phba->pport->load_flag & FC_UNLOADING) {
1177 			phba->work_hs = 0;
1178 			break;
1179 		}
1180 	}
1181 
1182 	/*
1183 	 * This is to protect against a race condition in which the
1184 	 * first write to the host attention register clears the
1185 	 * host status register.
1186 	 */
1187 	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1188 		phba->work_hs = old_host_status & ~HS_FFER1;
1189 
1190 	spin_lock_irq(&phba->hbalock);
1191 	phba->hba_flag &= ~DEFER_ERATT;
1192 	spin_unlock_irq(&phba->hbalock);
1193 	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1194 	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1195 }
1196 
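/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to management apps
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event to the FC
 * transport so that management applications are notified of the port error.
 **/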
1197 static void
1198 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1199 {
1200 	struct lpfc_board_event_header board_event;
1201 	struct Scsi_Host *shost;
1202 
1203 	board_event.event_type = FC_REG_BOARD_EVENT;
1204 	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1205 	shost = lpfc_shost_from_vport(phba->pport);
1206 	fc_host_post_vendor_event(shost, fc_get_event_number(),
1207 				  sizeof(board_event),
1208 				  (char *) &board_event,
1209 				  LPFC_NL_VENDOR_ID);
1210 }
1211 
1212 /**
1213  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1214  * @phba: pointer to lpfc hba data structure.
1215  *
1216  * This routine is invoked to handle the following HBA hardware error
1217  * conditions:
1218  * 1 - HBA error attention interrupt
1219  * 2 - DMA ring index out of range
1220  * 3 - Mailbox command came back as unknown
1221  **/
1222 static void
1223 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1224 {
1225 	struct lpfc_vport *vport = phba->pport;
1226 	struct lpfc_sli   *psli = &phba->sli;
1227 	struct lpfc_sli_ring  *pring;
1228 	uint32_t event_data;
1229 	unsigned long temperature;
1230 	struct temp_event temp_event_data;
1231 	struct Scsi_Host  *shost;
1232 
1233 	/* If the pci channel is offline, ignore possible errors,
1234 	 * since we cannot communicate with the pci card anyway.
1235 	 */
1236 	if (pci_channel_offline(phba->pcidev)) {
1237 		spin_lock_irq(&phba->hbalock);
1238 		phba->hba_flag &= ~DEFER_ERATT;
1239 		spin_unlock_irq(&phba->hbalock);
1240 		return;
1241 	}
1242 
1243 	/* If resets are disabled then leave the HBA alone and return */
1244 	if (!phba->cfg_enable_hba_reset)
1245 		return;
1246 
1247 	/* Send an internal error event to mgmt application */
1248 	lpfc_board_errevt_to_mgmt(phba);
1249 
1250 	if (phba->hba_flag & DEFER_ERATT)
1251 		lpfc_handle_deferred_eratt(phba);
1252 
1253 	if (phba->work_hs & HS_FFER6) {
1254 		/* Re-establishing Link */
1255 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1256 				"1301 Re-establishing Link "
1257 				"Data: x%x x%x x%x\n",
1258 				phba->work_hs,
1259 				phba->work_status[0], phba->work_status[1]);
1260 
1261 		spin_lock_irq(&phba->hbalock);
1262 		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1263 		spin_unlock_irq(&phba->hbalock);
1264 
1265 		/*
1266 		 * Firmware stops when it triggers the error attention with
1267 		 * HS_FFER6. That could cause I/Os to be dropped by the
1268 		 * firmware. Error out the iocbs (I/Os) on the txcmplq and let
1269 		 * the SCSI layer retry them after the link is re-established.
1270 		 */
1271 		pring = &psli->ring[psli->fcp_ring];
1272 		lpfc_sli_abort_iocb_ring(phba, pring);
1273 
1274 		/*
1275 		 * There was a firmware error.  Take the hba offline and then
1276 		 * attempt to restart it.
1277 		 */
1278 		lpfc_offline_prep(phba);
1279 		lpfc_offline(phba);
1280 		lpfc_sli_brdrestart(phba);
1281 		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1282 			lpfc_unblock_mgmt_io(phba);
1283 			return;
1284 		}
1285 		lpfc_unblock_mgmt_io(phba);
1286 	} else if (phba->work_hs & HS_CRIT_TEMP) {
1287 		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1288 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1289 		temp_event_data.event_code = LPFC_CRIT_TEMP;
1290 		temp_event_data.data = (uint32_t)temperature;
1291 
1292 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1293 				"0406 Adapter maximum temperature exceeded "
1294 				"(%ld), taking this port offline "
1295 				"Data: x%x x%x x%x\n",
1296 				temperature, phba->work_hs,
1297 				phba->work_status[0], phba->work_status[1]);
1298 
1299 		shost = lpfc_shost_from_vport(phba->pport);
1300 		fc_host_post_vendor_event(shost, fc_get_event_number(),
1301 					  sizeof(temp_event_data),
1302 					  (char *) &temp_event_data,
1303 					  SCSI_NL_VID_TYPE_PCI
1304 					  | PCI_VENDOR_ID_EMULEX);
1305 
1306 		spin_lock_irq(&phba->hbalock);
1307 		phba->over_temp_state = HBA_OVER_TEMP;
1308 		spin_unlock_irq(&phba->hbalock);
1309 		lpfc_offline_eratt(phba);
1310 
1311 	} else {
1312 		/* The if clause above forces this code path when the status
1313 		 * failure is a value other than FFER6. Do not take the port
1314 		 * offline twice. This is the adapter hardware error path.
1315 		 */
1316 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1317 				"0457 Adapter Hardware Error "
1318 				"Data: x%x x%x x%x\n",
1319 				phba->work_hs,
1320 				phba->work_status[0], phba->work_status[1]);
1321 
1322 		event_data = FC_REG_DUMP_EVENT;
1323 		shost = lpfc_shost_from_vport(vport);
1324 		fc_host_post_vendor_event(shost, fc_get_event_number(),
1325 				sizeof(event_data), (char *) &event_data,
1326 				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1327 
1328 		lpfc_offline_eratt(phba);
1329 	}
1330 	return;
1331 }
1332 
1333 /**
1334  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1335  * @phba: pointer to lpfc hba data structure.
1336  *
1337  * This routine is invoked to handle the SLI4 HBA hardware error attention
1338  * conditions.
1339  **/
1340 static void
1341 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1342 {
1343 	struct lpfc_vport *vport = phba->pport;
1344 	uint32_t event_data;
1345 	struct Scsi_Host *shost;
1346 
1347 	/* If the pci channel is offline, ignore possible errors, since
1348 	 * we cannot communicate with the pci card anyway.
1349 	 */
1350 	if (pci_channel_offline(phba->pcidev))
1351 		return;
1352 	/* If resets are disabled then leave the HBA alone and return */
1353 	if (!phba->cfg_enable_hba_reset)
1354 		return;
1355 
1356 	/* Send an internal error event to mgmt application */
1357 	lpfc_board_errevt_to_mgmt(phba);
1358 
1359 	/* For now, the actual action for SLI4 device handling is not
1360 	 * specified yet; just treat it as an adapter hardware failure.
1361 	 */
1362 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1363 			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1364 			phba->work_status[0], phba->work_status[1]);
1365 
1366 	event_data = FC_REG_DUMP_EVENT;
1367 	shost = lpfc_shost_from_vport(vport);
1368 	fc_host_post_vendor_event(shost, fc_get_event_number(),
1369 				  sizeof(event_data), (char *) &event_data,
1370 				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1371 
1372 	lpfc_sli4_offline_eratt(phba);
1373 }
1374 
1375 /**
1376  * lpfc_handle_eratt - Wrapper func for handling hba error attention
1377  * @phba: pointer to lpfc HBA data structure.
1378  *
1379  * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1380  * routine from the API jump table function pointer from the lpfc_hba struct.
1381  *
1382  * Return codes
1383  *   0 - success.
1384  *   Any other value - error.
1385  **/
1386 void
1387 lpfc_handle_eratt(struct lpfc_hba *phba)
1388 {
1389 	(*phba->lpfc_handle_eratt)(phba);
1390 }
1391 
1392 /**
1393  * lpfc_handle_latt - The HBA link event handler
1394  * @phba: pointer to lpfc hba data structure.
1395  *
1396  * This routine is invoked from the worker thread to handle a HBA host
1397  * attention link event.
1398  **/
1399 void
1400 lpfc_handle_latt(struct lpfc_hba *phba)
1401 {
1402 	struct lpfc_vport *vport = phba->pport;
1403 	struct lpfc_sli   *psli = &phba->sli;
1404 	LPFC_MBOXQ_t *pmb;
1405 	volatile uint32_t control;
1406 	struct lpfc_dmabuf *mp;
1407 	int rc = 0;
1408 
1409 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1410 	if (!pmb) {
1411 		rc = 1;
1412 		goto lpfc_handle_latt_err_exit;
1413 	}
1414 
1415 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1416 	if (!mp) {
1417 		rc = 2;
1418 		goto lpfc_handle_latt_free_pmb;
1419 	}
1420 
1421 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1422 	if (!mp->virt) {
1423 		rc = 3;
1424 		goto lpfc_handle_latt_free_mp;
1425 	}
1426 
1427 	/* Cleanup any outstanding ELS commands */
1428 	lpfc_els_flush_all_cmd(phba);
1429 
1430 	psli->slistat.link_event++;
1431 	lpfc_read_la(phba, pmb, mp);
1432 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1433 	pmb->vport = vport;
1434 	/* Block ELS IOCBs until we have processed this mbox command */
1435 	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1436 	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1437 	if (rc == MBX_NOT_FINISHED) {
1438 		rc = 4;
1439 		goto lpfc_handle_latt_free_mbuf;
1440 	}
1441 
1442 	/* Clear Link Attention in HA REG */
1443 	spin_lock_irq(&phba->hbalock);
1444 	writel(HA_LATT, phba->HAregaddr);
1445 	readl(phba->HAregaddr); /* flush */
1446 	spin_unlock_irq(&phba->hbalock);
1447 
1448 	return;
1449 
1450 lpfc_handle_latt_free_mbuf:
1451 	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1452 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
1453 lpfc_handle_latt_free_mp:
1454 	kfree(mp);
1455 lpfc_handle_latt_free_pmb:
1456 	mempool_free(pmb, phba->mbox_mem_pool);
1457 lpfc_handle_latt_err_exit:
1458 	/* Enable Link attention interrupts */
1459 	spin_lock_irq(&phba->hbalock);
1460 	psli->sli_flag |= LPFC_PROCESS_LA;
1461 	control = readl(phba->HCregaddr);
1462 	control |= HC_LAINT_ENA;
1463 	writel(control, phba->HCregaddr);
1464 	readl(phba->HCregaddr); /* flush */
1465 
1466 	/* Clear Link Attention in HA REG */
1467 	writel(HA_LATT, phba->HAregaddr);
1468 	readl(phba->HAregaddr); /* flush */
1469 	spin_unlock_irq(&phba->hbalock);
1470 	lpfc_linkdown(phba);
1471 	phba->link_state = LPFC_HBA_ERROR;
1472 
1473 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1474 		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1475 
1476 	return;
1477 }
1478 
1479 /**
1480  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1481  * @phba: pointer to lpfc hba data structure.
1482  * @vpd: pointer to the vital product data.
1483  * @len: length of the vital product data in bytes.
1484  *
1485  * This routine parses the Vital Product Data (VPD). The VPD is treated as
1486  * an array of characters. In this routine, the ModelName, ProgramType, and
1487  * ModelDesc, etc. fields of the phba data structure will be populated.
1488  *
1489  * Return codes
1490  *   0 - pointer to the VPD passed in is NULL
1491  *   1 - success
1492  **/
1493 int
1494 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1495 {
1496 	uint8_t lenlo, lenhi;
1497 	int Length;
1498 	int i, j;
1499 	int finished = 0;
1500 	int index = 0;
1501 
1502 	if (!vpd)
1503 		return 0;
1504 
1505 	/* Vital Product */
1506 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1507 			"0455 Vital Product Data: x%x x%x x%x x%x\n",
1508 			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1509 			(uint32_t) vpd[3]);
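	/*
	 * The buffer follows the PCI VPD resource format: tag 0x82 is the
	 * identifier string, 0x90 the read-only keyword area, 0x91 the
	 * read-write area and 0x78 the end tag. A large resource tag is
	 * followed by a little-endian 16-bit length; keyword entries inside
	 * the 0x90 area are a 2-byte keyword ('SN', 'V1'..'V4'), a 1-byte
	 * length and the data bytes.
	 */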
1510 	while (!finished && (index < (len - 4))) {
1511 		switch (vpd[index]) {
1512 		case 0x82:
1513 		case 0x91:
1514 			index += 1;
1515 			lenlo = vpd[index];
1516 			index += 1;
1517 			lenhi = vpd[index];
1518 			index += 1;
1519 			i = ((((unsigned short)lenhi) << 8) + lenlo);
1520 			index += i;
1521 			break;
1522 		case 0x90:
1523 			index += 1;
1524 			lenlo = vpd[index];
1525 			index += 1;
1526 			lenhi = vpd[index];
1527 			index += 1;
1528 			Length = ((((unsigned short)lenhi) << 8) + lenlo);
1529 			if (Length > len - index)
1530 				Length = len - index;
1531 			while (Length > 0) {
1532 				/* Look for Serial Number */
1533 				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1534 					index += 2;
1535 					i = vpd[index];
1536 					index += 1;
1537 					j = 0;
1538 					Length -= (3+i);
1539 					while (i--) {
1540 						phba->SerialNumber[j++] = vpd[index++];
1541 						if (j == 31)
1542 							break;
1543 					}
1544 					phba->SerialNumber[j] = 0;
1545 					continue;
1546 				}
1547 				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1548 					phba->vpd_flag |= VPD_MODEL_DESC;
1549 					index += 2;
1550 					i = vpd[index];
1551 					index += 1;
1552 					j = 0;
1553 					Length -= (3+i);
1554 					while (i--) {
1555 						phba->ModelDesc[j++] = vpd[index++];
1556 						if (j == 255)
1557 							break;
1558 					}
1559 					phba->ModelDesc[j] = 0;
1560 					continue;
1561 				}
1562 				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1563 					phba->vpd_flag |= VPD_MODEL_NAME;
1564 					index += 2;
1565 					i = vpd[index];
1566 					index += 1;
1567 					j = 0;
1568 					Length -= (3+i);
1569 					while (i--) {
1570 						phba->ModelName[j++] = vpd[index++];
1571 						if (j == 79)
1572 							break;
1573 					}
1574 					phba->ModelName[j] = 0;
1575 					continue;
1576 				}
1577 				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1578 					phba->vpd_flag |= VPD_PROGRAM_TYPE;
1579 					index += 2;
1580 					i = vpd[index];
1581 					index += 1;
1582 					j = 0;
1583 					Length -= (3+i);
1584 					while (i--) {
1585 						phba->ProgramType[j++] = vpd[index++];
1586 						if (j == 255)
1587 							break;
1588 					}
1589 					phba->ProgramType[j] = 0;
1590 					continue;
1591 				}
1592 				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1593 					phba->vpd_flag |= VPD_PORT;
1594 					index += 2;
1595 					i = vpd[index];
1596 					index += 1;
1597 					j = 0;
1598 					Length -= (3+i);
1599 					while (i--) {
1600 						phba->Port[j++] = vpd[index++];
1601 						if (j == 19)
1602 							break;
1603 					}
1604 					phba->Port[j] = 0;
1605 					continue;
1606 				}
1607 				else {
1608 					index += 2;
1609 					i = vpd[index];
1610 					index += 1;
1611 					index += i;
1612 					Length -= (3 + i);
1613 				}
1614 			}
1615 			finished = 0;
1616 			break;
1617 		case 0x78:
1618 			finished = 1;
1619 			break;
1620 		default:
1621 			index++;
1622 			break;
1623 		}
1624 	}
1625 
1626 	return 1;
1627 }
1628 
1629 /**
1630  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1631  * @phba: pointer to lpfc hba data structure.
1632  * @mdp: pointer to the data structure to hold the derived model name.
1633  * @descp: pointer to the data structure to hold the derived description.
1634  *
1635  * This routine retrieves HBA's description based on its registered PCI device
1636  * ID. The @descp passed into this function points to an array of 256 chars. It
1637  * shall be returned with the model name, maximum speed, and the host bus type.
1638  * The @mdp passed into this function points to an array of 80 chars. When the
1639  * function returns, the @mdp will be filled with the model name.
1640  **/
1641 static void
1642 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1643 {
1644 	lpfc_vpd_t *vp;
1645 	uint16_t dev_id = phba->pcidev->device;
1646 	int max_speed;
1647 	int GE = 0;
1648 	int oneConnect = 0; /* default is not a oneConnect */
1649 	struct {
1650 		char *name;
1651 		char *bus;
1652 		char *function;
1653 	} m = {"<Unknown>", "", ""};
1654 
1655 	if (mdp && mdp[0] != '\0'
1656 		&& descp && descp[0] != '\0')
1657 		return;
1658 
1659 	if (phba->lmt & LMT_10Gb)
1660 		max_speed = 10;
1661 	else if (phba->lmt & LMT_8Gb)
1662 		max_speed = 8;
1663 	else if (phba->lmt & LMT_4Gb)
1664 		max_speed = 4;
1665 	else if (phba->lmt & LMT_2Gb)
1666 		max_speed = 2;
1667 	else
1668 		max_speed = 1;
1669 
1670 	vp = &phba->vpd;
1671 
1672 	switch (dev_id) {
1673 	case PCI_DEVICE_ID_FIREFLY:
1674 		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1675 		break;
1676 	case PCI_DEVICE_ID_SUPERFLY:
1677 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1678 			m = (typeof(m)){"LP7000", "PCI",
1679 					"Fibre Channel Adapter"};
1680 		else
1681 			m = (typeof(m)){"LP7000E", "PCI",
1682 					"Fibre Channel Adapter"};
1683 		break;
1684 	case PCI_DEVICE_ID_DRAGONFLY:
1685 		m = (typeof(m)){"LP8000", "PCI",
1686 				"Fibre Channel Adapter"};
1687 		break;
1688 	case PCI_DEVICE_ID_CENTAUR:
1689 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1690 			m = (typeof(m)){"LP9002", "PCI",
1691 					"Fibre Channel Adapter"};
1692 		else
1693 			m = (typeof(m)){"LP9000", "PCI",
1694 					"Fibre Channel Adapter"};
1695 		break;
1696 	case PCI_DEVICE_ID_RFLY:
1697 		m = (typeof(m)){"LP952", "PCI",
1698 				"Fibre Channel Adapter"};
1699 		break;
1700 	case PCI_DEVICE_ID_PEGASUS:
1701 		m = (typeof(m)){"LP9802", "PCI-X",
1702 				"Fibre Channel Adapter"};
1703 		break;
1704 	case PCI_DEVICE_ID_THOR:
1705 		m = (typeof(m)){"LP10000", "PCI-X",
1706 				"Fibre Channel Adapter"};
1707 		break;
1708 	case PCI_DEVICE_ID_VIPER:
1709 		m = (typeof(m)){"LPX1000",  "PCI-X",
1710 				"Fibre Channel Adapter"};
1711 		break;
1712 	case PCI_DEVICE_ID_PFLY:
1713 		m = (typeof(m)){"LP982", "PCI-X",
1714 				"Fibre Channel Adapter"};
1715 		break;
1716 	case PCI_DEVICE_ID_TFLY:
1717 		m = (typeof(m)){"LP1050", "PCI-X",
1718 				"Fibre Channel Adapter"};
1719 		break;
1720 	case PCI_DEVICE_ID_HELIOS:
1721 		m = (typeof(m)){"LP11000", "PCI-X2",
1722 				"Fibre Channel Adapter"};
1723 		break;
1724 	case PCI_DEVICE_ID_HELIOS_SCSP:
1725 		m = (typeof(m)){"LP11000-SP", "PCI-X2",
1726 				"Fibre Channel Adapter"};
1727 		break;
1728 	case PCI_DEVICE_ID_HELIOS_DCSP:
1729 		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
1730 				"Fibre Channel Adapter"};
1731 		break;
1732 	case PCI_DEVICE_ID_NEPTUNE:
1733 		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1734 		break;
1735 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
1736 		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1737 		break;
1738 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
1739 		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1740 		break;
1741 	case PCI_DEVICE_ID_BMID:
1742 		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1743 		break;
1744 	case PCI_DEVICE_ID_BSMB:
1745 		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1746 		break;
1747 	case PCI_DEVICE_ID_ZEPHYR:
1748 		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1749 		break;
1750 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
1751 		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1752 		break;
1753 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
1754 		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1755 		GE = 1;
1756 		break;
1757 	case PCI_DEVICE_ID_ZMID:
1758 		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1759 		break;
1760 	case PCI_DEVICE_ID_ZSMB:
1761 		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1762 		break;
1763 	case PCI_DEVICE_ID_LP101:
1764 		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1765 		break;
1766 	case PCI_DEVICE_ID_LP10000S:
1767 		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1768 		break;
1769 	case PCI_DEVICE_ID_LP11000S:
1770 		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1771 		break;
1772 	case PCI_DEVICE_ID_LPE11000S:
1773 		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1774 		break;
1775 	case PCI_DEVICE_ID_SAT:
1776 		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1777 		break;
1778 	case PCI_DEVICE_ID_SAT_MID:
1779 		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1780 		break;
1781 	case PCI_DEVICE_ID_SAT_SMB:
1782 		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1783 		break;
1784 	case PCI_DEVICE_ID_SAT_DCSP:
1785 		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1786 		break;
1787 	case PCI_DEVICE_ID_SAT_SCSP:
1788 		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1789 		break;
1790 	case PCI_DEVICE_ID_SAT_S:
1791 		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1792 		break;
1793 	case PCI_DEVICE_ID_HORNET:
1794 		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1795 		GE = 1;
1796 		break;
1797 	case PCI_DEVICE_ID_PROTEUS_VF:
1798 		m = (typeof(m)){"LPev12000", "PCIe IOV",
1799 				"Fibre Channel Adapter"};
1800 		break;
1801 	case PCI_DEVICE_ID_PROTEUS_PF:
1802 		m = (typeof(m)){"LPev12000", "PCIe IOV",
1803 				"Fibre Channel Adapter"};
1804 		break;
1805 	case PCI_DEVICE_ID_PROTEUS_S:
1806 		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1807 				"Fibre Channel Adapter"};
1808 		break;
1809 	case PCI_DEVICE_ID_TIGERSHARK:
1810 		oneConnect = 1;
1811 		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1812 		break;
1813 	case PCI_DEVICE_ID_TOMCAT:
1814 		oneConnect = 1;
1815 		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1816 		break;
1817 	case PCI_DEVICE_ID_FALCON:
1818 		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1819 				"EmulexSecure Fibre"};
1820 		break;
1821 	default:
1822 		m = (typeof(m)){"Unknown", "", ""};
1823 		break;
1824 	}
1825 
1826 	if (mdp && mdp[0] == '\0')
1827 		snprintf(mdp, 79, "%s", m.name);
1828 	/* OneConnect HBAs require special processing; they are all
1829 	 * initiators and we put the port number on the end.
1830 	 */
1831 	if (descp && descp[0] == '\0') {
1832 		if (oneConnect)
1833 			snprintf(descp, 255,
1834 				"Emulex OneConnect %s, %s Initiator, Port %s",
1835 				m.name, m.function,
1836 				phba->Port);
1837 		else
1838 			snprintf(descp, 255,
1839 				"Emulex %s %d%s %s %s",
1840 				m.name, max_speed, (GE) ? "GE" : "Gb",
1841 				m.bus, m.function);
1842 	}
1843 }
1844 
1845 /**
1846  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
1847  * @phba: pointer to lpfc hba data structure.
1848  * @pring: pointer to an IOCB ring.
1849  * @cnt: the number of IOCBs to be posted to the IOCB ring.
1850  *
1851  * This routine posts a given number of IOCBs with the associated DMA buffer
1852  * descriptors specified by the cnt argument to the given IOCB ring.
1853  *
1854  * Return codes
1855  *   The number of IOCBs NOT able to be posted to the IOCB ring.
1856  **/
1857 int
1858 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1859 {
1860 	IOCB_t *icmd;
1861 	struct lpfc_iocbq *iocb;
1862 	struct lpfc_dmabuf *mp1, *mp2;
1863 
1864 	cnt += pring->missbufcnt;
1865 
1866 	/* While there are buffers to post */
1867 	while (cnt > 0) {
1868 		/* Allocate buffer for command iocb */
1869 		iocb = lpfc_sli_get_iocbq(phba);
1870 		if (iocb == NULL) {
1871 			pring->missbufcnt = cnt;
1872 			return cnt;
1873 		}
1874 		icmd = &iocb->iocb;
1875 
1876 		/* 2 buffers can be posted per command */
1877 		/* Allocate buffer to post */
1878 		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1879 		if (mp1)
1880 			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1881 		if (!mp1 || !mp1->virt) {
1882 			kfree(mp1);
1883 			lpfc_sli_release_iocbq(phba, iocb);
1884 			pring->missbufcnt = cnt;
1885 			return cnt;
1886 		}
1887 
1888 		INIT_LIST_HEAD(&mp1->list);
1889 		/* Allocate buffer to post */
1890 		if (cnt > 1) {
1891 			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1892 			if (mp2)
1893 				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1894 							    &mp2->phys);
1895 			if (!mp2 || !mp2->virt) {
1896 				kfree(mp2);
1897 				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1898 				kfree(mp1);
1899 				lpfc_sli_release_iocbq(phba, iocb);
1900 				pring->missbufcnt = cnt;
1901 				return cnt;
1902 			}
1903 
1904 			INIT_LIST_HEAD(&mp2->list);
1905 		} else {
1906 			mp2 = NULL;
1907 		}
1908 
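		/* Set up the QUE_RING_BUF64 command: one 64-bit BDE per
		 * receive buffer, with ulpBdeCount recording how many BDEs
		 * are attached.
		 */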
1909 		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1910 		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1911 		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1912 		icmd->ulpBdeCount = 1;
1913 		cnt--;
1914 		if (mp2) {
1915 			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1916 			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1917 			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1918 			cnt--;
1919 			icmd->ulpBdeCount = 2;
1920 		}
1921 
1922 		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1923 		icmd->ulpLe = 1;
1924 
1925 		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1926 		    IOCB_ERROR) {
1927 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1928 			kfree(mp1);
1929 			cnt++;
1930 			if (mp2) {
1931 				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1932 				kfree(mp2);
1933 				cnt++;
1934 			}
1935 			lpfc_sli_release_iocbq(phba, iocb);
1936 			pring->missbufcnt = cnt;
1937 			return cnt;
1938 		}
1939 		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1940 		if (mp2)
1941 			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1942 	}
1943 	pring->missbufcnt = 0;
1944 	return 0;
1945 }
1946 
1947 /**
1948  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1949  * @phba: pointer to lpfc hba data structure.
1950  *
1951  * This routine posts initial receive IOCB buffers to the ELS ring. The
1952  * number of initial receive IOCB buffers is specified by LPFC_BUF_RING0,
1953  * currently set to 64 IOCBs.
1954  *
1955  * Return codes
1956  *   0 - success (currently always success)
1957  **/
1958 static int
1959 lpfc_post_rcv_buf(struct lpfc_hba *phba)
1960 {
1961 	struct lpfc_sli *psli = &phba->sli;
1962 
1963 	/* Ring 0, ELS / CT buffers */
1964 	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1965 	/* Ring 2 - FCP no buffers needed */
1966 
1967 	return 0;
1968 }
1969 
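/* S(N, V): rotate the 32-bit value V left by N bits; this is the circular
 * left-shift used by the SHA-1 style hash routines below.
 */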
1970 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
1971 
1972 /**
1973  * lpfc_sha_init - Set up initial array of hash table entries
1974  * @HashResultPointer: pointer to an array as hash table.
1975  *
1976  * This routine sets up the initial values to the array of hash table entries
1977  * for the LC HBAs.
1978  **/
1979 static void
1980 lpfc_sha_init(uint32_t * HashResultPointer)
1981 {
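	/* Standard SHA-1 initial hash values */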
1982 	HashResultPointer[0] = 0x67452301;
1983 	HashResultPointer[1] = 0xEFCDAB89;
1984 	HashResultPointer[2] = 0x98BADCFE;
1985 	HashResultPointer[3] = 0x10325476;
1986 	HashResultPointer[4] = 0xC3D2E1F0;
1987 }
1988 
1989 /**
1990  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1991  * @HashResultPointer: pointer to an initial/result hash table.
1992  * @HashWorkingPointer: pointer to a working hash table.
1993  *
1994  * This routine iterates an initial hash table pointed to by
1995  * @HashResultPointer with the values from the working hash table pointed
1996  * to by @HashWorkingPointer. The results are put back into the initial
1997  * hash table, returned through @HashResultPointer as the result.
1998  **/
1999 static void
2000 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2001 {
2002 	int t;
2003 	uint32_t TEMP;
2004 	uint32_t A, B, C, D, E;
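
	/* Expand the 16-word input block into the 80-word SHA-1 style message
	 * schedule: W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16])
	 */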
2005 	t = 16;
2006 	do {
2007 		HashWorkingPointer[t] =
2008 		    S(1,
2009 		      HashWorkingPointer[t - 3] ^
2010 		      HashWorkingPointer[t - 8] ^
2011 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2012 	} while (++t <= 79);
2013 	t = 0;
2014 	A = HashResultPointer[0];
2015 	B = HashResultPointer[1];
2016 	C = HashResultPointer[2];
2017 	D = HashResultPointer[3];
2018 	E = HashResultPointer[4];
2019 
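	/* 80 rounds; the round function and additive constant change every
	 * 20 rounds (0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC and 0xCA62C1D6 are
	 * the standard SHA-1 K constants)
	 */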
2020 	do {
2021 		if (t < 20) {
2022 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2023 		} else if (t < 40) {
2024 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2025 		} else if (t < 60) {
2026 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2027 		} else {
2028 			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2029 		}
2030 		TEMP += S(5, A) + E + HashWorkingPointer[t];
2031 		E = D;
2032 		D = C;
2033 		C = S(30, B);
2034 		B = A;
2035 		A = TEMP;
2036 	} while (++t <= 79);
2037 
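	/* Fold the working variables back into the running hash state */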
2038 	HashResultPointer[0] += A;
2039 	HashResultPointer[1] += B;
2040 	HashResultPointer[2] += C;
2041 	HashResultPointer[3] += D;
2042 	HashResultPointer[4] += E;
2043 
2044 }
2045 
2046 /**
2047  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2048  * @RandomChallenge: pointer to the entry of host challenge random number array.
2049  * @HashWorking: pointer to the entry of the working hash array.
2050  *
2051  * This routine calculates the working hash array referred by @HashWorking
2052  * from the challenge random numbers associated with the host, referred by
2053  * @RandomChallenge. The result is put into the entry of the working hash
2054  * array and returned by reference through @HashWorking.
2055  **/
2056 static void
2057 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2058 {
2059 	*HashWorking = (*RandomChallenge ^ *HashWorking);
2060 }
2061 
2062 /**
2063  * lpfc_hba_init - Perform special handling for LC HBA initialization
2064  * @phba: pointer to lpfc hba data structure.
2065  * @hbainit: pointer to an array of unsigned 32-bit integers.
2066  *
2067  * This routine performs the special handling for LC HBA initialization.
2068  **/
2069 void
2070 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2071 {
2072 	int t;
2073 	uint32_t *HashWorking;
2074 	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2075 
2076 	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2077 	if (!HashWorking)
2078 		return;
2079 
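	/* Seed the working array with the two 32-bit words of the adapter
	 * WWNN, at both the front (0, 1) and the tail (78, 79)
	 */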
2080 	HashWorking[0] = HashWorking[78] = *pwwnn++;
2081 	HashWorking[1] = HashWorking[79] = *pwwnn;
2082 
2083 	for (t = 0; t < 7; t++)
2084 		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2085 
2086 	lpfc_sha_init(hbainit);
2087 	lpfc_sha_iterate(hbainit, HashWorking);
2088 	kfree(HashWorking);
2089 }
2090 
2091 /**
2092  * lpfc_cleanup - Performs vport cleanups before deleting a vport
2093  * @vport: pointer to a virtual N_Port data structure.
2094  *
2095  * This routine performs the necessary cleanups before deleting the @vport.
2096  * It invokes the discovery state machine to perform necessary state
2097  * transitions and to release the ndlps associated with the @vport. Note,
2098  * the physical port is treated as @vport 0.
2099  **/
2100 void
2101 lpfc_cleanup(struct lpfc_vport *vport)
2102 {
2103 	struct lpfc_hba   *phba = vport->phba;
2104 	struct lpfc_nodelist *ndlp, *next_ndlp;
2105 	int i = 0;
2106 
2107 	if (phba->link_state > LPFC_LINK_DOWN)
2108 		lpfc_port_link_failure(vport);
2109 
2110 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2111 		if (!NLP_CHK_NODE_ACT(ndlp)) {
2112 			ndlp = lpfc_enable_node(vport, ndlp,
2113 						NLP_STE_UNUSED_NODE);
2114 			if (!ndlp)
2115 				continue;
2116 			spin_lock_irq(&phba->ndlp_lock);
2117 			NLP_SET_FREE_REQ(ndlp);
2118 			spin_unlock_irq(&phba->ndlp_lock);
2119 			/* Trigger the release of the ndlp memory */
2120 			lpfc_nlp_put(ndlp);
2121 			continue;
2122 		}
2123 		spin_lock_irq(&phba->ndlp_lock);
2124 		if (NLP_CHK_FREE_REQ(ndlp)) {
2125 			/* The ndlp is already in memory free mode; skip it */
2126 			spin_unlock_irq(&phba->ndlp_lock);
2127 			continue;
2128 		} else
2129 			/* Indicate request for freeing ndlp memory */
2130 			NLP_SET_FREE_REQ(ndlp);
2131 		spin_unlock_irq(&phba->ndlp_lock);
2132 
2133 		if (vport->port_type != LPFC_PHYSICAL_PORT &&
2134 		    ndlp->nlp_DID == Fabric_DID) {
2135 			/* Just free up ndlp with Fabric_DID for vports */
2136 			lpfc_nlp_put(ndlp);
2137 			continue;
2138 		}
2139 
2140 		if (ndlp->nlp_type & NLP_FABRIC)
2141 			lpfc_disc_state_machine(vport, ndlp, NULL,
2142 					NLP_EVT_DEVICE_RECOVERY);
2143 
2144 		lpfc_disc_state_machine(vport, ndlp, NULL,
2145 					NLP_EVT_DEVICE_RM);
2146 
2147 	}
2148 
2149 	/* At this point, ALL ndlp's should be gone
2150 	 * because of the previous NLP_EVT_DEVICE_RM.
2151 	 * Let's wait for this to happen, if needed (up to ~30 seconds).
2152 	 */
2153 	while (!list_empty(&vport->fc_nodes)) {
2154 		if (i++ > 3000) {
2155 			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2156 				"0233 Nodelist not empty\n");
2157 			list_for_each_entry_safe(ndlp, next_ndlp,
2158 						&vport->fc_nodes, nlp_listp) {
2159 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2160 						LOG_NODE,
2161 						"0282 did:x%x ndlp:x%p "
2162 						"usgmap:x%x refcnt:%d\n",
2163 						ndlp->nlp_DID, (void *)ndlp,
2164 						ndlp->nlp_usg_map,
2165 						atomic_read(
2166 							&ndlp->kref.refcount));
2167 			}
2168 			break;
2169 		}
2170 
2171 		/* Wait for any activity on ndlps to settle */
2172 		msleep(10);
2173 	}
2174 }
2175 
2176 /**
2177  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2178  * @vport: pointer to a virtual N_Port data structure.
2179  *
2180  * This routine stops all the timers associated with a @vport. This function
2181  * is invoked before disabling or deleting a @vport. Note that the physical
2182  * port is treated as @vport 0.
2183  **/
2184 void
2185 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2186 {
2187 	del_timer_sync(&vport->els_tmofunc);
2188 	del_timer_sync(&vport->fc_fdmitmo);
2189 	lpfc_can_disctmo(vport);
2190 	return;
2191 }
2192 
2193 /**
2194  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2195  * @phba: pointer to lpfc hba data structure.
2196  *
2197  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2198  * caller of this routine should already hold the host lock.
2199  **/
2200 void
2201 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2202 {
2203 	/* Clear pending FCF rediscovery wait and failover in progress flags */
2204 	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2205 				FCF_DEAD_DISC |
2206 				FCF_ACVL_DISC);
2207 	/* Now, try to stop the timer */
2208 	del_timer(&phba->fcf.redisc_wait);
2209 }
2210 
2211 /**
2212  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2213  * @phba: pointer to lpfc hba data structure.
2214  *
2215  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2216  * checks whether the FCF rediscovery wait timer is pending with the host
2217  * lock held before proceeding with disabling the timer and clearing the
2218  * wait timer pending flag.
2219  **/
2220 void
2221 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2222 {
2223 	spin_lock_irq(&phba->hbalock);
2224 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2225 		/* FCF rediscovery timer already fired or stopped */
2226 		spin_unlock_irq(&phba->hbalock);
2227 		return;
2228 	}
2229 	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2230 	spin_unlock_irq(&phba->hbalock);
2231 }
2232 
2233 /**
2234  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2235  * @phba: pointer to lpfc hba data structure.
2236  *
2237  * This routine stops all the timers associated with an HBA. This function is
2238  * invoked before either putting an HBA offline or unloading the driver.
2239  **/
2240 void
2241 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2242 {
2243 	lpfc_stop_vport_timers(phba->pport);
2244 	del_timer_sync(&phba->sli.mbox_tmo);
2245 	del_timer_sync(&phba->fabric_block_timer);
2246 	del_timer_sync(&phba->eratt_poll);
2247 	del_timer_sync(&phba->hb_tmofunc);
2248 	phba->hb_outstanding = 0;
2249 
2250 	switch (phba->pci_dev_grp) {
2251 	case LPFC_PCI_DEV_LP:
2252 		/* Stop any LightPulse device specific driver timers */
2253 		del_timer_sync(&phba->fcp_poll_timer);
2254 		break;
2255 	case LPFC_PCI_DEV_OC:
2256 		/* Stop any OneConnect device specific driver timers */
2257 		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2258 		break;
2259 	default:
2260 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2261 				"0297 Invalid device group (x%x)\n",
2262 				phba->pci_dev_grp);
2263 		break;
2264 	}
2265 	return;
2266 }
2267 
2268 /**
2269  * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2270  * @phba: pointer to lpfc hba data structure.
2271  *
2272  * This routine marks an HBA's management interface as blocked. Once the HBA's
2273  * management interface is marked as blocked, all user space access to the
2274  * HBA, whether from the sysfs interface or the libdfc interface, is blocked.
2275  * The HBA is set to block the management interface when the driver prepares
2276  * the HBA interface for online or offline.
2277  **/
2278 static void
2279 lpfc_block_mgmt_io(struct lpfc_hba *phba)
2280 {
2281 	unsigned long iflag;
2282 
2283 	spin_lock_irqsave(&phba->hbalock, iflag);
2284 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2285 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2286 }
2287 
2288 /**
2289  * lpfc_online - Initialize and bring an HBA online
2290  * @phba: pointer to lpfc hba data structure.
2291  *
2292  * This routine initializes the HBA and brings it online. During this
2293  * process, the management interface is blocked to prevent user space access
2294  * to the HBA interfering with the driver initialization.
2295  *
2296  * Return codes
2297  *   0 - successful
2298  *   1 - failed
2299  **/
2300 int
2301 lpfc_online(struct lpfc_hba *phba)
2302 {
2303 	struct lpfc_vport *vport;
2304 	struct lpfc_vport **vports;
2305 	int i;
2306 
2307 	if (!phba)
2308 		return 0;
2309 	vport = phba->pport;
2310 
2311 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2312 		return 0;
2313 
2314 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2315 			"0458 Bring Adapter online\n");
2316 
2317 	lpfc_block_mgmt_io(phba);
2318 
2319 	if (!lpfc_sli_queue_setup(phba)) {
2320 		lpfc_unblock_mgmt_io(phba);
2321 		return 1;
2322 	}
2323 
2324 	if (phba->sli_rev == LPFC_SLI_REV4) {
2325 		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2326 			lpfc_unblock_mgmt_io(phba);
2327 			return 1;
2328 		}
2329 	} else {
2330 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2331 			lpfc_unblock_mgmt_io(phba);
2332 			return 1;
2333 		}
2334 	}
2335 
2336 	vports = lpfc_create_vport_work_array(phba);
2337 	if (vports != NULL)
2338 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2339 			struct Scsi_Host *shost;
2340 			shost = lpfc_shost_from_vport(vports[i]);
2341 			spin_lock_irq(shost->host_lock);
2342 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2343 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2344 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2345 			if (phba->sli_rev == LPFC_SLI_REV4)
2346 				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2347 			spin_unlock_irq(shost->host_lock);
2348 		}
2349 	lpfc_destroy_vport_work_array(phba, vports);
2350 
2351 	lpfc_unblock_mgmt_io(phba);
2352 	return 0;
2353 }
2354 
2355 /**
2356  * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2357  * @phba: pointer to lpfc hba data structure.
2358  *
2359  * This routine marks an HBA's management interface as not blocked. Once the
2360  * HBA's management interface is marked as not blocked, all user space
2361  * access to the HBA, whether from the sysfs interface or the libdfc
2362  * interface, is allowed. The HBA is set to block the management interface
2363  * when the driver prepares the HBA interface for online or offline and then
2364  * set to unblock the management interface afterwards.
2365  **/
2366 void
2367 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
2368 {
2369 	unsigned long iflag;
2370 
2371 	spin_lock_irqsave(&phba->hbalock, iflag);
2372 	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2373 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2374 }
2375 
2376 /**
2377  * lpfc_offline_prep - Prepare an HBA to be brought offline
2378  * @phba: pointer to lpfc hba data structure.
2379  *
2380  * This routine is invoked to prepare an HBA to be brought offline. It issues
2381  * an unreg_login to all the nodes on all vports and flushes the mailbox
2382  * queue to make it ready to be brought offline.
2383  **/
2384 void
2385 lpfc_offline_prep(struct lpfc_hba *phba)
2386 {
2387 	struct lpfc_vport *vport = phba->pport;
2388 	struct lpfc_nodelist  *ndlp, *next_ndlp;
2389 	struct lpfc_vport **vports;
2390 	struct Scsi_Host *shost;
2391 	int i;
2392 
2393 	if (vport->fc_flag & FC_OFFLINE_MODE)
2394 		return;
2395 
2396 	lpfc_block_mgmt_io(phba);
2397 
2398 	lpfc_linkdown(phba);
2399 
2400 	/* Issue an unreg_login to all nodes on all vports */
2401 	vports = lpfc_create_vport_work_array(phba);
2402 	if (vports != NULL) {
2403 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2404 			if (vports[i]->load_flag & FC_UNLOADING)
2405 				continue;
2406 			shost = lpfc_shost_from_vport(vports[i]);
2407 			spin_lock_irq(shost->host_lock);
2408 			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2409 			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2410 			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2411 			spin_unlock_irq(shost->host_lock);
2412 
2413 			shost =	lpfc_shost_from_vport(vports[i]);
2414 			list_for_each_entry_safe(ndlp, next_ndlp,
2415 						 &vports[i]->fc_nodes,
2416 						 nlp_listp) {
2417 				if (!NLP_CHK_NODE_ACT(ndlp))
2418 					continue;
2419 				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2420 					continue;
2421 				if (ndlp->nlp_type & NLP_FABRIC) {
2422 					lpfc_disc_state_machine(vports[i], ndlp,
2423 						NULL, NLP_EVT_DEVICE_RECOVERY);
2424 					lpfc_disc_state_machine(vports[i], ndlp,
2425 						NULL, NLP_EVT_DEVICE_RM);
2426 				}
2427 				spin_lock_irq(shost->host_lock);
2428 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2429 				spin_unlock_irq(shost->host_lock);
2430 				lpfc_unreg_rpi(vports[i], ndlp);
2431 			}
2432 		}
2433 	}
2434 	lpfc_destroy_vport_work_array(phba, vports);
2435 
2436 	lpfc_sli_mbox_sys_shutdown(phba);
2437 }
2438 
2439 /**
2440  * lpfc_offline - Bring an HBA offline
2441  * @phba: pointer to lpfc hba data structure.
2442  *
2443  * This routine actually brings an HBA offline. It stops all the timers
2444  * associated with the HBA, brings down the SLI layer, and eventually
2445  * marks the HBA as in offline state for the upper layer protocol.
2446  **/
2447 void
2448 lpfc_offline(struct lpfc_hba *phba)
2449 {
2450 	struct Scsi_Host  *shost;
2451 	struct lpfc_vport **vports;
2452 	int i;
2453 
2454 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2455 		return;
2456 
2457 	/* stop port and all timers associated with this hba */
2458 	lpfc_stop_port(phba);
2459 	vports = lpfc_create_vport_work_array(phba);
2460 	if (vports != NULL)
2461 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2462 			lpfc_stop_vport_timers(vports[i]);
2463 	lpfc_destroy_vport_work_array(phba, vports);
2464 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2465 			"0460 Bring Adapter offline\n");
2466 	/* Bring down the SLI Layer and cleanup.  The HBA is offline
2467 	 * now. */
2468 	lpfc_sli_hba_down(phba);
2469 	spin_lock_irq(&phba->hbalock);
2470 	phba->work_ha = 0;
2471 	spin_unlock_irq(&phba->hbalock);
2472 	vports = lpfc_create_vport_work_array(phba);
2473 	if (vports != NULL)
2474 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2475 			shost = lpfc_shost_from_vport(vports[i]);
2476 			spin_lock_irq(shost->host_lock);
2477 			vports[i]->work_port_events = 0;
2478 			vports[i]->fc_flag |= FC_OFFLINE_MODE;
2479 			spin_unlock_irq(shost->host_lock);
2480 		}
2481 	lpfc_destroy_vport_work_array(phba, vports);
2482 }
2483 
2484 /**
2485  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2486  * @phba: pointer to lpfc hba data structure.
2487  *
2488  * This routine frees all the SCSI buffers and IOCBs from the driver
2489  * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2490  * the internal resources before the device is removed from the system.
2491  *
2492  * Return codes
2493  *   0 - successful (for now, it always returns 0)
2494  **/
2495 static int
2496 lpfc_scsi_free(struct lpfc_hba *phba)
2497 {
2498 	struct lpfc_scsi_buf *sb, *sb_next;
2499 	struct lpfc_iocbq *io, *io_next;
2500 
2501 	spin_lock_irq(&phba->hbalock);
2502 	/* Release all the lpfc_scsi_bufs maintained by this host. */
2503 	spin_lock(&phba->scsi_buf_list_lock);
2504 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2505 		list_del(&sb->list);
2506 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2507 			      sb->dma_handle);
2508 		kfree(sb);
2509 		phba->total_scsi_bufs--;
2510 	}
2511 	spin_unlock(&phba->scsi_buf_list_lock);
2512 
2513 	/* Release all the lpfc_iocbq entries maintained by this host. */
2514 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2515 		list_del(&io->list);
2516 		kfree(io);
2517 		phba->total_iocbq_bufs--;
2518 	}
2519 	spin_unlock_irq(&phba->hbalock);
2520 	return 0;
2521 }
2522 
2523 /**
2524  * lpfc_create_port - Create an FC port
2525  * @phba: pointer to lpfc hba data structure.
2526  * @instance: a unique integer ID to this FC port.
2527  * @dev: pointer to the device data structure.
2528  *
2529  * This routine creates a FC port for the upper layer protocol. The FC port
2530  * can be created on top of either a physical port or a virtual port provided
2531  * by the HBA. This routine also allocates a SCSI host data structure (shost)
2532  * and associates it with the FC port created, before adding the shost to
2533  * the SCSI layer.
2534  *
2535  * Return codes
2536  *   @vport - pointer to the virtual N_Port data structure.
2537  *   NULL - port create failed.
2538  **/
2539 struct lpfc_vport *
2540 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2541 {
2542 	struct lpfc_vport *vport;
2543 	struct Scsi_Host  *shost;
2544 	int error = 0;
2545 
2546 	if (dev != &phba->pcidev->dev)
2547 		shost = scsi_host_alloc(&lpfc_vport_template,
2548 					sizeof(struct lpfc_vport));
2549 	else
2550 		shost = scsi_host_alloc(&lpfc_template,
2551 					sizeof(struct lpfc_vport));
2552 	if (!shost)
2553 		goto out;
2554 
2555 	vport = (struct lpfc_vport *) shost->hostdata;
2556 	vport->phba = phba;
2557 	vport->load_flag |= FC_LOADING;
2558 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2559 	vport->fc_rscn_flush = 0;
2560 
2561 	lpfc_get_vport_cfgparam(vport);
2562 	shost->unique_id = instance;
2563 	shost->max_id = LPFC_MAX_TARGET;
2564 	shost->max_lun = vport->cfg_max_luns;
2565 	shost->this_id = -1;
2566 	shost->max_cmd_len = 16;
2567 	if (phba->sli_rev == LPFC_SLI_REV4) {
2568 		shost->dma_boundary =
2569 			phba->sli4_hba.pc_sli4_params.sge_supp_len;
2570 		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2571 	}
2572 
2573 	/*
2574 	 * Set initial can_queue value since 0 is no longer supported and
2575 	 * scsi_add_host will fail. This will be adjusted later based on the
2576 	 * max xri value determined in hba setup.
2577 	 */
2578 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
2579 	if (dev != &phba->pcidev->dev) {
2580 		shost->transportt = lpfc_vport_transport_template;
2581 		vport->port_type = LPFC_NPIV_PORT;
2582 	} else {
2583 		shost->transportt = lpfc_transport_template;
2584 		vport->port_type = LPFC_PHYSICAL_PORT;
2585 	}
2586 
2587 	/* Initialize all internally managed lists. */
2588 	INIT_LIST_HEAD(&vport->fc_nodes);
2589 	INIT_LIST_HEAD(&vport->rcv_buffer_list);
2590 	spin_lock_init(&vport->work_port_lock);
2591 
2592 	init_timer(&vport->fc_disctmo);
2593 	vport->fc_disctmo.function = lpfc_disc_timeout;
2594 	vport->fc_disctmo.data = (unsigned long)vport;
2595 
2596 	init_timer(&vport->fc_fdmitmo);
2597 	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2598 	vport->fc_fdmitmo.data = (unsigned long)vport;
2599 
2600 	init_timer(&vport->els_tmofunc);
2601 	vport->els_tmofunc.function = lpfc_els_timeout;
2602 	vport->els_tmofunc.data = (unsigned long)vport;
2603 	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2604 		phba->menlo_flag |= HBA_MENLO_SUPPORT;
2605 		/* check for menlo minimum sg count */
2606 		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2607 			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2608 			shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2609 		}
2610 	}
2611 
2612 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2613 	if (error)
2614 		goto out_put_shost;
2615 
2616 	spin_lock_irq(&phba->hbalock);
2617 	list_add_tail(&vport->listentry, &phba->port_list);
2618 	spin_unlock_irq(&phba->hbalock);
2619 	return vport;
2620 
2621 out_put_shost:
2622 	scsi_host_put(shost);
2623 out:
2624 	return NULL;
2625 }
2626 
2627 /**
2628  * destroy_port -  destroy an FC port
2629  * @vport: pointer to an lpfc virtual N_Port data structure.
2630  *
2631  * This routine destroys a FC port from the upper layer protocol. All the
2632  * resources associated with the port are released.
2633  **/
2634 void
2635 destroy_port(struct lpfc_vport *vport)
2636 {
2637 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2638 	struct lpfc_hba  *phba = vport->phba;
2639 
2640 	lpfc_debugfs_terminate(vport);
2641 	fc_remove_host(shost);
2642 	scsi_remove_host(shost);
2643 
2644 	spin_lock_irq(&phba->hbalock);
2645 	list_del_init(&vport->listentry);
2646 	spin_unlock_irq(&phba->hbalock);
2647 
2648 	lpfc_cleanup(vport);
2649 	return;
2650 }
2651 
2652 /**
2653  * lpfc_get_instance - Get a unique integer ID
2654  *
2655  * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2656  * uses the kernel idr facility to perform the task.
2657  *
2658  * Return codes:
2659  *   instance - a unique integer ID allocated as the new instance.
2660  *   -1 - lpfc get instance failed.
2661  **/
2662 int
2663 lpfc_get_instance(void)
2664 {
2665 	int instance = 0;
2666 
2667 	/* Assign an unused number */
2668 	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2669 		return -1;
2670 	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2671 		return -1;
2672 	return instance;
2673 }
2674 
2675 /**
2676  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2677  * @shost: pointer to SCSI host data structure.
2678  * @time: elapsed time of the scan in jiffies.
2679  *
2680  * This routine is called by the SCSI layer with a SCSI host to determine
2681  * whether the scan host is finished.
2682  *
2683  * Note: there is no scan_start function as adapter initialization will have
2684  * asynchronously kicked off the link initialization.
2685  *
2686  * Return codes
2687  *   0 - SCSI host scan is not over yet.
2688  *   1 - SCSI host scan is over.
2689  **/
2690 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2691 {
2692 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2693 	struct lpfc_hba   *phba = vport->phba;
2694 	int stat = 0;
2695 
2696 	spin_lock_irq(shost->host_lock);
2697 
2698 	if (vport->load_flag & FC_UNLOADING) {
2699 		stat = 1;
2700 		goto finished;
2701 	}
2702 	if (time >= 30 * HZ) {
2703 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2704 				"0461 Scanning longer than 30 "
2705 				"seconds.  Continuing initialization\n");
2706 		stat = 1;
2707 		goto finished;
2708 	}
2709 	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2710 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2711 				"0465 Link down longer than 15 "
2712 				"seconds.  Continuing initialization\n");
2713 		stat = 1;
2714 		goto finished;
2715 	}
2716 
2717 	if (vport->port_state != LPFC_VPORT_READY)
2718 		goto finished;
2719 	if (vport->num_disc_nodes || vport->fc_prli_sent)
2720 		goto finished;
2721 	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2722 		goto finished;
2723 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2724 		goto finished;
2725 
2726 	stat = 1;
2727 
2728 finished:
2729 	spin_unlock_irq(shost->host_lock);
2730 	return stat;
2731 }
2732 
2733 /**
2734  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2735  * @shost: pointer to SCSI host data structure.
2736  *
2737  * This routine initializes a given SCSI host attributes on a FC port. The
2738  * SCSI host can be either on top of a physical port or a virtual port.
2739  **/
2740 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2741 {
2742 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2743 	struct lpfc_hba   *phba = vport->phba;
2744 	/*
2745 	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2746 	 */
2747 
2748 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2749 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2750 	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2751 
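	/* In the FC-4 TYPEs bitmap, byte 2 bit 0 corresponds to TYPE 0x08
	 * (SCSI FCP) and byte 7 bit 0 to TYPE 0x20 (CT)
	 */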
2752 	memset(fc_host_supported_fc4s(shost), 0,
2753 	       sizeof(fc_host_supported_fc4s(shost)));
2754 	fc_host_supported_fc4s(shost)[2] = 1;
2755 	fc_host_supported_fc4s(shost)[7] = 1;
2756 
2757 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2758 				 sizeof fc_host_symbolic_name(shost));
2759 
2760 	fc_host_supported_speeds(shost) = 0;
2761 	if (phba->lmt & LMT_10Gb)
2762 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2763 	if (phba->lmt & LMT_8Gb)
2764 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2765 	if (phba->lmt & LMT_4Gb)
2766 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2767 	if (phba->lmt & LMT_2Gb)
2768 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2769 	if (phba->lmt & LMT_1Gb)
2770 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2771 
2772 	fc_host_maxframe_size(shost) =
2773 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2774 		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2775 
2776 	/* This value is also unchanging */
2777 	memset(fc_host_active_fc4s(shost), 0,
2778 	       sizeof(fc_host_active_fc4s(shost)));
2779 	fc_host_active_fc4s(shost)[2] = 1;
2780 	fc_host_active_fc4s(shost)[7] = 1;
2781 
2782 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2783 	spin_lock_irq(shost->host_lock);
2784 	vport->load_flag &= ~FC_LOADING;
2785 	spin_unlock_irq(shost->host_lock);
2786 }
2787 
2788 /**
2789  * lpfc_stop_port_s3 - Stop SLI3 device port
2790  * @phba: pointer to lpfc hba data structure.
2791  *
2792  * This routine is invoked to stop an SLI3 device port, it stops the device
2793  * from generating interrupts and stops the device driver's timers for the
2794  * device.
2795  **/
2796 static void
2797 lpfc_stop_port_s3(struct lpfc_hba *phba)
2798 {
2799 	/* Clear all interrupt enable conditions */
2800 	writel(0, phba->HCregaddr);
2801 	readl(phba->HCregaddr); /* flush */
2802 	/* Clear all pending interrupts */
2803 	writel(0xffffffff, phba->HAregaddr);
2804 	readl(phba->HAregaddr); /* flush */
2805 
2806 	/* Reset some HBA SLI setup states */
2807 	lpfc_stop_hba_timers(phba);
2808 	phba->pport->work_port_events = 0;
2809 }
2810 
2811 /**
2812  * lpfc_stop_port_s4 - Stop SLI4 device port
2813  * @phba: pointer to lpfc hba data structure.
2814  *
2815  * This routine is invoked to stop an SLI4 device port, it stops the device
2816  * from generating interrupts and stops the device driver's timers for the
2817  * device.
2818  **/
2819 static void
2820 lpfc_stop_port_s4(struct lpfc_hba *phba)
2821 {
2822 	/* Reset some HBA SLI4 setup states */
2823 	lpfc_stop_hba_timers(phba);
2824 	phba->pport->work_port_events = 0;
2825 	phba->sli4_hba.intr_enable = 0;
2826 }
2827 
2828 /**
2829  * lpfc_stop_port - Wrapper function for stopping hba port
2830  * @phba: Pointer to HBA context object.
2831  *
2832  * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2833  * the API jump table function pointer from the lpfc_hba struct.
2834  **/
2835 void
2836 lpfc_stop_port(struct lpfc_hba *phba)
2837 {
2838 	phba->lpfc_stop_port(phba);
2839 }
2840 
2841 /**
2842  * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port
2843  * @phba: pointer to lpfc hba data structure.
2844  *
2845  * This routine is invoked to remove the driver default fcf record from
2846  * the port.  This routine currently acts on FCF Index 0.
2847  *
2848  **/
2849 void
2850 lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2851 {
2852 	int rc = 0;
2853 	LPFC_MBOXQ_t *mboxq;
2854 	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2855 	uint32_t mbox_tmo, req_len;
2856 	uint32_t shdr_status, shdr_add_status;
2857 
2858 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2859 	if (!mboxq) {
2860 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2861 			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2862 		return;
2863 	}
2864 
2865 	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2866 		  sizeof(struct lpfc_sli4_cfg_mhdr);
2867 	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2868 			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2869 			      req_len, LPFC_SLI4_MBX_EMBED);
2870 	/*
2871 	 * In phase 1, there is a single FCF index, 0.  In phase 2, the driver
2872 	 * supports multiple FCF indices.
2873 	 */
2874 	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2875 	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2876 	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2877 	       phba->fcf.current_rec.fcf_indx);
2878 
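
	/* Poll the mailbox if interrupts are not yet enabled; otherwise
	 * issue it and wait for the completion
	 */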
2879 	if (!phba->sli4_hba.intr_enable)
2880 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2881 	else {
2882 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2883 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2884 	}
2885 	/* The IOCTL status is embedded in the mailbox subheader. */
2886 	shdr_status = bf_get(lpfc_mbox_hdr_status,
2887 			     &del_fcf_record->header.cfg_shdr.response);
2888 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2889 				 &del_fcf_record->header.cfg_shdr.response);
2890 	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2891 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2892 				"2516 DEL FCF of default FCF Index failed "
2893 				"mbx status x%x, status x%x add_status x%x\n",
2894 				rc, shdr_status, shdr_add_status);
2895 	}
2896 	if (rc != MBX_TIMEOUT)
2897 		mempool_free(mboxq, phba->mbox_mem_pool);
2898 }
2899 
2900 /**
2901  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2902  * @phba: Pointer to hba for which this call is being executed.
2903  *
2904  * This routine starts the timer waiting for the FCF rediscovery to complete.
2905  **/
2906 void
2907 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2908 {
2909 	unsigned long fcf_redisc_wait_tmo =
2910 		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2911 	/* Start fcf rediscovery wait period timer */
2912 	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2913 	spin_lock_irq(&phba->hbalock);
2914 	/* Allow action to new fcf asynchronous event */
2915 	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2916 	/* Mark the FCF rediscovery pending state */
2917 	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2918 	spin_unlock_irq(&phba->hbalock);
2919 }
2920 
2921 /**
2922  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
2923  * @ptr: unsigned long holding the lpfc_hba data structure pointer.
2924  *
2925  * This routine is invoked when the wait for FCF table rediscovery times
2926  * out. If new FCF record(s) have been discovered during the wait
2927  * period, a new FCF event is added to the FCoE async event list, and
2928  * then the worker thread is woken up to process the event from the
2929  * worker thread context.
2930  **/
2931 void
2932 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2933 {
2934 	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2935 
2936 	/* Don't send FCF rediscovery event if timer cancelled */
2937 	spin_lock_irq(&phba->hbalock);
2938 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2939 		spin_unlock_irq(&phba->hbalock);
2940 		return;
2941 	}
2942 	/* Clear FCF rediscovery timer pending flag */
2943 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2944 	/* FCF rediscovery event to worker thread */
2945 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2946 	spin_unlock_irq(&phba->hbalock);
2947 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2948 			"2776 FCF rediscover wait timer expired, post "
2949 			"a worker thread event for FCF table scan\n");
2950 	/* wake up worker thread */
2951 	lpfc_worker_wake_up(phba);
2952 }
2953 
2954 /**
2955  * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2956  * @phba: pointer to lpfc hba data structure.
2957  *
2958  * This function uses the QUERY_FW_CFG mailbox command to determine if the
2959  * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2960  * was successful and the firmware supports FCoE. Any other return indicates
2961  * a error. It is assumed that this function will be called before interrupts
2962  * an error. It is assumed that this function will be called before interrupts
2963  **/
2964 static int
2965 lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2966 {
2967 	int rc = 0;
2968 	LPFC_MBOXQ_t *mboxq;
2969 	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2970 	uint32_t length;
2971 	uint32_t shdr_status, shdr_add_status;
2972 
2973 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2974 	if (!mboxq) {
2975 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2976 				"2621 Failed to allocate mbox for "
2977 				"query firmware config cmd\n");
2978 		return -ENOMEM;
2979 	}
2980 	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2981 	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2982 		  sizeof(struct lpfc_sli4_cfg_mhdr));
2983 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2984 			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2985 			 length, LPFC_SLI4_MBX_EMBED);
2986 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2987 	/* The IOCTL status is embedded in the mailbox subheader. */
2988 	shdr_status = bf_get(lpfc_mbox_hdr_status,
2989 			     &query_fw_cfg->header.cfg_shdr.response);
2990 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2991 				 &query_fw_cfg->header.cfg_shdr.response);
2992 	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2993 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2994 				"2622 Query Firmware Config failed "
2995 				"mbx status x%x, status x%x add_status x%x\n",
2996 				rc, shdr_status, shdr_add_status);
		/* Do not free the mailbox on timeout; it may still complete */
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
2997 		return -EINVAL;
2998 	}
2999 	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
3000 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3001 				"2623 FCoE Function not supported by firmware. "
3002 				"Function mode = %08x\n",
3003 				query_fw_cfg->function_mode);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
3004 		return -EINVAL;
3005 	}
3006 	if (rc != MBX_TIMEOUT)
3007 		mempool_free(mboxq, phba->mbox_mem_pool);
3008 	return 0;
3009 }
3010 
3011 /**
3012  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3013  * @phba: pointer to lpfc hba data structure.
3014  * @acqe_link: pointer to the async link completion queue entry.
3015  *
3016  * This routine is to parse the SLI4 link-attention link fault code and
3017  * translate it into the base driver's read link attention mailbox command
3018  * status.
3019  *
3020  * Return: Link-attention status in terms of base driver's coding.
3021  **/
3022 static uint16_t
3023 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3024 			   struct lpfc_acqe_link *acqe_link)
3025 {
3026 	uint16_t latt_fault;
3027 
3028 	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3029 	case LPFC_ASYNC_LINK_FAULT_NONE:
3030 	case LPFC_ASYNC_LINK_FAULT_LOCAL:
3031 	case LPFC_ASYNC_LINK_FAULT_REMOTE:
3032 		latt_fault = 0;
3033 		break;
3034 	default:
3035 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3036 				"0398 Invalid link fault code: x%x\n",
3037 				bf_get(lpfc_acqe_link_fault, acqe_link));
3038 		latt_fault = MBXERR_ERROR;
3039 		break;
3040 	}
3041 	return latt_fault;
3042 }
3043 
3044 /**
3045  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3046  * @phba: pointer to lpfc hba data structure.
3047  * @acqe_link: pointer to the async link completion queue entry.
3048  *
3049  * This routine is to parse the SLI4 link attention type and translate it
3050  * into the base driver's link attention type coding.
3051  *
3052  * Return: Link attention type in terms of base driver's coding.
3053  **/
3054 static uint8_t
3055 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3056 			  struct lpfc_acqe_link *acqe_link)
3057 {
3058 	uint8_t att_type;
3059 
3060 	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3061 	case LPFC_ASYNC_LINK_STATUS_DOWN:
3062 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3063 		att_type = AT_LINK_DOWN;
3064 		break;
3065 	case LPFC_ASYNC_LINK_STATUS_UP:
3066 		/* Ignore physical link up events - wait for logical link up */
3067 		att_type = AT_RESERVED;
3068 		break;
3069 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3070 		att_type = AT_LINK_UP;
3071 		break;
3072 	default:
3073 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3074 				"0399 Invalid link attention type: x%x\n",
3075 				bf_get(lpfc_acqe_link_status, acqe_link));
3076 		att_type = AT_RESERVED;
3077 		break;
3078 	}
3079 	return att_type;
3080 }
3081 
3082 /**
3083  * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3084  * @phba: pointer to lpfc hba data structure.
3085  * @acqe_link: pointer to the async link completion queue entry.
3086  *
3087  * This routine is to parse the SLI4 link-attention link speed and translate
3088  * it into the base driver's link-attention link speed coding.
3089  *
3090  * Return: Link-attention link speed in terms of base driver's coding.
3091  **/
3092 static uint8_t
3093 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3094 				struct lpfc_acqe_link *acqe_link)
3095 {
3096 	uint8_t link_speed;
3097 
3098 	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3099 	case LPFC_ASYNC_LINK_SPEED_ZERO:
3100 		link_speed = LA_UNKNW_LINK;
3101 		break;
3102 	case LPFC_ASYNC_LINK_SPEED_10MBPS:
3103 		link_speed = LA_UNKNW_LINK;
3104 		break;
3105 	case LPFC_ASYNC_LINK_SPEED_100MBPS:
3106 		link_speed = LA_UNKNW_LINK;
3107 		break;
3108 	case LPFC_ASYNC_LINK_SPEED_1GBPS:
3109 		link_speed = LA_1GHZ_LINK;
3110 		break;
3111 	case LPFC_ASYNC_LINK_SPEED_10GBPS:
3112 		link_speed = LA_10GHZ_LINK;
3113 		break;
3114 	default:
3115 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3116 				"0483 Invalid link-attention link speed: x%x\n",
3117 				bf_get(lpfc_acqe_link_speed, acqe_link));
3118 		link_speed = LA_UNKNW_LINK;
3119 		break;
3120 	}
3121 	return link_speed;
3122 }
3123 
3124 /**
3125  * lpfc_sli4_async_link_evt - Process the asynchronous link event
3126  * @phba: pointer to lpfc hba data structure.
3127  * @acqe_link: pointer to the async link completion queue entry.
3128  *
3129  * This routine is to handle the SLI4 asynchronous link event.
3130  **/
3131 static void
3132 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3133 			 struct lpfc_acqe_link *acqe_link)
3134 {
3135 	struct lpfc_dmabuf *mp;
3136 	LPFC_MBOXQ_t *pmb;
3137 	MAILBOX_t *mb;
3138 	READ_LA_VAR *la;
3139 	uint8_t att_type;
3140 
3141 	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3142 	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3143 		return;
3144 	phba->fcoe_eventtag = acqe_link->event_tag;
3145 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3146 	if (!pmb) {
3147 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3148 				"0395 The mboxq allocation failed\n");
3149 		return;
3150 	}
3151 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3152 	if (!mp) {
3153 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3154 				"0396 The lpfc_dmabuf allocation failed\n");
3155 		goto out_free_pmb;
3156 	}
3157 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3158 	if (!mp->virt) {
3159 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3160 				"0397 The mbuf allocation failed\n");
3161 		goto out_free_dmabuf;
3162 	}
3163 
3164 	/* Cleanup any outstanding ELS commands */
3165 	lpfc_els_flush_all_cmd(phba);
3166 
3167 	/* Block ELS IOCBs until we have done process link event */
3168 	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3169 
3170 	/* Update link event statistics */
3171 	phba->sli.slistat.link_event++;
3172 
3173 	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3174 	lpfc_read_la(phba, pmb, mp);
3175 	pmb->vport = phba->pport;
3176 
3177 	/* Parse and translate status field */
3178 	mb = &pmb->u.mb;
3179 	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3180 
3181 	/* Parse and translate link attention fields */
3182 	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3183 	la->eventTag = acqe_link->event_tag;
3184 	la->attType = att_type;
3185 	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3186 
3187 	/* Fake the following irrelevant fields */
3188 	la->topology = TOPOLOGY_PT_PT;
3189 	la->granted_AL_PA = 0;
3190 	la->il = 0;
3191 	la->pb = 0;
3192 	la->fa = 0;
3193 	la->mm = 0;
3194 
3195 	/* Keep the link status for extra SLI4 state machine reference */
3196 	phba->sli4_hba.link_state.speed =
3197 				bf_get(lpfc_acqe_link_speed, acqe_link);
3198 	phba->sli4_hba.link_state.duplex =
3199 				bf_get(lpfc_acqe_link_duplex, acqe_link);
3200 	phba->sli4_hba.link_state.status =
3201 				bf_get(lpfc_acqe_link_status, acqe_link);
3202 	phba->sli4_hba.link_state.physical =
3203 				bf_get(lpfc_acqe_link_physical, acqe_link);
3204 	phba->sli4_hba.link_state.fault =
3205 				bf_get(lpfc_acqe_link_fault, acqe_link);
3206 	phba->sli4_hba.link_state.logical_speed =
3207 				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3208 
3209 	/* Invoke the lpfc_handle_latt mailbox command callback function */
3210 	lpfc_mbx_cmpl_read_la(phba, pmb);
3211 
3212 	return;
3213 
3214 out_free_dmabuf:
3215 	kfree(mp);
3216 out_free_pmb:
3217 	mempool_free(pmb, phba->mbox_mem_pool);
3218 }
3219 
3220 /**
3221  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3222  * @vport: pointer to vport data structure.
3223  *
3224  * This routine is to perform Clear Virtual Link (CVL) on a vport in
3225  * response to a CVL event.
3226  *
3227  * Return the pointer to the ndlp with the vport if successful, otherwise
3228  * return NULL.
3229  **/
3230 static struct lpfc_nodelist *
3231 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3232 {
3233 	struct lpfc_nodelist *ndlp;
3234 	struct Scsi_Host *shost;
3235 	struct lpfc_hba *phba;
3236 
3237 	if (!vport)
3238 		return NULL;
3239 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
3240 	if (!ndlp)
3241 		return NULL;
3242 	phba = vport->phba;
3243 	if (!phba)
3244 		return NULL;
3245 	if (phba->pport->port_state <= LPFC_FLOGI)
3246 		return NULL;
3247 	/* If virtual link is not yet instantiated ignore CVL */
3248 	if (vport->port_state <= LPFC_FDISC)
3249 		return NULL;
3250 	shost = lpfc_shost_from_vport(vport);
3251 	if (!shost)
3252 		return NULL;
3253 	lpfc_linkdown_port(vport);
3254 	lpfc_cleanup_pending_mbox(vport);
3255 	spin_lock_irq(shost->host_lock);
3256 	vport->fc_flag |= FC_VPORT_CVL_RCVD;
3257 	spin_unlock_irq(shost->host_lock);
3258 
3259 	return ndlp;
3260 }
3261 
3262 /**
3263  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3264  * @phba: pointer to lpfc hba data structure.
3265  *
3266  * This routine is to perform Clear Virtual Link (CVL) on all vports in
3267  * response to a FCF dead event.
3268  **/
3269 static void
3270 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3271 {
3272 	struct lpfc_vport **vports;
3273 	int i;
3274 
3275 	vports = lpfc_create_vport_work_array(phba);
3276 	if (vports)
3277 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3278 			lpfc_sli4_perform_vport_cvl(vports[i]);
3279 	lpfc_destroy_vport_work_array(phba, vports);
3280 }
3281 
3282 /**
3283  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3284  * @phba: pointer to lpfc hba data structure.
3285  * @acqe_fcoe: pointer to the async fcoe completion queue entry.
3286  *
3287  * This routine is to handle the SLI4 asynchronous fcoe event.
3288  **/
3289 static void
3290 lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3291 			 struct lpfc_acqe_fcoe *acqe_fcoe)
3292 {
3293 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3294 	int rc;
3295 	struct lpfc_vport *vport;
3296 	struct lpfc_nodelist *ndlp;
3297 	struct Scsi_Host  *shost;
3298 	int active_vlink_present;
3299 	struct lpfc_vport **vports;
3300 	int i;
3301 
3302 	phba->fc_eventTag = acqe_fcoe->event_tag;
3303 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
3304 	switch (event_type) {
3305 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3306 	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3307 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3308 			"2546 New FCF found/FCF parameter modified event: "
3309 			"evt_tag:x%x, fcf_index:x%x\n",
3310 			acqe_fcoe->event_tag, acqe_fcoe->index);
3311 
3312 		spin_lock_irq(&phba->hbalock);
3313 		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3314 		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3315 			/*
3316 			 * If the current FCF is in discovered state or
3317 			 * FCF discovery is in progress, do nothing.
3318 			 */
3319 			spin_unlock_irq(&phba->hbalock);
3320 			break;
3321 		}
3322 
3323 		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3324 			/*
3325 			 * If fast FCF failover rescan event is pending,
3326 			 * do nothing.
3327 			 */
3328 			spin_unlock_irq(&phba->hbalock);
3329 			break;
3330 		}
3331 		spin_unlock_irq(&phba->hbalock);
3332 
3333 		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3334 		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3335 			/*
3336 			 * During period of FCF discovery, read the FCF
3337 			 * table record indexed by the event to update
3338 			 * FCF round robin failover eligible FCF bmask.
3339 			 */
3340 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3341 					LOG_DISCOVERY,
3342 					"2779 Read new FCF record with "
3343 					"fcf_index:x%x for updating FCF "
3344 					"round robin failover bmask\n",
3345 					acqe_fcoe->index);
3346 			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3347 		}
3348 
3349 		/* Otherwise, scan the entire FCF table and re-discover SAN */
3350 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3351 				"2770 Start FCF table scan due to new FCF "
3352 				"event: evt_tag:x%x, fcf_index:x%x\n",
3353 				acqe_fcoe->event_tag, acqe_fcoe->index);
3354 		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3355 						     LPFC_FCOE_FCF_GET_FIRST);
3356 		if (rc)
3357 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3358 					"2547 Issue FCF scan read FCF mailbox "
3359 					"command failed 0x%x\n", rc);
3360 		break;
3361 
3362 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3363 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3364 			"2548 FCF Table full count 0x%x tag 0x%x\n",
3365 			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3366 			acqe_fcoe->event_tag);
3367 		break;
3368 
3369 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3370 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3371 			"2549 FCF disconnected from network index 0x%x"
3372 			" tag 0x%x\n", acqe_fcoe->index,
3373 			acqe_fcoe->event_tag);
3374 		/* If the event is not for the currently used FCF, do nothing */
3375 		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3376 			break;
3377 		/* If we are not already in the middle of the FCF failover
3378 		 * process, request the port to rediscover the entire FCF
3379 		 * table for a fast recovery in case the current FCF record
3380 		 * is no longer valid.
3381 		 */
3382 		spin_lock_irq(&phba->hbalock);
3383 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3384 			spin_unlock_irq(&phba->hbalock);
3385 			/* Update FLOGI FCF failover eligible FCF bmask */
3386 			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3387 			break;
3388 		}
3389 		/* Mark the fast failover process in progress */
3390 		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3391 		spin_unlock_irq(&phba->hbalock);
3392 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3393 				"2771 Start FCF fast failover process due to "
3394 				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3395 				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3396 		rc = lpfc_sli4_redisc_fcf_table(phba);
3397 		if (rc) {
3398 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3399 					LOG_DISCOVERY,
3400 					"2772 Issue FCF rediscover mailbox "
3401 					"command failed, fail through to FCF "
3402 					"dead event\n");
3403 			spin_lock_irq(&phba->hbalock);
3404 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3405 			spin_unlock_irq(&phba->hbalock);
3406 			/*
3407 			 * As a last resort, fail over by treating this
3408 			 * as a link down to FCF registration.
3409 			 */
3410 			lpfc_sli4_fcf_dead_failthrough(phba);
3411 		} else
3412 			/* Handling fast FCF failover to a DEAD FCF event
3413 			 * is considered equivalent to receiving CVL on all
3414 			 * vports.
3415 			 */
3416 			lpfc_sli4_perform_all_vport_cvl(phba);
3417 		break;
3418 	case LPFC_FCOE_EVENT_TYPE_CVL:
3419 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3420 			"2718 Clear Virtual Link Received for VPI 0x%x"
3421 			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
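		/*
		 * The CVL event reports an absolute VPI in acqe_fcoe->index;
		 * subtracting the port's vpi_base yields the driver-relative
		 * vpid that lpfc_find_vport_by_vpid() expects.
		 */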
3422 		vport = lpfc_find_vport_by_vpid(phba,
3423 				acqe_fcoe->index - phba->vpi_base);
3424 		ndlp = lpfc_sli4_perform_vport_cvl(vport);
3425 		if (!ndlp)
3426 			break;
3427 		active_vlink_present = 0;
3428 
3429 		vports = lpfc_create_vport_work_array(phba);
3430 		if (vports) {
3431 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3432 					i++) {
3433 				if ((!(vports[i]->fc_flag &
3434 					FC_VPORT_CVL_RCVD)) &&
3435 					(vports[i]->port_state > LPFC_FDISC)) {
3436 					active_vlink_present = 1;
3437 					break;
3438 				}
3439 			}
3440 			lpfc_destroy_vport_work_array(phba, vports);
3441 		}
3442 
3443 		if (active_vlink_present) {
3444 			/*
3445 			 * If there are other active VLinks present,
3446 			 * re-instantiate the Vlink using FDISC.
3447 			 */
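			/* Arm the delayed-ELS timer one second (HZ jiffies) from now */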
3448 			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3449 			shost = lpfc_shost_from_vport(vport);
3450 			spin_lock_irq(shost->host_lock);
3451 			ndlp->nlp_flag |= NLP_DELAY_TMO;
3452 			spin_unlock_irq(shost->host_lock);
3453 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3454 			vport->port_state = LPFC_FDISC;
3455 		} else {
3456 			/*
3457 			 * Otherwise, if we are not already in the
3458 			 * FCF failover process, request the port to
3459 			 * rediscover the entire FCF table for a fast
3460 			 * recovery in case the current FCF is no
3461 			 * longer valid.
3462 			 */
3463 			spin_lock_irq(&phba->hbalock);
3464 			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3465 				spin_unlock_irq(&phba->hbalock);
3466 				break;
3467 			}
3468 			/* Mark the fast failover process in progress */
3469 			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3470 			spin_unlock_irq(&phba->hbalock);
3471 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3472 					LOG_DISCOVERY,
3473 					"2773 Start FCF fast failover due "
3474 					"to CVL event: evt_tag:x%x\n",
3475 					acqe_fcoe->event_tag);
3476 			rc = lpfc_sli4_redisc_fcf_table(phba);
3477 			if (rc) {
3478 				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3479 						LOG_DISCOVERY,
3480 						"2774 Issue FCF rediscover "
3481 						"mailbox command failed, fail "
3482 						"through to CVL event\n");
3483 				spin_lock_irq(&phba->hbalock);
3484 				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3485 				spin_unlock_irq(&phba->hbalock);
3486 				/*
3487 				 * As a last resort, retry on the
3488 				 * currently registered FCF entry.
3489 				 */
3490 				lpfc_retry_pport_discovery(phba);
3491 			}
3492 		}
3493 		break;
3494 	default:
3495 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3496 			"0288 Unknown FCoE event type 0x%x event tag "
3497 			"0x%x\n", event_type, acqe_fcoe->event_tag);
3498 		break;
3499 	}
3500 }
3501 
3502 /**
3503  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3504  * @phba: pointer to lpfc hba data structure.
3505  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3506  *
3507  * This routine is to handle the SLI4 asynchronous dcbx event.
3508  **/
3509 static void
3510 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3511 			 struct lpfc_acqe_dcbx *acqe_dcbx)
3512 {
3513 	phba->fc_eventTag = acqe_dcbx->event_tag;
3514 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3515 			"0290 The SLI4 DCBX asynchronous event is not "
3516 			"handled yet\n");
3517 }
3518 
3519 /**
3520  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3521  * @phba: pointer to lpfc hba data structure.
3522  *
3523  * This routine is invoked by the worker thread to process all the pending
3524  * SLI4 asynchronous events.
3525  **/
3526 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3527 {
3528 	struct lpfc_cq_event *cq_event;
3529 
3530 	/* First, declare the async event has been handled */
3531 	spin_lock_irq(&phba->hbalock);
3532 	phba->hba_flag &= ~ASYNC_EVENT;
3533 	spin_unlock_irq(&phba->hbalock);
3534 	/* Now, handle all the async events */
3535 	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3536 		/* Get the first event from the head of the event queue */
3537 		spin_lock_irq(&phba->hbalock);
3538 		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3539 				 cq_event, struct lpfc_cq_event, list);
3540 		spin_unlock_irq(&phba->hbalock);
3541 		/* Process the asynchronous event */
3542 		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3543 		case LPFC_TRAILER_CODE_LINK:
3544 			lpfc_sli4_async_link_evt(phba,
3545 						 &cq_event->cqe.acqe_link);
3546 			break;
3547 		case LPFC_TRAILER_CODE_FCOE:
3548 			lpfc_sli4_async_fcoe_evt(phba,
3549 						 &cq_event->cqe.acqe_fcoe);
3550 			break;
3551 		case LPFC_TRAILER_CODE_DCBX:
3552 			lpfc_sli4_async_dcbx_evt(phba,
3553 						 &cq_event->cqe.acqe_dcbx);
3554 			break;
3555 		default:
3556 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3557 					"1804 Invalid asynchronous event code: "
3558 					"x%x\n", bf_get(lpfc_trailer_code,
3559 					&cq_event->cqe.mcqe_cmpl));
3560 			break;
3561 		}
3562 		/* Free the completion event processed to the free pool */
3563 		lpfc_sli4_cq_event_release(phba, cq_event);
3564 	}
3565 }
3566 
3567 /**
3568  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3569  * @phba: pointer to lpfc hba data structure.
3570  *
3571  * This routine is invoked by the worker thread to process FCF table
3572  * rediscovery pending completion event.
3573  **/
3574 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3575 {
3576 	int rc;
3577 
3578 	spin_lock_irq(&phba->hbalock);
3579 	/* Clear FCF rediscovery timeout event */
3580 	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3581 	/* Clear driver fast failover FCF record flag */
3582 	phba->fcf.failover_rec.flag = 0;
3583 	/* Set state for FCF fast failover */
3584 	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3585 	spin_unlock_irq(&phba->hbalock);
3586 
3587 	/* Scan FCF table from the first entry to re-discover SAN */
3588 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3589 			"2777 Start FCF table scan after FCF "
3590 			"rediscovery quiescent period over\n");
3591 	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3592 	if (rc)
3593 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3594 				"2747 Issue FCF scan read FCF mailbox "
3595 				"command failed 0x%x\n", rc);
3596 }
3597 
3598 /**
3599  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3600  * @phba: pointer to lpfc hba data structure.
3601  * @dev_grp: The HBA PCI-Device group number.
3602  *
3603  * This routine is invoked to set up the per HBA PCI-Device group function
3604  * API jump table entries.
3605  *
3606  * Return: 0 if success, otherwise -ENODEV
3607  **/
3608 int
3609 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3610 {
3611 	int rc;
3612 
3613 	/* Set up lpfc PCI-device group */
3614 	phba->pci_dev_grp = dev_grp;
3615 
3616 	/* The LPFC_PCI_DEV_OC uses SLI4 */
3617 	if (dev_grp == LPFC_PCI_DEV_OC)
3618 		phba->sli_rev = LPFC_SLI_REV4;
3619 
3620 	/* Set up device INIT API function jump table */
3621 	rc = lpfc_init_api_table_setup(phba, dev_grp);
3622 	if (rc)
3623 		return -ENODEV;
3624 	/* Set up SCSI API function jump table */
3625 	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3626 	if (rc)
3627 		return -ENODEV;
3628 	/* Set up SLI API function jump table */
3629 	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3630 	if (rc)
3631 		return -ENODEV;
3632 	/* Set up MBOX API function jump table */
3633 	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3634 	if (rc)
3635 		return -ENODEV;
3636 
3637 	return 0;
3638 }
3639 
3640 /**
3641  * lpfc_log_intr_mode - Log the active interrupt mode
3642  * @phba: pointer to lpfc hba data structure.
3643  * @intr_mode: active interrupt mode adopted.
3644  *
3645  * This routine is invoked to log the interrupt mode currently in use
3646  * by the device.
3647  **/
3648 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3649 {
3650 	switch (intr_mode) {
3651 	case 0:
3652 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3653 				"0470 Enable INTx interrupt mode.\n");
3654 		break;
3655 	case 1:
3656 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3657 				"0481 Enabled MSI interrupt mode.\n");
3658 		break;
3659 	case 2:
3660 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3661 				"0480 Enabled MSI-X interrupt mode.\n");
3662 		break;
3663 	default:
3664 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3665 				"0482 Illegal interrupt mode.\n");
3666 		break;
3667 	}
3668 	return;
3669 }
3670 
3671 /**
3672  * lpfc_enable_pci_dev - Enable a generic PCI device.
3673  * @phba: pointer to lpfc hba data structure.
3674  *
3675  * This routine is invoked to enable the PCI device that is common to all
3676  * PCI devices.
3677  *
3678  * Return codes
3679  * 	0 - successful
3680  * 	other values - error
3681  **/
3682 static int
3683 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3684 {
3685 	struct pci_dev *pdev;
3686 	int bars;
3687 
3688 	/* Obtain PCI device reference */
3689 	if (!phba->pcidev)
3690 		goto out_error;
3691 	else
3692 		pdev = phba->pcidev;
3693 	/* Select PCI BARs */
3694 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3695 	/* Enable PCI device */
3696 	if (pci_enable_device_mem(pdev))
3697 		goto out_error;
3698 	/* Request PCI resource for the device */
3699 	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3700 		goto out_disable_device;
3701 	/* Set up device as PCI master and save state for EEH */
3702 	pci_set_master(pdev);
3703 	pci_try_set_mwi(pdev);
3704 	pci_save_state(pdev);
3705 
3706 	return 0;
3707 
3708 out_disable_device:
3709 	pci_disable_device(pdev);
3710 out_error:
3711 	return -ENODEV;
3712 }
3713 
3714 /**
3715  * lpfc_disable_pci_dev - Disable a generic PCI device.
3716  * @phba: pointer to lpfc hba data structure.
3717  *
3718  * This routine is invoked to disable the PCI device that is common to all
3719  * PCI devices.
3720  **/
3721 static void
3722 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3723 {
3724 	struct pci_dev *pdev;
3725 	int bars;
3726 
3727 	/* Obtain PCI device reference */
3728 	if (!phba->pcidev)
3729 		return;
3730 	else
3731 		pdev = phba->pcidev;
3732 	/* Select PCI BARs */
3733 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3734 	/* Release PCI resource and disable PCI device */
3735 	pci_release_selected_regions(pdev, bars);
3736 	pci_disable_device(pdev);
3737 	/* Null out PCI private reference to driver */
3738 	pci_set_drvdata(pdev, NULL);
3739 
3740 	return;
3741 }
3742 
3743 /**
3744  * lpfc_reset_hba - Reset a hba
3745  * @phba: pointer to lpfc hba data structure.
3746  *
3747  * This routine is invoked to reset a hba device. It brings the HBA
3748  * offline, performs a board restart, and then brings the board back
3749  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3750  * outstanding mailbox commands.
3751  **/
3752 void
3753 lpfc_reset_hba(struct lpfc_hba *phba)
3754 {
3755 	/* If resets are disabled then set error state and return. */
3756 	if (!phba->cfg_enable_hba_reset) {
3757 		phba->link_state = LPFC_HBA_ERROR;
3758 		return;
3759 	}
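	/* Take the port offline, restart the board, then bring it back up */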
3760 	lpfc_offline_prep(phba);
3761 	lpfc_offline(phba);
3762 	lpfc_sli_brdrestart(phba);
3763 	lpfc_online(phba);
3764 	lpfc_unblock_mgmt_io(phba);
3765 }
3766 
3767 /**
3768  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3769  * @phba: pointer to lpfc hba data structure.
3770  *
3771  * This routine is invoked to set up the driver internal resources specific to
3772  * support the SLI-3 HBA device it is attached to.
3773  *
3774  * Return codes
3775  * 	0 - successful
3776  * 	other values - error
3777  **/
3778 static int
3779 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3780 {
3781 	struct lpfc_sli *psli;
3782 
3783 	/*
3784 	 * Initialize timers used by driver
3785 	 */
3786 
3787 	/* Heartbeat timer */
3788 	init_timer(&phba->hb_tmofunc);
3789 	phba->hb_tmofunc.function = lpfc_hb_timeout;
3790 	phba->hb_tmofunc.data = (unsigned long)phba;
3791 
3792 	psli = &phba->sli;
3793 	/* MBOX heartbeat timer */
3794 	init_timer(&psli->mbox_tmo);
3795 	psli->mbox_tmo.function = lpfc_mbox_timeout;
3796 	psli->mbox_tmo.data = (unsigned long) phba;
3797 	/* FCP polling mode timer */
3798 	init_timer(&phba->fcp_poll_timer);
3799 	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3800 	phba->fcp_poll_timer.data = (unsigned long) phba;
3801 	/* Fabric block timer */
3802 	init_timer(&phba->fabric_block_timer);
3803 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3804 	phba->fabric_block_timer.data = (unsigned long) phba;
3805 	/* EA polling mode timer */
3806 	init_timer(&phba->eratt_poll);
3807 	phba->eratt_poll.function = lpfc_poll_eratt;
3808 	phba->eratt_poll.data = (unsigned long) phba;
3809 
3810 	/* Host attention work mask setup */
3811 	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3812 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
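	/*
	 * The host attention register carries a 4-bit field per ring
	 * (hence the ring number * 4 shift); this enables RX attention
	 * for the ELS ring.
	 */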
3813 
3814 	/* Get all the module params for configuring this host */
3815 	lpfc_get_cfgparam(phba);
3816 	/*
3817 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3818 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3819 	 * 2 segments are added since the IOCB needs a command and response bde.
3820 	 */
3821 	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3822 		sizeof(struct fcp_rsp) +
3823 			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
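	/*
	 * Worked example with hypothetical sizes: a 32-byte fcp_cmnd, a
	 * 24-byte fcp_rsp, 12-byte BDEs and cfg_sg_seg_cnt = 64 would
	 * give 32 + 24 + (64 + 2) * 12 = 848 bytes per DMA buffer.
	 */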
3824 
3825 	if (phba->cfg_enable_bg) {
3826 		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3827 		phba->cfg_sg_dma_buf_size +=
3828 			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3829 	}
3830 
3831 	/* Also reinitialize the host templates with new values. */
3832 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3833 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3834 
3835 	phba->max_vpi = LPFC_MAX_VPI;
3836 	/* This will be set to correct value after config_port mbox */
3837 	phba->max_vports = 0;
3838 
3839 	/*
3840 	 * Initialize the SLI Layer to run with lpfc HBAs.
3841 	 */
3842 	lpfc_sli_setup(phba);
3843 	lpfc_sli_queue_setup(phba);
3844 
3845 	/* Allocate device driver memory */
3846 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3847 		return -ENOMEM;
3848 
3849 	return 0;
3850 }
3851 
3852 /**
3853  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3854  * @phba: pointer to lpfc hba data structure.
3855  *
3856  * This routine is invoked to unset the driver internal resources set up
3857  * specific for supporting the SLI-3 HBA device it is attached to.
3858  **/
3859 static void
3860 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3861 {
3862 	/* Free device driver memory allocated */
3863 	lpfc_mem_free_all(phba);
3864 
3865 	return;
3866 }
3867 
3868 /**
3869  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3870  * @phba: pointer to lpfc hba data structure.
3871  *
3872  * This routine is invoked to set up the driver internal resources specific to
3873  * support the SLI-4 HBA device it is attached to.
3874  *
3875  * Return codes
3876  * 	0 - successful
3877  * 	other values - error
3878  **/
3879 static int
3880 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3881 {
3882 	struct lpfc_sli *psli;
3883 	LPFC_MBOXQ_t *mboxq;
3884 	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3885 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3886 	struct lpfc_mqe *mqe;
3887 	int longs;
3888 
3889 	/* Before proceeding, wait for POST done and device ready */
3890 	rc = lpfc_sli4_post_status_check(phba);
3891 	if (rc)
3892 		return -ENODEV;
3893 
3894 	/*
3895 	 * Initialize timers used by driver
3896 	 */
3897 
3898 	/* Heartbeat timer */
3899 	init_timer(&phba->hb_tmofunc);
3900 	phba->hb_tmofunc.function = lpfc_hb_timeout;
3901 	phba->hb_tmofunc.data = (unsigned long)phba;
3902 
3903 	psli = &phba->sli;
3904 	/* MBOX heartbeat timer */
3905 	init_timer(&psli->mbox_tmo);
3906 	psli->mbox_tmo.function = lpfc_mbox_timeout;
3907 	psli->mbox_tmo.data = (unsigned long) phba;
3908 	/* Fabric block timer */
3909 	init_timer(&phba->fabric_block_timer);
3910 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3911 	phba->fabric_block_timer.data = (unsigned long) phba;
3912 	/* EA polling mode timer */
3913 	init_timer(&phba->eratt_poll);
3914 	phba->eratt_poll.function = lpfc_poll_eratt;
3915 	phba->eratt_poll.data = (unsigned long) phba;
3916 	/* FCF rediscover timer */
3917 	init_timer(&phba->fcf.redisc_wait);
3918 	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3919 	phba->fcf.redisc_wait.data = (unsigned long)phba;
3920 
3921 	/*
3922 	 * We need to do a READ_CONFIG mailbox command here before
3923 	 * calling lpfc_get_cfgparam. For VFs this will report the
3924 	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3925 	 * All of the resources allocated
3926 	 * for this Port are tied to these values.
3927 	 */
3928 	/* Get all the module params for configuring this host */
3929 	lpfc_get_cfgparam(phba);
3930 	phba->max_vpi = LPFC_MAX_VPI;
3931 	/* This will be set to correct value after the read_config mbox */
3932 	phba->max_vports = 0;
3933 
3934 	/* Program the default value of vlan_id and fc_map */
3935 	phba->valid_vlan = 0;
3936 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3937 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3938 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3939 
3940 	/*
3941 	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3942 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3943 	 * 2 segments are added since the IOCB needs a command and response bde.
3944 	 * To ensure that the scsi sgl does not cross a 4k page boundary,
3945 	 * only sgl sizes that are a power of 2 are used.
3946 	 */
3947 	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3948 		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3949 	/* Feature Level 1 hardware is limited to 2 pages */
3950 	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3951 	     LPFC_SLI_INTF_FEATURELEVEL1_1))
3952 		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3953 	else
3954 		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
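	/*
	 * Round buf_size up to the next power-of-2 dma_buf_size, capped at
	 * max_buf_size.  For example, a buf_size of 848 bytes would yield
	 * a dma_buf_size of 1024, assuming 1024 falls within the min/max
	 * bounds.
	 */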
3955 	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3956 	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3957 	     dma_buf_size = dma_buf_size << 1)
3958 		;
3959 	if (dma_buf_size == max_buf_size)
3960 		phba->cfg_sg_seg_cnt = (dma_buf_size -
3961 			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3962 			(2 * sizeof(struct sli4_sge))) /
3963 				sizeof(struct sli4_sge);
3964 	phba->cfg_sg_dma_buf_size = dma_buf_size;
3965 
3966 	/* Initialize buffer queue management fields */
3967 	hbq_count = lpfc_sli_hbq_count();
3968 	for (i = 0; i < hbq_count; ++i)
3969 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3970 	INIT_LIST_HEAD(&phba->rb_pend_list);
3971 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3972 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3973 
3974 	/*
3975 	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3976 	 */
3977 	/* Initialize the Abort scsi buffer list used by driver */
3978 	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3979 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3980 	/* This abort list used by worker thread */
3981 	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3982 
3983 	/*
3984 	 * Initialize driver internal slow-path work queues
3985 	 */
3986 
3987 	/* Driver internal slow-path CQ Event pool */
3988 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3989 	/* Response IOCB work queue list */
3990 	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
3991 	/* Asynchronous event CQ Event work queue list */
3992 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3993 	/* Fast-path XRI aborted CQ Event work queue list */
3994 	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3995 	/* Slow-path XRI aborted CQ Event work queue list */
3996 	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3997 	/* Receive queue CQ Event work queue list */
3998 	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3999 
4000 	/* Initialize the driver internal SLI layer lists. */
4001 	lpfc_sli_setup(phba);
4002 	lpfc_sli_queue_setup(phba);
4003 
4004 	/* Allocate device driver memory */
4005 	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4006 	if (rc)
4007 		return -ENOMEM;
4008 
4009 	/* Create the bootstrap mailbox command */
4010 	rc = lpfc_create_bootstrap_mbox(phba);
4011 	if (unlikely(rc))
4012 		goto out_free_mem;
4013 
4014 	/* Set up the host's endian order with the device. */
4015 	rc = lpfc_setup_endian_order(phba);
4016 	if (unlikely(rc))
4017 		goto out_free_bsmbx;
4018 
4019 	rc = lpfc_sli4_fw_cfg_check(phba);
4020 	if (unlikely(rc))
4021 		goto out_free_bsmbx;
4022 
4023 	/* Set up the hba's configuration parameters. */
4024 	rc = lpfc_sli4_read_config(phba);
4025 	if (unlikely(rc))
4026 		goto out_free_bsmbx;
4027 
4028 	/* Perform a function reset */
4029 	rc = lpfc_pci_function_reset(phba);
4030 	if (unlikely(rc))
4031 		goto out_free_bsmbx;
4032 
4033 	/* Create all the SLI4 queues */
4034 	rc = lpfc_sli4_queue_create(phba);
4035 	if (rc)
4036 		goto out_free_bsmbx;
4037 
4038 	/* Create driver internal CQE event pool */
4039 	rc = lpfc_sli4_cq_event_pool_create(phba);
4040 	if (rc)
4041 		goto out_destroy_queue;
4042 
4043 	/* Initialize and populate the iocb list per host */
4044 	rc = lpfc_init_sgl_list(phba);
4045 	if (rc) {
4046 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4047 				"1400 Failed to initialize sgl list.\n");
4048 		goto out_destroy_cq_event_pool;
4049 	}
4050 	rc = lpfc_init_active_sgl_array(phba);
4051 	if (rc) {
4052 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4053 				"1430 Failed to initialize sgl list.\n");
4054 		goto out_free_sgl_list;
4055 	}
4056 
4057 	rc = lpfc_sli4_init_rpi_hdrs(phba);
4058 	if (rc) {
4059 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4060 				"1432 Failed to initialize rpi headers.\n");
4061 		goto out_free_active_sgl;
4062 	}
4063 
4064 	/* Allocate eligible FCF bmask memory for FCF round robin failover */
4065 	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
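	/*
	 * Standard round-up-to-longs idiom: e.g. with 64-bit longs and a
	 * hypothetical table index max of 32, a single unsigned long backs
	 * the whole bitmask.
	 */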
4066 	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4067 					 GFP_KERNEL);
4068 	if (!phba->fcf.fcf_rr_bmask) {
4069 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4070 				"2759 Failed allocate memory for FCF round "
4071 				"robin failover bmask\n");
4072 		goto out_remove_rpi_hdrs;
4073 	}
4074 
4075 	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4076 				    phba->cfg_fcp_eq_count), GFP_KERNEL);
4077 	if (!phba->sli4_hba.fcp_eq_hdl) {
4078 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4079 				"2572 Failed allocate memory for fast-path "
4080 				"per-EQ handle array\n");
4081 		goto out_free_fcf_rr_bmask;
4082 	}
4083 
4084 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4085 				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4086 	if (!phba->sli4_hba.msix_entries) {
4087 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4088 				"2573 Failed allocate memory for msi-x "
4089 				"interrupt vector entries\n");
4090 		goto out_free_fcp_eq_hdl;
4091 	}
4092 
4093 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4094 						       GFP_KERNEL);
4095 	if (!mboxq) {
4096 		rc = -ENOMEM;
4097 		goto out_free_fcp_eq_hdl;
4098 	}
4099 
4100 	/* Get the Supported Pages. It is always available. */
4101 	lpfc_supported_pages(mboxq);
4102 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4103 	if (unlikely(rc)) {
4104 		rc = -EIO;
4105 		mempool_free(mboxq, phba->mbox_mem_pool);
4106 		goto out_free_fcp_eq_hdl;
4107 	}
4108 
4109 	mqe = &mboxq->u.mqe;
4110 	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4111 	       LPFC_MAX_SUPPORTED_PAGES);
4112 	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4113 		switch (pn_page[i]) {
4114 		case LPFC_SLI4_PARAMETERS:
4115 			phba->sli4_hba.pc_sli4_params.supported = 1;
4116 			break;
4117 		default:
4118 			break;
4119 		}
4120 	}
4121 
4122 	/* Read the port's SLI4 Parameters capabilities if supported. */
4123 	if (phba->sli4_hba.pc_sli4_params.supported)
4124 		rc = lpfc_pc_sli4_params_get(phba, mboxq);
4125 	mempool_free(mboxq, phba->mbox_mem_pool);
4126 	if (rc) {
4127 		rc = -EIO;
4128 		goto out_free_fcp_eq_hdl;
4129 	}
4130 	return rc;
4131 
4132 out_free_fcp_eq_hdl:
4133 	kfree(phba->sli4_hba.fcp_eq_hdl);
4134 out_free_fcf_rr_bmask:
4135 	kfree(phba->fcf.fcf_rr_bmask);
4136 out_remove_rpi_hdrs:
4137 	lpfc_sli4_remove_rpi_hdrs(phba);
4138 out_free_active_sgl:
4139 	lpfc_free_active_sgl(phba);
4140 out_free_sgl_list:
4141 	lpfc_free_sgl_list(phba);
4142 out_destroy_cq_event_pool:
4143 	lpfc_sli4_cq_event_pool_destroy(phba);
4144 out_destroy_queue:
4145 	lpfc_sli4_queue_destroy(phba);
4146 out_free_bsmbx:
4147 	lpfc_destroy_bootstrap_mbox(phba);
4148 out_free_mem:
4149 	lpfc_mem_free(phba);
4150 	return rc;
4151 }
4152 
4153 /**
4154  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4155  * @phba: pointer to lpfc hba data structure.
4156  *
4157  * This routine is invoked to unset the driver internal resources set up
4158  * specific for supporting the SLI-4 HBA device it is attached to.
4159  **/
4160 static void
4161 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4162 {
4163 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4164 
4165 	/* unregister default FCFI from the HBA */
4166 	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4167 
4168 	/* Free the default FCR table */
4169 	lpfc_sli_remove_dflt_fcf(phba);
4170 
4171 	/* Free memory allocated for msi-x interrupt vector entries */
4172 	kfree(phba->sli4_hba.msix_entries);
4173 
4174 	/* Free memory allocated for fast-path work queue handles */
4175 	kfree(phba->sli4_hba.fcp_eq_hdl);
4176 
4177 	/* Free the allocated rpi headers. */
4178 	lpfc_sli4_remove_rpi_hdrs(phba);
4179 	lpfc_sli4_remove_rpis(phba);
4180 
4181 	/* Free eligible FCF index bmask */
4182 	kfree(phba->fcf.fcf_rr_bmask);
4183 
4184 	/* Free the ELS sgl list */
4185 	lpfc_free_active_sgl(phba);
4186 	lpfc_free_sgl_list(phba);
4187 
4188 	/* Free the SCSI sgl management array */
4189 	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4190 
4191 	/* Free the SLI4 queues */
4192 	lpfc_sli4_queue_destroy(phba);
4193 
4194 	/* Free the completion queue EQ event pool */
4195 	lpfc_sli4_cq_event_release_all(phba);
4196 	lpfc_sli4_cq_event_pool_destroy(phba);
4197 
4198 	/* Reset SLI4 HBA FCoE function */
4199 	lpfc_pci_function_reset(phba);
4200 
4201 	/* Free the bsmbx region. */
4202 	lpfc_destroy_bootstrap_mbox(phba);
4203 
4204 	/* Free the SLI Layer memory with SLI4 HBAs */
4205 	lpfc_mem_free_all(phba);
4206 
4207 	/* Free the current connect table */
4208 	list_for_each_entry_safe(conn_entry, next_conn_entry,
4209 		&phba->fcf_conn_rec_list, list) {
4210 		list_del_init(&conn_entry->list);
4211 		kfree(conn_entry);
4212 	}
4213 
4214 	return;
4215 }
4216 
4217 /**
4218  * lpfc_init_api_table_setup - Set up init api function jump table
4219  * @phba: The hba struct for which this call is being executed.
4220  * @dev_grp: The HBA PCI-Device group number.
4221  *
4222  * This routine sets up the device INIT interface API function jump table
4223  * in @phba struct.
4224  *
4225  * Returns: 0 - success, -ENODEV - failure.
4226  **/
4227 int
4228 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4229 {
4230 	phba->lpfc_hba_init_link = lpfc_hba_init_link;
4231 	phba->lpfc_hba_down_link = lpfc_hba_down_link;
4232 	switch (dev_grp) {
4233 	case LPFC_PCI_DEV_LP:
4234 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4235 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4236 		phba->lpfc_stop_port = lpfc_stop_port_s3;
4237 		break;
4238 	case LPFC_PCI_DEV_OC:
4239 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4240 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4241 		phba->lpfc_stop_port = lpfc_stop_port_s4;
4242 		break;
4243 	default:
4244 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4245 				"1431 Invalid HBA PCI-device group: 0x%x\n",
4246 				dev_grp);
4247 		return -ENODEV;
4248 		break;
4249 	}
4250 	return 0;
4251 }
4252 
4253 /**
4254  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4255  * @phba: pointer to lpfc hba data structure.
4256  *
4257  * This routine is invoked to set up the driver internal resources before the
4258  * device specific resource setup to support the HBA device it attached to.
4259  *
4260  * Return codes
4261  *	0 - successful
4262  *	other values - error
4263  **/
4264 static int
4265 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4266 {
4267 	/*
4268 	 * Driver resources common to all SLI revisions
4269 	 */
4270 	atomic_set(&phba->fast_event_count, 0);
4271 	spin_lock_init(&phba->hbalock);
4272 
4273 	/* Initialize ndlp management spinlock */
4274 	spin_lock_init(&phba->ndlp_lock);
4275 
4276 	INIT_LIST_HEAD(&phba->port_list);
4277 	INIT_LIST_HEAD(&phba->work_list);
4278 	init_waitqueue_head(&phba->wait_4_mlo_m_q);
4279 
4280 	/* Initialize the wait queue head for the kernel thread */
4281 	init_waitqueue_head(&phba->work_waitq);
4282 
4283 	/* Initialize the scsi buffer list used by driver for scsi IO */
4284 	spin_lock_init(&phba->scsi_buf_list_lock);
4285 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4286 
4287 	/* Initialize the fabric iocb list */
4288 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
4289 
4290 	/* Initialize list to save ELS buffers */
4291 	INIT_LIST_HEAD(&phba->elsbuf);
4292 
4293 	/* Initialize FCF connection rec list */
4294 	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4295 
4296 	return 0;
4297 }
4298 
4299 /**
4300  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4301  * @phba: pointer to lpfc hba data structure.
4302  *
4303  * This routine is invoked to set up the driver internal resources after the
4304  * device specific resource setup to support the HBA device it is attached to.
4305  *
4306  * Return codes
4307  * 	0 - successful
4308  * 	other values - error
4309  **/
4310 static int
4311 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4312 {
4313 	int error;
4314 
4315 	/* Startup the kernel thread for this host adapter. */
4316 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
4317 					  "lpfc_worker_%d", phba->brd_no);
4318 	if (IS_ERR(phba->worker_thread)) {
4319 		error = PTR_ERR(phba->worker_thread);
4320 		return error;
4321 	}
4322 
4323 	return 0;
4324 }
4325 
4326 /**
4327  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4328  * @phba: pointer to lpfc hba data structure.
4329  *
4330  * This routine is invoked to unset the driver internal resources set up after
4331  * the device specific resource setup for supporting the HBA device it
4332  * is attached to.
4333  **/
4334 static void
4335 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4336 {
4337 	/* Stop kernel worker thread */
4338 	kthread_stop(phba->worker_thread);
4339 }
4340 
4341 /**
4342  * lpfc_free_iocb_list - Free iocb list.
4343  * @phba: pointer to lpfc hba data structure.
4344  *
4345  * This routine is invoked to free the driver's IOCB list and memory.
4346  **/
4347 static void
4348 lpfc_free_iocb_list(struct lpfc_hba *phba)
4349 {
4350 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4351 
4352 	spin_lock_irq(&phba->hbalock);
4353 	list_for_each_entry_safe(iocbq_entry, iocbq_next,
4354 				 &phba->lpfc_iocb_list, list) {
4355 		list_del(&iocbq_entry->list);
4356 		kfree(iocbq_entry);
4357 		phba->total_iocbq_bufs--;
4358 	}
4359 	spin_unlock_irq(&phba->hbalock);
4360 
4361 	return;
4362 }
4363 
4364 /**
4365  * lpfc_init_iocb_list - Allocate and initialize iocb list.
4366  * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCB entries to allocate and initialize.
4367  *
4368  * This routine is invoked to allocate and initialize the driver's IOCB
4369  * list and set up the IOCB tag array accordingly.
4370  *
4371  * Return codes
4372  *	0 - successful
4373  *	other values - error
4374  **/
4375 static int
4376 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4377 {
4378 	struct lpfc_iocbq *iocbq_entry = NULL;
4379 	uint16_t iotag;
4380 	int i;
4381 
4382 	/* Initialize and populate the iocb list per host.  */
4383 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4384 	for (i = 0; i < iocb_count; i++) {
4385 		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4386 		if (iocbq_entry == NULL) {
4387 			printk(KERN_ERR "%s: only allocated %d iocbs of "
4388 				"expected %d count. Unloading driver.\n",
4389 				__func__, i, iocb_count);
4390 			goto out_free_iocbq;
4391 		}
4392 
4393 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4394 		if (iotag == 0) {
4395 			kfree(iocbq_entry);
4396 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
4397 				"Unloading driver.\n", __func__);
4398 			goto out_free_iocbq;
4399 		}
4400 		iocbq_entry->sli4_xritag = NO_XRI;
4401 
4402 		spin_lock_irq(&phba->hbalock);
4403 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4404 		phba->total_iocbq_bufs++;
4405 		spin_unlock_irq(&phba->hbalock);
4406 	}
4407 
4408 	return 0;
4409 
4410 out_free_iocbq:
4411 	lpfc_free_iocb_list(phba);
4412 
4413 	return -ENOMEM;
4414 }
4415 
4416 /**
4417  * lpfc_free_sgl_list - Free sgl list.
4418  * @phba: pointer to lpfc hba data structure.
4419  *
4420  * This routine is invoked to free the driver's sgl list and memory.
4421  **/
4422 static void
4423 lpfc_free_sgl_list(struct lpfc_hba *phba)
4424 {
4425 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4426 	LIST_HEAD(sglq_list);
4427 	int rc = 0;
4428 
4429 	spin_lock_irq(&phba->hbalock);
4430 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4431 	spin_unlock_irq(&phba->hbalock);
4432 
4433 	list_for_each_entry_safe(sglq_entry, sglq_next,
4434 				 &sglq_list, list) {
4435 		list_del(&sglq_entry->list);
4436 		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4437 		kfree(sglq_entry);
4438 		phba->sli4_hba.total_sglq_bufs--;
4439 	}
4440 	rc = lpfc_sli4_remove_all_sgl_pages(phba);
4441 	if (rc) {
4442 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4443 			"2005 Unable to deregister pages from HBA: %x\n", rc);
4444 	}
4445 	kfree(phba->sli4_hba.lpfc_els_sgl_array);
4446 }
4447 
4448 /**
4449  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4450  * @phba: pointer to lpfc hba data structure.
4451  *
4452  * This routine is invoked to allocate the driver's active sgl memory.
4453  * This array will hold the sglq_entry's for active IOs.
4454  **/
4455 static int
4456 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4457 {
4458 	int size;
4459 	size = sizeof(struct lpfc_sglq *);
4460 	size *= phba->sli4_hba.max_cfg_param.max_xri;
4461 
4462 	phba->sli4_hba.lpfc_sglq_active_list =
4463 		kzalloc(size, GFP_KERNEL);
4464 	if (!phba->sli4_hba.lpfc_sglq_active_list)
4465 		return -ENOMEM;
4466 	return 0;
4467 }
4468 
4469 /**
4470  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4471  * @phba: pointer to lpfc hba data structure.
4472  *
4473  * This routine is invoked to walk through the array of active sglq entries
4474  * and free all of the resources.
4475  * This is just a place holder for now.
4476  **/
4477 static void
4478 lpfc_free_active_sgl(struct lpfc_hba *phba)
4479 {
4480 	kfree(phba->sli4_hba.lpfc_sglq_active_list);
4481 }
4482 
4483 /**
4484  * lpfc_init_sgl_list - Allocate and initialize sgl list.
4485  * @phba: pointer to lpfc hba data structure.
4486  *
4487  * This routine is invoked to allocate and initialize the driver's sgl
4488  * list and set up the sgl xritag array accordingly.
4489  *
4490  * Return codes
4491  *	0 - successful
4492  *	other values - error
4493  **/
4494 static int
4495 lpfc_init_sgl_list(struct lpfc_hba *phba)
4496 {
4497 	struct lpfc_sglq *sglq_entry = NULL;
4498 	int i;
4499 	int els_xri_cnt;
4500 
4501 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4502 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4503 				"2400 lpfc_init_sgl_list els %d.\n",
4504 				els_xri_cnt);
4505 	/* Initialize and populate the sglq list per host/VF. */
4506 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4507 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4508 
4509 	/* Sanity check on XRI management */
4510 	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4511 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4512 				"2562 No room left for SCSI XRI allocation: "
4513 				"max_xri=%d, els_xri=%d\n",
4514 				phba->sli4_hba.max_cfg_param.max_xri,
4515 				els_xri_cnt);
4516 		return -ENOMEM;
4517 	}
4518 
4519 	/* Allocate memory for the ELS XRI management array */
4520 	phba->sli4_hba.lpfc_els_sgl_array =
4521 			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4522 			GFP_KERNEL);
4523 
4524 	if (!phba->sli4_hba.lpfc_els_sgl_array) {
4525 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4526 				"2401 Failed to allocate memory for ELS "
4527 				"XRI management array of size %d.\n",
4528 				els_xri_cnt);
4529 		return -ENOMEM;
4530 	}
4531 
4532 	/* The XRIs remaining after the ELS allocation are reserved for SCSI */
4533 	phba->sli4_hba.scsi_xri_max =
4534 			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4535 	phba->sli4_hba.scsi_xri_cnt = 0;
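	/*
	 * For example (hypothetical values): with max_xri = 1024 and
	 * els_xri_cnt = 256, 768 XRIs would remain available for SCSI I/O.
	 */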
4536 
4537 	phba->sli4_hba.lpfc_scsi_psb_array =
4538 			kzalloc((sizeof(struct lpfc_scsi_buf *) *
4539 			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4540 
4541 	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4542 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4543 				"2563 Failed to allocate memory for SCSI "
4544 				"XRI management array of size %d.\n",
4545 				phba->sli4_hba.scsi_xri_max);
4546 		kfree(phba->sli4_hba.lpfc_els_sgl_array);
4547 		return -ENOMEM;
4548 	}
4549 
4550 	for (i = 0; i < els_xri_cnt; i++) {
4551 		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4552 		if (sglq_entry == NULL) {
4553 			printk(KERN_ERR "%s: only allocated %d sgls of "
4554 				"expected %d count. Unloading driver.\n",
4555 				__func__, i, els_xri_cnt);
4556 			goto out_free_mem;
4557 		}
4558 
4559 		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4560 		if (sglq_entry->sli4_xritag == NO_XRI) {
4561 			kfree(sglq_entry);
4562 			printk(KERN_ERR "%s: failed to allocate XRI.\n"
4563 				"Unloading driver.\n", __func__);
4564 			goto out_free_mem;
4565 		}
4566 		sglq_entry->buff_type = GEN_BUFF_TYPE;
4567 		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4568 		if (sglq_entry->virt == NULL) {
4569 			kfree(sglq_entry);
4570 			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4571 				"Unloading driver.\n", __func__);
4572 			goto out_free_mem;
4573 		}
4574 		sglq_entry->sgl = sglq_entry->virt;
4575 		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4576 
4577 		/* The list order is used by later block SGL registration */
4578 		spin_lock_irq(&phba->hbalock);
4579 		sglq_entry->state = SGL_FREED;
4580 		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4581 		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4582 		phba->sli4_hba.total_sglq_bufs++;
4583 		spin_unlock_irq(&phba->hbalock);
4584 	}
4585 	return 0;
4586 
4587 out_free_mem:
4588 	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4589 	lpfc_free_sgl_list(phba);
4590 	return -ENOMEM;
4591 }
4592 
4593 /**
4594  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4595  * @phba: pointer to lpfc hba data structure.
4596  *
4597  * This routine is invoked to post rpi header templates to the
4598  * HBA consistent with the SLI-4 interface spec.  This routine
4599  * posts a PAGE_SIZE memory region to the port to hold up to
4600  * 64 rpi context headers.
4601  * No locks are held here because this is an initialization routine
4602  * called only from probe or lpfc_online when interrupts are not
4603  * enabled and the driver is reinitializing the device.
4604  *
4605  * Return codes
4606  * 	0 - successful
4607  * 	ENOMEM - No available memory
4608  *      EIO - The mailbox failed to complete successfully.
4609  **/
4610 int
4611 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4612 {
4613 	int rc = 0;
4614 	int longs;
4615 	uint16_t rpi_count;
4616 	struct lpfc_rpi_hdr *rpi_hdr;
4617 
4618 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4619 
4620 	/*
4621 	 * Provision an rpi bitmask range for discovery. The total count
4622 	 * is the difference between max and base + 1.
4623 	 */
4624 	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4625 		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4626 
4627 	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4628 	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4629 					   GFP_KERNEL);
4630 	if (!phba->sli4_hba.rpi_bmask)
4631 		return -ENOMEM;
4632 
4633 	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4634 	if (!rpi_hdr) {
4635 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4636 				"0391 Error during rpi post operation\n");
4637 		lpfc_sli4_remove_rpis(phba);
4638 		rc = -ENODEV;
4639 	}
4640 
4641 	return rc;
4642 }
4643 
4644 /**
4645  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4646  * @phba: pointer to lpfc hba data structure.
4647  *
4648  * This routine is invoked to allocate a single 4KB memory region to
4649  * support rpis and stores them in the phba.  This single region
4650  * provides support for up to 64 rpis.  The region is used globally
4651  * by the device.
4652  *
4653  * Returns:
4654  *   A valid rpi hdr on success.
4655  *   A NULL pointer on any failure.
4656  **/
4657 struct lpfc_rpi_hdr *
4658 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4659 {
4660 	uint16_t rpi_limit, curr_rpi_range;
4661 	struct lpfc_dmabuf *dmabuf;
4662 	struct lpfc_rpi_hdr *rpi_hdr;
4663 
4664 	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4665 		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
4666 
4667 	spin_lock_irq(&phba->hbalock);
4668 	curr_rpi_range = phba->sli4_hba.next_rpi;
4669 	spin_unlock_irq(&phba->hbalock);
4670 
4671 	/*
4672 	 * The port has a limited number of rpis. The increment here
4673 	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4674 	 * and to allow the full max_rpi range per port.
4675 	 */
4676 	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4677 		return NULL;
4678 
4679 	/*
4680 	 * First allocate the protocol header region for the port.  The
4681 	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4682 	 */
4683 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4684 	if (!dmabuf)
4685 		return NULL;
4686 
4687 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4688 					  LPFC_HDR_TEMPLATE_SIZE,
4689 					  &dmabuf->phys,
4690 					  GFP_KERNEL);
4691 	if (!dmabuf->virt) {
4692 		rpi_hdr = NULL;
4693 		goto err_free_dmabuf;
4694 	}
4695 
4696 	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4697 	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4698 		rpi_hdr = NULL;
4699 		goto err_free_coherent;
4700 	}
4701 
4702 	/* Save the rpi header data for cleanup later. */
4703 	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4704 	if (!rpi_hdr)
4705 		goto err_free_coherent;
4706 
4707 	rpi_hdr->dmabuf = dmabuf;
4708 	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4709 	rpi_hdr->page_count = 1;
4710 	spin_lock_irq(&phba->hbalock);
4711 	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4712 	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4713 
4714 	/*
4715 	 * The next_rpi stores the next modulo-64 rpi value to post
4716 	 * in any subsequent rpi memory region postings.
4717 	 */
4718 	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4719 	spin_unlock_irq(&phba->hbalock);
4720 	return rpi_hdr;
4721 
4722  err_free_coherent:
4723 	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4724 			  dmabuf->virt, dmabuf->phys);
4725  err_free_dmabuf:
4726 	kfree(dmabuf);
4727 	return NULL;
4728 }
4729 
4730 /**
4731  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4732  * @phba: pointer to lpfc hba data structure.
4733  *
4734  * This routine is invoked to remove all memory resources allocated
4735  * to support rpis. This routine presumes the caller has released all
4736  * rpis consumed by fabric or port logins and is prepared to have
4737  * the header pages removed.
4738  **/
4739 void
4740 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4741 {
4742 	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4743 
4744 	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4745 				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4746 		list_del(&rpi_hdr->list);
4747 		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4748 				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4749 		kfree(rpi_hdr->dmabuf);
4750 		kfree(rpi_hdr);
4751 	}
4752 
4753 	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4754 	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4755 }
4756 
4757 /**
4758  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4759  * @pdev: pointer to pci device data structure.
4760  *
4761  * This routine is invoked to allocate the driver hba data structure for an
4762  * HBA device. If the allocation is successful, the phba reference to the
4763  * PCI device data structure is set.
4764  *
4765  * Return codes
4766  *      pointer to @phba - successful
4767  *      NULL - error
4768  **/
4769 static struct lpfc_hba *
4770 lpfc_hba_alloc(struct pci_dev *pdev)
4771 {
4772 	struct lpfc_hba *phba;
4773 
4774 	/* Allocate memory for HBA structure */
4775 	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4776 	if (!phba) {
4777 		dev_err(&pdev->dev, "failed to allocate hba struct\n");
4778 		return NULL;
4779 	}
4780 
4781 	/* Set reference to PCI device in HBA structure */
4782 	phba->pcidev = pdev;
4783 
4784 	/* Assign an unused board number */
4785 	phba->brd_no = lpfc_get_instance();
4786 	if (phba->brd_no < 0) {
4787 		kfree(phba);
4788 		return NULL;
4789 	}
4790 
4791 	spin_lock_init(&phba->ct_ev_lock);
4792 	INIT_LIST_HEAD(&phba->ct_ev_waiters);
4793 
4794 	return phba;
4795 }
4796 
4797 /**
4798  * lpfc_hba_free - Free driver hba data structure with a device.
4799  * @phba: pointer to lpfc hba data structure.
4800  *
4801  * This routine is invoked to free the driver hba data structure with an
4802  * HBA device.
4803  **/
4804 static void
4805 lpfc_hba_free(struct lpfc_hba *phba)
4806 {
4807 	/* Release the driver assigned board number */
4808 	idr_remove(&lpfc_hba_index, phba->brd_no);
4809 
4810 	kfree(phba);
4811 	return;
4812 }
4813 
4814 /**
4815  * lpfc_create_shost - Create hba physical port with associated scsi host.
4816  * @phba: pointer to lpfc hba data structure.
4817  *
4818  * This routine is invoked to create HBA physical port and associate a SCSI
4819  * host with it.
4820  *
4821  * Return codes
4822  *      0 - successful
4823  *      other values - error
4824  **/
4825 static int
4826 lpfc_create_shost(struct lpfc_hba *phba)
4827 {
4828 	struct lpfc_vport *vport;
4829 	struct Scsi_Host  *shost;
4830 
4831 	/* Initialize HBA FC structure */
4832 	phba->fc_edtov = FF_DEF_EDTOV;
4833 	phba->fc_ratov = FF_DEF_RATOV;
4834 	phba->fc_altov = FF_DEF_ALTOV;
4835 	phba->fc_arbtov = FF_DEF_ARBTOV;
4836 
4837 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4838 	if (!vport)
4839 		return -ENODEV;
4840 
4841 	shost = lpfc_shost_from_vport(vport);
4842 	phba->pport = vport;
4843 	lpfc_debugfs_initialize(vport);
4844 	/* Put reference to SCSI host to driver's device private data */
4845 	pci_set_drvdata(phba->pcidev, shost);
4846 
4847 	return 0;
4848 }
4849 
4850 /**
4851  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4852  * @phba: pointer to lpfc hba data structure.
4853  *
4854  * This routine is invoked to destroy HBA physical port and the associated
4855  * SCSI host.
4856  **/
4857 static void
4858 lpfc_destroy_shost(struct lpfc_hba *phba)
4859 {
4860 	struct lpfc_vport *vport = phba->pport;
4861 
4862 	/* Destroy physical port that associated with the SCSI host */
4863 	destroy_port(vport);
4864 
4865 	return;
4866 }
4867 
4868 /**
4869  * lpfc_setup_bg - Setup Block guard structures and debug areas.
4870  * @phba: pointer to lpfc hba data structure.
4871  * @shost: the shost to be used to detect Block guard settings.
4872  *
4873  * This routine sets up the local Block guard protocol settings for @shost.
4874  * This routine also allocates memory for debugging bg buffers.
4875  **/
4876 static void
4877 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4878 {
4879 	int pagecnt = 10;
4880 	if (lpfc_prot_mask && lpfc_prot_guard) {
4881 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4882 				"1478 Registering BlockGuard with the "
4883 				"SCSI layer\n");
4884 		scsi_host_set_prot(shost, lpfc_prot_mask);
4885 		scsi_host_set_guard(shost, lpfc_prot_guard);
4886 	}
4887 	if (!_dump_buf_data) {
4888 		while (pagecnt) {
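			/*
			 * pagecnt is used as an allocation order, so a
			 * successful call returns 2^pagecnt contiguous
			 * pages; back off one order per failed attempt.
			 */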
4889 			spin_lock_init(&_dump_buf_lock);
4890 			_dump_buf_data =
4891 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4892 			if (_dump_buf_data) {
4893 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4894 					"9043 BLKGRD: allocated %d pages for "
4895 				       "_dump_buf_data at 0x%p\n",
4896 				       (1 << pagecnt), _dump_buf_data);
4897 				_dump_buf_data_order = pagecnt;
4898 				memset(_dump_buf_data, 0,
4899 				       ((1 << PAGE_SHIFT) << pagecnt));
4900 				break;
4901 			} else
4902 				--pagecnt;
4903 		}
4904 		if (!_dump_buf_data_order)
4905 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4906 				"9044 BLKGRD: ERROR unable to allocate "
4907 			       "memory for hexdump\n");
4908 	} else
4909 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4910 			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4911 		       "\n", _dump_buf_data);
4912 	if (!_dump_buf_dif) {
4913 		while (pagecnt) {
4914 			_dump_buf_dif =
4915 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
4916 			if (_dump_buf_dif) {
4917 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4918 					"9046 BLKGRD: allocated %d pages for "
4919 				       "_dump_buf_dif at 0x%p\n",
4920 				       (1 << pagecnt), _dump_buf_dif);
4921 				_dump_buf_dif_order = pagecnt;
4922 				memset(_dump_buf_dif, 0,
4923 				       ((1 << PAGE_SHIFT) << pagecnt));
4924 				break;
4925 			} else
4926 				--pagecnt;
4927 		}
4928 		if (!_dump_buf_dif_order)
4929 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4930 			"9047 BLKGRD: ERROR unable to allocate "
4931 			       "memory for hexdump\n");
4932 	} else
4933 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4934 			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4935 		       _dump_buf_dif);
4936 }
4937 
4938 /**
4939  * lpfc_post_init_setup - Perform necessary device post initialization setup.
4940  * @phba: pointer to lpfc hba data structure.
4941  *
4942  * This routine is invoked to perform all the necessary post initialization
4943  * setup for the device.
4944  **/
4945 static void
4946 lpfc_post_init_setup(struct lpfc_hba *phba)
4947 {
4948 	struct Scsi_Host  *shost;
4949 	struct lpfc_adapter_event_header adapter_event;
4950 
4951 	/* Get the default values for Model Name and Description */
4952 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4953 
4954 	/*
4955 	 * hba setup may have changed the hba_queue_depth so we need to
4956 	 * adjust the value of can_queue.
4957 	 */
4958 	shost = pci_get_drvdata(phba->pcidev);
4959 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4960 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4961 		lpfc_setup_bg(phba, shost);
4962 
4963 	lpfc_host_attrib_init(shost);
4964 
4965 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4966 		spin_lock_irq(shost->host_lock);
4967 		lpfc_poll_start_timer(phba);
4968 		spin_unlock_irq(shost->host_lock);
4969 	}
4970 
4971 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4972 			"0428 Perform SCSI scan\n");
4973 	/* Send board arrival event to upper layer */
4974 	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4975 	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4976 	fc_host_post_vendor_event(shost, fc_get_event_number(),
4977 				  sizeof(adapter_event),
4978 				  (char *) &adapter_event,
4979 				  LPFC_NL_VENDOR_ID);
4980 	return;
4981 }
4982 
4983 /**
4984  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4985  * @phba: pointer to lpfc hba data structure.
4986  *
4987  * This routine is invoked to set up the PCI device memory space for device
4988  * with SLI-3 interface spec.
4989  *
4990  * Return codes
4991  * 	0 - successful
4992  * 	other values - error
4993  **/
4994 static int
4995 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4996 {
4997 	struct pci_dev *pdev;
4998 	unsigned long bar0map_len, bar2map_len;
4999 	int i, hbq_count;
5000 	void *ptr;
5001 	int error = -ENODEV;
5002 
5003 	/* Obtain PCI device reference */
5004 	if (!phba->pcidev)
5005 		return error;
5006 	else
5007 		pdev = phba->pcidev;
5008 
5009 	/* Set the device DMA mask size */
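	/* Prefer a 64-bit DMA mask; fall back to 32-bit if that fails */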
5010 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5011 	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5012 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5013 		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5014 			return error;
5015 		}
5016 	}
5017 
5018 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
5019 	 * required by each mapping.
5020 	 */
5021 	phba->pci_bar0_map = pci_resource_start(pdev, 0);
5022 	bar0map_len = pci_resource_len(pdev, 0);
5023 
5024 	phba->pci_bar2_map = pci_resource_start(pdev, 2);
5025 	bar2map_len = pci_resource_len(pdev, 2);
5026 
5027 	/* Map HBA SLIM to a kernel virtual address. */
5028 	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5029 	if (!phba->slim_memmap_p) {
5030 		dev_printk(KERN_ERR, &pdev->dev,
5031 			   "ioremap failed for SLIM memory.\n");
5032 		goto out;
5033 	}
5034 
5035 	/* Map HBA Control Registers to a kernel virtual address. */
5036 	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5037 	if (!phba->ctrl_regs_memmap_p) {
5038 		dev_printk(KERN_ERR, &pdev->dev,
5039 			   "ioremap failed for HBA control registers.\n");
5040 		goto out_iounmap_slim;
5041 	}
5042 
5043 	/* Allocate memory for SLI-2 structures */
5044 	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5045 					       SLI2_SLIM_SIZE,
5046 					       &phba->slim2p.phys,
5047 					       GFP_KERNEL);
5048 	if (!phba->slim2p.virt)
5049 		goto out_iounmap;
5050 
5051 	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5052 	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5053 	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5054 	phba->IOCBs = (phba->slim2p.virt +
5055 		       offsetof(struct lpfc_sli2_slim, IOCBs));
5056 
5057 	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5058 						 lpfc_sli_hbq_size(),
5059 						 &phba->hbqslimp.phys,
5060 						 GFP_KERNEL);
5061 	if (!phba->hbqslimp.virt)
5062 		goto out_free_slim;
5063 
5064 	hbq_count = lpfc_sli_hbq_count();
5065 	ptr = phba->hbqslimp.virt;
5066 	for (i = 0; i < hbq_count; ++i) {
5067 		phba->hbqs[i].hbq_virt = ptr;
5068 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5069 		ptr += (lpfc_hbq_defs[i]->entry_count *
5070 			sizeof(struct lpfc_hbq_entry));
5071 	}
5072 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5073 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5074 
5075 	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5076 
5077 	INIT_LIST_HEAD(&phba->rb_pend_list);
5078 
5079 	phba->MBslimaddr = phba->slim_memmap_p;
5080 	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5081 	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5082 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5083 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5084 
5085 	return 0;
5086 
5087 out_free_slim:
5088 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5089 			  phba->slim2p.virt, phba->slim2p.phys);
5090 out_iounmap:
5091 	iounmap(phba->ctrl_regs_memmap_p);
5092 out_iounmap_slim:
5093 	iounmap(phba->slim_memmap_p);
5094 out:
5095 	return error;
5096 }
5097 
5098 /**
5099  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5100  * @phba: pointer to lpfc hba data structure.
5101  *
5102  * This routine is invoked to unset the PCI device memory space for device
5103  * with SLI-3 interface spec.
5104  **/
5105 static void
5106 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5107 {
5108 	struct pci_dev *pdev;
5109 
5110 	/* Obtain PCI device reference */
5111 	if (!phba->pcidev)
5112 		return;
5113 	else
5114 		pdev = phba->pcidev;
5115 
5116 	/* Free coherent DMA memory allocated */
5117 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5118 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
5119 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5120 			  phba->slim2p.virt, phba->slim2p.phys);
5121 
5122 	/* I/O memory unmap */
5123 	iounmap(phba->ctrl_regs_memmap_p);
5124 	iounmap(phba->slim_memmap_p);
5125 
5126 	return;
5127 }
5128 
5129 /**
5130  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5131  * @phba: pointer to lpfc hba data structure.
5132  *
5133  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5134  * done and check status.
5135  *
5136  * Return 0 if successful, otherwise -ENODEV.
5137  **/
5138 int
5139 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5140 {
5141 	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5142 	int i, port_error = -ENODEV;
5143 
5144 	if (!phba->sli4_hba.STAregaddr)
5145 		return -ENODEV;
5146 
5147 	/* Wait up to 30 seconds for the SLI Port POST done and ready */
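	/* 3000 polls x 10 ms sleep per poll = 30 second ceiling */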
5148 	for (i = 0; i < 3000; i++) {
5149 		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5150 		/* Encounter fatal POST error, break out */
5151 		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5152 			port_error = -ENODEV;
5153 			break;
5154 		}
5155 		if (LPFC_POST_STAGE_ARMFW_READY ==
5156 		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5157 			port_error = 0;
5158 			break;
5159 		}
5160 		msleep(10);
5161 	}
5162 
5163 	if (port_error)
5164 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5165 			"1408 Failure HBA POST Status: sta_reg=0x%x, "
5166 			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5167 			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
5168 			bf_get(lpfc_hst_state_perr, &sta_reg),
5169 			bf_get(lpfc_hst_state_sfi, &sta_reg),
5170 			bf_get(lpfc_hst_state_nip, &sta_reg),
5171 			bf_get(lpfc_hst_state_ipc, &sta_reg),
5172 			bf_get(lpfc_hst_state_xrom, &sta_reg),
5173 			bf_get(lpfc_hst_state_dl, &sta_reg),
5174 			bf_get(lpfc_hst_state_port_status, &sta_reg));
5175 
5176 	/* Log device information */
5177 	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5178 	if (bf_get(lpfc_sli_intf_valid,
5179 		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5180 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5181 				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5182 				"FeatureL1=0x%x, FeatureL2=0x%x\n",
5183 				bf_get(lpfc_sli_intf_sli_family,
5184 				       &phba->sli4_hba.sli_intf),
5185 				bf_get(lpfc_sli_intf_slirev,
5186 				       &phba->sli4_hba.sli_intf),
5187 				bf_get(lpfc_sli_intf_featurelevel1,
5188 				       &phba->sli4_hba.sli_intf),
5189 				bf_get(lpfc_sli_intf_featurelevel2,
5190 				       &phba->sli4_hba.sli_intf));
5191 	}
5192 	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5193 	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5194 	/* With an unrecoverable error, log the error message and return error */
5195 	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5196 	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
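	/* An unmasked error bit that is set indicates a genuine failure */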
5197 	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5198 	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5199 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5200 				"1422 HBA Unrecoverable error: "
5201 				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5202 				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5203 				uerrlo_reg.word0, uerrhi_reg.word0,
5204 				phba->sli4_hba.ue_mask_lo,
5205 				phba->sli4_hba.ue_mask_hi);
5206 		return -ENODEV;
5207 	}
5208 
5209 	return port_error;
5210 }
5211 
5212 /**
5213  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5214  * @phba: pointer to lpfc hba data structure.
5215  *
5216  * This routine is invoked to set up SLI4 BAR0 PCI config space register
5217  * memory map.
5218  **/
5219 static void
5220 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5221 {
5222 	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5223 					LPFC_UERR_STATUS_LO;
5224 	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5225 					LPFC_UERR_STATUS_HI;
5226 	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5227 					LPFC_UE_MASK_LO;
5228 	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5229 					LPFC_UE_MASK_HI;
5230 	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5231 					LPFC_SLI_INTF;
5232 }
5233 
5234 /**
5235  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5236  * @phba: pointer to lpfc hba data structure.
5237  *
5238  * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5239  * memory map.
5240  **/
5241 static void
5242 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5243 {
5244 
5245 	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5246 				    LPFC_HST_STATE;
5247 	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5248 				    LPFC_HST_ISR0;
5249 	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5250 				    LPFC_HST_IMR0;
5251 	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5252 				     LPFC_HST_ISCR0;
5253 	return;
5254 }
5255 
5256 /**
5257  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5258  * @phba: pointer to lpfc hba data structure.
5259  * @vf: virtual function number
5260  *
5261  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5262  * based on the given virtual function number, @vf.
5263  *
5264  * Return 0 if successful, otherwise -ENODEV.
5265  **/
5266 static int
5267 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5268 {
5269 	if (vf > LPFC_VIR_FUNC_MAX)
5270 		return -ENODEV;
5271 
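	/* Each virtual function owns one doorbell page; offset by @vf pages */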
5272 	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5273 				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5274 	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5275 				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5276 	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5277 				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5278 	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5279 				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5280 	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5281 				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5282 	return 0;
5283 }
5284 
5285 /**
5286  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5287  * @phba: pointer to lpfc hba data structure.
5288  *
5289  * This routine is invoked to create the bootstrap mailbox
5290  * region consistent with the SLI-4 interface spec.  This
5291  * routine allocates all memory necessary to communicate
5292  * mailbox commands to the port and sets up all alignment
5293  * needs.  No locks are expected to be held when calling
5294  * this routine.
5295  *
5296  * Return codes
5297  * 	0 - successful
5298  * 	ENOMEM - could not allocate memory.
5299  **/
5300 static int
5301 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5302 {
5303 	uint32_t bmbx_size;
5304 	struct lpfc_dmabuf *dmabuf;
5305 	struct dma_address *dma_address;
5306 	uint32_t pa_addr;
5307 	uint64_t phys_addr;
5308 
5309 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5310 	if (!dmabuf)
5311 		return -ENOMEM;
5312 
5313 	/*
5314 	 * The bootstrap mailbox region consists of 2 parts
5315 	 * plus a 16-byte alignment restriction.
5316 	 */
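	/* Over-allocating by (alignment - 1) bytes guarantees that a fully
	 * 16-byte-aligned region of the structure's full size fits within.
	 */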
5317 	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5318 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5319 					  bmbx_size,
5320 					  &dmabuf->phys,
5321 					  GFP_KERNEL);
5322 	if (!dmabuf->virt) {
5323 		kfree(dmabuf);
5324 		return -ENOMEM;
5325 	}
5326 	memset(dmabuf->virt, 0, bmbx_size);
5327 
5328 	/*
5329 	 * Initialize the bootstrap mailbox pointers now so that the register
5330 	 * operations are simple later.  The mailbox dma address is required
5331 	 * to be 16-byte aligned.  Also align the virtual memory as each
5332 	 * mailbox is copied into the bmbx mailbox region before issuing the
5333 	 * command to the port.
5334 	 */
5335 	phba->sli4_hba.bmbx.dmabuf = dmabuf;
5336 	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5337 
5338 	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5339 					      LPFC_ALIGN_16_BYTE);
5340 	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5341 					      LPFC_ALIGN_16_BYTE);
5342 
5343 	/*
5344 	 * Set the high and low physical addresses now.  The SLI4 alignment
5345 	 * requirement is 16 bytes and the mailbox is posted to the port
5346 	 * as two 30-bit addresses.  The other data is a bit marking whether
5347 	 * the 30-bit address is the high or low address.
5348 	 * Upcast bmbx aphys to 64bits so shift instruction compiles
5349 	 * clean on 32 bit machines.
5350 	 */
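	/* addr_hi thus carries physical address bits 63:34 and addr_lo
	 * carries bits 33:4; the low two bits of each word are marker bits.
	 */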
5351 	dma_address = &phba->sli4_hba.bmbx.dma_address;
5352 	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5353 	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5354 	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5355 					   LPFC_BMBX_BIT1_ADDR_HI);
5356 
5357 	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5358 	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5359 					   LPFC_BMBX_BIT1_ADDR_LO);
5360 	return 0;
5361 }
5362 
5363 /**
5364  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5365  * @phba: pointer to lpfc hba data structure.
5366  *
5367  * This routine is invoked to teardown the bootstrap mailbox
5368  * region and release all host resources. This routine requires
5369  * the caller to ensure that all mailbox commands have been recovered,
5370  * that no additional mailbox commands will be sent, and that interrupts
5371  * are disabled before calling this routine.
5372  *
5373  **/
5374 static void
5375 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5376 {
5377 	dma_free_coherent(&phba->pcidev->dev,
5378 			  phba->sli4_hba.bmbx.bmbx_size,
5379 			  phba->sli4_hba.bmbx.dmabuf->virt,
5380 			  phba->sli4_hba.bmbx.dmabuf->phys);
5381 
5382 	kfree(phba->sli4_hba.bmbx.dmabuf);
5383 	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5384 }
5385 
5386 /**
5387  * lpfc_sli4_read_config - Get the config parameters.
5388  * @phba: pointer to lpfc hba data structure.
5389  *
5390  * This routine is invoked to read the configuration parameters from the HBA.
5391  * The configuration parameters are used to set the base and maximum values
5392  * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5393  * allocation for the port.
5394  *
5395  * Return codes
5396  * 	0 - successful
5397  * 	ENOMEM - No available memory
5398  *      EIO - The mailbox failed to complete successfully.
5399  **/
5400 static int
5401 lpfc_sli4_read_config(struct lpfc_hba *phba)
5402 {
5403 	LPFC_MBOXQ_t *pmb;
5404 	struct lpfc_mbx_read_config *rd_config;
5405 	uint32_t rc = 0;
5406 
5407 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5408 	if (!pmb) {
5409 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5410 				"2011 Unable to allocate memory for issuing "
5411 				"SLI_CONFIG_SPECIAL mailbox command\n");
5412 		return -ENOMEM;
5413 	}
5414 
5415 	lpfc_read_config(phba, pmb);
5416 
5417 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5418 	if (rc != MBX_SUCCESS) {
5419 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5420 			"2012 Mailbox failed, mbxCmd x%x "
5421 			"READ_CONFIG, mbxStatus x%x\n",
5422 			bf_get(lpfc_mqe_command, &pmb->u.mqe),
5423 			bf_get(lpfc_mqe_status, &pmb->u.mqe));
5424 		rc = -EIO;
5425 	} else {
5426 		rd_config = &pmb->u.mqe.un.rd_config;
5427 		phba->sli4_hba.max_cfg_param.max_xri =
5428 			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5429 		phba->sli4_hba.max_cfg_param.xri_base =
5430 			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5431 		phba->sli4_hba.max_cfg_param.max_vpi =
5432 			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5433 		phba->sli4_hba.max_cfg_param.vpi_base =
5434 			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5435 		phba->sli4_hba.max_cfg_param.max_rpi =
5436 			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5437 		phba->sli4_hba.max_cfg_param.rpi_base =
5438 			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5439 		phba->sli4_hba.max_cfg_param.max_vfi =
5440 			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5441 		phba->sli4_hba.max_cfg_param.vfi_base =
5442 			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5443 		phba->sli4_hba.max_cfg_param.max_fcfi =
5444 			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5445 		phba->sli4_hba.max_cfg_param.fcfi_base =
5446 			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5447 		phba->sli4_hba.max_cfg_param.max_eq =
5448 			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5449 		phba->sli4_hba.max_cfg_param.max_rq =
5450 			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5451 		phba->sli4_hba.max_cfg_param.max_wq =
5452 			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5453 		phba->sli4_hba.max_cfg_param.max_cq =
5454 			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5455 		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5456 		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5457 		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5458 		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5459 		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5460 		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5461 				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5462 		phba->max_vports = phba->max_vpi;
5463 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5464 				"2003 cfg params XRI(B:%d M:%d), "
5465 				"VPI(B:%d M:%d) "
5466 				"VFI(B:%d M:%d) "
5467 				"RPI(B:%d M:%d) "
5468 				"FCFI(B:%d M:%d)\n",
5469 				phba->sli4_hba.max_cfg_param.xri_base,
5470 				phba->sli4_hba.max_cfg_param.max_xri,
5471 				phba->sli4_hba.max_cfg_param.vpi_base,
5472 				phba->sli4_hba.max_cfg_param.max_vpi,
5473 				phba->sli4_hba.max_cfg_param.vfi_base,
5474 				phba->sli4_hba.max_cfg_param.max_vfi,
5475 				phba->sli4_hba.max_cfg_param.rpi_base,
5476 				phba->sli4_hba.max_cfg_param.max_rpi,
5477 				phba->sli4_hba.max_cfg_param.fcfi_base,
5478 				phba->sli4_hba.max_cfg_param.max_fcfi);
5479 	}
5480 	mempool_free(pmb, phba->mbox_mem_pool);
5481 
5482 	/* Clamp the configured HBA queue depth to the max XRI count */
5483 	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
5484 		phba->cfg_hba_queue_depth =
5485 				phba->sli4_hba.max_cfg_param.max_xri;
5486 	return rc;
5487 }
5488 
5489 /**
5490  * lpfc_setup_endian_order - Notify the port of the host's endian order.
5491  * @phba: pointer to lpfc hba data structure.
5492  *
5493  * This routine is invoked to setup the host-side endian order to the
5494  * HBA consistent with the SLI-4 interface spec.
5495  *
5496  * Return codes
5497  * 	0 - successful
5498  * 	ENOMEM - No available memory
5499  *      EIO - The mailbox failed to complete successfully.
5500  **/
5501 static int
5502 lpfc_setup_endian_order(struct lpfc_hba *phba)
5503 {
5504 	LPFC_MBOXQ_t *mboxq;
5505 	uint32_t rc = 0;
5506 	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5507 				      HOST_ENDIAN_HIGH_WORD1};
5508 
5509 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5510 	if (!mboxq) {
5511 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5512 				"0492 Unable to allocate memory for issuing "
5513 				"SLI_CONFIG_SPECIAL mailbox command\n");
5514 		return -ENOMEM;
5515 	}
5516 
5517 	/*
5518 	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5519 	 * words to contain special data values and no other data.
5520 	 */
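	/* The port can deduce the host's byte order from how these
	 * well-known constant values appear when the command arrives.
	 */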
5521 	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5522 	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5523 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5524 	if (rc != MBX_SUCCESS) {
5525 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5526 				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
5527 				"status x%x\n",
5528 				rc);
5529 		rc = -EIO;
5530 	}
5531 
5532 	mempool_free(mboxq, phba->mbox_mem_pool);
5533 	return rc;
5534 }
5535 
5536 /**
5537  * lpfc_sli4_queue_create - Create all the SLI4 queues
5538  * @phba: pointer to lpfc hba data structure.
5539  *
5540  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5541  * operation. For each SLI4 queue type, the parameters such as queue entry
5542  * count (queue depth) shall be taken from the module parameter. For now,
5543  * we just use some constant number as place holder.
5544  * we just use some constant number as a placeholder.
5545  * Return codes
5546  *      0 - successful
5547  *      ENOMEM - No available memory
5548  *      EIO - The mailbox failed to complete successfully.
5549  **/
5550 static int
5551 lpfc_sli4_queue_create(struct lpfc_hba *phba)
5552 {
5553 	struct lpfc_queue *qdesc;
5554 	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5555 	int cfg_fcp_wq_count;
5556 	int cfg_fcp_eq_count;
5557 
5558 	/*
5559 	 * Sanity check for configured queue parameters against the run-time
5560 	 * device parameters
5561 	 */
5562 
5563 	/* Sanity check on FCP fast-path WQ parameters */
5564 	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5565 	if (cfg_fcp_wq_count >
5566 	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5567 		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5568 				   LPFC_SP_WQN_DEF;
5569 		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5570 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5571 					"2581 Not enough WQs (%d) from "
5572 					"the pci function for supporting "
5573 					"FCP WQs (%d)\n",
5574 					phba->sli4_hba.max_cfg_param.max_wq,
5575 					phba->cfg_fcp_wq_count);
5576 			goto out_error;
5577 		}
5578 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5579 				"2582 Not enough WQs (%d) from the pci "
5580 				"function for supporting the requested "
5581 				"FCP WQs (%d), the actual FCP WQs can "
5582 				"be supported: %d\n",
5583 				phba->sli4_hba.max_cfg_param.max_wq,
5584 				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5585 	}
5586 	/* The actual number of FCP work queues adopted */
5587 	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5588 
5589 	/* Sanity check on FCP fast-path EQ parameters */
5590 	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5591 	if (cfg_fcp_eq_count >
5592 	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5593 		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5594 				   LPFC_SP_EQN_DEF;
5595 		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5596 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5597 					"2574 Not enough EQs (%d) from the "
5598 					"pci function for supporting FCP "
5599 					"EQs (%d)\n",
5600 					phba->sli4_hba.max_cfg_param.max_eq,
5601 					phba->cfg_fcp_eq_count);
5602 			goto out_error;
5603 		}
5604 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5605 				"2575 Not enough EQs (%d) from the pci "
5606 				"function for supporting the requested "
5607 				"FCP EQs (%d), the actual FCP EQs can "
5608 				"be supported: %d\n",
5609 				phba->sli4_hba.max_cfg_param.max_eq,
5610 				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5611 	}
5612 	/* It does not make sense to have more EQs than WQs */
5613 	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5614 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5615 				"2593 The FCP EQ count(%d) cannot be greater "
5616 				"than the FCP WQ count(%d), limiting the "
5617 				"FCP EQ count to %d\n", cfg_fcp_eq_count,
5618 				phba->cfg_fcp_wq_count,
5619 				phba->cfg_fcp_wq_count);
5620 		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5621 	}
5622 	/* The actual number of FCP event queues adopted */
5623 	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5624 	/* The overall number of event queues used */
5625 	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5626 
5627 	/*
5628 	 * Create Event Queues (EQs)
5629 	 */
5630 
5631 	/* Get EQ depth from module parameter, fake the default for now */
5632 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5633 	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5634 
5635 	/* Create slow path event queue */
5636 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5637 				      phba->sli4_hba.eq_ecount);
5638 	if (!qdesc) {
5639 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5640 				"0496 Failed allocate slow-path EQ\n");
5641 		goto out_error;
5642 	}
5643 	phba->sli4_hba.sp_eq = qdesc;
5644 
5645 	/* Create fast-path FCP Event Queue(s) */
5646 	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5647 			       phba->cfg_fcp_eq_count), GFP_KERNEL);
5648 	if (!phba->sli4_hba.fp_eq) {
5649 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5650 				"2576 Failed allocate memory for fast-path "
5651 				"EQ record array\n");
5652 		goto out_free_sp_eq;
5653 	}
5654 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5655 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5656 					      phba->sli4_hba.eq_ecount);
5657 		if (!qdesc) {
5658 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5659 					"0497 Failed allocate fast-path EQ\n");
5660 			goto out_free_fp_eq;
5661 		}
5662 		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5663 	}
5664 
5665 	/*
5666 	 * Create Completion Queues (CQs)
5667 	 */
5668 
5669 	/* Get CQ depth from module parameter, fake the default for now */
5670 	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5671 	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5672 
5673 	/* Create slow-path Mailbox Command Complete Queue */
5674 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5675 				      phba->sli4_hba.cq_ecount);
5676 	if (!qdesc) {
5677 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5678 				"0500 Failed allocate slow-path mailbox CQ\n");
5679 		goto out_free_fp_eq;
5680 	}
5681 	phba->sli4_hba.mbx_cq = qdesc;
5682 
5683 	/* Create slow-path ELS Complete Queue */
5684 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5685 				      phba->sli4_hba.cq_ecount);
5686 	if (!qdesc) {
5687 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5688 				"0501 Failed allocate slow-path ELS CQ\n");
5689 		goto out_free_mbx_cq;
5690 	}
5691 	phba->sli4_hba.els_cq = qdesc;
5692 
5694 	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5695 	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5696 				phba->cfg_fcp_eq_count), GFP_KERNEL);
5697 	if (!phba->sli4_hba.fcp_cq) {
5698 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5699 				"2577 Failed allocate memory for fast-path "
5700 				"CQ record array\n");
5701 		goto out_free_els_cq;
5702 	}
5703 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5704 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5705 					      phba->sli4_hba.cq_ecount);
5706 		if (!qdesc) {
5707 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5708 					"0499 Failed allocate fast-path FCP "
5709 					"CQ (%d)\n", fcp_cqidx);
5710 			goto out_free_fcp_cq;
5711 		}
5712 		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5713 	}
5714 
5715 	/* Create Mailbox Command Queue */
5716 	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5717 	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5718 
5719 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5720 				      phba->sli4_hba.mq_ecount);
5721 	if (!qdesc) {
5722 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5723 				"0505 Failed allocate slow-path MQ\n");
5724 		goto out_free_fcp_cq;
5725 	}
5726 	phba->sli4_hba.mbx_wq = qdesc;
5727 
5728 	/*
5729 	 * Create all the Work Queues (WQs)
5730 	 */
5731 	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5732 	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5733 
5734 	/* Create slow-path ELS Work Queue */
5735 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5736 				      phba->sli4_hba.wq_ecount);
5737 	if (!qdesc) {
5738 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5739 				"0504 Failed allocate slow-path ELS WQ\n");
5740 		goto out_free_mbx_wq;
5741 	}
5742 	phba->sli4_hba.els_wq = qdesc;
5743 
5744 	/* Create fast-path FCP Work Queue(s) */
5745 	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5746 				phba->cfg_fcp_wq_count), GFP_KERNEL);
5747 	if (!phba->sli4_hba.fcp_wq) {
5748 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5749 				"2578 Failed allocate memory for fast-path "
5750 				"WQ record array\n");
5751 		goto out_free_els_wq;
5752 	}
5753 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5754 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5755 					      phba->sli4_hba.wq_ecount);
5756 		if (!qdesc) {
5757 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5758 					"0503 Failed allocate fast-path FCP "
5759 					"WQ (%d)\n", fcp_wqidx);
5760 			goto out_free_fcp_wq;
5761 		}
5762 		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5763 	}
5764 
5765 	/*
5766 	 * Create Receive Queue (RQ)
5767 	 */
5768 	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5769 	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5770 
5771 	/* Create Receive Queue for header */
5772 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5773 				      phba->sli4_hba.rq_ecount);
5774 	if (!qdesc) {
5775 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5776 				"0506 Failed allocate receive HRQ\n");
5777 		goto out_free_fcp_wq;
5778 	}
5779 	phba->sli4_hba.hdr_rq = qdesc;
5780 
5781 	/* Create Receive Queue for data */
5782 	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5783 				      phba->sli4_hba.rq_ecount);
5784 	if (!qdesc) {
5785 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5786 				"0507 Failed allocate receive DRQ\n");
5787 		goto out_free_hdr_rq;
5788 	}
5789 	phba->sli4_hba.dat_rq = qdesc;
5790 
5791 	return 0;
5792 
5793 out_free_hdr_rq:
5794 	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5795 	phba->sli4_hba.hdr_rq = NULL;
5796 out_free_fcp_wq:
5797 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5798 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5799 		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5800 	}
5801 	kfree(phba->sli4_hba.fcp_wq);
5802 out_free_els_wq:
5803 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5804 	phba->sli4_hba.els_wq = NULL;
5805 out_free_mbx_wq:
5806 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5807 	phba->sli4_hba.mbx_wq = NULL;
5808 out_free_fcp_cq:
5809 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5810 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5811 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5812 	}
5813 	kfree(phba->sli4_hba.fcp_cq);
5814 out_free_els_cq:
5815 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5816 	phba->sli4_hba.els_cq = NULL;
5817 out_free_mbx_cq:
5818 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5819 	phba->sli4_hba.mbx_cq = NULL;
5820 out_free_fp_eq:
5821 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5822 		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5823 		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5824 	}
5825 	kfree(phba->sli4_hba.fp_eq);
5826 out_free_sp_eq:
5827 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5828 	phba->sli4_hba.sp_eq = NULL;
5829 out_error:
5830 	return -ENOMEM;
5831 }
5832 
5833 /**
5834  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5835  * @phba: pointer to lpfc hba data structure.
5836  *
5837  * This routine is invoked to release all the SLI4 queues allocated for
5838  * the FCoE HBA operation. This routine returns no value.
5844  **/
5845 static void
5846 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5847 {
5848 	int fcp_qidx;
5849 
5850 	/* Release mailbox command work queue */
5851 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5852 	phba->sli4_hba.mbx_wq = NULL;
5853 
5854 	/* Release ELS work queue */
5855 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5856 	phba->sli4_hba.els_wq = NULL;
5857 
5858 	/* Release FCP work queue */
5859 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5860 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5861 	kfree(phba->sli4_hba.fcp_wq);
5862 	phba->sli4_hba.fcp_wq = NULL;
5863 
5864 	/* Release unsolicited receive queue */
5865 	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5866 	phba->sli4_hba.hdr_rq = NULL;
5867 	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5868 	phba->sli4_hba.dat_rq = NULL;
5869 
5870 	/* Release ELS complete queue */
5871 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5872 	phba->sli4_hba.els_cq = NULL;
5873 
5874 	/* Release mailbox command complete queue */
5875 	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5876 	phba->sli4_hba.mbx_cq = NULL;
5877 
5878 	/* Release FCP response complete queue */
5879 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5880 		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5881 	kfree(phba->sli4_hba.fcp_cq);
5882 	phba->sli4_hba.fcp_cq = NULL;
5883 
5884 	/* Release fast-path event queue */
5885 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5886 		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5887 	kfree(phba->sli4_hba.fp_eq);
5888 	phba->sli4_hba.fp_eq = NULL;
5889 
5890 	/* Release slow-path event queue */
5891 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5892 	phba->sli4_hba.sp_eq = NULL;
5893 
5894 	return;
5895 }
5896 
5897 /**
5898  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5899  * @phba: pointer to lpfc hba data structure.
5900  *
5901  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5902  * operation.
5903  *
5904  * Return codes
5905  *      0 - successful
5906  *      ENOMEM - No available memory
5907  *      EIO - The mailbox failed to complete successfully.
5908  **/
5909 int
5910 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5911 {
5912 	int rc = -ENOMEM;
5913 	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5914 	int fcp_cq_index = 0;
5915 
5916 	/*
5917 	 * Set up Event Queues (EQs)
5918 	 */
5919 
5920 	/* Set up slow-path event queue */
5921 	if (!phba->sli4_hba.sp_eq) {
5922 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5923 				"0520 Slow-path EQ not allocated\n");
5924 		goto out_error;
5925 	}
5926 	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5927 			    LPFC_SP_DEF_IMAX);
5928 	if (rc) {
5929 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5930 				"0521 Failed setup of slow-path EQ: "
5931 				"rc = 0x%x\n", rc);
5932 		goto out_error;
5933 	}
5934 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5935 			"2583 Slow-path EQ setup: queue-id=%d\n",
5936 			phba->sli4_hba.sp_eq->queue_id);
5937 
5938 	/* Set up fast-path event queue */
5939 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5940 		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5941 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5942 					"0522 Fast-path EQ (%d) not "
5943 					"allocated\n", fcp_eqidx);
5944 			goto out_destroy_fp_eq;
5945 		}
5946 		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5947 				    phba->cfg_fcp_imax);
5948 		if (rc) {
5949 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5950 					"0523 Failed setup of fast-path EQ "
5951 					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
5952 			goto out_destroy_fp_eq;
5953 		}
5954 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5955 				"2584 Fast-path EQ setup: "
5956 				"queue[%d]-id=%d\n", fcp_eqidx,
5957 				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5958 	}
5959 
5960 	/*
5961 	 * Set up Completion Queues (CQs)
5962 	 */
5963 
5964 	/* Set up slow-path MBOX Complete Queue as the first CQ */
5965 	if (!phba->sli4_hba.mbx_cq) {
5966 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5967 				"0528 Mailbox CQ not allocated\n");
5968 		goto out_destroy_fp_eq;
5969 	}
5970 	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5971 			    LPFC_MCQ, LPFC_MBOX);
5972 	if (rc) {
5973 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5974 				"0529 Failed setup of slow-path mailbox CQ: "
5975 				"rc = 0x%x\n", rc);
5976 		goto out_destroy_fp_eq;
5977 	}
5978 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5979 			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5980 			phba->sli4_hba.mbx_cq->queue_id,
5981 			phba->sli4_hba.sp_eq->queue_id);
5982 
5983 	/* Set up slow-path ELS Complete Queue */
5984 	if (!phba->sli4_hba.els_cq) {
5985 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5986 				"0530 ELS CQ not allocated\n");
5987 		goto out_destroy_mbx_cq;
5988 	}
5989 	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5990 			    LPFC_WCQ, LPFC_ELS);
5991 	if (rc) {
5992 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5993 				"0531 Failed setup of slow-path ELS CQ: "
5994 				"rc = 0x%x\n", rc);
5995 		goto out_destroy_mbx_cq;
5996 	}
5997 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5998 			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5999 			phba->sli4_hba.els_cq->queue_id,
6000 			phba->sli4_hba.sp_eq->queue_id);
6001 
6002 	/* Set up fast-path FCP Response Complete Queue */
6003 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6004 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6005 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6006 					"0526 Fast-path FCP CQ (%d) not "
6007 					"allocated\n", fcp_cqidx);
6008 			goto out_destroy_fcp_cq;
6009 		}
6010 		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6011 				    phba->sli4_hba.fp_eq[fcp_cqidx],
6012 				    LPFC_WCQ, LPFC_FCP);
6013 		if (rc) {
6014 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6015 					"0527 Failed setup of fast-path FCP "
6016 					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6017 			goto out_destroy_fcp_cq;
6018 		}
6019 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6020 				"2588 FCP CQ setup: cq[%d]-id=%d, "
6021 				"parent eq[%d]-id=%d\n",
6022 				fcp_cqidx,
6023 				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6024 				fcp_cqidx,
6025 				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6026 	}
6027 
6028 	/*
6029 	 * Set up all the Work Queues (WQs)
6030 	 */
6031 
6032 	/* Set up Mailbox Command Queue */
6033 	if (!phba->sli4_hba.mbx_wq) {
6034 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6035 				"0538 Slow-path MQ not allocated\n");
6036 		goto out_destroy_fcp_cq;
6037 	}
6038 	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6039 			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
6040 	if (rc) {
6041 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6042 				"0539 Failed setup of slow-path MQ: "
6043 				"rc = 0x%x\n", rc);
6044 		goto out_destroy_fcp_cq;
6045 	}
6046 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6047 			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6048 			phba->sli4_hba.mbx_wq->queue_id,
6049 			phba->sli4_hba.mbx_cq->queue_id);
6050 
6051 	/* Set up slow-path ELS Work Queue */
6052 	if (!phba->sli4_hba.els_wq) {
6053 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6054 				"0536 Slow-path ELS WQ not allocated\n");
6055 		goto out_destroy_mbx_wq;
6056 	}
6057 	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6058 			    phba->sli4_hba.els_cq, LPFC_ELS);
6059 	if (rc) {
6060 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6061 				"0537 Failed setup of slow-path ELS WQ: "
6062 				"rc = 0x%x\n", rc);
6063 		goto out_destroy_mbx_wq;
6064 	}
6065 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6066 			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6067 			phba->sli4_hba.els_wq->queue_id,
6068 			phba->sli4_hba.els_cq->queue_id);
6069 
6070 	/* Set up fast-path FCP Work Queue */
6071 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6072 		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6073 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6074 					"0534 Fast-path FCP WQ (%d) not "
6075 					"allocated\n", fcp_wqidx);
6076 			goto out_destroy_fcp_wq;
6077 		}
6078 		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6079 				    phba->sli4_hba.fcp_cq[fcp_cq_index],
6080 				    LPFC_FCP);
6081 		if (rc) {
6082 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6083 					"0535 Failed setup of fast-path FCP "
6084 					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6085 			goto out_destroy_fcp_wq;
6086 		}
6087 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6088 				"2591 FCP WQ setup: wq[%d]-id=%d, "
6089 				"parent cq[%d]-id=%d\n",
6090 				fcp_wqidx,
6091 				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6092 				fcp_cq_index,
6093 				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6094 		/* Round robin FCP Work Queue's Completion Queue assignment */
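		/* e.g. 4 WQs over 2 CQs: WQ0->CQ0, WQ1->CQ1, WQ2->CQ0, WQ3->CQ1 */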
6095 		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6096 	}
6097 
6098 	/*
6099 	 * Set up the Receive Queue (RQ)
6100 	 */
6101 	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6102 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6103 				"0540 Receive Queue not allocated\n");
6104 		goto out_destroy_fcp_wq;
6105 	}
6106 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6107 			    phba->sli4_hba.els_cq, LPFC_USOL);
6108 	if (rc) {
6109 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6110 				"0541 Failed setup of Receive Queue: "
6111 				"rc = 0x%x\n", rc);
6112 		goto out_destroy_fcp_wq;
6113 	}
6114 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6115 			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6116 			"parent cq-id=%d\n",
6117 			phba->sli4_hba.hdr_rq->queue_id,
6118 			phba->sli4_hba.dat_rq->queue_id,
6119 			phba->sli4_hba.els_cq->queue_id);
6120 	return 0;
6121 
6122 out_destroy_fcp_wq:
6123 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6124 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6125 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6126 out_destroy_mbx_wq:
6127 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6128 out_destroy_fcp_cq:
6129 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6130 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6131 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6132 out_destroy_mbx_cq:
6133 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6134 out_destroy_fp_eq:
6135 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6136 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6137 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6138 out_error:
6139 	return rc;
6140 }
6141 
6142 /**
6143  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6144  * @phba: pointer to lpfc hba data structure.
6145  *
6146  * This routine is invoked to tear down all the SLI4 queues set up for
6147  * the FCoE HBA operation. This routine returns no value.
6154 void
6155 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6156 {
6157 	int fcp_qidx;
6158 
6159 	/* Unset mailbox command work queue */
6160 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6161 	/* Unset ELS work queue */
6162 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6163 	/* Unset unsolicited receive queue */
6164 	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6165 	/* Unset FCP work queue */
6166 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6167 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6168 	/* Unset mailbox command complete queue */
6169 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6170 	/* Unset ELS complete queue */
6171 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6172 	/* Unset FCP response complete queue */
6173 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6174 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6175 	/* Unset fast-path event queue */
6176 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6177 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6178 	/* Unset slow-path event queue */
6179 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6180 }
6181 
6182 /**
6183  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6184  * @phba: pointer to lpfc hba data structure.
6185  *
6186  * This routine is invoked to allocate and set up a pool of completion queue
6187  * events. The body of the completion queue event is a completion queue entry
6188  * events. The body of a completion queue event is a completion queue entry
6189  * (CQE). For now, this pool is used for the interrupt service routine to queue
6190  *   - Mailbox asynchronous events
6191  *   - Receive queue completion unsolicited events
6192  * Later, this can be used for all the slow-path events.
6193  *
6194  * Return codes
6195  *      0 - successful
6196  *      -ENOMEM - No available memory
6197  **/
6198 static int
6199 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6200 {
6201 	struct lpfc_cq_event *cq_event;
6202 	int i;
6203 
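	/* Size the pool at a multiple of the CQ depth so the interrupt
	 * handler is unlikely to find it empty under load.
	 */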
6204 	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6205 		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6206 		if (!cq_event)
6207 			goto out_pool_create_fail;
6208 		list_add_tail(&cq_event->list,
6209 			      &phba->sli4_hba.sp_cqe_event_pool);
6210 	}
6211 	return 0;
6212 
6213 out_pool_create_fail:
6214 	lpfc_sli4_cq_event_pool_destroy(phba);
6215 	return -ENOMEM;
6216 }
6217 
6218 /**
6219  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6220  * @phba: pointer to lpfc hba data structure.
6221  *
6222  * This routine is invoked to free the pool of completion queue events at
6223  * driver unload time. Note that it is the responsibility of the driver
6224  * cleanup routine to free all the outstanding completion-queue events
6225  * allocated from this pool back into the pool before invoking this routine
6226  * to destroy the pool.
6227  **/
6228 static void
6229 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6230 {
6231 	struct lpfc_cq_event *cq_event, *next_cq_event;
6232 
6233 	list_for_each_entry_safe(cq_event, next_cq_event,
6234 				 &phba->sli4_hba.sp_cqe_event_pool, list) {
6235 		list_del(&cq_event->list);
6236 		kfree(cq_event);
6237 	}
6238 }
6239 
6240 /**
6241  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6242  * @phba: pointer to lpfc hba data structure.
6243  *
6244  * This routine is the lock-free version of the API invoked to allocate a
6245  * completion-queue event from the free pool.
6246  *
6247  * Return: Pointer to the newly allocated completion-queue event if successful
6248  *         NULL otherwise.
6249  **/
6250 struct lpfc_cq_event *
6251 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6252 {
6253 	struct lpfc_cq_event *cq_event = NULL;
6254 
6255 	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6256 			 struct lpfc_cq_event, list);
6257 	return cq_event;
6258 }
6259 
6260 /**
6261  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6262  * @phba: pointer to lpfc hba data structure.
6263  *
6264  * This routine is the locked version of the API invoked to allocate a
6265  * completion-queue event from the free pool.
6266  *
6267  * Return: Pointer to the newly allocated completion-queue event if successful
6268  *         NULL otherwise.
6269  **/
6270 struct lpfc_cq_event *
6271 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6272 {
6273 	struct lpfc_cq_event *cq_event;
6274 	unsigned long iflags;
6275 
6276 	spin_lock_irqsave(&phba->hbalock, iflags);
6277 	cq_event = __lpfc_sli4_cq_event_alloc(phba);
6278 	spin_unlock_irqrestore(&phba->hbalock, iflags);
6279 	return cq_event;
6280 }
6281 
6282 /**
6283  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6284  * @phba: pointer to lpfc hba data structure.
6285  * @cq_event: pointer to the completion queue event to be freed.
6286  *
6287  * This routine is the lock-free version of the API invoked to release a
6288  * completion-queue event back into the free pool.
6289  **/
6290 void
6291 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6292 			     struct lpfc_cq_event *cq_event)
6293 {
6294 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6295 }
6296 
6297 /**
6298  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6299  * @phba: pointer to lpfc hba data structure.
6300  * @cq_event: pointer to the completion queue event to be freed.
6301  *
6302  * This routine is the locked version of the API invoked to release a
6303  * completion-queue event back into the free pool.
6304  **/
6305 void
6306 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6307 			   struct lpfc_cq_event *cq_event)
6308 {
6309 	unsigned long iflags;
6310 	spin_lock_irqsave(&phba->hbalock, iflags);
6311 	__lpfc_sli4_cq_event_release(phba, cq_event);
6312 	spin_unlock_irqrestore(&phba->hbalock, iflags);
6313 }
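
/*
 * Illustrative (not driver-verbatim) use of the cq-event pool from
 * interrupt context; the staging list name here is hypothetical:
 *
 *	struct lpfc_cq_event *cq_event;
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (cq_event) {
 *		// stage the event for the worker thread to process
 *		list_add_tail(&cq_event->list, &some_work_queue);
 *	}
 *	...
 *	// worker thread, once done with the event:
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 */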
6314 
6315 /**
6316  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6317  * @phba: pointer to lpfc hba data structure.
6318  *
6319  * This routine is invoked to release all the pending completion-queue
6320  * events back into the free pool for device reset.
6321  **/
6322 static void
6323 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6324 {
6325 	LIST_HEAD(cqelist);
6326 	struct lpfc_cq_event *cqe;
6327 	unsigned long iflags;
6328 
6329 	/* Retrieve all the pending WCQEs from pending WCQE lists */
6330 	spin_lock_irqsave(&phba->hbalock, iflags);
6331 	/* Pending FCP XRI abort events */
6332 	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6333 			 &cqelist);
6334 	/* Pending ELS XRI abort events */
6335 	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6336 			 &cqelist);
6337 	/* Pending async events */
6338 	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6339 			 &cqelist);
6340 	spin_unlock_irqrestore(&phba->hbalock, iflags);
6341 
6342 	while (!list_empty(&cqelist)) {
6343 		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6344 		lpfc_sli4_cq_event_release(phba, cqe);
6345 	}
6346 }
6347 
6348 /**
6349  * lpfc_pci_function_reset - Reset pci function.
6350  * @phba: pointer to lpfc hba data structure.
6351  *
6352  * This routine is invoked to request a PCI function reset. It destroys
6353  * all resources assigned to the PCI function that originates this request.
6354  *
6355  * Return codes
6356  *      0 - successful
6357  *      ENOMEM - No available memory
6358  *      EIO - The mailbox failed to complete successfully.
6359  **/
6360 int
6361 lpfc_pci_function_reset(struct lpfc_hba *phba)
6362 {
6363 	LPFC_MBOXQ_t *mboxq;
6364 	uint32_t rc = 0;
6365 	uint32_t shdr_status, shdr_add_status;
6366 	union lpfc_sli4_cfg_shdr *shdr;
6367 
6368 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6369 	if (!mboxq) {
6370 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6371 				"0494 Unable to allocate memory for issuing "
6372 				"SLI_FUNCTION_RESET mailbox command\n");
6373 		return -ENOMEM;
6374 	}
6375 
6376 	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6377 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6378 			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6379 			 LPFC_SLI4_MBX_EMBED);
6380 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6381 	shdr = (union lpfc_sli4_cfg_shdr *)
6382 		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6383 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6384 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
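	/* A timed-out mailbox may still complete later, so it is only safe
	 * to free the mailbox memory when the command did not time out.
	 */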
6385 	if (rc != MBX_TIMEOUT)
6386 		mempool_free(mboxq, phba->mbox_mem_pool);
6387 	if (shdr_status || shdr_add_status || rc) {
6388 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6389 				"0495 SLI_FUNCTION_RESET mailbox failed with "
6390 				"status x%x add_status x%x, mbx status x%x\n",
6391 				shdr_status, shdr_add_status, rc);
6392 		rc = -ENXIO;
6393 	}
6394 	return rc;
6395 }
6396 
6397 /**
6398  * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6399  * @phba: pointer to lpfc hba data structure.
6400  * @cnt: number of nop mailbox commands to send.
6401  *
6402  * This routine is invoked to send @cnt NOP mailbox commands and to
6403  * wait for each command to complete.
6404  *
6405  * Return: the number of NOP mailbox commands completed.
6406  **/
6407 static int
6408 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6409 {
6410 	LPFC_MBOXQ_t *mboxq;
6411 	int length, cmdsent;
6412 	uint32_t mbox_tmo;
6413 	uint32_t rc = 0;
6414 	uint32_t shdr_status, shdr_add_status;
6415 	union lpfc_sli4_cfg_shdr *shdr;
6416 
6417 	if (cnt == 0) {
6418 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6419 				"2518 Requested to send 0 NOP mailbox cmd\n");
6420 		return cnt;
6421 	}
6422 
6423 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6424 	if (!mboxq) {
6425 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6426 				"2519 Unable to allocate memory for issuing "
6427 				"NOP mailbox command\n");
6428 		return 0;
6429 	}
6430 
6431 	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6432 	length = (sizeof(struct lpfc_mbx_nop) -
6433 		  sizeof(struct lpfc_sli4_cfg_mhdr));
6434 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6435 			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6436 
6437 	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6438 	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6439 		if (!phba->sli4_hba.intr_enable)
6440 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6441 		else
6442 			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6443 		if (rc == MBX_TIMEOUT)
6444 			break;
6445 		/* Check return status */
6446 		shdr = (union lpfc_sli4_cfg_shdr *)
6447 			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6448 		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6449 		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6450 					 &shdr->response);
6451 		if (shdr_status || shdr_add_status || rc) {
6452 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6453 					"2520 NOP mailbox command failed "
6454 					"status x%x add_status x%x mbx "
6455 					"status x%x\n", shdr_status,
6456 					shdr_add_status, rc);
6457 			break;
6458 		}
6459 	}
6460 
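	/* As above, leave the mailbox allocated if the command timed out */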
6461 	if (rc != MBX_TIMEOUT)
6462 		mempool_free(mboxq, phba->mbox_mem_pool);
6463 
6464 	return cmdsent;
6465 }
6466 
6467 /**
6468  * lpfc_sli4_fcfi_unreg - Unregister a FCFI from the device
6469  * @phba: pointer to lpfc hba data structure.
6470  * @fcfi: fcf index.
6471  *
6472  * This routine is invoked to unregister a FCFI from the device.
6473  **/
6474 void
6475 lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6476 {
6477 	LPFC_MBOXQ_t *mbox;
6478 	uint32_t mbox_tmo;
6479 	int rc;
6480 	unsigned long flags;
6481 
6482 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6483 
6484 	if (!mbox)
6485 		return;
6486 
6487 	lpfc_unreg_fcfi(mbox, fcfi);
6488 
6489 	if (!phba->sli4_hba.intr_enable)
6490 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6491 	else {
6492 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6493 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6494 	}
6495 	if (rc != MBX_TIMEOUT)
6496 		mempool_free(mbox, phba->mbox_mem_pool);
6497 	if (rc != MBX_SUCCESS)
6498 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6499 				"2517 Unregister FCFI command failed "
6500 				"status %d, mbxStatus x%x\n", rc,
6501 				bf_get(lpfc_mqe_status, &mbox->u.mqe));
6502 	else {
6503 		spin_lock_irqsave(&phba->hbalock, flags);
6504 		/* Mark the FCFI as no longer registered */
6505 		phba->fcf.fcf_flag &=
6506 			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6507 		spin_unlock_irqrestore(&phba->hbalock, flags);
6508 	}
6509 }
6510 
6511 /**
6512  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6513  * @phba: pointer to lpfc hba data structure.
6514  *
6515  * This routine is invoked to set up the PCI device memory space for device
6516  * with SLI-4 interface spec.
6517  *
6518  * Return codes
6519  * 	0 - successful
6520  * 	other values - error
6521  **/
6522 static int
6523 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6524 {
6525 	struct pci_dev *pdev;
6526 	unsigned long bar0map_len, bar1map_len, bar2map_len;
6527 	int error = -ENODEV;
6528 
6529 	/* Obtain PCI device reference */
6530 	if (!phba->pcidev)
6531 		return error;
6532 	else
6533 		pdev = phba->pcidev;
6534 
6535 	/* Set the device DMA mask size */
6536 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6537 	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6538 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6539 		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6540 			return error;
6541 		}
6542 	}
6543 
6544 	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6545 	 * number of bytes required by each mapping. They actually map to
6546 	 * PCI BAR regions 0 or 1, 2, and 4 on the SLI4 device.
6547 	 */
6548 	if (pci_resource_start(pdev, 0)) {
6549 		phba->pci_bar0_map = pci_resource_start(pdev, 0);
6550 		bar0map_len = pci_resource_len(pdev, 0);
6551 	} else {
6552 		phba->pci_bar0_map = pci_resource_start(pdev, 1);
6553 		bar0map_len = pci_resource_len(pdev, 1);
6554 	}
6555 	phba->pci_bar1_map = pci_resource_start(pdev, 2);
6556 	bar1map_len = pci_resource_len(pdev, 2);
6557 
6558 	phba->pci_bar2_map = pci_resource_start(pdev, 4);
6559 	bar2map_len = pci_resource_len(pdev, 4);
6560 
6561 	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6562 	phba->sli4_hba.conf_regs_memmap_p =
6563 				ioremap(phba->pci_bar0_map, bar0map_len);
6564 	if (!phba->sli4_hba.conf_regs_memmap_p) {
6565 		dev_printk(KERN_ERR, &pdev->dev,
6566 			   "ioremap failed for SLI4 PCI config registers.\n");
6567 		goto out;
6568 	}
6569 
6570 	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
6571 	phba->sli4_hba.ctrl_regs_memmap_p =
6572 				ioremap(phba->pci_bar1_map, bar1map_len);
6573 	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6574 		dev_printk(KERN_ERR, &pdev->dev,
6575 			   "ioremap failed for SLI4 HBA control registers.\n");
6576 		goto out_iounmap_conf;
6577 	}
6578 
6579 	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6580 	phba->sli4_hba.drbl_regs_memmap_p =
6581 				ioremap(phba->pci_bar2_map, bar2map_len);
6582 	if (!phba->sli4_hba.drbl_regs_memmap_p) {
6583 		dev_printk(KERN_ERR, &pdev->dev,
6584 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
6585 		goto out_iounmap_ctrl;
6586 	}
6587 
6588 	/* Set up BAR0 PCI config space register memory map */
6589 	lpfc_sli4_bar0_register_memmap(phba);
6590 
6591 	/* Set up BAR1 register memory map */
6592 	lpfc_sli4_bar1_register_memmap(phba);
6593 
6594 	/* Set up BAR2 register memory map */
6595 	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6596 	if (error)
6597 		goto out_iounmap_all;
6598 
6599 	return 0;
6600 
6601 out_iounmap_all:
6602 	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6603 out_iounmap_ctrl:
6604 	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6605 out_iounmap_conf:
6606 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6607 out:
6608 	return error;
6609 }
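
/*
 * Error-unwind sketch (illustrative only): lpfc_sli4_pci_mem_setup() above
 * uses the standard goto-based unwind idiom, where each successful ioremap()
 * gains an iounmap() label and a failure at any step releases exactly what
 * was already mapped, in reverse order:
 *
 *	a = ioremap(bar0, len0);	if (!a) goto out;
 *	b = ioremap(bar1, len1);	if (!b) goto unmap_a;
 *	c = ioremap(bar2, len2);	if (!c) goto unmap_b;
 *	return 0;
 * unmap_b:
 *	iounmap(b);
 * unmap_a:
 *	iounmap(a);
 * out:
 *	return error;
 */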
6610 
6611 /**
6612  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6613  * @phba: pointer to lpfc hba data structure.
6614  *
6615  * This routine is invoked to unset the PCI device memory space for device
6616  * with SLI-4 interface spec.
6617  **/
6618 static void
6619 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6620 {
6621 	struct pci_dev *pdev;
6622 
6623 	/* Obtain PCI device reference */
6624 	if (!phba->pcidev)
6625 		return;
6626 	else
6627 		pdev = phba->pcidev;
6628 
6629 	/* Free coherent DMA memory allocated */
6630 
6631 	/* Unmap I/O memory space */
6632 	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6633 	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6634 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
6635 
6636 	return;
6637 }
6638 
6639 /**
6640  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6641  * @phba: pointer to lpfc hba data structure.
6642  *
6643  * This routine is invoked to enable the MSI-X interrupt vectors to a device
6644  * with SLI-3 interface spec. The kernel function pci_enable_msix() is
6645  * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6646  * invoked, enables either all or nothing, depending on the current
6647  * availability of PCI vector resources. The device driver is responsible
6648  * for calling the individual request_irq() to register each MSI-X vector
6649  * with an interrupt handler, which is done in this function. Note that
6650  * later, when the device is unloading, the driver should always call
6651  * free_irq() on all MSI-X vectors it has done request_irq() on before
6652  * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
6653  * leaves the device with MSI-X enabled and its vectors leaked.
6654  *
6655  * Return codes
6656  *   0 - successful
6657  *   other values - error
6658  **/
6659 static int
6660 lpfc_sli_enable_msix(struct lpfc_hba *phba)
6661 {
6662 	int rc, i;
6663 	LPFC_MBOXQ_t *pmb;
6664 
6665 	/* Set up MSI-X multi-message vectors */
6666 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6667 		phba->msix_entries[i].entry = i;
6668 
6669 	/* Configure MSI-X capability structure */
6670 	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6671 				ARRAY_SIZE(phba->msix_entries));
6672 	if (rc) {
6673 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6674 				"0420 PCI enable MSI-X failed (%d)\n", rc);
6675 		goto msi_fail_out;
6676 	}
6677 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6678 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6679 				"0477 MSI-X entry[%d]: vector=x%x "
6680 				"message=%d\n", i,
6681 				phba->msix_entries[i].vector,
6682 				phba->msix_entries[i].entry);
6683 	/*
6684 	 * Assign MSI-X vectors to interrupt handlers
6685 	 */
6686 
6687 	/* vector-0 is associated with the slow-path handler */
6688 	rc = request_irq(phba->msix_entries[0].vector,
6689 			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6690 			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6691 	if (rc) {
6692 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6693 				"0421 MSI-X slow-path request_irq failed "
6694 				"(%d)\n", rc);
6695 		goto msi_fail_out;
6696 	}
6697 
6698 	/* vector-1 is associated with the fast-path handler */
6699 	rc = request_irq(phba->msix_entries[1].vector,
6700 			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6701 			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6702 
6703 	if (rc) {
6704 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6705 				"0429 MSI-X fast-path request_irq failed "
6706 				"(%d)\n", rc);
6707 		goto irq_fail_out;
6708 	}
6709 
6710 	/*
6711 	 * Configure HBA MSI-X attention conditions to messages
6712 	 */
6713 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6714 
6715 	if (!pmb) {
6716 		rc = -ENOMEM;
6717 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6718 				"0474 Unable to allocate memory for issuing "
6719 				"MBOX_CONFIG_MSI command\n");
6720 		goto mem_fail_out;
6721 	}
6722 	rc = lpfc_config_msi(phba, pmb);
6723 	if (rc)
6724 		goto mbx_fail_out;
6725 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6726 	if (rc != MBX_SUCCESS) {
6727 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6728 				"0351 Config MSI mailbox command failed, "
6729 				"mbxCmd x%x, mbxStatus x%x\n",
6730 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6731 		goto mbx_fail_out;
6732 	}
6733 
6734 	/* Free memory allocated for mailbox command */
6735 	mempool_free(pmb, phba->mbox_mem_pool);
6736 	return rc;
6737 
6738 mbx_fail_out:
6739 	/* Free memory allocated for mailbox command */
6740 	mempool_free(pmb, phba->mbox_mem_pool);
6741 
6742 mem_fail_out:
6743 	/* free the irq already requested */
6744 	free_irq(phba->msix_entries[1].vector, phba);
6745 
6746 irq_fail_out:
6747 	/* free the irq already requested */
6748 	free_irq(phba->msix_entries[0].vector, phba);
6749 
6750 msi_fail_out:
6751 	/* Unconfigure MSI-X capability structure */
6752 	pci_disable_msix(phba->pcidev);
6753 	return rc;
6754 }
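
/*
 * Teardown-ordering sketch (illustrative): every request_irq() taken in
 * lpfc_sli_enable_msix() must be undone with free_irq() before
 * pci_disable_msix() is called, which is exactly what the error labels
 * above do, in reverse order of setup:
 *
 *	free_irq(phba->msix_entries[1].vector, phba);	- fast-path
 *	free_irq(phba->msix_entries[0].vector, phba);	- slow-path
 *	pci_disable_msix(phba->pcidev);
 */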
6755 
6756 /**
6757  * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6758  * @phba: pointer to lpfc hba data structure.
6759  *
6760  * This routine is invoked to release the MSI-X vectors and then disable the
6761  * MSI-X interrupt mode to device with SLI-3 interface spec.
6762  **/
6763 static void
6764 lpfc_sli_disable_msix(struct lpfc_hba *phba)
6765 {
6766 	int i;
6767 
6768 	/* Free up MSI-X multi-message vectors */
6769 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6770 		free_irq(phba->msix_entries[i].vector, phba);
6771 	/* Disable MSI-X */
6772 	pci_disable_msix(phba->pcidev);
6773 
6774 	return;
6775 }
6776 
6777 /**
6778  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6779  * @phba: pointer to lpfc hba data structure.
6780  *
6781  * This routine is invoked to enable the MSI interrupt mode on a device with
6782  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6783  * enable the MSI vector. The device driver is responsible for calling
6784  * request_irq() to register the MSI vector with an interrupt handler, which
6785  * is done in this function.
6786  *
6787  * Return codes
6788  * 	0 - successful
6789  * 	other values - error
6790  **/
6791 static int
6792 lpfc_sli_enable_msi(struct lpfc_hba *phba)
6793 {
6794 	int rc;
6795 
6796 	rc = pci_enable_msi(phba->pcidev);
6797 	if (!rc)
6798 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6799 				"0462 PCI enable MSI mode success.\n");
6800 	else {
6801 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6802 				"0471 PCI enable MSI mode failed (%d)\n", rc);
6803 		return rc;
6804 	}
6805 
6806 	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6807 			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6808 	if (rc) {
6809 		pci_disable_msi(phba->pcidev);
6810 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6811 				"0478 MSI request_irq failed (%d)\n", rc);
6812 	}
6813 	return rc;
6814 }
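
/*
 * Usage sketch (illustrative only): a caller enabling MSI and falling back
 * to INTx on failure, mirroring what lpfc_sli_enable_intr() does below:
 *
 *	if (lpfc_sli_enable_msi(phba) == 0)
 *		phba->intr_type = MSI;
 *	else
 *		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 */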
6815 
6816 /**
6817  * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6818  * @phba: pointer to lpfc hba data structure.
6819  *
6820  * This routine is invoked to disable the MSI interrupt mode on a device with
6821  * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
6822  * done request_irq() on before calling pci_disable_msi(). Failure to do so
6823  * results in a BUG_ON() and leaves the device with MSI enabled and its
6824  * vector leaked.
6825  */
6826 static void
6827 lpfc_sli_disable_msi(struct lpfc_hba *phba)
6828 {
6829 	free_irq(phba->pcidev->irq, phba);
6830 	pci_disable_msi(phba->pcidev);
6831 	return;
6832 }
6833 
6834 /**
6835  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6836  * @phba: pointer to lpfc hba data structure.
6837  *
6838  * This routine is invoked to enable device interrupt and associate the
6839  * driver's interrupt handler(s) to interrupt vector(s) of a device with
6840  * SLI-3 interface spec. Depending on the interrupt mode configured for the
6841  * driver, the driver will try to fall back from the configured interrupt
6842  * mode to an interrupt mode supported by the platform, kernel, and device,
6843  * in the order of:
6844  * MSI-X -> MSI -> IRQ.
6845  *
6846  * Return codes
6847  *   0 - successful
6848  *   other values - error
6849  **/
6850 static uint32_t
6851 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6852 {
6853 	uint32_t intr_mode = LPFC_INTR_ERROR;
6854 	int retval;
6855 
6856 	if (cfg_mode == 2) {
6857 		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6858 		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6859 		if (!retval) {
6860 			/* Now, try to enable MSI-X interrupt mode */
6861 			retval = lpfc_sli_enable_msix(phba);
6862 			if (!retval) {
6863 				/* Indicate initialization to MSI-X mode */
6864 				phba->intr_type = MSIX;
6865 				intr_mode = 2;
6866 			}
6867 		}
6868 	}
6869 
6870 	/* Fall back to MSI if MSI-X initialization failed */
6871 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
6872 		retval = lpfc_sli_enable_msi(phba);
6873 		if (!retval) {
6874 			/* Indicate initialization to MSI mode */
6875 			phba->intr_type = MSI;
6876 			intr_mode = 1;
6877 		}
6878 	}
6879 
6880 	/* Fall back to INTx if both MSI-X and MSI initialization failed */
6881 	if (phba->intr_type == NONE) {
6882 		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6883 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6884 		if (!retval) {
6885 			/* Indicate initialization to INTx mode */
6886 			phba->intr_type = INTx;
6887 			intr_mode = 0;
6888 		}
6889 	}
6890 	return intr_mode;
6891 }
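
/*
 * Configuration sketch (illustrative): cfg_mode encodes the highest
 * interrupt mode the caller is willing to try, and the returned intr_mode
 * reports what was actually achieved:
 *
 *	cfg_mode == 2:	try MSI-X, then MSI, then INTx
 *	cfg_mode == 1:	try MSI, then INTx
 *	cfg_mode == 0:	INTx only
 *
 * A return value of LPFC_INTR_ERROR means no interrupt mode could be
 * enabled at all.
 */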
6892 
6893 /**
6894  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6895  * @phba: pointer to lpfc hba data structure.
6896  *
6897  * This routine is invoked to disable device interrupt and disassociate the
6898  * driver's interrupt handler(s) from interrupt vector(s) to device with
6899  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6900  * release the interrupt vector(s) for the message signaled interrupt.
6901  **/
6902 static void
6903 lpfc_sli_disable_intr(struct lpfc_hba *phba)
6904 {
6905 	/* Disable the currently initialized interrupt mode */
6906 	if (phba->intr_type == MSIX)
6907 		lpfc_sli_disable_msix(phba);
6908 	else if (phba->intr_type == MSI)
6909 		lpfc_sli_disable_msi(phba);
6910 	else if (phba->intr_type == INTx)
6911 		free_irq(phba->pcidev->irq, phba);
6912 
6913 	/* Reset interrupt management states */
6914 	phba->intr_type = NONE;
6915 	phba->sli.slistat.sli_intr = 0;
6916 
6917 	return;
6918 }
6919 
6920 /**
6921  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6922  * @phba: pointer to lpfc hba data structure.
6923  *
6924  * This routine is invoked to enable the MSI-X interrupt vectors to a device
6925  * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6926  * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6927  * enables either all or nothing, depending on the current availability of
6928  * PCI vector resources. The device driver is responsible for calling the
6929  * individual request_irq() to register each MSI-X vector with an interrupt
6930  * handler, which is done in this function. Note that later, when the device
6931  * is unloading, the driver should always call free_irq() on all MSI-X
6932  * vectors it has done request_irq() on before calling pci_disable_msix().
6933  * Failure to do so results in a BUG_ON() and leaves the device with MSI-X
6934  * enabled and its vectors leaked.
6935  *
6936  * Return codes
6937  * 0 - successful
6938  * other values - error
6939  **/
6940 static int
6941 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6942 {
6943 	int rc, index;
6944 
6945 	/* Set up MSI-X multi-message vectors */
6946 	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6947 		phba->sli4_hba.msix_entries[index].entry = index;
6948 
6949 	/* Configure MSI-X capability structure */
6950 	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6951 			     phba->sli4_hba.cfg_eqn);
6952 	if (rc) {
6953 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6954 				"0484 PCI enable MSI-X failed (%d)\n", rc);
6955 		goto msi_fail_out;
6956 	}
6957 	/* Log MSI-X vector assignment */
6958 	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6959 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6960 				"0489 MSI-X entry[%d]: vector=x%x "
6961 				"message=%d\n", index,
6962 				phba->sli4_hba.msix_entries[index].vector,
6963 				phba->sli4_hba.msix_entries[index].entry);
6964 	/*
6965 	 * Assign MSI-X vectors to interrupt handlers
6966 	 */
6967 
6968 	/* The first vector must be associated with the slow-path MQ handler */
6969 	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6970 			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6971 			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6972 	if (rc) {
6973 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6974 				"0485 MSI-X slow-path request_irq failed "
6975 				"(%d)\n", rc);
6976 		goto msi_fail_out;
6977 	}
6978 
6979 	/* The rest of the vector(s) are associated with fast-path handler(s) */
6980 	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6981 		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6982 		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6983 		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6984 				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6985 				 LPFC_FP_DRIVER_HANDLER_NAME,
6986 				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6987 		if (rc) {
6988 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6989 					"0486 MSI-X fast-path (%d) "
6990 					"request_irq failed (%d)\n", index, rc);
6991 			goto cfg_fail_out;
6992 		}
6993 	}
6994 
6995 	return rc;
6996 
6997 cfg_fail_out:
6998 	/* free the irq already requested */
6999 	for (--index; index >= 1; index--)
7000 		free_irq(phba->sli4_hba.msix_entries[index].vector,
7001 			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7002 
7003 	/* free the irq already requested */
7004 	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7005 
7006 msi_fail_out:
7007 	/* Unconfigure MSI-X capability structure */
7008 	pci_disable_msix(phba->pcidev);
7009 	return rc;
7010 }
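
/*
 * Vector-mapping sketch (illustrative): for an SLI-4 port with cfg_eqn
 * event queues, the MSI-X vectors requested above are paired as:
 *
 *	msix_entries[0].vector -> lpfc_sli4_sp_intr_handler, dev_id = phba
 *	msix_entries[n].vector -> lpfc_sli4_fp_intr_handler,
 *			dev_id = &fcp_eq_hdl[n - 1], for n = 1 .. cfg_eqn - 1
 *
 * free_irq() must be passed the same vector/dev_id pairing that was given
 * to request_irq(), which is why the cleanup path above frees vector n
 * against fcp_eq_hdl[n - 1].
 */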
7011 
7012 /**
7013  * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7014  * @phba: pointer to lpfc hba data structure.
7015  *
7016  * This routine is invoked to release the MSI-X vectors and then disable the
7017  * MSI-X interrupt mode to device with SLI-4 interface spec.
7018  **/
7019 static void
7020 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7021 {
7022 	int index;
7023 
7024 	/* Free up MSI-X multi-message vectors */
7025 	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7026 
7027 	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
7028 		free_irq(phba->sli4_hba.msix_entries[index].vector,
7029 			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7030 	/* Disable MSI-X */
7031 	pci_disable_msix(phba->pcidev);
7032 
7033 	return;
7034 }
7035 
7036 /**
7037  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7038  * @phba: pointer to lpfc hba data structure.
7039  *
7040  * This routine is invoked to enable the MSI interrupt mode on a device with
7041  * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7042  * to enable the MSI vector. The device driver is responsible for calling
7043  * request_irq() to register the MSI vector with an interrupt handler,
7044  * which is done in this function.
7045  *
7046  * Return codes
7047  * 	0 - successful
7048  * 	other values - error
7049  **/
7050 static int
7051 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7052 {
7053 	int rc, index;
7054 
7055 	rc = pci_enable_msi(phba->pcidev);
7056 	if (!rc)
7057 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7058 				"0487 PCI enable MSI mode success.\n");
7059 	else {
7060 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7061 				"0488 PCI enable MSI mode failed (%d)\n", rc);
7062 		return rc;
7063 	}
7064 
7065 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7066 			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7067 	if (rc) {
7068 		pci_disable_msi(phba->pcidev);
7069 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7070 				"0490 MSI request_irq failed (%d)\n", rc);
7071 	}
7072 
7073 	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7074 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7075 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7076 	}
7077 
7078 	return rc;
7079 }
7080 
7081 /**
7082  * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7083  * @phba: pointer to lpfc hba data structure.
7084  *
7085  * This routine is invoked to disable the MSI interrupt mode on a device with
7086  * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
7087  * done request_irq() on before calling pci_disable_msi(). Failure to do so
7088  * results in a BUG_ON() and leaves the device with MSI enabled and its
7089  * vector leaked.
7090  **/
7091 static void
7092 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7093 {
7094 	free_irq(phba->pcidev->irq, phba);
7095 	pci_disable_msi(phba->pcidev);
7096 	return;
7097 }
7098 
7099 /**
7100  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7101  * @phba: pointer to lpfc hba data structure.
7102  *
7103  * This routine is invoked to enable device interrupt and associate the
7104  * driver's interrupt handler(s) to interrupt vector(s) of a device with
7105  * SLI-4 interface spec. Depending on the interrupt mode configured for the
7106  * driver, the driver will try to fall back from the configured interrupt
7107  * mode to an interrupt mode supported by the platform, kernel, and device,
7108  * in the order of:
7109  * MSI-X -> MSI -> IRQ.
7110  *
7111  * Return codes
7112  * 	0 - successful
7113  * 	other values - error
7114  **/
7115 static uint32_t
7116 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7117 {
7118 	uint32_t intr_mode = LPFC_INTR_ERROR;
7119 	int retval, index;
7120 
7121 	if (cfg_mode == 2) {
7122 		/* No preparation is currently needed before the conf_msi
7123 		 * mbox cmd, so try to enable MSI-X interrupt mode directly.
7124 		 */
7125 		retval = lpfc_sli4_enable_msix(phba);
7126 		if (!retval) {
7127 			/* Indicate initialization to MSI-X mode */
7128 			phba->intr_type = MSIX;
7129 			intr_mode = 2;
7130 		}
7131 	}
7134 
7135 	/* Fall back to MSI if MSI-X initialization failed */
7136 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
7137 		retval = lpfc_sli4_enable_msi(phba);
7138 		if (!retval) {
7139 			/* Indicate initialization to MSI mode */
7140 			phba->intr_type = MSI;
7141 			intr_mode = 1;
7142 		}
7143 	}
7144 
7145 	/* Fall back to INTx if both MSI-X and MSI initialization failed */
7146 	if (phba->intr_type == NONE) {
7147 		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7148 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7149 		if (!retval) {
7150 			/* Indicate initialization to INTx mode */
7151 			phba->intr_type = INTx;
7152 			intr_mode = 0;
7153 			for (index = 0; index < phba->cfg_fcp_eq_count;
7154 			     index++) {
7155 				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7156 				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7157 			}
7158 		}
7159 	}
7160 	return intr_mode;
7161 }
7162 
7163 /**
7164  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7165  * @phba: pointer to lpfc hba data structure.
7166  *
7167  * This routine is invoked to disable device interrupt and disassociate
7168  * the driver's interrupt handler(s) from interrupt vector(s) to device
7169  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7170  * will release the interrupt vector(s) for the message signaled interrupt.
7171  **/
7172 static void
7173 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7174 {
7175 	/* Disable the currently initialized interrupt mode */
7176 	if (phba->intr_type == MSIX)
7177 		lpfc_sli4_disable_msix(phba);
7178 	else if (phba->intr_type == MSI)
7179 		lpfc_sli4_disable_msi(phba);
7180 	else if (phba->intr_type == INTx)
7181 		free_irq(phba->pcidev->irq, phba);
7182 
7183 	/* Reset interrupt management states */
7184 	phba->intr_type = NONE;
7185 	phba->sli.slistat.sli_intr = 0;
7186 
7187 	return;
7188 }
7189 
7190 /**
7191  * lpfc_unset_hba - Unset SLI3 hba device initialization
7192  * @phba: pointer to lpfc hba data structure.
7193  *
7194  * This routine is invoked to undo the HBA device initialization steps
7195  * performed on a device with SLI-3 interface spec.
7196  **/
7197 static void
7198 lpfc_unset_hba(struct lpfc_hba *phba)
7199 {
7200 	struct lpfc_vport *vport = phba->pport;
7201 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7202 
7203 	spin_lock_irq(shost->host_lock);
7204 	vport->load_flag |= FC_UNLOADING;
7205 	spin_unlock_irq(shost->host_lock);
7206 
7207 	lpfc_stop_hba_timers(phba);
7208 
7209 	phba->pport->work_port_events = 0;
7210 
7211 	lpfc_sli_hba_down(phba);
7212 
7213 	lpfc_sli_brdrestart(phba);
7214 
7215 	lpfc_sli_disable_intr(phba);
7216 
7217 	return;
7218 }
7219 
7220 /**
7221  * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7222  * @phba: pointer to lpfc hba data structure.
7223  *
7224  * This routine is invoked to undo the HBA device initialization steps
7225  * performed on a device with SLI-4 interface spec.
7226  **/
7227 static void
7228 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7229 {
7230 	struct lpfc_vport *vport = phba->pport;
7231 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
7232 
7233 	spin_lock_irq(shost->host_lock);
7234 	vport->load_flag |= FC_UNLOADING;
7235 	spin_unlock_irq(shost->host_lock);
7236 
7237 	phba->pport->work_port_events = 0;
7238 
7239 	lpfc_sli4_hba_down(phba);
7240 
7241 	lpfc_sli4_disable_intr(phba);
7242 
7243 	return;
7244 }
7245 
7246 /**
7247  * lpfc_sli4_hba_unset - Unset the fcoe hba
7248  * @phba: Pointer to HBA context object.
7249  *
7250  * This function is called in the SLI4 code path to reset the HBA's FCoE
7251  * function. The caller is not required to hold any lock. This routine
7252  * issues PCI function reset mailbox command to reset the FCoE function.
7253  * At the end of the function, it calls lpfc_hba_down_post function to
7254  * free any pending commands.
7255  **/
7256 static void
7257 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7258 {
7259 	int wait_cnt = 0;
7260 	LPFC_MBOXQ_t *mboxq;
7261 
7262 	lpfc_stop_hba_timers(phba);
7263 	phba->sli4_hba.intr_enable = 0;
7264 
7265 	/*
7266 	 * Gracefully wait out the potential current outstanding asynchronous
7267 	 * mailbox command.
7268 	 */
7269 
7270 	/* First, block any pending async mailbox command from being posted */
7271 	spin_lock_irq(&phba->hbalock);
7272 	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7273 	spin_unlock_irq(&phba->hbalock);
7274 	/* Now, try to wait it out, if we can */
7275 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7276 		msleep(10);
7277 		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7278 			break;
7279 	}
7280 	/* Forcefully release the outstanding mailbox command if timed out */
7281 	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7282 		spin_lock_irq(&phba->hbalock);
7283 		mboxq = phba->sli.mbox_active;
7284 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7285 		__lpfc_mbox_cmpl_put(phba, mboxq);
7286 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7287 		phba->sli.mbox_active = NULL;
7288 		spin_unlock_irq(&phba->hbalock);
7289 	}
7290 
7291 	/* Tear down the queues in the HBA */
7292 	lpfc_sli4_queue_unset(phba);
7293 
7294 	/* Disable PCI subsystem interrupt */
7295 	lpfc_sli4_disable_intr(phba);
7296 
7297 	/* The kthread_stop signal will trigger work_done one more time */
7298 	kthread_stop(phba->worker_thread);
7299 
7300 	/* Stop the SLI4 device port */
7301 	phba->pport->work_port_events = 0;
7302 }
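
/*
 * Quiesce-pattern sketch (illustrative): the graceful wait above bounds
 * the time spent waiting for an outstanding asynchronous mailbox command:
 *
 *	set LPFC_SLI_ASYNC_MBX_BLK;	- block new async mailbox posts
 *	while (LPFC_SLI_MBOX_ACTIVE && ++wait_cnt <= LPFC_ACTIVE_MBOX_WAIT_CNT)
 *		msleep(10);
 *	if (still active)
 *		complete the command as MBX_NOT_FINISHED under hbalock;
 */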
7303 
7304 /**
7305  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7306  * @phba: Pointer to HBA context object.
7307  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7308  *
7309  * This function is called in the SLI4 code path to read the port's
7310  * sli4 capabilities.
7311  *
7312  * This function may be called from any context that can block-wait
7313  * for the completion.  The expectation is that this routine is called
7314  * typically from probe_one or from the online routine.
7315  **/
7316 int
7317 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7318 {
7319 	int rc;
7320 	struct lpfc_mqe *mqe;
7321 	struct lpfc_pc_sli4_params *sli4_params;
7322 	uint32_t mbox_tmo;
7323 
7324 	rc = 0;
7325 	mqe = &mboxq->u.mqe;
7326 
7327 	/* Read the port's SLI4 Parameters port capabilities */
7328 	lpfc_sli4_params(mboxq);
7329 	if (!phba->sli4_hba.intr_enable)
7330 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7331 	else {
7332 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7333 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7334 	}
7335 
7336 	if (unlikely(rc))
7337 		return 1;
7338 
7339 	sli4_params = &phba->sli4_hba.pc_sli4_params;
7340 	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7341 	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7342 	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7343 	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7344 					     &mqe->un.sli4_params);
7345 	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7346 					     &mqe->un.sli4_params);
7347 	sli4_params->proto_types = mqe->un.sli4_params.word3;
7348 	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7349 	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7350 	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7351 	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7352 	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7353 	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7354 	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7355 	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7356 	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7357 	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7358 	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7359 	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7360 	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7361 	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7362 	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7363 	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7364 	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7365 	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7366 	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7367 	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7368 	return rc;
7369 }
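
/*
 * Field-extraction sketch (illustrative): bf_get() pulls a named bit-field
 * out of a mailbox response word, so reading a single capability from the
 * SLI4_PARAMS response looks like:
 *
 *	struct lpfc_mqe *mqe = &mboxq->u.mqe;
 *	uint32_t eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
 *
 * All of the pc_sli4_params fields above are populated this way from one
 * mailbox response.
 */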
7370 
7371 /**
7372  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7373  * @pdev: pointer to PCI device
7374  * @pid: pointer to PCI device identifier
7375  *
7376  * This routine is to be called to attach a device with SLI-3 interface spec
7377  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7378  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7379  * information of the device and driver to see if the driver states that it can
7380  * support this kind of device. If the match is successful, the driver core
7381  * invokes this routine. If this routine determines it can claim the HBA, it
7382  * does all the initialization that it needs to do to handle the HBA properly.
7383  *
7384  * Return code
7385  * 	0 - driver can claim the device
7386  * 	negative value - driver can not claim the device
7387  **/
7388 static int __devinit
7389 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7390 {
7391 	struct lpfc_hba   *phba;
7392 	struct lpfc_vport *vport = NULL;
7393 	struct Scsi_Host  *shost = NULL;
7394 	int error;
7395 	uint32_t cfg_mode, intr_mode;
7396 
7397 	/* Allocate memory for HBA structure */
7398 	phba = lpfc_hba_alloc(pdev);
7399 	if (!phba)
7400 		return -ENOMEM;
7401 
7402 	/* Perform generic PCI device enabling operation */
7403 	error = lpfc_enable_pci_dev(phba);
7404 	if (error) {
7405 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7406 				"1401 Failed to enable pci device.\n");
7407 		goto out_free_phba;
7408 	}
7409 
7410 	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
7411 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7412 	if (error)
7413 		goto out_disable_pci_dev;
7414 
7415 	/* Set up SLI-3 specific device PCI memory space */
7416 	error = lpfc_sli_pci_mem_setup(phba);
7417 	if (error) {
7418 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7419 				"1402 Failed to set up pci memory space.\n");
7420 		goto out_disable_pci_dev;
7421 	}
7422 
7423 	/* Set up phase-1 common device driver resources */
7424 	error = lpfc_setup_driver_resource_phase1(phba);
7425 	if (error) {
7426 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7427 				"1403 Failed to set up driver resource.\n");
7428 		goto out_unset_pci_mem_s3;
7429 	}
7430 
7431 	/* Set up SLI-3 specific device driver resources */
7432 	error = lpfc_sli_driver_resource_setup(phba);
7433 	if (error) {
7434 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7435 				"1404 Failed to set up driver resource.\n");
7436 		goto out_unset_pci_mem_s3;
7437 	}
7438 
7439 	/* Initialize and populate the iocb list per host */
7440 	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7441 	if (error) {
7442 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7443 				"1405 Failed to initialize iocb list.\n");
7444 		goto out_unset_driver_resource_s3;
7445 	}
7446 
7447 	/* Set up common device driver resources */
7448 	error = lpfc_setup_driver_resource_phase2(phba);
7449 	if (error) {
7450 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7451 				"1406 Failed to set up driver resource.\n");
7452 		goto out_free_iocb_list;
7453 	}
7454 
7455 	/* Create SCSI host to the physical port */
7456 	error = lpfc_create_shost(phba);
7457 	if (error) {
7458 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7459 				"1407 Failed to create scsi host.\n");
7460 		goto out_unset_driver_resource;
7461 	}
7462 
7463 	/* Configure sysfs attributes */
7464 	vport = phba->pport;
7465 	error = lpfc_alloc_sysfs_attr(vport);
7466 	if (error) {
7467 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7468 				"1476 Failed to allocate sysfs attr\n");
7469 		goto out_destroy_shost;
7470 	}
7471 
7472 	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7473 	/* Now, trying to enable interrupt and bring up the device */
7474 	cfg_mode = phba->cfg_use_msi;
7475 	while (true) {
7476 		/* Put device to a known state before enabling interrupt */
7477 		lpfc_stop_port(phba);
7478 		/* Configure and enable interrupt */
7479 		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7480 		if (intr_mode == LPFC_INTR_ERROR) {
7481 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7482 					"0431 Failed to enable interrupt.\n");
7483 			error = -ENODEV;
7484 			goto out_free_sysfs_attr;
7485 		}
7486 		/* SLI-3 HBA setup */
7487 		if (lpfc_sli_hba_setup(phba)) {
7488 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7489 					"1477 Failed to set up hba\n");
7490 			error = -ENODEV;
7491 			goto out_remove_device;
7492 		}
7493 
7494 		/* Wait 50ms for the interrupts of previous mailbox commands */
7495 		msleep(50);
7496 		/* Check active interrupts on message signaled interrupts */
7497 		if (intr_mode == 0 ||
7498 		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7499 			/* Log the current active interrupt mode */
7500 			phba->intr_mode = intr_mode;
7501 			lpfc_log_intr_mode(phba, intr_mode);
7502 			break;
7503 		} else {
7504 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7505 					"0447 Configure interrupt mode (%d) "
7506 					"failed active interrupt test.\n",
7507 					intr_mode);
7508 			/* Disable the current interrupt mode */
7509 			lpfc_sli_disable_intr(phba);
7510 			/* Try next level of interrupt mode */
7511 			cfg_mode = --intr_mode;
7512 		}
7513 	}
7514 
7515 	/* Perform post initialization setup */
7516 	lpfc_post_init_setup(phba);
7517 
7518 	/* Check if there are static vports to be created. */
7519 	lpfc_create_static_vport(phba);
7520 
7521 	return 0;
7522 
7523 out_remove_device:
7524 	lpfc_unset_hba(phba);
7525 out_free_sysfs_attr:
7526 	lpfc_free_sysfs_attr(vport);
7527 out_destroy_shost:
7528 	lpfc_destroy_shost(phba);
7529 out_unset_driver_resource:
7530 	lpfc_unset_driver_resource_phase2(phba);
7531 out_free_iocb_list:
7532 	lpfc_free_iocb_list(phba);
7533 out_unset_driver_resource_s3:
7534 	lpfc_sli_driver_resource_unset(phba);
7535 out_unset_pci_mem_s3:
7536 	lpfc_sli_pci_mem_unset(phba);
7537 out_disable_pci_dev:
7538 	lpfc_disable_pci_dev(phba);
7539 	if (shost)
7540 		scsi_host_put(shost);
7541 out_free_phba:
7542 	lpfc_hba_free(phba);
7543 	return error;
7544 }
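
/*
 * Probe-loop sketch (illustrative): the while (true) loop above walks down
 * the interrupt-mode ladder until one mode passes the active-interrupt test:
 *
 *	cfg_mode = phba->cfg_use_msi;	- user-requested ceiling
 *	enable interrupt at cfg_mode, set up the HBA, wait 50ms;
 *	if (INTx mode, or sli_intr shows interrupts were really received)
 *		keep this mode;
 *	else
 *		disable interrupt and retry with cfg_mode = --intr_mode;
 */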
7545 
7546 /**
7547  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7548  * @pdev: pointer to PCI device
7549  *
7550  * This routine is to be called to detach a device with SLI-3 interface
7551  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7552  * removed from PCI bus, it performs all the necessary cleanup for the HBA
7553  * device to be removed from the PCI subsystem properly.
7554  **/
7555 static void __devexit
7556 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7557 {
7558 	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
7559 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7560 	struct lpfc_vport **vports;
7561 	struct lpfc_hba   *phba = vport->phba;
7562 	int i;
7563 	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7564 
7565 	spin_lock_irq(&phba->hbalock);
7566 	vport->load_flag |= FC_UNLOADING;
7567 	spin_unlock_irq(&phba->hbalock);
7568 
7569 	lpfc_free_sysfs_attr(vport);
7570 
7571 	/* Release all the vports against this physical port */
7572 	vports = lpfc_create_vport_work_array(phba);
7573 	if (vports != NULL)
7574 		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7575 			fc_vport_terminate(vports[i]->fc_vport);
7576 	lpfc_destroy_vport_work_array(phba, vports);
7577 
7578 	/* Remove FC host and then SCSI host with the physical port */
7579 	fc_remove_host(shost);
7580 	scsi_remove_host(shost);
7581 	lpfc_cleanup(vport);
7582 
7583 	/*
7584 	 * Bring down the SLI Layer. This step disables all interrupts,
7585 	 * clears the rings, discards all mailbox commands, and resets
7586 	 * the HBA.
7587 	 */
7588 
7589 	/* HBA interrupt will be disabled after this call */
7590 	lpfc_sli_hba_down(phba);
7591 	/* The kthread_stop signal will trigger work_done one more time */
7592 	kthread_stop(phba->worker_thread);
7593 	/* Final cleanup of txcmplq and reset the HBA */
7594 	lpfc_sli_brdrestart(phba);
7595 
7596 	lpfc_stop_hba_timers(phba);
7597 	spin_lock_irq(&phba->hbalock);
7598 	list_del_init(&vport->listentry);
7599 	spin_unlock_irq(&phba->hbalock);
7600 
7601 	lpfc_debugfs_terminate(vport);
7602 
7603 	/* Disable interrupt */
7604 	lpfc_sli_disable_intr(phba);
7605 
7606 	pci_set_drvdata(pdev, NULL);
7607 	scsi_host_put(shost);
7608 
7609 	/*
7610 	 * Call scsi_free before mem_free since scsi bufs are released to their
7611 	 * corresponding pools here.
7612 	 */
7613 	lpfc_scsi_free(phba);
7614 	lpfc_mem_free_all(phba);
7615 
7616 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7617 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
7618 
7619 	/* Free resources associated with SLI2 interface */
7620 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7621 			  phba->slim2p.virt, phba->slim2p.phys);
7622 
7623 	/* unmap adapter SLIM and Control Registers */
7624 	iounmap(phba->ctrl_regs_memmap_p);
7625 	iounmap(phba->slim_memmap_p);
7626 
7627 	lpfc_hba_free(phba);
7628 
7629 	pci_release_selected_regions(pdev, bars);
7630 	pci_disable_device(pdev);
7631 }
7632 
7633 /**
7634  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7635  * @pdev: pointer to PCI device
7636  * @msg: power management message
7637  *
7638  * This routine is to be called from the kernel's PCI subsystem to support
7639  * system Power Management (PM) for a device with SLI-3 interface spec. When
7640  * PM invokes this method, it quiesces the device by stopping the driver's
7641  * worker thread for the device, turning off the device's interrupt and DMA,
7642  * and bringing the device offline. Note that the driver implements only the
7643  * minimum PM requirements of a power-aware driver: all possible PM messages
7644  * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method call are
7645  * treated as SUSPEND, and the driver fully reinitializes its device during
7646  * the resume() method call. Consequently, the driver sets the device to the
7647  * PCI_D3hot state in PCI config space instead of setting it according to
7648  * the @msg provided by the PM.
7649  *
7650  * Return code
7651  * 	0 - driver suspended the device
7652  * 	Error otherwise
7653  **/
7654 static int
7655 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7656 {
7657 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7658 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7659 
7660 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7661 			"0473 PCI device Power Management suspend.\n");
7662 
7663 	/* Bring down the device */
7664 	lpfc_offline_prep(phba);
7665 	lpfc_offline(phba);
7666 	kthread_stop(phba->worker_thread);
7667 
7668 	/* Disable interrupt from device */
7669 	lpfc_sli_disable_intr(phba);
7670 
7671 	/* Save device state to PCI config space */
7672 	pci_save_state(pdev);
7673 	pci_set_power_state(pdev, PCI_D3hot);
7674 
7675 	return 0;
7676 }
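
/*
 * PM-pairing sketch (illustrative): suspend and resume are strict mirrors;
 * everything quiesced here is rebuilt in lpfc_pci_resume_one_s3():
 *
 *	suspend: lpfc_offline_prep/lpfc_offline -> kthread_stop ->
 *		 lpfc_sli_disable_intr -> pci_save_state -> PCI_D3hot
 *	resume:  PCI_D0 -> pci_restore_state -> kthread_run ->
 *		 lpfc_sli_enable_intr -> lpfc_sli_brdrestart -> lpfc_online
 */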
7677 
7678 /**
7679  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7680  * @pdev: pointer to PCI device
7681  *
7682  * This routine is to be called from the kernel's PCI subsystem to support
7683  * system Power Management (PM) for a device with SLI-3 interface spec. When
7684  * PM invokes this method, it restores the device's PCI config space state,
7685  * fully reinitializes the device, and brings it online. Note that the
7686  * driver implements only the minimum PM requirements of a power-aware
7687  * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
7688  * the suspend() method call are treated as SUSPEND, and the driver fully
7689  * reinitializes its device during the resume() method call. Consequently,
7690  * the device is set to PCI_D0 directly in PCI config space before restoring
7691  * the state.
7692  *
7693  * Return code
7694  * 	0 - driver resumed the device
7695  * 	Error otherwise
7696  **/
7697 static int
7698 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7699 {
7700 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7701 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7702 	uint32_t intr_mode;
7703 	int error;
7704 
7705 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7706 			"0452 PCI device Power Management resume.\n");
7707 
7708 	/* Restore device state from PCI config space */
7709 	pci_set_power_state(pdev, PCI_D0);
7710 	pci_restore_state(pdev);
7711 
7712 	/*
7713 	 * The new kernel behavior of pci_restore_state() clears the device's
7714 	 * saved_state flag, so we need to save the restored state again.
7715 	 */
7716 	pci_save_state(pdev);
7717 
7718 	if (pdev->is_busmaster)
7719 		pci_set_master(pdev);
7720 
7721 	/* Startup the kernel thread for this host adapter. */
7722 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
7723 					"lpfc_worker_%d", phba->brd_no);
7724 	if (IS_ERR(phba->worker_thread)) {
7725 		error = PTR_ERR(phba->worker_thread);
7726 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7727 				"0434 PM resume failed to start worker "
7728 				"thread: error=x%x.\n", error);
7729 		return error;
7730 	}
7731 
7732 	/* Configure and enable interrupt */
7733 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7734 	if (intr_mode == LPFC_INTR_ERROR) {
7735 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7736 				"0430 PM resume Failed to enable interrupt\n");
7737 		return -EIO;
7738 	} else
7739 		phba->intr_mode = intr_mode;
7740 
7741 	/* Restart HBA and bring it online */
7742 	lpfc_sli_brdrestart(phba);
7743 	lpfc_online(phba);
7744 
7745 	/* Log the current active interrupt mode */
7746 	lpfc_log_intr_mode(phba, phba->intr_mode);
7747 
7748 	return 0;
7749 }
7750 
7751 /**
7752  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recovery
7753  * @phba: pointer to lpfc hba data structure.
7754  *
7755  * This routine is called to prepare the SLI3 device for PCI slot recovery.
7756  * It aborts and stops all the on-going I/Os on the PCI device.
7757  **/
7758 static void
7759 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7760 {
7761 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7762 			"2723 PCI channel I/O abort preparing for recovery\n");
7763 	/* Prepare for bringing HBA offline */
7764 	lpfc_offline_prep(phba);
7765 	/* Clear sli active flag to prevent sysfs access to HBA */
7766 	spin_lock_irq(&phba->hbalock);
7767 	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
7768 	spin_unlock_irq(&phba->hbalock);
7769 	/* Stop and flush all I/Os and bring HBA offline */
7770 	lpfc_offline(phba);
7771 }
7772 
7773 /**
7774  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7775  * @phba: pointer to lpfc hba data structure.
7776  *
7777  * This routine is called to prepare the SLI3 device for PCI slot reset. It
7778  * disables the device interrupt and pci device, and aborts the internal FCP
7779  * pending I/Os.
7780  **/
7781 static void
7782 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7783 {
7784 	struct lpfc_sli *psli = &phba->sli;
7785 	struct lpfc_sli_ring  *pring;
7786 
7787 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7788 			"2710 PCI channel disable preparing for reset\n");
7789 	/* Disable interrupt and pci device */
7790 	lpfc_sli_disable_intr(phba);
7791 	pci_disable_device(phba->pcidev);
7792 	/*
7793 	 * There may be I/Os dropped by the firmware.
7794 	 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
7795 	 * retry them after the link is re-established.
7796 	 */
7797 	pring = &psli->ring[psli->fcp_ring];
7798 	lpfc_sli_abort_iocb_ring(phba, pring);
7799 }
7800 
7801 /**
7802  * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7803  * @phba: pointer to lpfc hba data structure.
7804  *
7805  * This routine is called to prepare the SLI3 device for PCI slot permanently
7806  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7807  * pending I/Os.
7808  **/
7809 static void
7810 lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7811 {
7812 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7813 			"2711 PCI channel permanent disable for failure\n");
7814 	/* Clean up all driver's outstanding SCSI I/Os */
7815 	lpfc_sli_flush_fcp_rings(phba);
7816 }
7817 
7818 /**
7819  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7820  * @pdev: pointer to PCI device.
7821  * @state: the current PCI connection state.
7822  *
7823  * This routine is called from the PCI subsystem for I/O error handling to
7824  * device with SLI-3 interface spec. This function is called by the PCI
7825  * subsystem after a PCI bus error affecting this device has been detected.
7826  * When this function is invoked, it will need to stop all the I/Os and
7827  * interrupt(s) to the device. Once that is done, it will return
7828  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7829  * as desired.
7830  *
7831  * Return codes
7832  * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7833  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7834  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7835  **/
7836 static pci_ers_result_t
7837 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7838 {
7839 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7840 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7841 
7842 	/* Block all SCSI devices' I/Os on the host */
7843 	lpfc_scsi_dev_block(phba);
7844 
7845 	switch (state) {
7846 	case pci_channel_io_normal:
7847 		/* Non-fatal error, prepare for recovery */
7848 		lpfc_sli_prep_dev_for_recover(phba);
7849 		return PCI_ERS_RESULT_CAN_RECOVER;
7850 	case pci_channel_io_frozen:
7851 		/* Fatal error, prepare for slot reset */
7852 		lpfc_sli_prep_dev_for_reset(phba);
7853 		return PCI_ERS_RESULT_NEED_RESET;
7854 	case pci_channel_io_perm_failure:
7855 		/* Permanent failure, prepare for device down */
7856 		lpfc_prep_dev_for_perm_failure(phba);
7857 		return PCI_ERS_RESULT_DISCONNECT;
7858 	default:
7859 		/* Unknown state, prepare and request slot reset */
7860 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7861 				"0472 Unknown PCI error state: x%x\n", state);
7862 		lpfc_sli_prep_dev_for_reset(phba);
7863 		return PCI_ERS_RESULT_NEED_RESET;
7864 	}
7865 }
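
/*
 * State-mapping sketch (illustrative): the PCI channel state reported by
 * the EEH/AER core determines the preparation done and the result returned
 * above:
 *
 *	pci_channel_io_normal		-> prep recover	 -> CAN_RECOVER
 *	pci_channel_io_frozen		-> prep reset	 -> NEED_RESET
 *	pci_channel_io_perm_failure	-> flush FCP I/O -> DISCONNECT
 *	anything else			-> prep reset	 -> NEED_RESET
 */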
7866 
7867 /**
7868  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7869  * @pdev: pointer to PCI device.
7870  *
7871  * This routine is called from the PCI subsystem for error handling to
7872  * device with SLI-3 interface spec. This is called after PCI bus has been
7873  * reset to restart the PCI card from scratch, as if from a cold-boot.
7874  * During the PCI subsystem error recovery, after driver returns
7875  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7876  * recovery and then call this routine before calling the .resume method
7877  * to recover the device. This function will initialize the HBA device and
7878  * enable the interrupt, but it will just put the HBA into an offline state
7879  * without passing any I/O traffic.
7880  *
7881  * Return codes
7882  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
7883  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7884  **/
7885 static pci_ers_result_t
7886 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7887 {
7888 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7889 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7890 	struct lpfc_sli *psli = &phba->sli;
7891 	uint32_t intr_mode;
7892 
7893 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7894 	if (pci_enable_device_mem(pdev)) {
7895 		printk(KERN_ERR "lpfc: Cannot re-enable "
7896 			"PCI device after reset.\n");
7897 		return PCI_ERS_RESULT_DISCONNECT;
7898 	}
7899 
7900 	pci_restore_state(pdev);
7901 
7902 	/*
7903 	 * The new kernel behavior of pci_restore_state() clears the device's
7904 	 * saved_state flag, so we need to save the restored state again.
7905 	 */
7906 	pci_save_state(pdev);
7907 
7908 	if (pdev->is_busmaster)
7909 		pci_set_master(pdev);
7910 
7911 	spin_lock_irq(&phba->hbalock);
7912 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7913 	spin_unlock_irq(&phba->hbalock);
7914 
7915 	/* Configure and enable interrupt */
7916 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7917 	if (intr_mode == LPFC_INTR_ERROR) {
7918 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7919 				"0427 Cannot re-enable interrupt after "
7920 				"slot reset.\n");
7921 		return PCI_ERS_RESULT_DISCONNECT;
7922 	} else
7923 		phba->intr_mode = intr_mode;
7924 
7925 	/* Take device offline; this will perform cleanup */
7926 	lpfc_offline(phba);
7927 	lpfc_sli_brdrestart(phba);
7928 
7929 	/* Log the current active interrupt mode */
7930 	lpfc_log_intr_mode(phba, phba->intr_mode);
7931 
7932 	return PCI_ERS_RESULT_RECOVERED;
7933 }
7934 
7935 /**
7936  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7937  * @pdev: pointer to PCI device
7938  *
7939  * This routine is called from the PCI subsystem for error handling to device
7940  * with SLI-3 interface spec. It is called when kernel error recovery tells
7941  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7942  * error recovery. After this call, traffic can start to flow from this device
7943  * again.
7944  **/
7945 static void
7946 lpfc_io_resume_s3(struct pci_dev *pdev)
7947 {
7948 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
7949 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7950 
7951 	/* Bring the device online */
7952 	lpfc_online(phba);
7953 
7954 	/* Clean up Advanced Error Reporting (AER) if needed */
7955 	if (phba->hba_flag & HBA_AER_ENABLED)
7956 		pci_cleanup_aer_uncorrect_error_status(pdev);
7957 }
7958 
7959 /**
7960  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7961  * @phba: pointer to lpfc hba data structure.
7962  *
7963  * returns the number of ELS/CT IOCBs to reserve
7964  **/
7965 int
7966 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7967 {
7968 	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7969 
7970 	if (phba->sli_rev == LPFC_SLI_REV4) {
7971 		if (max_xri <= 100)
7972 			return 10;
7973 		else if (max_xri <= 256)
7974 			return 25;
7975 		else if (max_xri <= 512)
7976 			return 50;
7977 		else if (max_xri <= 1024)
7978 			return 100;
7979 		else
7980 			return 150;
7981 	} else
7982 		return 0;
7983 }
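
/*
 * Worked example (illustrative): with max_xri = 512 the thresholds above
 * reserve 50 ELS/CT IOCBs; a port reporting max_xri = 1024 reserves 100,
 * and anything larger caps at 150. Non-SLI-4 (SLI-3) ports always get 0.
 */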
7984 
7985 /**
7986  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7987  * @pdev: pointer to PCI device
7988  * @pid: pointer to PCI device identifier
7989  *
7990  * This routine is called from the kernel's PCI subsystem to attach a device
7991  * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7992  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7993  * information of the device and driver to see if the driver states that it
7994  * can support this kind of device. If the match is successful, the driver
7995  * core invokes this routine. If this routine determines it can claim the HBA,
7996  * it does all the initialization that it needs to do to handle the HBA
7997  * properly.
7998  *
7999  * Return code
8000  * 	0 - driver can claim the device
8001  * 	negative value - driver can not claim the device
8002  **/
8003 static int __devinit
8004 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8005 {
8006 	struct lpfc_hba   *phba;
8007 	struct lpfc_vport *vport = NULL;
8008 	struct Scsi_Host  *shost = NULL;
8009 	int error;
8010 	uint32_t cfg_mode, intr_mode;
8011 	int mcnt;
8012 
8013 	/* Allocate memory for HBA structure */
8014 	phba = lpfc_hba_alloc(pdev);
8015 	if (!phba)
8016 		return -ENOMEM;
8017 
8018 	/* Perform generic PCI device enabling operation */
8019 	error = lpfc_enable_pci_dev(phba);
8020 	if (error) {
8021 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8022 				"1409 Failed to enable pci device.\n");
8023 		goto out_free_phba;
8024 	}
8025 
8026 	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
8027 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8028 	if (error)
8029 		goto out_disable_pci_dev;
8030 
8031 	/* Set up SLI-4 specific device PCI memory space */
8032 	error = lpfc_sli4_pci_mem_setup(phba);
8033 	if (error) {
8034 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8035 				"1410 Failed to set up pci memory space.\n");
8036 		goto out_disable_pci_dev;
8037 	}
8038 
8039 	/* Set up phase-1 common device driver resources */
8040 	error = lpfc_setup_driver_resource_phase1(phba);
8041 	if (error) {
8042 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8043 				"1411 Failed to set up driver resource.\n");
8044 		goto out_unset_pci_mem_s4;
8045 	}
8046 
8047 	/* Set up SLI-4 Specific device driver resources */
8048 	error = lpfc_sli4_driver_resource_setup(phba);
8049 	if (error) {
8050 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8051 				"1412 Failed to set up driver resource.\n");
8052 		goto out_unset_pci_mem_s4;
8053 	}
8054 
8055 	/* Initialize and populate the iocb list per host */
8056 	error = lpfc_init_iocb_list(phba,
8057 			phba->sli4_hba.max_cfg_param.max_xri);
8058 	if (error) {
8059 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8060 				"1413 Failed to initialize iocb list.\n");
8061 		goto out_unset_driver_resource_s4;
8062 	}
8063 
8064 	/* Set up common device driver resources */
8065 	error = lpfc_setup_driver_resource_phase2(phba);
8066 	if (error) {
8067 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8068 				"1414 Failed to set up driver resource.\n");
8069 		goto out_free_iocb_list;
8070 	}
8071 
8072 	/* Create SCSI host to the physical port */
8073 	error = lpfc_create_shost(phba);
8074 	if (error) {
8075 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8076 				"1415 Failed to create scsi host.\n");
8077 		goto out_unset_driver_resource;
8078 	}
8079 
8080 	/* Configure sysfs attributes */
8081 	vport = phba->pport;
8082 	error = lpfc_alloc_sysfs_attr(vport);
8083 	if (error) {
8084 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8085 				"1416 Failed to allocate sysfs attr\n");
8086 		goto out_destroy_shost;
8087 	}
8088 
8089 	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8090 	/* Now, trying to enable interrupt and bring up the device */
8091 	cfg_mode = phba->cfg_use_msi;
8092 	while (true) {
8093 		/* Put device to a known state before enabling interrupt */
8094 		lpfc_stop_port(phba);
8095 		/* Configure and enable interrupt */
8096 		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8097 		if (intr_mode == LPFC_INTR_ERROR) {
8098 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8099 					"0426 Failed to enable interrupt.\n");
8100 			error = -ENODEV;
8101 			goto out_free_sysfs_attr;
8102 		}
8103 		/* Default to single FCP EQ for non-MSI-X */
8104 		if (phba->intr_type != MSIX)
8105 			phba->cfg_fcp_eq_count = 1;
8106 		/* Set up SLI-4 HBA */
8107 		if (lpfc_sli4_hba_setup(phba)) {
8108 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8109 					"1421 Failed to set up hba\n");
8110 			error = -ENODEV;
8111 			goto out_disable_intr;
8112 		}
8113 
8114 		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
8115 		if (intr_mode != 0)
8116 			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
8117 							    LPFC_ACT_INTR_CNT);
8118 
8119 		/* Check active interrupts received only for MSI/MSI-X */
8120 		if (intr_mode == 0 ||
8121 		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
8122 			/* Log the current active interrupt mode */
8123 			phba->intr_mode = intr_mode;
8124 			lpfc_log_intr_mode(phba, intr_mode);
8125 			break;
8126 		}
8127 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8128 				"0451 Configure interrupt mode (%d) "
8129 				"failed active interrupt test.\n",
8130 				intr_mode);
8131 		/* Unset the previous SLI-4 HBA setup */
8132 		lpfc_sli4_unset_hba(phba);
8133 		/* Try next level of interrupt mode */
8134 		cfg_mode = --intr_mode;
8135 	}
8136 
8137 	/* Perform post initialization setup */
8138 	lpfc_post_init_setup(phba);
8139 
8140 	/* Check if there are static vports to be created. */
8141 	lpfc_create_static_vport(phba);
8142 
8143 	return 0;
8144 
8145 out_disable_intr:
8146 	lpfc_sli4_disable_intr(phba);
8147 out_free_sysfs_attr:
8148 	lpfc_free_sysfs_attr(vport);
8149 out_destroy_shost:
8150 	lpfc_destroy_shost(phba);
8151 out_unset_driver_resource:
8152 	lpfc_unset_driver_resource_phase2(phba);
8153 out_free_iocb_list:
8154 	lpfc_free_iocb_list(phba);
8155 out_unset_driver_resource_s4:
8156 	lpfc_sli4_driver_resource_unset(phba);
8157 out_unset_pci_mem_s4:
8158 	lpfc_sli4_pci_mem_unset(phba);
8159 out_disable_pci_dev:
8160 	lpfc_disable_pci_dev(phba);
8161 	if (shost)
8162 		scsi_host_put(shost);
8163 out_free_phba:
8164 	lpfc_hba_free(phba);
8165 	return error;
8166 }
8167 
8168 /**
8169  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8170  * @pdev: pointer to PCI device
8171  *
8172  * This routine is called from the kernel's PCI subsystem for a device with
8173  * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8174  * removed from the PCI bus, it performs all the necessary cleanup for the
8175  * HBA device to be removed from the PCI subsystem properly.
8176  **/
8177 static void __devexit
8178 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8179 {
8180 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8181 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8182 	struct lpfc_vport **vports;
8183 	struct lpfc_hba *phba = vport->phba;
8184 	int i;
8185 
8186 	/* Mark the device unloading flag */
8187 	spin_lock_irq(&phba->hbalock);
8188 	vport->load_flag |= FC_UNLOADING;
8189 	spin_unlock_irq(&phba->hbalock);
8190 
8191 	/* Free the HBA sysfs attributes */
8192 	lpfc_free_sysfs_attr(vport);
8193 
8194 	/* Release all the vports against this physical port */
8195 	vports = lpfc_create_vport_work_array(phba);
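	/* Slot 0 of the work array holds the physical port, so start at
	 * index 1 to terminate only the NPIV vports.
	 */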
8196 	if (vports != NULL)
8197 		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8198 			fc_vport_terminate(vports[i]->fc_vport);
8199 	lpfc_destroy_vport_work_array(phba, vports);
8200 
8201 	/* Remove FC host and then SCSI host with the physical port */
8202 	fc_remove_host(shost);
8203 	scsi_remove_host(shost);
8204 
8205 	/* Perform cleanup on the physical port */
8206 	lpfc_cleanup(vport);
8207 
8208 	/*
8209 	 * Bring down the SLI Layer. This step disables all interrupts,
8210 	 * clears the rings, discards all mailbox commands, and resets
8211 	 * the HBA FCoE function.
8212 	 */
8213 	lpfc_debugfs_terminate(vport);
8214 	lpfc_sli4_hba_unset(phba);
8215 
8216 	spin_lock_irq(&phba->hbalock);
8217 	list_del_init(&vport->listentry);
8218 	spin_unlock_irq(&phba->hbalock);
8219 
8220 	/* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
8221 	 * buffers are released to their corresponding pools here.
8222 	 */
8223 	lpfc_scsi_free(phba);
8224 	lpfc_sli4_driver_resource_unset(phba);
8225 
8226 	/* Unmap adapter Control and Doorbell registers */
8227 	lpfc_sli4_pci_mem_unset(phba);
8228 
8229 	/* Release PCI resources and disable device's PCI function */
8230 	scsi_host_put(shost);
8231 	lpfc_disable_pci_dev(phba);
8232 
8233 	/* Finally, free the driver's device data structure */
8234 	lpfc_hba_free(phba);
8235 
8236 	return;
8237 }
8238 
8239 /**
8240  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
8241  * @pdev: pointer to PCI device
8242  * @msg: power management message
8243  *
8244  * This routine is called from the kernel's PCI subsystem to support system
8245  * Power Management (PM) for a device with SLI-4 interface spec. When PM
8246  * invokes this method, it quiesces the device by stopping the driver's
8247  * worker thread for the device, turning off the device's interrupt and DMA,
8248  * and bringing the device offline. Note that the driver implements only the
8249  * minimum PM requirements for a power-aware driver: every PM message
8250  * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method is treated as
8251  * SUSPEND, and the driver fully reinitializes its device during the resume()
8252  * method call. Consequently, the driver sets the device to the PCI_D3hot
8253  * state in PCI config space instead of setting it according to the @msg
8254  * provided by the PM.
8255  *
8256  * Return code
8257  * 	0 - driver suspended the device
8258  * 	Error otherwise
8259  **/
8260 static int
8261 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8262 {
8263 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8264 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8265 
8266 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8267 			"0298 PCI device Power Management suspend.\n");
8268 
8269 	/* Bring down the device */
8270 	lpfc_offline_prep(phba);
8271 	lpfc_offline(phba);
8272 	kthread_stop(phba->worker_thread);
8273 
8274 	/* Disable interrupt from device */
8275 	lpfc_sli4_disable_intr(phba);
8276 
8277 	/* Save device state to PCI config space */
8278 	pci_save_state(pdev);
8279 	pci_set_power_state(pdev, PCI_D3hot);
8280 
8281 	return 0;
8282 }
8283 
8284 /**
8285  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
8286  * @pdev: pointer to PCI device
8287  *
8288  * This routine is called from the kernel's PCI subsystem to support system
8289  * Power Management (PM) for a device with SLI-4 interface spec. When PM
8290  * invokes this method, it restores the device's PCI config space state,
8291  * fully reinitializes the device, and brings it online. Note that the
8292  * driver implements only the minimum PM requirements for a power-aware
8293  * driver: every PM message (SUSPEND, HIBERNATE, FREEZE) passed to the
8294  * suspend() method is treated as SUSPEND, and the driver fully
8295  * reinitializes its device during the resume() method call. Consequently,
8296  * the device is set to the PCI_D0 state directly in PCI config space
8297  * before its saved state is restored.
8298  *
8299  * Return code
8300  * 	0 - driver resumed the device
8301  * 	Error otherwise
8302  **/
8303 static int
8304 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8305 {
8306 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8307 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8308 	uint32_t intr_mode;
8309 	int error;
8310 
8311 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8312 			"0292 PCI device Power Management resume.\n");
8313 
8314 	/* Restore device state from PCI config space */
8315 	pci_set_power_state(pdev, PCI_D0);
8316 	pci_restore_state(pdev);
8317 
8318 	/*
8319 	 * As the new kernel behavior of pci_restore_state() API call clears
8320 	 * device saved_state flag, need to save the restored state again.
8321 	 */
8322 	pci_save_state(pdev);
8323 
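	/* Re-enable bus mastering only if it was enabled before suspend */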
8324 	if (pdev->is_busmaster)
8325 		pci_set_master(pdev);
8326 
8327 	 /* Startup the kernel thread for this host adapter. */
8328 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8329 					"lpfc_worker_%d", phba->brd_no);
8330 	if (IS_ERR(phba->worker_thread)) {
8331 		error = PTR_ERR(phba->worker_thread);
8332 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8333 				"0293 PM resume failed to start worker "
8334 				"thread: error=x%x.\n", error);
8335 		return error;
8336 	}
8337 
8338 	/* Configure and enable interrupt */
8339 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8340 	if (intr_mode == LPFC_INTR_ERROR) {
8341 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8342 				"0294 PM resume Failed to enable interrupt\n");
8343 		return -EIO;
8344 	}
8345 	phba->intr_mode = intr_mode;
8346 
8347 	/* Restart HBA and bring it online */
8348 	lpfc_sli_brdrestart(phba);
8349 	lpfc_online(phba);
8350 
8351 	/* Log the current active interrupt mode */
8352 	lpfc_log_intr_mode(phba, phba->intr_mode);
8353 
8354 	return 0;
8355 }
8356 
8357 /**
8358  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8359  * @pdev: pointer to PCI device.
8360  * @state: the current PCI connection state.
8361  *
8362  * This routine is called from the PCI subsystem for error handling on a
8363  * device with SLI-4 interface spec, after a PCI bus error affecting this
8364  * device has been detected. When this function is invoked, it needs to
8365  * stop all I/O and interrupts to the device. Once that is done, it returns
8366  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper
8367  * recovery as desired.
8368  *
8369  * Return codes
8370  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8371  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8372  **/
8373 static pci_ers_result_t
8374 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8375 {
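	/* No device quiescing is currently done here for SLI-4; simply
	 * request a slot reset and let the recovery callbacks proceed.
	 */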
8376 	return PCI_ERS_RESULT_NEED_RESET;
8377 }
8378 
8379 /**
8380  * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
8381  * @pdev: pointer to PCI device.
8382  *
8383  * This routine is called from the PCI subsystem for error handling on a
8384  * device with SLI-4 interface spec. It is called after the PCI bus has been
8385  * reset to restart the PCI card from scratch, as if from a cold-boot. During
8386  * PCI subsystem error recovery, after the driver returns
8387  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8388  * recovery and then call this routine before calling the .resume method to
8389  * recover the device. This function will initialize the HBA device, enable
8390  * the interrupt, but it will just put the HBA to offline state without
8391  * passing any I/O traffic.
8392  *
8393  * Return codes
8394  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8395  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8396  **/
8397 static pci_ers_result_t
8398 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8399 {
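	/* No reinitialization is currently done here for SLI-4; simply
	 * report the slot as recovered so recovery can move on to resume.
	 */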
8400 	return PCI_ERS_RESULT_RECOVERED;
8401 }
8402 
8403 /**
8404  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
8405  * @pdev: pointer to PCI device
8406  *
8407  * This routine is called from the PCI subsystem for error handling on a
8408  * device with SLI-4 interface spec. It is called when kernel error recovery
8409  * tells the lpfc driver that it is OK to resume normal PCI operation after
8410  * PCI bus error recovery. After this call, traffic can start to flow from
8411  * this device again.
8412  **/
8413 static void
8414 lpfc_io_resume_s4(struct pci_dev *pdev)
8415 {
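	/* Currently a no-op for SLI-4 devices */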
8416 	return;
8417 }
8418 
8419 /**
8420  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
8421  * @pdev: pointer to PCI device
8422  * @pid: pointer to PCI device identifier
8423  *
8424  * This routine is to be registered to the kernel's PCI subsystem. When an
8425  * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
8426  * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
8427  * looks at the PCI device-specific information of the device to see if the
8428  * driver can support this kind of device. If the match is
8429  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
8430  * do all the initialization that it needs to do to handle the HBA device
8431  * properly.
8432  *
8433  * Return code
8434  * 	0 - driver can claim the device
8435  * 	negative value - driver can not claim the device
8436  **/
8437 static int __devinit
8438 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
8439 {
8440 	int rc;
8441 	struct lpfc_sli_intf intf;
8442 
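	/* Read the SLI interface register from PCI config space; a valid
	 * register reporting SLI revision 4 selects the SLI-4 probe path,
	 * otherwise the device is probed as SLI-3.
	 */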
8443 	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
8444 		return -ENODEV;
8445 
8446 	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
8447 	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
8448 		rc = lpfc_pci_probe_one_s4(pdev, pid);
8449 	else
8450 		rc = lpfc_pci_probe_one_s3(pdev, pid);
8451 
8452 	return rc;
8453 }
8454 
8455 /**
8456  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
8457  * @pdev: pointer to PCI device
8458  *
8459  * This routine is to be registered to the kernel's PCI subsystem. When an
8460  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
8461  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
8462  * remove routine, which will perform all the necessary cleanup for the
8463  * device to be removed from the PCI subsystem properly.
8464  **/
8465 static void __devexit
8466 lpfc_pci_remove_one(struct pci_dev *pdev)
8467 {
8468 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8469 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8470 
8471 	switch (phba->pci_dev_grp) {
8472 	case LPFC_PCI_DEV_LP:
8473 		lpfc_pci_remove_one_s3(pdev);
8474 		break;
8475 	case LPFC_PCI_DEV_OC:
8476 		lpfc_pci_remove_one_s4(pdev);
8477 		break;
8478 	default:
8479 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8480 				"1424 Invalid PCI device group: 0x%x\n",
8481 				phba->pci_dev_grp);
8482 		break;
8483 	}
8484 	return;
8485 }
8486 
8487 /**
8488  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
8489  * @pdev: pointer to PCI device
8490  * @msg: power management message
8491  *
8492  * This routine is to be registered to the kernel's PCI subsystem to support
8493  * system Power Management (PM). When PM invokes this method, it dispatches
8494  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
8495  * suspend the device.
8496  *
8497  * Return code
8498  * 	0 - driver suspended the device
8499  * 	Error otherwise
8500  **/
8501 static int
8502 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
8503 {
8504 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8505 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8506 	int rc = -ENODEV;
8507 
8508 	switch (phba->pci_dev_grp) {
8509 	case LPFC_PCI_DEV_LP:
8510 		rc = lpfc_pci_suspend_one_s3(pdev, msg);
8511 		break;
8512 	case LPFC_PCI_DEV_OC:
8513 		rc = lpfc_pci_suspend_one_s4(pdev, msg);
8514 		break;
8515 	default:
8516 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8517 				"1425 Invalid PCI device group: 0x%x\n",
8518 				phba->pci_dev_grp);
8519 		break;
8520 	}
8521 	return rc;
8522 }
8523 
8524 /**
8525  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
8526  * @pdev: pointer to PCI device
8527  *
8528  * This routine is to be registered to the kernel's PCI subsystem to support
8529  * system Power Management (PM). When PM invokes this method, it dispatches
8530  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
8531  * resume the device.
8532  *
8533  * Return code
8534  * 	0 - driver resumed the device
8535  * 	Error otherwise
8536  **/
8537 static int
8538 lpfc_pci_resume_one(struct pci_dev *pdev)
8539 {
8540 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8541 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8542 	int rc = -ENODEV;
8543 
8544 	switch (phba->pci_dev_grp) {
8545 	case LPFC_PCI_DEV_LP:
8546 		rc = lpfc_pci_resume_one_s3(pdev);
8547 		break;
8548 	case LPFC_PCI_DEV_OC:
8549 		rc = lpfc_pci_resume_one_s4(pdev);
8550 		break;
8551 	default:
8552 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8553 				"1426 Invalid PCI device group: 0x%x\n",
8554 				phba->pci_dev_grp);
8555 		break;
8556 	}
8557 	return rc;
8558 }
8559 
8560 /**
8561  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
8562  * @pdev: pointer to PCI device.
8563  * @state: the current PCI connection state.
8564  *
8565  * This routine is registered to the PCI subsystem for error handling. This
8566  * function is called by the PCI subsystem after a PCI bus error affecting
8567  * this device has been detected. When this routine is invoked, it dispatches
8568  * the action to the proper SLI-3 or SLI-4 device error detected handling
8569  * routine, which will perform the proper error detected operation.
8570  *
8571  * Return codes
8572  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8573  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8574  **/
8575 static pci_ers_result_t
8576 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8577 {
8578 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8579 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8580 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8581 
8582 	switch (phba->pci_dev_grp) {
8583 	case LPFC_PCI_DEV_LP:
8584 		rc = lpfc_io_error_detected_s3(pdev, state);
8585 		break;
8586 	case LPFC_PCI_DEV_OC:
8587 		rc = lpfc_io_error_detected_s4(pdev, state);
8588 		break;
8589 	default:
8590 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8591 				"1427 Invalid PCI device group: 0x%x\n",
8592 				phba->pci_dev_grp);
8593 		break;
8594 	}
8595 	return rc;
8596 }
8597 
8598 /**
8599  * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
8600  * @pdev: pointer to PCI device.
8601  *
8602  * This routine is registered to the PCI subsystem for error handling. This
8603  * function is called after PCI bus has been reset to restart the PCI card
8604  * from scratch, as if from a cold-boot. When this routine is invoked, it
8605  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
8606  * routine, which will perform the proper device reset.
8607  *
8608  * Return codes
8609  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
8610  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8611  **/
8612 static pci_ers_result_t
8613 lpfc_io_slot_reset(struct pci_dev *pdev)
8614 {
8615 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8616 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8617 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8618 
8619 	switch (phba->pci_dev_grp) {
8620 	case LPFC_PCI_DEV_LP:
8621 		rc = lpfc_io_slot_reset_s3(pdev);
8622 		break;
8623 	case LPFC_PCI_DEV_OC:
8624 		rc = lpfc_io_slot_reset_s4(pdev);
8625 		break;
8626 	default:
8627 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8628 				"1428 Invalid PCI device group: 0x%x\n",
8629 				phba->pci_dev_grp);
8630 		break;
8631 	}
8632 	return rc;
8633 }
8634 
8635 /**
8636  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
8637  * @pdev: pointer to PCI device
8638  *
8639  * This routine is registered to the PCI subsystem for error handling. It
8640  * is called when kernel error recovery tells the lpfc driver that it is
8641  * OK to resume normal PCI operation after PCI bus error recovery. When
8642  * this routine is invoked, it dispatches the action to the proper SLI-3
8643  * or SLI-4 device io_resume routine, which will resume the device operation.
8644  **/
8645 static void
8646 lpfc_io_resume(struct pci_dev *pdev)
8647 {
8648 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
8649 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8650 
8651 	switch (phba->pci_dev_grp) {
8652 	case LPFC_PCI_DEV_LP:
8653 		lpfc_io_resume_s3(pdev);
8654 		break;
8655 	case LPFC_PCI_DEV_OC:
8656 		lpfc_io_resume_s4(pdev);
8657 		break;
8658 	default:
8659 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8660 				"1429 Invalid PCI device group: 0x%x\n",
8661 				phba->pci_dev_grp);
8662 		break;
8663 	}
8664 	return;
8665 }
8666 
8667 static struct pci_device_id lpfc_id_table[] = {
8668 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
8669 		PCI_ANY_ID, PCI_ANY_ID, },
8670 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
8671 		PCI_ANY_ID, PCI_ANY_ID, },
8672 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
8673 		PCI_ANY_ID, PCI_ANY_ID, },
8674 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
8675 		PCI_ANY_ID, PCI_ANY_ID, },
8676 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
8677 		PCI_ANY_ID, PCI_ANY_ID, },
8678 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
8679 		PCI_ANY_ID, PCI_ANY_ID, },
8680 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
8681 		PCI_ANY_ID, PCI_ANY_ID, },
8682 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
8683 		PCI_ANY_ID, PCI_ANY_ID, },
8684 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
8685 		PCI_ANY_ID, PCI_ANY_ID, },
8686 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
8687 		PCI_ANY_ID, PCI_ANY_ID, },
8688 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
8689 		PCI_ANY_ID, PCI_ANY_ID, },
8690 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
8691 		PCI_ANY_ID, PCI_ANY_ID, },
8692 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
8693 		PCI_ANY_ID, PCI_ANY_ID, },
8694 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
8695 		PCI_ANY_ID, PCI_ANY_ID, },
8696 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
8697 		PCI_ANY_ID, PCI_ANY_ID, },
8698 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
8699 		PCI_ANY_ID, PCI_ANY_ID, },
8700 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
8701 		PCI_ANY_ID, PCI_ANY_ID, },
8702 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
8703 		PCI_ANY_ID, PCI_ANY_ID, },
8704 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
8705 		PCI_ANY_ID, PCI_ANY_ID, },
8706 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
8707 		PCI_ANY_ID, PCI_ANY_ID, },
8708 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
8709 		PCI_ANY_ID, PCI_ANY_ID, },
8710 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
8711 		PCI_ANY_ID, PCI_ANY_ID, },
8712 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
8713 		PCI_ANY_ID, PCI_ANY_ID, },
8714 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
8715 		PCI_ANY_ID, PCI_ANY_ID, },
8716 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
8717 		PCI_ANY_ID, PCI_ANY_ID, },
8718 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
8719 		PCI_ANY_ID, PCI_ANY_ID, },
8720 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
8721 		PCI_ANY_ID, PCI_ANY_ID, },
8722 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
8723 		PCI_ANY_ID, PCI_ANY_ID, },
8724 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
8725 		PCI_ANY_ID, PCI_ANY_ID, },
8726 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
8727 		PCI_ANY_ID, PCI_ANY_ID, },
8728 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
8729 		PCI_ANY_ID, PCI_ANY_ID, },
8730 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
8731 		PCI_ANY_ID, PCI_ANY_ID, },
8732 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
8733 		PCI_ANY_ID, PCI_ANY_ID, },
8734 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
8735 		PCI_ANY_ID, PCI_ANY_ID, },
8736 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
8737 		PCI_ANY_ID, PCI_ANY_ID, },
8738 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
8739 		PCI_ANY_ID, PCI_ANY_ID, },
8740 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
8741 		PCI_ANY_ID, PCI_ANY_ID, },
8742 	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
8743 		PCI_ANY_ID, PCI_ANY_ID, },
8744 	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
8745 		PCI_ANY_ID, PCI_ANY_ID, },
8746 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8747 		PCI_ANY_ID, PCI_ANY_ID, },
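	/* Terminating entry - required by the PCI core */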
8748 	{ 0 }
8749 };
8750 
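/* Export the id table so device hotplug/module tools can autoload lpfc */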
8751 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
8752 
8753 static struct pci_error_handlers lpfc_err_handler = {
8754 	.error_detected = lpfc_io_error_detected,
8755 	.slot_reset = lpfc_io_slot_reset,
8756 	.resume = lpfc_io_resume,
8757 };
8758 
8759 static struct pci_driver lpfc_driver = {
8760 	.name		= LPFC_DRIVER_NAME,
8761 	.id_table	= lpfc_id_table,
8762 	.probe		= lpfc_pci_probe_one,
8763 	.remove		= __devexit_p(lpfc_pci_remove_one),
8764 	.suspend        = lpfc_pci_suspend_one,
8765 	.resume		= lpfc_pci_resume_one,
8766 	.err_handler    = &lpfc_err_handler,
8767 };
8768 
8769 /**
8770  * lpfc_init - lpfc module initialization routine
8771  *
8772  * This routine is to be invoked when the lpfc module is loaded into the
8773  * kernel. The special kernel macro module_init() is used to indicate the
8774  * role of this routine to the kernel as lpfc module entry point.
8775  *
8776  * Return codes
8777  *   0 - successful
8778  *   -ENOMEM - FC attach transport failed
8779  *   all others - failed
8780  */
8781 static int __init
8782 lpfc_init(void)
8783 {
8784 	int error = 0;
8785 
8786 	printk(LPFC_MODULE_DESC "\n");
8787 	printk(LPFC_COPYRIGHT "\n");
8788 
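	/* Hook up vport create/delete handlers only when NPIV is enabled */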
8789 	if (lpfc_enable_npiv) {
8790 		lpfc_transport_functions.vport_create = lpfc_vport_create;
8791 		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
8792 	}
8793 	lpfc_transport_template =
8794 				fc_attach_transport(&lpfc_transport_functions);
8795 	if (lpfc_transport_template == NULL)
8796 		return -ENOMEM;
8797 	if (lpfc_enable_npiv) {
8798 		lpfc_vport_transport_template =
8799 			fc_attach_transport(&lpfc_vport_transport_functions);
8800 		if (lpfc_vport_transport_template == NULL) {
8801 			fc_release_transport(lpfc_transport_template);
8802 			return -ENOMEM;
8803 		}
8804 	}
8805 	error = pci_register_driver(&lpfc_driver);
8806 	if (error) {
8807 		fc_release_transport(lpfc_transport_template);
8808 		if (lpfc_enable_npiv)
8809 			fc_release_transport(lpfc_vport_transport_template);
8810 	}
8811 
8812 	return error;
8813 }
8814 
8815 /**
8816  * lpfc_exit - lpfc module removal routine
8817  *
8818  * This routine is invoked when the lpfc module is removed from the kernel.
8819  * The special kernel macro module_exit() is used to indicate the role of
8820  * this routine to the kernel as lpfc module exit point.
8821  */
8822 static void __exit
8823 lpfc_exit(void)
8824 {
8825 	pci_unregister_driver(&lpfc_driver);
8826 	fc_release_transport(lpfc_transport_template);
8827 	if (lpfc_enable_npiv)
8828 		fc_release_transport(lpfc_vport_transport_template);
8829 	if (_dump_buf_data) {
8830 		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
8831 				"_dump_buf_data at 0x%p\n",
8832 				(1L << _dump_buf_data_order), _dump_buf_data);
8833 		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8834 	}
8835 
8836 	if (_dump_buf_dif) {
8837 		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
8838 				"_dump_buf_dif at 0x%p\n",
8839 				(1L << _dump_buf_dif_order), _dump_buf_dif);
8840 		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8841 	}
8842 }
8843 
8844 module_init(lpfc_init);
8845 module_exit(lpfc_exit);
8846 MODULE_LICENSE("GPL");
8847 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8848 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8849 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
8850