/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2026 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/sched/clock.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static void lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
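
/*
 * Editor's illustrative sketch (not driver code): the synchronous mailbox
 * pattern used throughout this file. A command buffer is taken from the
 * mailbox mempool, built, issued with MBX_POLL, and returned to the pool on
 * every path:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);			// build the command
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *		return -ERESTART;			// caller may reset/retry
 *	}
 *	// ... consume pmb->u.mb response fields ...
 *	mempool_free(pmb, phba->mbox_mem_pool);
 */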

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, the internal async event support flag is set to 1;
 * otherwise, it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
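
/*
 * Editor's worked example (illustrative only): with ver=10, rev=2, lev=3,
 * dist=1 and num=4, dist_char[1] is 'a', so the snprintf() above formats
 * "10.23a4". When dist == 3 and num == 0 the distribution suffix is omitted,
 * giving "10.23".
 */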

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname of a vport
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/*
	 * If the node name is empty or a soft name exists, copy the
	 * service params name; otherwise use the fc name.
	 */
	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
		memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name))) {
		vport->vport_flag |= FAWWPN_PARAM_CHG;

		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
			if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
				phba->sli4_hba.fawwpn_flag &=
						~LPFC_FAWWPN_FABRIC;
			lpfc_printf_log(phba, KERN_INFO,
					LOG_SLI | LOG_DISCOVERY | LOG_ELS,
					"2701 FA-PWWN change WWPN from %llx to "
					"%llx: vflag x%x fawwpn_flag x%x\n",
					wwn_to_u64(vport->fc_portname.u.wwn),
					wwn_to_u64
					   (vport->fc_sparam.portName.u.wwn),
					vport->vport_flag,
					phba->sli4_hba.fawwpn_flag);
			memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
		}
	}

	if (vport->fc_portname.u.wwn[0] == 0)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}
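
/*
 * Editor's note (illustrative summary of the rules above): the vport names
 * and the service parameters are mirrored in both directions:
 *
 *	fc_nodename empty -> fc_nodename = fc_sparam.nodeName
 *	fc_nodename set   -> fc_sparam.nodeName = fc_nodename
 *	fc_portname empty -> fc_portname = fc_sparam.portName
 *	fc_portname set   -> fc_sparam.portName = fc_portname
 *
 * A non-empty fc_portname that differs from the service parameters marks
 * FAWWPN_PARAM_CHG so the prior login can be unregistered.
 */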

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If CONFIG_PORT completed correctly, the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		return -EIO;
	}

	mp = pmb->ctx_buf;

	/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
	 * longer needed.  Prevent unintended ctx_buf access as the mbox is
	 * reused.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	clear_bit(HBA_ERATT_HANDLED, &phba->hba_flag);

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + secs_to_jiffies(timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + secs_to_jiffies(phba->eratt_poll_interval));

	if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * @phba: Pointer to HBA context object.
 *
 * This is called to refresh driver copy of dynamic fields from the
 * common_get_sli4_parameters descriptor.
 **/
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;
	int length, rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	phba->sli4_hba.pc_sli4_params.mi_cap =
		bf_get(cfg_mi_ver, mbx_sli4_parameters);

	/* Are we forcing MI off via module parameter? */
	if (phba->cfg_enable_mi)
		phba->sli4_hba.pc_sli4_params.mi_ver =
			bf_get(cfg_mi_ver, mbx_sli4_parameters);
	else
		phba->sli4_hba.pc_sli4_params.mi_ver = 0;

	phba->sli4_hba.pc_sli4_params.cmf =
			bf_get(cfg_cmf, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.pls =
			bf_get(cfg_pvl, mbx_sli4_parameters);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
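
/*
 * Editor's illustrative sketch (not driver code): a typical caller just
 * checks the return value and logs if the parameters could not be
 * refreshed:
 *
 *	if (lpfc_sli4_refresh_params(phba))
 *		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 *				"xxxx SLI4 params refresh failed\n");
 *
 * The "xxxx" message number is a placeholder; real driver messages use
 * fixed four-digit identifiers.
 */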

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *              0 - success
 *              Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_128G) &&
	     !(phba->lmt & LMT_128Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
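
/*
 * Editor's worked example (illustrative only): the guard above rejects any
 * user-requested speed the adapter cannot do. For instance, with
 * cfg_link_speed == LPFC_USER_LINK_SPEED_32G on a board whose link-speed
 * mask (phba->lmt) lacks LMT_32Gb, the whole condition is true, message
 * 1302 is logged, and the speed falls back to LPFC_USER_LINK_SPEED_AUTO
 * before INIT_LINK is built.
 */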

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
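
/*
 * Editor's illustrative sketch (not driver code): the vport work-array idiom
 * used above and again in lpfc_hb_timeout_handler(). The array is a
 * NULL-terminated snapshot of the active vports and must always be released:
 *
 *	struct lpfc_vport **vports = lpfc_create_vport_work_array(phba);
 *	int i;
 *
 *	if (vports != NULL)
 *		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 *			do_something(vports[i]);	// hypothetical helper
 *	lpfc_destroy_vport_work_array(phba, vports);
 */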

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
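
/*
 * Editor's note (illustrative only): container_of() above recovers the
 * structure that embeds the generic cq_event. Schematically, assuming the
 * layouts in the lpfc headers:
 *
 *	struct lpfc_iocbq {
 *		...
 *		struct lpfc_cq_event cq_event;	// embedded member
 *		...
 *	};
 *
 *	// Given a pointer to the member, get back the parent object:
 *	rspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event);
 */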

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
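
/*
 * Editor's illustrative sketch (not driver code): the splice-under-lock
 * pattern used above keeps the lock hold time short. The shared list is
 * emptied onto a private list while locked, then walked lock-free:
 *
 *	LIST_HEAD(private);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&shared_list, &private);  // shared_list now empty
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	list_for_each_entry_safe(mp, next_mp, &private, list) {
 *		list_del(&mp->list);
 *		free_one(mp);			// hypothetical helper
 *	}
 *
 * Here "shared_list" and "free_one" are placeholders for illustration.
 */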

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or
			 * DOA. Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bring down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
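
/*
 * Editor's note (illustrative only): lpfc_hba_down_post is a per-revision
 * function pointer. During setup the driver points it at the SLI3 or SLI4
 * implementation, roughly:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *	else
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
 *
 * The exact assignment site lives in the driver's API-table setup code,
 * not in this excerpt.
 */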

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = timer_container_of(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
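
/*
 * Editor's illustrative sketch (not driver code): the timer idiom used
 * above. The timer is embedded in struct lpfc_hba, armed with mod_timer(),
 * and the callback recovers its owner with timer_container_of():
 *
 *	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
 *	mod_timer(&phba->hb_tmofunc,
 *		  jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
 *
 *	static void lpfc_hb_timeout(struct timer_list *t)
 *	{
 *		struct lpfc_hba *phba = timer_container_of(phba, t,
 *							   hb_tmofunc);
 *		...
 *	}
 */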

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;

	phba = timer_container_of(phba, t, rrq_tmr);
	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
		return;
	}

	set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !test_bit(FC_UNLOADING, &phba->pport->load_flag))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
	return;
}

/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-eq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *eq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		eq = hdwq->hba_eq;

		/* Skip if we've already handled this eq's primary CPU */
		if (eq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
			eq->poll_mode = LPFC_QUEUE_WORK;
		else
			eq->poll_mode = LPFC_THREADED_IRQ;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
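
/*
 * Editor's worked example (illustrative only) for the arithmetic above:
 * with diff_wall = 1000 and diff_idle = 900 (same time units),
 * busy_time = 100, so idle_percent = 100 - (100 * 100 / 1000) = 90.
 * Since 90 >= 15, the EQ stays in LPFC_THREADED_IRQ mode; a busy CPU with
 * idle_percent below 15 would switch to LPFC_QUEUE_WORK instead.
 */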

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax ||
	    test_bit(FC_UNLOADING, &phba->pport->load_flag))
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
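
/*
 * Editor's worked example (illustrative only) for the usdelay scaling above:
 * an EQ whose per-cpu interrupt count icnt reached 8192 in the sampling
 * window gives (8192 >> 10) = 8 steps, i.e. usdelay = 8 * LPFC_EQ_DELAY_STEP,
 * clamped to LPFC_MAX_AUTO_EQ_DELAY. An EQ with no delay enabled gets
 * usdelay = 0, restoring immediate interrupts.
 */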

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
1446  * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
1447  **/
1448 int
lpfc_issue_hb_mbox(struct lpfc_hba * phba)1449 lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1450 {
1451 	LPFC_MBOXQ_t *pmboxq;
1452 	int retval;
1453 
1454 	/* Is a Heartbeat mbox already in progress */
1455 	if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
1456 		return 0;
1457 
1458 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1459 	if (!pmboxq)
1460 		return -ENOMEM;
1461 
1462 	lpfc_heart_beat(phba, pmboxq);
1463 	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1464 	pmboxq->vport = phba->pport;
1465 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1466 
1467 	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1468 		mempool_free(pmboxq, phba->mbox_mem_pool);
1469 		return -ENXIO;
1470 	}
1471 	set_bit(HBA_HBEAT_INP, &phba->hba_flag);
1472 
1473 	return 0;
1474 }
1475 
1476 /**
1477  * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1478  * @phba: pointer to lpfc hba data structure.
1479  *
1480  * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
1481  * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
1482  * of the value of lpfc_enable_hba_heartbeat.
1483  * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
1484  * try to issue a MBX_HEARTBEAT mbox command.
1485  **/
1486 void
lpfc_issue_hb_tmo(struct lpfc_hba * phba)1487 lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1488 {
1489 	if (phba->cfg_enable_hba_heartbeat)
1490 		return;
1491 	set_bit(HBA_HBEAT_TMO, &phba->hba_flag);
1492 }
1493 
1494 /**
1495  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1496  * @phba: pointer to lpfc hba data structure.
1497  *
1498  * This is the actual HBA-timer timeout handler to be invoked by the worker
1499  * thread whenever the HBA timer fired and HBA-timeout event posted. This
1500  * handler performs any periodic operations needed for the device. If such
1501  * periodic event has already been attended to either in the interrupt handler
1502  * or by processing slow-ring or fast-ring events within the HBA-timer
1503  * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1504  * the timer for the next timeout period. If lpfc heart-beat mailbox command
1505  * is configured and there is no heart-beat mailbox command outstanding, a
1506  * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1507  * has been a heart-beat mailbox command outstanding, the HBA shall be put
1508  * to offline.
1509  **/
1510 void
lpfc_hb_timeout_handler(struct lpfc_hba * phba)1511 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1512 {
1513 	struct lpfc_vport **vports;
1514 	struct lpfc_dmabuf *buf_ptr;
1515 	int retval = 0;
1516 	int i, tmo;
1517 	struct lpfc_sli *psli = &phba->sli;
1518 	LIST_HEAD(completions);
1519 
1520 	if (phba->cfg_xri_rebalancing) {
1521 		/* Multi-XRI pools handler */
1522 		lpfc_hb_mxp_handler(phba);
1523 	}
1524 
1525 	vports = lpfc_create_vport_work_array(phba);
1526 	if (vports != NULL)
1527 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1528 			lpfc_rcv_seq_check_edtov(vports[i]);
1529 			lpfc_fdmi_change_check(vports[i]);
1530 		}
1531 	lpfc_destroy_vport_work_array(phba, vports);
1532 
1533 	if (phba->link_state == LPFC_HBA_ERROR ||
1534 	    test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
1535 	    test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
1536 		return;
1537 
1538 	if (phba->elsbuf_cnt &&
1539 		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1540 		spin_lock_irq(&phba->hbalock);
1541 		list_splice_init(&phba->elsbuf, &completions);
1542 		phba->elsbuf_cnt = 0;
1543 		phba->elsbuf_prev_cnt = 0;
1544 		spin_unlock_irq(&phba->hbalock);
1545 
1546 		while (!list_empty(&completions)) {
1547 			list_remove_head(&completions, buf_ptr,
1548 				struct lpfc_dmabuf, list);
1549 			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1550 			kfree(buf_ptr);
1551 		}
1552 	}
1553 	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1554 
1555 	/* If there is no heart beat outstanding, issue a heartbeat command */
1556 	if (phba->cfg_enable_hba_heartbeat) {
1557 		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1558 		spin_lock_irq(&phba->pport->work_port_lock);
1559 		if (time_after(phba->last_completion_time +
1560 				secs_to_jiffies(LPFC_HB_MBOX_INTERVAL),
1561 				jiffies)) {
1562 			spin_unlock_irq(&phba->pport->work_port_lock);
1563 			if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
1564 				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1565 			else
1566 				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1567 			goto out;
1568 		}
1569 		spin_unlock_irq(&phba->pport->work_port_lock);
1570 
1571 		/* Check if a MBX_HEARTBEAT is already in progress */
1572 		if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) {
1573 			/*
1574 			 * If heart beat timeout called with HBA_HBEAT_INP set
1575 			 * we need to give the hb mailbox cmd a chance to
1576 			 * complete or TMO.
1577 			 */
1578 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1579 				"0459 Adapter heartbeat still outstanding: "
1580 				"last compl time was %d ms.\n",
1581 				jiffies_to_msecs(jiffies
1582 					 - phba->last_completion_time));
1583 			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1584 		} else {
1585 			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1586 				(list_empty(&psli->mboxq))) {
1587 
1588 				retval = lpfc_issue_hb_mbox(phba);
1589 				if (retval) {
1590 					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1591 					goto out;
1592 				}
1593 				phba->skipped_hb = 0;
1594 			} else if (time_before_eq(phba->last_completion_time,
1595 					phba->skipped_hb)) {
1596 				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1597 					"2857 Last completion time not "
1598 					"updated in %d ms\n",
1599 					jiffies_to_msecs(jiffies
1600 						 - phba->last_completion_time));
1601 			} else
1602 				phba->skipped_hb = jiffies;
1603 
1604 			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1605 			goto out;
1606 		}
1607 	} else {
1608 		/* Check to see if we want to force a MBX_HEARTBEAT */
1609 		if (test_bit(HBA_HBEAT_TMO, &phba->hba_flag)) {
1610 			retval = lpfc_issue_hb_mbox(phba);
1611 			if (retval)
1612 				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1613 			else
1614 				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1615 			goto out;
1616 		}
1617 		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1618 	}
1619 out:
1620 	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1621 }
1622 
1623 /**
1624  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1625  * @phba: pointer to lpfc hba data structure.
1626  *
1627  * This routine is called to bring the HBA offline when HBA hardware error
1628  * other than Port Error 6 has been detected.
1629  **/
1630 static void
1631 lpfc_offline_eratt(struct lpfc_hba *phba)
1632 {
1633 	struct lpfc_sli   *psli = &phba->sli;
1634 
1635 	spin_lock_irq(&phba->hbalock);
1636 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1637 	spin_unlock_irq(&phba->hbalock);
1638 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1639 
1640 	lpfc_offline(phba);
1641 	lpfc_reset_barrier(phba);
1642 	spin_lock_irq(&phba->hbalock);
1643 	lpfc_sli_brdreset(phba);
1644 	spin_unlock_irq(&phba->hbalock);
1645 	lpfc_hba_down_post(phba);
1646 	lpfc_sli_brdready(phba, HS_MBRDY);
1647 	lpfc_unblock_mgmt_io(phba);
1648 	phba->link_state = LPFC_HBA_ERROR;
1649 	return;
1650 }
1651 
1652 /**
1653  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1654  * @phba: pointer to lpfc hba data structure.
1655  *
1656  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1657  * other than Port Error 6 has been detected.
1658  **/
1659 void
1660 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1661 {
1662 	spin_lock_irq(&phba->hbalock);
1663 	if (phba->link_state == LPFC_HBA_ERROR &&
1664 		test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
1665 		spin_unlock_irq(&phba->hbalock);
1666 		return;
1667 	}
1668 	phba->link_state = LPFC_HBA_ERROR;
1669 	spin_unlock_irq(&phba->hbalock);
1670 
1671 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1672 	lpfc_sli_flush_io_rings(phba);
1673 	lpfc_offline(phba);
1674 	lpfc_hba_down_post(phba);
1675 	lpfc_unblock_mgmt_io(phba);
1676 }
1677 
1678 /**
1679  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1680  * @phba: pointer to lpfc hba data structure.
1681  *
1682  * This routine is invoked to handle the deferred HBA hardware error
1683  * conditions. This type of error is indicated by HBA by setting ER1
1684  * and another ER bit in the host status register. The driver will
1685  * wait until the ER1 bit clears before handling the error condition.
1686  **/
1687 static void
1688 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1689 {
1690 	uint32_t old_host_status = phba->work_hs;
1691 	struct lpfc_sli *psli = &phba->sli;
1692 
1693 	/* If the pci channel is offline, ignore possible errors,
1694 	 * since we cannot communicate with the pci card anyway.
1695 	 */
1696 	if (pci_channel_offline(phba->pcidev)) {
1697 		clear_bit(DEFER_ERATT, &phba->hba_flag);
1698 		return;
1699 	}
1700 
1701 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1702 			"0479 Deferred Adapter Hardware Error "
1703 			"Data: x%x x%x x%x\n",
1704 			phba->work_hs, phba->work_status[0],
1705 			phba->work_status[1]);
1706 
1707 	spin_lock_irq(&phba->hbalock);
1708 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1709 	spin_unlock_irq(&phba->hbalock);
1710 
1711 
1712 	/*
1713 	 * The firmware stops when it triggers an error attention, which can
1714 	 * cause I/Os to be dropped. Error out the I/Os on the txcmplq and
1715 	 * let the SCSI layer retry them after the link is re-established.
1716 	 */
1717 	lpfc_sli_abort_fcp_rings(phba);
1718 
1719 	/*
1720 	 * There was a firmware error. Take the hba offline and then
1721 	 * attempt to restart it.
1722 	 */
1723 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1724 	lpfc_offline(phba);
1725 
1726 	/* Wait for the ER1 bit to clear. */
1727 	while (phba->work_hs & HS_FFER1) {
1728 		msleep(100);
1729 		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1730 			phba->work_hs = UNPLUG_ERR;
1731 			break;
1732 		}
1733 		/* If driver is unloading let the worker thread continue */
1734 		if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
1735 			phba->work_hs = 0;
1736 			break;
1737 		}
1738 	}
1739 
1740 	/*
1741 	 * This is to protect against a race condition in which the first
1742 	 * write to the host attention register clears the host status
1743 	 * register.
1744 	 */
1745 	if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag))
1746 		phba->work_hs = old_host_status & ~HS_FFER1;
1747 
1748 	clear_bit(DEFER_ERATT, &phba->hba_flag);
1749 	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1750 	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1751 }
1752 
1753 static void
1754 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1755 {
1756 	struct lpfc_board_event_header board_event;
1757 	struct Scsi_Host *shost;
1758 
1759 	board_event.event_type = FC_REG_BOARD_EVENT;
1760 	board_event.subcategory = LPFC_EVENT_PORTINTERR;
1761 	shost = lpfc_shost_from_vport(phba->pport);
1762 	fc_host_post_vendor_event(shost, fc_get_event_number(),
1763 				  sizeof(board_event),
1764 				  (char *) &board_event,
1765 				  LPFC_NL_VENDOR_ID);
1766 }
1767 
1768 /**
1769  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1770  * @phba: pointer to lpfc hba data structure.
1771  *
1772  * This routine is invoked to handle the following HBA hardware error
1773  * conditions:
1774  * 1 - HBA error attention interrupt
1775  * 2 - DMA ring index out of range
1776  * 3 - Mailbox command came back as unknown
1777  **/
1778 static void
1779 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1780 {
1781 	struct lpfc_vport *vport = phba->pport;
1782 	struct lpfc_sli   *psli = &phba->sli;
1783 	uint32_t event_data;
1784 	unsigned long temperature;
1785 	struct temp_event temp_event_data;
1786 	struct Scsi_Host  *shost;
1787 
1788 	/* If the pci channel is offline, ignore possible errors,
1789 	 * since we cannot communicate with the pci card anyway.
1790 	 */
1791 	if (pci_channel_offline(phba->pcidev)) {
1792 		clear_bit(DEFER_ERATT, &phba->hba_flag);
1793 		return;
1794 	}
1795 
1796 	/* If resets are disabled then leave the HBA alone and return */
1797 	if (!phba->cfg_enable_hba_reset)
1798 		return;
1799 
1800 	/* Send an internal error event to mgmt application */
1801 	lpfc_board_errevt_to_mgmt(phba);
1802 
1803 	if (test_bit(DEFER_ERATT, &phba->hba_flag))
1804 		lpfc_handle_deferred_eratt(phba);
1805 
1806 	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1807 		if (phba->work_hs & HS_FFER6)
1808 			/* Re-establishing Link */
1809 			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1810 					"1301 Re-establishing Link "
1811 					"Data: x%x x%x x%x\n",
1812 					phba->work_hs, phba->work_status[0],
1813 					phba->work_status[1]);
1814 		if (phba->work_hs & HS_FFER8)
1815 			/* Device Zeroization */
1816 			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1817 					"2861 Host Authentication device "
1818 					"zeroization Data:x%x x%x x%x\n",
1819 					phba->work_hs, phba->work_status[0],
1820 					phba->work_status[1]);
1821 
1822 		spin_lock_irq(&phba->hbalock);
1823 		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1824 		spin_unlock_irq(&phba->hbalock);
1825 
1826 		/*
1827 		 * The firmware stops when it triggers an error attention with
1828 		 * HS_FFER6, which can cause I/Os to be dropped. Error out the
1829 		 * I/Os on the txcmplq and let the SCSI layer retry them after
1830 		 * the link is re-established.
1831 		 */
1832 		lpfc_sli_abort_fcp_rings(phba);
1833 
1834 		/*
1835 		 * There was a firmware error.  Take the hba offline and then
1836 		 * attempt to restart it.
1837 		 */
1838 		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1839 		lpfc_offline(phba);
1840 		lpfc_sli_brdrestart(phba);
1841 		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
1842 			lpfc_unblock_mgmt_io(phba);
1843 			return;
1844 		}
1845 		lpfc_unblock_mgmt_io(phba);
1846 	} else if (phba->work_hs & HS_CRIT_TEMP) {
1847 		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1848 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1849 		temp_event_data.event_code = LPFC_CRIT_TEMP;
1850 		temp_event_data.data = (uint32_t)temperature;
1851 
1852 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1853 				"0406 Adapter maximum temperature exceeded "
1854 				"(%ld), taking this port offline "
1855 				"Data: x%x x%x x%x\n",
1856 				temperature, phba->work_hs,
1857 				phba->work_status[0], phba->work_status[1]);
1858 
1859 		shost = lpfc_shost_from_vport(phba->pport);
1860 		fc_host_post_vendor_event(shost, fc_get_event_number(),
1861 					  sizeof(temp_event_data),
1862 					  (char *) &temp_event_data,
1863 					  SCSI_NL_VID_TYPE_PCI
1864 					  | PCI_VENDOR_ID_EMULEX);
1865 
1866 		spin_lock_irq(&phba->hbalock);
1867 		phba->over_temp_state = HBA_OVER_TEMP;
1868 		spin_unlock_irq(&phba->hbalock);
1869 		lpfc_offline_eratt(phba);
1870 
1871 	} else {
1872 		/* The if clause above forces this code path when the status
1873 		 * failure is a value other than FFER6. Do not call the offline
1874 		 * routine twice. This is the adapter hardware error path.
1875 		 */
1876 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1877 				"0457 Adapter Hardware Error "
1878 				"Data: x%x x%x x%x\n",
1879 				phba->work_hs,
1880 				phba->work_status[0], phba->work_status[1]);
1881 
1882 		event_data = FC_REG_DUMP_EVENT;
1883 		shost = lpfc_shost_from_vport(vport);
1884 		fc_host_post_vendor_event(shost, fc_get_event_number(),
1885 				sizeof(event_data), (char *) &event_data,
1886 				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1887 
1888 		lpfc_offline_eratt(phba);
1889 	}
1890 	return;
1891 }
1892 
1893 /**
1894  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1895  * @phba: pointer to lpfc hba data structure.
1896  * @mbx_action: flag for mailbox shutdown action.
1897  * @en_rn_msg: send reset/port recovery message.
1898  * This routine is invoked to perform an SLI4 port PCI function reset in
1899  * response to a port status register polling attention. It waits for the
1900  * port status register (ERR, RDY, RN) bits before proceeding with the
1901  * function reset. During this process, interrupt vectors are freed and
1902  * later re-requested to handle possible port resource changes.
1903  **/
1904 static int
1905 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1906 			    bool en_rn_msg)
1907 {
1908 	int rc;
1909 	uint32_t intr_mode;
1910 	LPFC_MBOXQ_t *mboxq;
1911 
1912 	/* Notifying the transport that the targets are going offline. */
1913 	lpfc_scsi_dev_block(phba);
1914 
1915 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1916 	    LPFC_SLI_INTF_IF_TYPE_2) {
1917 		/*
1918 		 * On an error status condition, the driver needs to wait for
1919 		 * the port to become ready before performing the reset.
1920 		 */
1921 		rc = lpfc_sli4_pdev_status_reg_wait(phba);
1922 		if (rc)
1923 			return rc;
1924 	}
1925 
1926 	/* need reset: attempt for port recovery */
1927 	if (en_rn_msg)
1928 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1929 				"2887 Reset Needed: Attempting Port "
1930 				"Recovery...\n");
1931 
1932 	/* In the no-wait case the HBA has been reset and is not
1933 	 * functional, so clear the
1934 	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
1935 	 */
1936 	if (mbx_action == LPFC_MBX_NO_WAIT) {
1937 		spin_lock_irq(&phba->hbalock);
1938 		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1939 		if (phba->sli.mbox_active) {
1940 			mboxq = phba->sli.mbox_active;
1941 			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1942 			__lpfc_mbox_cmpl_put(phba, mboxq);
1943 			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1944 			phba->sli.mbox_active = NULL;
1945 		}
1946 		spin_unlock_irq(&phba->hbalock);
1947 	}
1948 
1949 	lpfc_offline_prep(phba, mbx_action);
1950 	lpfc_sli_flush_io_rings(phba);
1951 	lpfc_nvme_flush_abts_list(phba);
1952 	lpfc_nvmels_flush_cmd(phba);
1953 	lpfc_offline(phba);
1954 	/* release interrupt for possible resource change */
1955 	lpfc_sli4_disable_intr(phba);
1956 	rc = lpfc_sli_brdrestart(phba);
1957 	if (rc) {
1958 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1959 				"6309 Failed to restart board\n");
1960 		return rc;
1961 	}
1962 	/* request and enable interrupt */
1963 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1964 	if (intr_mode == LPFC_INTR_ERROR) {
1965 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1966 				"3175 Failed to enable interrupt\n");
1967 		return -EIO;
1968 	}
1969 	phba->intr_mode = intr_mode;
1970 	rc = lpfc_online(phba);
1971 	if (rc == 0)
1972 		lpfc_unblock_mgmt_io(phba);
1973 
1974 	return rc;
1975 }
1976 
1977 /**
1978  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1979  * @phba: pointer to lpfc hba data structure.
1980  *
1981  * This routine is invoked to handle the SLI4 HBA hardware error attention
1982  * conditions.
1983  **/
1984 static void
1985 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1986 {
1987 	struct lpfc_vport *vport = phba->pport;
1988 	uint32_t event_data;
1989 	struct Scsi_Host *shost;
1990 	uint32_t if_type;
1991 	struct lpfc_register portstat_reg = {0};
1992 	uint32_t reg_err1, reg_err2;
1993 	uint32_t uerrlo_reg, uemasklo_reg;
1994 	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1995 	bool en_rn_msg = true;
1996 	struct temp_event temp_event_data;
1997 	struct lpfc_register portsmphr_reg;
1998 	int rc, i;
1999 
2000 	/* If the pci channel is offline, ignore possible errors, since
2001 	 * we cannot communicate with the pci card anyway.
2002 	 */
2003 	if (pci_channel_offline(phba->pcidev)) {
2004 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2005 				"3166 pci channel is offline\n");
2006 		lpfc_sli_flush_io_rings(phba);
2007 		return;
2008 	}
2009 
2010 	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2011 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2012 	switch (if_type) {
2013 	case LPFC_SLI_INTF_IF_TYPE_0:
2014 		pci_rd_rc1 = lpfc_readl(
2015 				phba->sli4_hba.u.if_type0.UERRLOregaddr,
2016 				&uerrlo_reg);
2017 		pci_rd_rc2 = lpfc_readl(
2018 				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2019 				&uemasklo_reg);
2020 		/* consider PCI bus read error as pci_channel_offline */
2021 		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2022 			return;
2023 		if (!test_bit(HBA_RECOVERABLE_UE, &phba->hba_flag)) {
2024 			lpfc_sli4_offline_eratt(phba);
2025 			return;
2026 		}
2027 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2028 				"7623 Checking UE recoverable");
2029 
2030 		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2031 			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2032 				       &portsmphr_reg.word0))
2033 				continue;
2034 
2035 			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2036 						   &portsmphr_reg);
2037 			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2038 			    LPFC_PORT_SEM_UE_RECOVERABLE)
2039 				break;
2040 			/* Sleep for 1 second before checking the semaphore */
2041 			msleep(1000);
2042 		}
2043 
2044 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2045 				"4827 smphr_port_status x%x : Waited %dSec",
2046 				smphr_port_status, i);
2047 
2048 		/* Recoverable UE, reset the HBA device */
2049 		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2050 		    LPFC_PORT_SEM_UE_RECOVERABLE) {
2051 			for (i = 0; i < 20; i++) {
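			/* Poll up to 20 seconds for the port to report ready */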
2052 				msleep(1000);
2053 				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2054 				    &portsmphr_reg.word0) &&
2055 				    (LPFC_POST_STAGE_PORT_READY ==
2056 				     bf_get(lpfc_port_smphr_port_status,
2057 				     &portsmphr_reg))) {
2058 					rc = lpfc_sli4_port_sta_fn_reset(phba,
2059 						LPFC_MBX_NO_WAIT, en_rn_msg);
2060 					if (rc == 0)
2061 						return;
2062 					lpfc_printf_log(phba, KERN_ERR,
2063 						LOG_TRACE_EVENT,
2064 						"4215 Failed to recover UE");
2065 					break;
2066 				}
2067 			}
2068 		}
2069 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2070 				"7624 Firmware not ready: Failing UE recovery,"
2071 				" waited %dSec", i);
2072 		phba->link_state = LPFC_HBA_ERROR;
2073 		break;
2074 
2075 	case LPFC_SLI_INTF_IF_TYPE_2:
2076 	case LPFC_SLI_INTF_IF_TYPE_6:
2077 		pci_rd_rc1 = lpfc_readl(
2078 				phba->sli4_hba.u.if_type2.STATUSregaddr,
2079 				&portstat_reg.word0);
2080 		/* consider PCI bus read error as pci_channel_offline */
2081 		if (pci_rd_rc1 == -EIO) {
2082 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2083 				"3151 PCI bus read access failure: x%x\n",
2084 				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2085 			lpfc_sli4_offline_eratt(phba);
2086 			return;
2087 		}
2088 		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2089 		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2090 		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2091 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2092 					"2889 Port Overtemperature event, "
2093 					"taking port offline Data: x%x x%x\n",
2094 					reg_err1, reg_err2);
2095 
2096 			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2097 			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2098 			temp_event_data.event_code = LPFC_CRIT_TEMP;
2099 			temp_event_data.data = 0xFFFFFFFF;
2100 
2101 			shost = lpfc_shost_from_vport(phba->pport);
2102 			fc_host_post_vendor_event(shost, fc_get_event_number(),
2103 						  sizeof(temp_event_data),
2104 						  (char *)&temp_event_data,
2105 						  SCSI_NL_VID_TYPE_PCI
2106 						  | PCI_VENDOR_ID_EMULEX);
2107 
2108 			spin_lock_irq(&phba->hbalock);
2109 			phba->over_temp_state = HBA_OVER_TEMP;
2110 			spin_unlock_irq(&phba->hbalock);
2111 			lpfc_sli4_offline_eratt(phba);
2112 			return;
2113 		}
2114 		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2115 		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2116 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2117 					"3143 Port Down: Firmware Update "
2118 					"Detected\n");
2119 			en_rn_msg = false;
2120 		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2121 			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2122 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2123 					"3144 Port Down: Debug Dump\n");
2124 		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2125 			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2126 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2127 					"3145 Port Down: Provisioning\n");
2128 
2129 		/* If resets are disabled then leave the HBA alone and return */
2130 		if (!phba->cfg_enable_hba_reset)
2131 			return;
2132 
2133 		/* Check port status register for function reset */
2134 		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2135 				en_rn_msg);
2136 		if (rc == 0) {
2137 			/* don't report event on forced debug dump */
2138 			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2139 			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2140 				return;
2141 			else
2142 				break;
2143 		}
2144 		/* fall through for not able to recover */
2145 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2146 				"3152 Unrecoverable error\n");
2147 		lpfc_sli4_offline_eratt(phba);
2148 		break;
2149 	case LPFC_SLI_INTF_IF_TYPE_1:
2150 	default:
2151 		break;
2152 	}
2153 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2154 			"3123 Report dump event to upper layer\n");
2155 	/* Send an internal error event to mgmt application */
2156 	lpfc_board_errevt_to_mgmt(phba);
2157 
2158 	event_data = FC_REG_DUMP_EVENT;
2159 	shost = lpfc_shost_from_vport(vport);
2160 	fc_host_post_vendor_event(shost, fc_get_event_number(),
2161 				  sizeof(event_data), (char *) &event_data,
2162 				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2163 }
2164 
2165 /**
2166  * lpfc_handle_eratt - Wrapper func for handling hba error attention
2167  * @phba: pointer to lpfc HBA data structure.
2168  *
2169  * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2170  * routine from the API jump table function pointer from the lpfc_hba struct.
2171  *
2172  * Return codes
2173  *   0 - success.
2174  *   Any other value - error.
2175  **/
2176 void
2177 lpfc_handle_eratt(struct lpfc_hba *phba)
2178 {
2179 	(*phba->lpfc_handle_eratt)(phba);
2180 }
2181 
2182 /**
2183  * lpfc_handle_latt - The HBA link event handler
2184  * @phba: pointer to lpfc hba data structure.
2185  *
2186  * This routine is invoked from the worker thread to handle a HBA host
2187  * attention link event. SLI3 only.
2188  **/
2189 void
2190 lpfc_handle_latt(struct lpfc_hba *phba)
2191 {
2192 	struct lpfc_vport *vport = phba->pport;
2193 	struct lpfc_sli   *psli = &phba->sli;
2194 	LPFC_MBOXQ_t *pmb;
2195 	volatile uint32_t control;
2196 	int rc = 0;
2197 
2198 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2199 	if (!pmb) {
2200 		rc = 1;
2201 		goto lpfc_handle_latt_err_exit;
2202 	}
2203 
2204 	rc = lpfc_mbox_rsrc_prep(phba, pmb);
2205 	if (rc) {
2206 		rc = 2;
2207 		mempool_free(pmb, phba->mbox_mem_pool);
2208 		goto lpfc_handle_latt_err_exit;
2209 	}
2210 
2211 	/* Cleanup any outstanding ELS commands */
2212 	lpfc_els_flush_all_cmd(phba);
2213 	psli->slistat.link_event++;
2214 	lpfc_read_topology(phba, pmb, pmb->ctx_buf);
2215 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2216 	pmb->vport = vport;
2217 	/* Block ELS IOCBs until we have processed this mbox command */
2218 	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2219 	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2220 	if (rc == MBX_NOT_FINISHED) {
2221 		rc = 4;
2222 		goto lpfc_handle_latt_free_mbuf;
2223 	}
2224 
2225 	/* Clear Link Attention in HA REG */
2226 	spin_lock_irq(&phba->hbalock);
2227 	writel(HA_LATT, phba->HAregaddr);
2228 	readl(phba->HAregaddr); /* flush */
2229 	spin_unlock_irq(&phba->hbalock);
2230 
2231 	return;
2232 
2233 lpfc_handle_latt_free_mbuf:
2234 	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2235 	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2236 lpfc_handle_latt_err_exit:
2237 	/* Enable Link attention interrupts */
2238 	spin_lock_irq(&phba->hbalock);
2239 	psli->sli_flag |= LPFC_PROCESS_LA;
2240 	control = readl(phba->HCregaddr);
2241 	control |= HC_LAINT_ENA;
2242 	writel(control, phba->HCregaddr);
2243 	readl(phba->HCregaddr); /* flush */
2244 
2245 	/* Clear Link Attention in HA REG */
2246 	writel(HA_LATT, phba->HAregaddr);
2247 	readl(phba->HAregaddr); /* flush */
2248 	spin_unlock_irq(&phba->hbalock);
2249 	lpfc_linkdown(phba);
2250 	phba->link_state = LPFC_HBA_ERROR;
2251 
2252 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2253 			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2254 
2255 	return;
2256 }
2257 
2258 static void
2259 lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
2260 {
2261 	int i, j;
2262 
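	/*
	 * Each VPD-R field starts with a two-character keyword followed by a
	 * one-byte length; 'i' holds the field length and 'j' indexes the
	 * destination string.
	 */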
2263 	while (length > 0) {
2264 		/* Look for Serial Number */
2265 		if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
2266 			*pindex += 2;
2267 			i = vpd[*pindex];
2268 			*pindex += 1;
2269 			j = 0;
2270 			length -= (3+i);
2271 			while (i--) {
2272 				phba->SerialNumber[j++] = vpd[(*pindex)++];
2273 				if (j == 31)
2274 					break;
2275 			}
2276 			phba->SerialNumber[j] = 0;
2277 			continue;
2278 		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
2279 			phba->vpd_flag |= VPD_MODEL_DESC;
2280 			*pindex += 2;
2281 			i = vpd[*pindex];
2282 			*pindex += 1;
2283 			j = 0;
2284 			length -= (3+i);
2285 			while (i--) {
2286 				phba->ModelDesc[j++] = vpd[(*pindex)++];
2287 				if (j == 255)
2288 					break;
2289 			}
2290 			phba->ModelDesc[j] = 0;
2291 			continue;
2292 		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
2293 			phba->vpd_flag |= VPD_MODEL_NAME;
2294 			*pindex += 2;
2295 			i = vpd[*pindex];
2296 			*pindex += 1;
2297 			j = 0;
2298 			length -= (3+i);
2299 			while (i--) {
2300 				phba->ModelName[j++] = vpd[(*pindex)++];
2301 				if (j == 79)
2302 					break;
2303 			}
2304 			phba->ModelName[j] = 0;
2305 			continue;
2306 		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
2307 			phba->vpd_flag |= VPD_PROGRAM_TYPE;
2308 			*pindex += 2;
2309 			i = vpd[*pindex];
2310 			*pindex += 1;
2311 			j = 0;
2312 			length -= (3+i);
2313 			while (i--) {
2314 				phba->ProgramType[j++] = vpd[(*pindex)++];
2315 				if (j == 255)
2316 					break;
2317 			}
2318 			phba->ProgramType[j] = 0;
2319 			continue;
2320 		} else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
2321 			phba->vpd_flag |= VPD_PORT;
2322 			*pindex += 2;
2323 			i = vpd[*pindex];
2324 			*pindex += 1;
2325 			j = 0;
2326 			length -= (3 + i);
2327 			while (i--) {
2328 				if ((phba->sli_rev == LPFC_SLI_REV4) &&
2329 				    (phba->sli4_hba.pport_name_sta ==
2330 				     LPFC_SLI4_PPNAME_GET)) {
2331 					j++;
2332 					(*pindex)++;
2333 				} else
2334 					phba->Port[j++] = vpd[(*pindex)++];
2335 				if (j == 19)
2336 					break;
2337 			}
2338 			if ((phba->sli_rev != LPFC_SLI_REV4) ||
2339 			    (phba->sli4_hba.pport_name_sta ==
2340 			     LPFC_SLI4_PPNAME_NON))
2341 				phba->Port[j] = 0;
2342 			continue;
2343 		} else {
2344 			*pindex += 2;
2345 			i = vpd[*pindex];
2346 			*pindex += 1;
2347 			*pindex += i;
2348 			length -= (3 + i);
2349 		}
2350 	}
2351 }
2352 
2353 /**
2354  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2355  * @phba: pointer to lpfc hba data structure.
2356  * @vpd: pointer to the vital product data.
2357  * @len: length of the vital product data in bytes.
2358  *
2359  * This routine parses the Vital Product Data (VPD). The VPD is treated as
2360  * an array of characters. In this routine, the ModelName, ProgramType, and
2361  * ModelDesc, etc. fields of the phba data structure will be populated.
2362  *
2363  * Return codes
2364  *   0 - pointer to the VPD passed in is NULL
2365  *   1 - success
2366  **/
2367 int
2368 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2369 {
2370 	uint8_t lenlo, lenhi;
2371 	int Length;
2372 	int i;
2373 	int finished = 0;
2374 	int index = 0;
2375 
2376 	if (!vpd)
2377 		return 0;
2378 
2379 	/* Vital Product */
2380 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2381 			"0455 Vital Product Data: x%x x%x x%x x%x\n",
2382 			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2383 			(uint32_t) vpd[3]);
2384 	while (!finished && (index < (len - 4))) {
2385 		switch (vpd[index]) {
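		/*
		 * PCI VPD resource tags: 0x82 (identifier string) and 0x91
		 * (read/write VPD-W section) are skipped, 0x90 (read-only
		 * VPD-R section) holds the fields parsed by lpfc_fill_vpd,
		 * and 0x78 is the end tag.
		 */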
2386 		case 0x82:
2387 		case 0x91:
2388 			index += 1;
2389 			lenlo = vpd[index];
2390 			index += 1;
2391 			lenhi = vpd[index];
2392 			index += 1;
2393 			i = ((((unsigned short)lenhi) << 8) + lenlo);
2394 			index += i;
2395 			break;
2396 		case 0x90:
2397 			index += 1;
2398 			lenlo = vpd[index];
2399 			index += 1;
2400 			lenhi = vpd[index];
2401 			index += 1;
2402 			Length = ((((unsigned short)lenhi) << 8) + lenlo);
2403 			if (Length > len - index)
2404 				Length = len - index;
2405 
2406 			lpfc_fill_vpd(phba, vpd, Length, &index);
2407 			finished = 0;
2408 			break;
2409 		case 0x78:
2410 			finished = 1;
2411 			break;
2412 		default:
2413 			index++;
2414 			break;
2415 		}
2416 	}
2417 
2418 	return 1;
2419 }
2420 
2421 /**
2422  * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2423  * @phba: pointer to lpfc hba data structure.
2424  * @mdp: pointer to the data structure to hold the derived model name.
2425  * @descp: pointer to the data structure to hold the derived description.
2426  *
2427  * This routine retrieves the HBA's description based on its registered PCI
2428  * device ID. The @descp passed into this function points to an array of 256
2429  * chars; it is returned with the model name, maximum speed, and host bus type.
2430  * The @mdp passed into this function points to an array of 80 chars. When the
2431  * function returns, the @mdp will be filled with the model name.
2432  **/
2433 static void
2434 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2435 {
2436 	uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2437 	char *model = "<Unknown>";
2438 	int tbolt = 0;
2439 
2440 	switch (sub_dev_id) {
2441 	case PCI_DEVICE_ID_CLRY_161E:
2442 		model = "161E";
2443 		break;
2444 	case PCI_DEVICE_ID_CLRY_162E:
2445 		model = "162E";
2446 		break;
2447 	case PCI_DEVICE_ID_CLRY_164E:
2448 		model = "164E";
2449 		break;
2450 	case PCI_DEVICE_ID_CLRY_161P:
2451 		model = "161P";
2452 		break;
2453 	case PCI_DEVICE_ID_CLRY_162P:
2454 		model = "162P";
2455 		break;
2456 	case PCI_DEVICE_ID_CLRY_164P:
2457 		model = "164P";
2458 		break;
2459 	case PCI_DEVICE_ID_CLRY_321E:
2460 		model = "321E";
2461 		break;
2462 	case PCI_DEVICE_ID_CLRY_322E:
2463 		model = "322E";
2464 		break;
2465 	case PCI_DEVICE_ID_CLRY_324E:
2466 		model = "324E";
2467 		break;
2468 	case PCI_DEVICE_ID_CLRY_321P:
2469 		model = "321P";
2470 		break;
2471 	case PCI_DEVICE_ID_CLRY_322P:
2472 		model = "322P";
2473 		break;
2474 	case PCI_DEVICE_ID_CLRY_324P:
2475 		model = "324P";
2476 		break;
2477 	case PCI_DEVICE_ID_TLFC_2XX2:
2478 		model = "2XX2";
2479 		tbolt = 1;
2480 		break;
2481 	case PCI_DEVICE_ID_TLFC_3162:
2482 		model = "3162";
2483 		tbolt = 1;
2484 		break;
2485 	case PCI_DEVICE_ID_TLFC_3322:
2486 		model = "3322";
2487 		tbolt = 1;
2488 		break;
2489 	default:
2490 		model = "Unknown";
2491 		break;
2492 	}
2493 
2494 	if (mdp && mdp[0] == '\0')
2495 		snprintf(mdp, 79, "%s", model);
2496 
2497 	if (descp && descp[0] == '\0')
2498 		snprintf(descp, 255,
2499 			 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2500 			 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2501 			 model,
2502 			 phba->Port);
2503 }
2504 
2505 /**
2506  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2507  * @phba: pointer to lpfc hba data structure.
2508  * @mdp: pointer to the data structure to hold the derived model name.
2509  * @descp: pointer to the data structure to hold the derived description.
2510  *
2511  * This routine retrieves the HBA's description based on its registered PCI
2512  * device ID. The @descp passed into this function points to an array of 256
2513  * chars; it is returned with the model name, maximum speed, and host bus type.
2514  * The @mdp passed into this function points to an array of 80 chars. When the
2515  * function returns, the @mdp will be filled with the model name.
2516  **/
2517 static void
2518 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2519 {
2520 	lpfc_vpd_t *vp;
2521 	uint16_t dev_id = phba->pcidev->device;
2522 	int max_speed;
2523 	int GE = 0;
2524 	int oneConnect = 0; /* default is not a oneConnect */
2525 	struct {
2526 		char *name;
2527 		char *bus;
2528 		char *function;
2529 	} m = {"<Unknown>", "", ""};
2530 
2531 	if (mdp && mdp[0] != '\0' &&
2532 	    descp && descp[0] != '\0')
2533 		return;
2534 
2535 	if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2536 		lpfc_get_atto_model_desc(phba, mdp, descp);
2537 		return;
2538 	}
2539 
2540 	if (phba->lmt & LMT_128Gb)
2541 		max_speed = 128;
2542 	else if (phba->lmt & LMT_64Gb)
2543 		max_speed = 64;
2544 	else if (phba->lmt & LMT_32Gb)
2545 		max_speed = 32;
2546 	else if (phba->lmt & LMT_16Gb)
2547 		max_speed = 16;
2548 	else if (phba->lmt & LMT_10Gb)
2549 		max_speed = 10;
2550 	else if (phba->lmt & LMT_8Gb)
2551 		max_speed = 8;
2552 	else if (phba->lmt & LMT_4Gb)
2553 		max_speed = 4;
2554 	else if (phba->lmt & LMT_2Gb)
2555 		max_speed = 2;
2556 	else if (phba->lmt & LMT_1Gb)
2557 		max_speed = 1;
2558 	else
2559 		max_speed = 0;
2560 
2561 	vp = &phba->vpd;
2562 
2563 	switch (dev_id) {
2564 	case PCI_DEVICE_ID_FIREFLY:
2565 		m = (typeof(m)){"LP6000", "PCI",
2566 				"Obsolete, Unsupported Fibre Channel Adapter"};
2567 		break;
2568 	case PCI_DEVICE_ID_SUPERFLY:
2569 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2570 			m = (typeof(m)){"LP7000", "PCI", ""};
2571 		else
2572 			m = (typeof(m)){"LP7000E", "PCI", ""};
2573 		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2574 		break;
2575 	case PCI_DEVICE_ID_DRAGONFLY:
2576 		m = (typeof(m)){"LP8000", "PCI",
2577 				"Obsolete, Unsupported Fibre Channel Adapter"};
2578 		break;
2579 	case PCI_DEVICE_ID_CENTAUR:
2580 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2581 			m = (typeof(m)){"LP9002", "PCI", ""};
2582 		else
2583 			m = (typeof(m)){"LP9000", "PCI", ""};
2584 		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2585 		break;
2586 	case PCI_DEVICE_ID_RFLY:
2587 		m = (typeof(m)){"LP952", "PCI",
2588 				"Obsolete, Unsupported Fibre Channel Adapter"};
2589 		break;
2590 	case PCI_DEVICE_ID_PEGASUS:
2591 		m = (typeof(m)){"LP9802", "PCI-X",
2592 				"Obsolete, Unsupported Fibre Channel Adapter"};
2593 		break;
2594 	case PCI_DEVICE_ID_THOR:
2595 		m = (typeof(m)){"LP10000", "PCI-X",
2596 				"Obsolete, Unsupported Fibre Channel Adapter"};
2597 		break;
2598 	case PCI_DEVICE_ID_VIPER:
2599 		m = (typeof(m)){"LPX1000",  "PCI-X",
2600 				"Obsolete, Unsupported Fibre Channel Adapter"};
2601 		break;
2602 	case PCI_DEVICE_ID_PFLY:
2603 		m = (typeof(m)){"LP982", "PCI-X",
2604 				"Obsolete, Unsupported Fibre Channel Adapter"};
2605 		break;
2606 	case PCI_DEVICE_ID_TFLY:
2607 		m = (typeof(m)){"LP1050", "PCI-X",
2608 				"Obsolete, Unsupported Fibre Channel Adapter"};
2609 		break;
2610 	case PCI_DEVICE_ID_HELIOS:
2611 		m = (typeof(m)){"LP11000", "PCI-X2",
2612 				"Obsolete, Unsupported Fibre Channel Adapter"};
2613 		break;
2614 	case PCI_DEVICE_ID_HELIOS_SCSP:
2615 		m = (typeof(m)){"LP11000-SP", "PCI-X2",
2616 				"Obsolete, Unsupported Fibre Channel Adapter"};
2617 		break;
2618 	case PCI_DEVICE_ID_HELIOS_DCSP:
2619 		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2620 				"Obsolete, Unsupported Fibre Channel Adapter"};
2621 		break;
2622 	case PCI_DEVICE_ID_NEPTUNE:
2623 		m = (typeof(m)){"LPe1000", "PCIe",
2624 				"Obsolete, Unsupported Fibre Channel Adapter"};
2625 		break;
2626 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
2627 		m = (typeof(m)){"LPe1000-SP", "PCIe",
2628 				"Obsolete, Unsupported Fibre Channel Adapter"};
2629 		break;
2630 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
2631 		m = (typeof(m)){"LPe1002-SP", "PCIe",
2632 				"Obsolete, Unsupported Fibre Channel Adapter"};
2633 		break;
2634 	case PCI_DEVICE_ID_BMID:
2635 		m = (typeof(m)){"LP1150", "PCI-X2",
2636 				"Obsolete, Unsupported Fibre Channel Adapter"};
2637 		break;
2638 	case PCI_DEVICE_ID_BSMB:
2639 		m = (typeof(m)){"LP111", "PCI-X2",
2640 				"Obsolete, Unsupported Fibre Channel Adapter"};
2641 		break;
2642 	case PCI_DEVICE_ID_ZEPHYR:
2643 		m = (typeof(m)){"LPe11000", "PCIe",
2644 				"Obsolete, Unsupported Fibre Channel Adapter"};
2645 		break;
2646 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
2647 		m = (typeof(m)){"LPe11000", "PCIe",
2648 				"Obsolete, Unsupported Fibre Channel Adapter"};
2649 		break;
2650 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
2651 		m = (typeof(m)){"LP2105", "PCIe",
2652 				"Obsolete, Unsupported FCoE Adapter"};
2653 		GE = 1;
2654 		break;
2655 	case PCI_DEVICE_ID_ZMID:
2656 		m = (typeof(m)){"LPe1150", "PCIe",
2657 				"Obsolete, Unsupported Fibre Channel Adapter"};
2658 		break;
2659 	case PCI_DEVICE_ID_ZSMB:
2660 		m = (typeof(m)){"LPe111", "PCIe",
2661 				"Obsolete, Unsupported Fibre Channel Adapter"};
2662 		break;
2663 	case PCI_DEVICE_ID_LP101:
2664 		m = (typeof(m)){"LP101", "PCI-X",
2665 				"Obsolete, Unsupported Fibre Channel Adapter"};
2666 		break;
2667 	case PCI_DEVICE_ID_LP10000S:
2668 		m = (typeof(m)){"LP10000-S", "PCI",
2669 				"Obsolete, Unsupported Fibre Channel Adapter"};
2670 		break;
2671 	case PCI_DEVICE_ID_LP11000S:
2672 		m = (typeof(m)){"LP11000-S", "PCI-X2",
2673 				"Obsolete, Unsupported Fibre Channel Adapter"};
2674 		break;
2675 	case PCI_DEVICE_ID_LPE11000S:
2676 		m = (typeof(m)){"LPe11000-S", "PCIe",
2677 				"Obsolete, Unsupported Fibre Channel Adapter"};
2678 		break;
2679 	case PCI_DEVICE_ID_SAT:
2680 		m = (typeof(m)){"LPe12000", "PCIe",
2681 				"Obsolete, Unsupported Fibre Channel Adapter"};
2682 		break;
2683 	case PCI_DEVICE_ID_SAT_MID:
2684 		m = (typeof(m)){"LPe1250", "PCIe",
2685 				"Obsolete, Unsupported Fibre Channel Adapter"};
2686 		break;
2687 	case PCI_DEVICE_ID_SAT_SMB:
2688 		m = (typeof(m)){"LPe121", "PCIe",
2689 				"Obsolete, Unsupported Fibre Channel Adapter"};
2690 		break;
2691 	case PCI_DEVICE_ID_SAT_DCSP:
2692 		m = (typeof(m)){"LPe12002-SP", "PCIe",
2693 				"Obsolete, Unsupported Fibre Channel Adapter"};
2694 		break;
2695 	case PCI_DEVICE_ID_SAT_SCSP:
2696 		m = (typeof(m)){"LPe12000-SP", "PCIe",
2697 				"Obsolete, Unsupported Fibre Channel Adapter"};
2698 		break;
2699 	case PCI_DEVICE_ID_SAT_S:
2700 		m = (typeof(m)){"LPe12000-S", "PCIe",
2701 				"Obsolete, Unsupported Fibre Channel Adapter"};
2702 		break;
2703 	case PCI_DEVICE_ID_PROTEUS_VF:
2704 		m = (typeof(m)){"LPev12000", "PCIe IOV",
2705 				"Obsolete, Unsupported Fibre Channel Adapter"};
2706 		break;
2707 	case PCI_DEVICE_ID_PROTEUS_PF:
2708 		m = (typeof(m)){"LPev12000", "PCIe IOV",
2709 				"Obsolete, Unsupported Fibre Channel Adapter"};
2710 		break;
2711 	case PCI_DEVICE_ID_PROTEUS_S:
2712 		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2713 				"Obsolete, Unsupported Fibre Channel Adapter"};
2714 		break;
2715 	case PCI_DEVICE_ID_TIGERSHARK:
2716 		oneConnect = 1;
2717 		m = (typeof(m)){"OCe10100", "PCIe",
2718 				"Obsolete, Unsupported FCoE Adapter"};
2719 		break;
2720 	case PCI_DEVICE_ID_TOMCAT:
2721 		oneConnect = 1;
2722 		m = (typeof(m)){"OCe11100", "PCIe",
2723 				"Obsolete, Unsupported FCoE Adapter"};
2724 		break;
2725 	case PCI_DEVICE_ID_FALCON:
2726 		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2727 				"Obsolete, Unsupported Fibre Channel Adapter"};
2728 		break;
2729 	case PCI_DEVICE_ID_BALIUS:
2730 		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2731 				"Obsolete, Unsupported Fibre Channel Adapter"};
2732 		break;
2733 	case PCI_DEVICE_ID_LANCER_FC:
2734 		m = (typeof(m)){"LPe16000", "PCIe",
2735 				"Obsolete, Unsupported Fibre Channel Adapter"};
2736 		break;
2737 	case PCI_DEVICE_ID_LANCER_FC_VF:
2738 		m = (typeof(m)){"LPe16000", "PCIe",
2739 				"Obsolete, Unsupported Fibre Channel Adapter"};
2740 		break;
2741 	case PCI_DEVICE_ID_LANCER_FCOE:
2742 		oneConnect = 1;
2743 		m = (typeof(m)){"OCe15100", "PCIe",
2744 				"Obsolete, Unsupported FCoE Adapter"};
2745 		break;
2746 	case PCI_DEVICE_ID_LANCER_FCOE_VF:
2747 		oneConnect = 1;
2748 		m = (typeof(m)){"OCe15100", "PCIe",
2749 				"Obsolete, Unsupported FCoE Adapter"};
2750 		break;
2751 	case PCI_DEVICE_ID_LANCER_G6_FC:
2752 		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2753 		break;
2754 	case PCI_DEVICE_ID_LANCER_G7_FC:
2755 		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2756 		break;
2757 	case PCI_DEVICE_ID_LANCER_G7P_FC:
2758 		m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2759 		break;
2760 	case PCI_DEVICE_ID_LANCER_G8_FC:
2761 		m = (typeof(m)){"LPe42100", "PCIe", "Fibre Channel Adapter"};
2762 		break;
2763 	case PCI_DEVICE_ID_SKYHAWK:
2764 	case PCI_DEVICE_ID_SKYHAWK_VF:
2765 		oneConnect = 1;
2766 		m = (typeof(m)){"OCe14000", "PCIe",
2767 				"Obsolete, Unsupported FCoE Adapter"};
2768 		break;
2769 	default:
2770 		m = (typeof(m)){"Unknown", "", ""};
2771 		break;
2772 	}
2773 
2774 	if (mdp && mdp[0] == '\0')
2775 		snprintf(mdp, 79, "%s", m.name);
2776 	/*
2777 	 * oneConnect hbas require special processing; they are all initiators
2778 	 * and we put the port number on the end.
2779 	 */
2780 	if (descp && descp[0] == '\0') {
2781 		if (oneConnect)
2782 			snprintf(descp, 255,
2783 				"Emulex OneConnect %s, %s Initiator %s",
2784 				m.name, m.function,
2785 				phba->Port);
2786 		else if (max_speed == 0)
2787 			snprintf(descp, 255,
2788 				"Emulex %s %s %s",
2789 				m.name, m.bus, m.function);
2790 		else
2791 			snprintf(descp, 255,
2792 				"Emulex %s %d%s %s %s",
2793 				m.name, max_speed, (GE) ? "GE" : "Gb",
2794 				m.bus, m.function);
2795 	}
2796 }
2797 
2798 /**
2799  * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2800  * @phba: pointer to lpfc hba data structure.
2801  * @pring: pointer to an IOCB ring.
2802  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2803  *
2804  * This routine posts a given number of IOCBs with the associated DMA buffer
2805  * descriptors specified by the cnt argument to the given IOCB ring.
2806  *
2807  * Return codes
2808  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2809  **/
2810 int
2811 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2812 {
2813 	IOCB_t *icmd;
2814 	struct lpfc_iocbq *iocb;
2815 	struct lpfc_dmabuf *mp1, *mp2;
2816 
2817 	cnt += pring->missbufcnt;
2818 
2819 	/* While there are buffers to post */
2820 	while (cnt > 0) {
2821 		/* Allocate buffer for command iocb */
2822 		iocb = lpfc_sli_get_iocbq(phba);
2823 		if (iocb == NULL) {
2824 			pring->missbufcnt = cnt;
2825 			return cnt;
2826 		}
2827 		icmd = &iocb->iocb;
2828 
2829 		/* 2 buffers can be posted per command */
2830 		/* Allocate buffer to post */
2831 		mp1 = kmalloc_obj(struct lpfc_dmabuf);
2832 		if (mp1)
2833 			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2834 		if (!mp1 || !mp1->virt) {
2835 			kfree(mp1);
2836 			lpfc_sli_release_iocbq(phba, iocb);
2837 			pring->missbufcnt = cnt;
2838 			return cnt;
2839 		}
2840 
2841 		INIT_LIST_HEAD(&mp1->list);
2842 		/* Allocate buffer to post */
2843 		if (cnt > 1) {
2844 			mp2 = kmalloc_obj(struct lpfc_dmabuf);
2845 			if (mp2)
2846 				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2847 							    &mp2->phys);
2848 			if (!mp2 || !mp2->virt) {
2849 				kfree(mp2);
2850 				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2851 				kfree(mp1);
2852 				lpfc_sli_release_iocbq(phba, iocb);
2853 				pring->missbufcnt = cnt;
2854 				return cnt;
2855 			}
2856 
2857 			INIT_LIST_HEAD(&mp2->list);
2858 		} else {
2859 			mp2 = NULL;
2860 		}
2861 
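		/* Fill in the 64-bit buffer descriptor entries (BDEs) for the
		 * QUE_RING_BUF64 command; up to two buffers per IOCB.
		 */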
2862 		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2863 		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2864 		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2865 		icmd->ulpBdeCount = 1;
2866 		cnt--;
2867 		if (mp2) {
2868 			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2869 			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2870 			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2871 			cnt--;
2872 			icmd->ulpBdeCount = 2;
2873 		}
2874 
2875 		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2876 		icmd->ulpLe = 1;
2877 
2878 		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2879 		    IOCB_ERROR) {
2880 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2881 			kfree(mp1);
2882 			cnt++;
2883 			if (mp2) {
2884 				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2885 				kfree(mp2);
2886 				cnt++;
2887 			}
2888 			lpfc_sli_release_iocbq(phba, iocb);
2889 			pring->missbufcnt = cnt;
2890 			return cnt;
2891 		}
2892 		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2893 		if (mp2)
2894 			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2895 	}
2896 	pring->missbufcnt = 0;
2897 	return 0;
2898 }
2899 
2900 /**
2901  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2902  * @phba: pointer to lpfc hba data structure.
2903  *
2904  * This routine posts initial receive IOCB buffers to the ELS ring. The
2905  * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2906  * set to 64 IOCBs. SLI3 only.
2907  *
2908  * Return codes
2909  *   0 - success (currently always success)
2910  **/
2911 static int
2912 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2913 {
2914 	struct lpfc_sli *psli = &phba->sli;
2915 
2916 	/* Ring 0, ELS / CT buffers */
2917 	lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2918 	/* Ring 2 - FCP no buffers needed */
2919 
2920 	return 0;
2921 }
2922 
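/* Rotate the 32-bit value V left by N bits (the SHA-1 rotl operation) */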
2923 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2924 
2925 /**
2926  * lpfc_sha_init - Set up initial array of hash table entries
2927  * @HashResultPointer: pointer to an array as hash table.
2928  *
2929  * This routine sets up the initial values to the array of hash table entries
2930  * for the LC HBAs.
2931  **/
2932 static void
2933 lpfc_sha_init(uint32_t * HashResultPointer)
2934 {
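	/* Standard SHA-1 initial hash values (H0..H4 from FIPS 180) */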
2935 	HashResultPointer[0] = 0x67452301;
2936 	HashResultPointer[1] = 0xEFCDAB89;
2937 	HashResultPointer[2] = 0x98BADCFE;
2938 	HashResultPointer[3] = 0x10325476;
2939 	HashResultPointer[4] = 0xC3D2E1F0;
2940 }
2941 
2942 /**
2943  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2944  * @HashResultPointer: pointer to an initial/result hash table.
2945  * @HashWorkingPointer: pointer to a working hash table.
2946  *
2947  * This routine iterates an initial hash table pointed to by
2948  * @HashResultPointer with the values from the working hash table pointed to
2949  * by @HashWorkingPointer. The results are put back into the initial hash
2950  * table, returned through @HashResultPointer as the result hash table.
2951  **/
2952 static void
2953 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2954 {
2955 	int t;
2956 	uint32_t TEMP;
2957 	uint32_t A, B, C, D, E;
2958 	t = 16;
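	/* Expand the 16-word block into the 80-word SHA-1 message schedule */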
2959 	do {
2960 		HashWorkingPointer[t] =
2961 		    S(1,
2962 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2963 								     8] ^
2964 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2965 	} while (++t <= 79);
2966 	t = 0;
2967 	A = HashResultPointer[0];
2968 	B = HashResultPointer[1];
2969 	C = HashResultPointer[2];
2970 	D = HashResultPointer[3];
2971 	E = HashResultPointer[4];
2972 
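	/* The 80 rounds of the SHA-1 compression function */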
2973 	do {
2974 		if (t < 20) {
2975 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2976 		} else if (t < 40) {
2977 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2978 		} else if (t < 60) {
2979 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2980 		} else {
2981 			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2982 		}
2983 		TEMP += S(5, A) + E + HashWorkingPointer[t];
2984 		E = D;
2985 		D = C;
2986 		C = S(30, B);
2987 		B = A;
2988 		A = TEMP;
2989 	} while (++t <= 79);
2990 
2991 	HashResultPointer[0] += A;
2992 	HashResultPointer[1] += B;
2993 	HashResultPointer[2] += C;
2994 	HashResultPointer[3] += D;
2995 	HashResultPointer[4] += E;
2996 
2997 }
2998 
2999 /**
3000  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
3001  * @RandomChallenge: pointer to the entry of host challenge random number array.
3002  * @HashWorking: pointer to the entry of the working hash array.
3003  *
3004  * This routine calculates the working hash array referred by @HashWorking
3005  * from the challenge random numbers associated with the host, referred by
3006  * @RandomChallenge. The result is put into the entry of the working hash
3007  * array and returned by reference through @HashWorking.
3008  **/
3009 static void
3010 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
3011 {
3012 	*HashWorking = (*RandomChallenge ^ *HashWorking);
3013 }
3014 
3015 /**
3016  * lpfc_hba_init - Perform special handling for LC HBA initialization
3017  * @phba: pointer to lpfc hba data structure.
3018  * @hbainit: pointer to an array of unsigned 32-bit integers.
3019  *
3020  * This routine performs the special handling for LC HBA initialization.
3021  **/
3022 void
3023 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3024 {
3025 	int t;
3026 	uint32_t *HashWorking;
3027 	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
3028 
3029 	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3030 	if (!HashWorking)
3031 		return;
3032 
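	/* Seed both ends of the working array with the two words of the WWNN */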
3033 	HashWorking[0] = HashWorking[78] = *pwwnn++;
3034 	HashWorking[1] = HashWorking[79] = *pwwnn;
3035 
3036 	for (t = 0; t < 7; t++)
3037 		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3038 
3039 	lpfc_sha_init(hbainit);
3040 	lpfc_sha_iterate(hbainit, HashWorking);
3041 	kfree(HashWorking);
3042 }
3043 
3044 /**
3045  * lpfc_cleanup - Performs vport cleanups before deleting a vport
3046  * @vport: pointer to a virtual N_Port data structure.
3047  *
3048  * This routine performs the necessary cleanups before deleting the @vport.
3049  * It invokes the discovery state machine to perform necessary state
3050  * transitions and to release the ndlps associated with the @vport. Note,
3051  * the physical port is treated as @vport 0.
3052  **/
3053 void
3054 lpfc_cleanup(struct lpfc_vport *vport)
3055 {
3056 	struct lpfc_hba   *phba = vport->phba;
3057 	struct lpfc_nodelist *ndlp, *next_ndlp;
3058 	int i = 0;
3059 
3060 	if (phba->link_state > LPFC_LINK_DOWN)
3061 		lpfc_port_link_failure(vport);
3062 
3063 	/* Clean up VMID resources */
3064 	if (lpfc_is_vmid_enabled(phba))
3065 		lpfc_vmid_vport_cleanup(vport);
3066 
3067 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3068 		/* Fabric Ports not in UNMAPPED state are cleaned up in the
3069 		 * DEVICE_RM event.
3070 		 */
3071 		if (ndlp->nlp_type & NLP_FABRIC &&
3072 		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3073 			lpfc_disc_state_machine(vport, ndlp, NULL,
3074 					NLP_EVT_DEVICE_RECOVERY);
3075 
3076 		if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3077 			lpfc_disc_state_machine(vport, ndlp, NULL,
3078 					NLP_EVT_DEVICE_RM);
3079 	}
3080 
3081 	/* This is a special case flush to return all
3082 	 * IOs before entering this loop. There are
3083 	 * two points in the code where a flush is
3084 	 * avoided if the FC_UNLOADING flag is set:
3085 	 * one is in the multipool destroy (this
3086 	 * prevents a crash) and the other is in
3087 	 * the nvme abort handler (which also
3088 	 * prevents a crash). Both of these
3089 	 * exceptions are cases where the slot is
3090 	 * still accessible. The flush here is done
3091 	 * only when the pci slot is offline.
3092 	 */
3093 	if (test_bit(FC_UNLOADING, &vport->load_flag) &&
3094 	    pci_channel_offline(phba->pcidev))
3095 		lpfc_sli_flush_io_rings(vport->phba);
3096 
3097 	/* At this point, ALL ndlps should be gone
3098 	 * because of the previous NLP_EVT_DEVICE_RM.
3099 	 * Let's wait for this to happen, if needed.
3100 	 */
3101 	while (!list_empty(&vport->fc_nodes)) {
3102 		if (i++ > 3000) {
3103 			lpfc_printf_vlog(vport, KERN_ERR,
3104 					 LOG_TRACE_EVENT,
3105 				"0233 Nodelist not empty\n");
3106 			list_for_each_entry_safe(ndlp, next_ndlp,
3107 						&vport->fc_nodes, nlp_listp) {
3108 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3109 						 LOG_DISCOVERY,
3110 						 "0282 did:x%x ndlp:x%px "
3111 						 "refcnt:%d xflags x%x "
3112 						 "nflag x%lx\n",
3113 						 ndlp->nlp_DID, (void *)ndlp,
3114 						 kref_read(&ndlp->kref),
3115 						 ndlp->fc4_xpt_flags,
3116 						 ndlp->nlp_flag);
3117 			}
3118 			break;
3119 		}
3120 
3121 		/* Wait for any activity on ndlps to settle */
3122 		msleep(10);
3123 	}
3124 	lpfc_cleanup_vports_rrqs(vport, NULL);
3125 }
3126 
3127 /**
3128  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3129  * @vport: pointer to a virtual N_Port data structure.
3130  *
3131  * This routine stops all the timers associated with a @vport. This function
3132  * is invoked before disabling or deleting a @vport. Note that the physical
3133  * port is treated as @vport 0.
3134  **/
3135 void
3136 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3137 {
3138 	timer_delete_sync(&vport->els_tmofunc);
3139 	timer_delete_sync(&vport->delayed_disc_tmo);
3140 	lpfc_can_disctmo(vport);
3141 	return;
3142 }
3143 
3144 /**
3145  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3146  * @phba: pointer to lpfc hba data structure.
3147  *
3148  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
3149  * caller of this routine should already hold the host lock.
3150  **/
3151 void
3152 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3153 {
3154 	/* Clear pending FCF rediscovery wait flag */
3155 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3156 
3157 	/* Now, try to stop the timer */
3158 	timer_delete(&phba->fcf.redisc_wait);
3159 }
3160 
3161 /**
3162  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3163  * @phba: pointer to lpfc hba data structure.
3164  *
3165  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3166  * checks whether the FCF rediscovery wait timer is pending with the host
3167  * lock held before proceeding with disabling the timer and clearing the
3168  * wait timer pending flag.
3169  **/
3170 void
3171 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3172 {
3173 	spin_lock_irq(&phba->hbalock);
3174 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3175 		/* FCF rediscovery timer already fired or stopped */
3176 		spin_unlock_irq(&phba->hbalock);
3177 		return;
3178 	}
3179 	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3180 	/* Clear failover in progress flags */
3181 	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3182 	spin_unlock_irq(&phba->hbalock);
3183 }
3184 
3185 /**
3186  * lpfc_cmf_stop - Stop CMF processing
3187  * @phba: pointer to lpfc hba data structure.
3188  *
3189  * This is called when the link goes down or if CMF mode is turned OFF.
3190  * It is also called when going offline or unloading, just before the
3191  * congestion info buffer is unregistered.
3192  **/
3193 void
3194 lpfc_cmf_stop(struct lpfc_hba *phba)
3195 {
3196 	int cpu;
3197 	struct lpfc_cgn_stat *cgs;
3198 
3199 	/* We only do something if CMF is enabled */
3200 	if (!phba->sli4_hba.pc_sli4_params.cmf)
3201 		return;
3202 
3203 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3204 			"6221 Stop CMF / Cancel Timer\n");
3205 
3206 	/* Cancel the CMF timer */
3207 	hrtimer_cancel(&phba->cmf_stats_timer);
3208 	hrtimer_cancel(&phba->cmf_timer);
3209 
3210 	/* Zero CMF counters */
3211 	atomic_set(&phba->cmf_busy, 0);
3212 	for_each_present_cpu(cpu) {
3213 		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3214 		atomic64_set(&cgs->total_bytes, 0);
3215 		atomic64_set(&cgs->rcv_bytes, 0);
3216 		atomic_set(&cgs->rx_io_cnt, 0);
3217 		atomic64_set(&cgs->rx_latency, 0);
3218 	}
3219 	atomic_set(&phba->cmf_bw_wait, 0);
3220 
3221 	/* Resume any blocked IO - Queue unblock on workqueue */
3222 	queue_work(phba->wq, &phba->unblock_request_work);
3223 }
3224 
3225 static inline uint64_t
3226 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3227 {
3228 	uint64_t rate = lpfc_sli_port_speed_get(phba);
3229 
3230 	return (rate * 1024 * 1024) / 10;
3231 }
3232 
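/*
 * Editor's note: a worked example of the conversion above, assuming
 * lpfc_sli_port_speed_get() reports link speed in megabits per second
 * and the divide-by-10 approximates ten encoded bits per byte on the
 * wire.  For a hypothetical 32000 Mb/s link:
 *
 *	max_line_rate = 32000 * 1024 * 1024 / 10 ~= 3.36e9 bytes/sec
 *
 * lpfc_cmf_signal_init() below then scales this to one timer interval:
 *
 *	cmf_link_byte_count = max_line_rate * LPFC_CMF_INTERVAL / 1000
 *
 * i.e. bytes per second times the interval length in ms, over 1000.
 */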
3233 void
3234 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3235 {
3236 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3237 			"6223 Signal CMF init\n");
3238 
3239 	/* Use the new fc_linkspeed to recalculate */
3240 	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3241 	phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3242 	phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3243 					    phba->cmf_interval_rate, 1000);
3244 	phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3245 
3246 	/* This is a signal to firmware to sync up CMF BW with link speed */
3247 	lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3248 }
3249 
3250 /**
3251  * lpfc_cmf_start - Start CMF processing
3252  * @phba: pointer to lpfc hba data structure.
3253  *
3254  * This is called when the link comes up or if CMF mode is changed from
3255  * OFF to Monitor or Managed.
3256  **/
3257 void
3258 lpfc_cmf_start(struct lpfc_hba *phba)
3259 {
3260 	struct lpfc_cgn_stat *cgs;
3261 	int cpu;
3262 
3263 	/* We only do something if CMF is enabled */
3264 	if (!phba->sli4_hba.pc_sli4_params.cmf ||
3265 	    phba->cmf_active_mode == LPFC_CFG_OFF)
3266 		return;
3267 
3268 	/* Reinitialize congestion buffer info */
3269 	lpfc_init_congestion_buf(phba);
3270 
3271 	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3272 	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3273 	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3274 	atomic_set(&phba->cgn_sync_warn_cnt, 0);
3275 
3276 	atomic_set(&phba->cmf_busy, 0);
3277 	for_each_present_cpu(cpu) {
3278 		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3279 		atomic64_set(&cgs->total_bytes, 0);
3280 		atomic64_set(&cgs->rcv_bytes, 0);
3281 		atomic_set(&cgs->rx_io_cnt, 0);
3282 		atomic64_set(&cgs->rx_latency, 0);
3283 	}
3284 	phba->cmf_latency.tv_sec = 0;
3285 	phba->cmf_latency.tv_nsec = 0;
3286 
3287 	lpfc_cmf_signal_init(phba);
3288 
3289 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3290 			"6222 Start CMF / Timer\n");
3291 
3292 	phba->cmf_timer_cnt = 0;
3293 	hrtimer_start(&phba->cmf_timer,
3294 		      ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC),
3295 		      HRTIMER_MODE_REL);
3296 	hrtimer_start(&phba->cmf_stats_timer,
3297 		      ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC),
3298 		      HRTIMER_MODE_REL);
3299 	/* Setup for latency check in IO cmpl routines */
3300 	ktime_get_real_ts64(&phba->cmf_latency);
3301 
3302 	atomic_set(&phba->cmf_bw_wait, 0);
3303 	atomic_set(&phba->cmf_stop_io, 0);
3304 }
3305 
3306 /**
3307  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3308  * @phba: pointer to lpfc hba data structure.
3309  *
3310  * This routine stops all the timers associated with an HBA. This function is
3311  * invoked before either taking the HBA offline or unloading the driver.
3312  **/
3313 void
3314 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3315 {
3316 	if (phba->pport)
3317 		lpfc_stop_vport_timers(phba->pport);
3318 	cancel_delayed_work_sync(&phba->eq_delay_work);
3319 	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3320 	timer_delete_sync(&phba->sli.mbox_tmo);
3321 	timer_delete_sync(&phba->fabric_block_timer);
3322 	timer_delete_sync(&phba->eratt_poll);
3323 	timer_delete_sync(&phba->hb_tmofunc);
3324 	if (phba->sli_rev == LPFC_SLI_REV4) {
3325 		timer_delete_sync(&phba->rrq_tmr);
3326 		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
3327 	}
3328 	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
3329 	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
3330 
3331 	switch (phba->pci_dev_grp) {
3332 	case LPFC_PCI_DEV_LP:
3333 		/* Stop any LightPulse device specific driver timers */
3334 		timer_delete_sync(&phba->fcp_poll_timer);
3335 		break;
3336 	case LPFC_PCI_DEV_OC:
3337 		/* Stop any OneConnect device specific driver timers */
3338 		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3339 		break;
3340 	default:
3341 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3342 				"0297 Invalid device group (x%x)\n",
3343 				phba->pci_dev_grp);
3344 		break;
3345 	}
3346 	return;
3347 }
3348 
3349 /**
3350  * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
3351  * @phba: pointer to lpfc hba data structure.
3352  * @mbx_action: flag for mailbox no wait action.
3353  *
3354  * This routine marks an HBA's management interface as blocked. Once the
3355  * HBA's management interface is marked as blocked, all user space access
3356  * to the HBA, whether through the sysfs or the libdfc interface, is
3357  * blocked. The HBA blocks the management interface while the driver
3358  * prepares the HBA interface for going online or offline.
3359  **/
3360 static void
3361 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3362 {
3363 	unsigned long iflag;
3364 	uint8_t actcmd = MBX_HEARTBEAT;
3365 	unsigned long timeout;
3366 
3367 	spin_lock_irqsave(&phba->hbalock, iflag);
3368 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3369 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3370 	if (mbx_action == LPFC_MBX_NO_WAIT)
3371 		return;
3372 	timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
3373 	spin_lock_irqsave(&phba->hbalock, iflag);
3374 	if (phba->sli.mbox_active) {
3375 		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3376 		/* Determine how long we might wait for the active mailbox
3377 		 * command to be gracefully completed by firmware.
3378 		 */
3379 		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
3380 				phba->sli.mbox_active)) + jiffies;
3381 	}
3382 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3383 
3384 	/* Wait for the outstanding mailbox command to complete */
3385 	while (phba->sli.mbox_active) {
3386 		/* Check active mailbox complete status every 2ms */
3387 		msleep(2);
3388 		if (time_after(jiffies, timeout)) {
3389 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3390 					"2813 Mgmt IO is Blocked %x "
3391 					"- mbox cmd %x still active\n",
3392 					phba->sli.sli_flag, actcmd);
3393 			break;
3394 		}
3395 	}
3396 }
3397 
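/*
 * Editor's note: the wait loop above is the standard jiffies-based
 * poll-with-deadline idiom.  time_after() is used instead of a plain
 * '>' comparison because it stays correct when the jiffies counter
 * wraps around:
 */
#if 0	/* illustrative sketch only, not driver code */
	unsigned long deadline = jiffies + secs_to_jiffies(LPFC_MBOX_TMO);

	while (condition_pending()) {	/* hypothetical predicate */
		msleep(2);
		if (time_after(jiffies, deadline))
			break;		/* timed out; log and recover */
	}
#endif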
3398 /**
3399  * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes.
3400  * @phba: pointer to lpfc hba data structure.
3401  *
3402  * Allocate RPIs for all active remote nodes. This is needed whenever
3403  * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3404  * is to fix up the temporary RPI assignments.
3405  **/
3406 void
3407 lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba)
3408 {
3409 	struct lpfc_nodelist  *ndlp, *next_ndlp;
3410 	struct lpfc_vport **vports;
3411 	int i, rpi;
3412 
3413 	if (phba->sli_rev != LPFC_SLI_REV4)
3414 		return;
3415 
3416 	vports = lpfc_create_vport_work_array(phba);
3417 	if (!vports)
3418 		return;
3419 
3420 	for (i = 0; i <= phba->max_vports && vports[i]; i++) {
3421 		if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
3422 			continue;
3423 
3424 		list_for_each_entry_safe(ndlp, next_ndlp,
3425 					 &vports[i]->fc_nodes,
3426 					 nlp_listp) {
3427 			rpi = lpfc_sli4_alloc_rpi(phba);
3428 			if (rpi == LPFC_RPI_ALLOC_ERROR) {
3429 				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3430 						 LOG_NODE | LOG_DISCOVERY,
3431 						 "0099 RPI alloc error for "
3432 						 "ndlp x%px DID:x%06x "
3433 						 "flg:x%lx\n",
3434 						 ndlp, ndlp->nlp_DID,
3435 						 ndlp->nlp_flag);
3436 				continue;
3437 			}
3438 			ndlp->nlp_rpi = rpi;
3439 			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3440 					 LOG_NODE | LOG_DISCOVERY,
3441 					 "0009 Assign RPI x%x to ndlp x%px "
3442 					 "DID:x%06x flg:x%lx\n",
3443 					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3444 					 ndlp->nlp_flag);
3445 		}
3446 	}
3447 	lpfc_destroy_vport_work_array(phba, vports);
3448 }
3449 
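/*
 * Editor's note: lpfc_create_vport_work_array() returns a NULL-terminated
 * snapshot of the active vports (with a reference held on each), which is
 * why every traversal in this file has the shape
 *
 *	for (i = 0; i <= phba->max_vports && vports[i]; i++)
 *		...
 *
 * and must always be paired with lpfc_destroy_vport_work_array() to drop
 * those references.
 */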
3450 /**
3451  * lpfc_create_expedite_pool - create expedite pool
3452  * @phba: pointer to lpfc hba data structure.
3453  *
3454  * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3455  * to the expedite pool and marks each buffer as expedite.
3456  **/
3457 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3458 {
3459 	struct lpfc_sli4_hdw_queue *qp;
3460 	struct lpfc_io_buf *lpfc_ncmd;
3461 	struct lpfc_io_buf *lpfc_ncmd_next;
3462 	struct lpfc_epd_pool *epd_pool;
3463 	unsigned long iflag;
3464 
3465 	epd_pool = &phba->epd_pool;
3466 	qp = &phba->sli4_hba.hdwq[0];
3467 
3468 	spin_lock_init(&epd_pool->lock);
3469 	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3470 	spin_lock(&epd_pool->lock);
3471 	INIT_LIST_HEAD(&epd_pool->list);
3472 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3473 				 &qp->lpfc_io_buf_list_put, list) {
3474 		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3475 		lpfc_ncmd->expedite = true;
3476 		qp->put_io_bufs--;
3477 		epd_pool->count++;
3478 		if (epd_pool->count >= XRI_BATCH)
3479 			break;
3480 	}
3481 	spin_unlock(&epd_pool->lock);
3482 	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3483 }
3484 
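/*
 * Editor's note: mind the lock nesting above -- the HWQ put-list lock is
 * taken first (with IRQs disabled), then the pool lock is taken as a
 * plain spin_lock underneath it.  Every path that touches both lists
 * must follow the same order to stay deadlock free:
 *
 *	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
 *	spin_lock(&epd_pool->lock);
 *	... move buffers between the two lists ...
 *	spin_unlock(&epd_pool->lock);
 *	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
 */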
3485 /**
3486  * lpfc_destroy_expedite_pool - destroy expedite pool
3487  * @phba: pointer to lpfc hba data structure.
3488  *
3489  * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3490  * of HWQ 0 and clears each buffer's expedite mark.
3491  **/
3492 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3493 {
3494 	struct lpfc_sli4_hdw_queue *qp;
3495 	struct lpfc_io_buf *lpfc_ncmd;
3496 	struct lpfc_io_buf *lpfc_ncmd_next;
3497 	struct lpfc_epd_pool *epd_pool;
3498 	unsigned long iflag;
3499 
3500 	epd_pool = &phba->epd_pool;
3501 	qp = &phba->sli4_hba.hdwq[0];
3502 
3503 	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3504 	spin_lock(&epd_pool->lock);
3505 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3506 				 &epd_pool->list, list) {
3507 		list_move_tail(&lpfc_ncmd->list,
3508 			       &qp->lpfc_io_buf_list_put);
3509 		lpfc_ncmd->expedite = false;	/* clear the mark set at create */
3510 		qp->put_io_bufs++;
3511 		epd_pool->count--;
3512 	}
3513 	spin_unlock(&epd_pool->lock);
3514 	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3515 }
3516 
3517 /**
3518  * lpfc_create_multixri_pools - create multi-XRI pools
3519  * @phba: pointer to lpfc hba data structure.
3520  *
3521  * This routine initializes the public and private pools per HWQ. Then it
3522  * moves XRIs from lpfc_io_buf_list_put to the public pool. The high and
3523  * low watermarks are also initialized.
3524  **/
3525 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3526 {
3527 	u32 i, j;
3528 	u32 hwq_count;
3529 	u32 count_per_hwq;
3530 	struct lpfc_io_buf *lpfc_ncmd;
3531 	struct lpfc_io_buf *lpfc_ncmd_next;
3532 	unsigned long iflag;
3533 	struct lpfc_sli4_hdw_queue *qp;
3534 	struct lpfc_multixri_pool *multixri_pool;
3535 	struct lpfc_pbl_pool *pbl_pool;
3536 	struct lpfc_pvt_pool *pvt_pool;
3537 
3538 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3539 			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3540 			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3541 			phba->sli4_hba.io_xri_cnt);
3542 
3543 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3544 		lpfc_create_expedite_pool(phba);
3545 
3546 	hwq_count = phba->cfg_hdw_queue;
3547 	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3548 
3549 	for (i = 0; i < hwq_count; i++) {
3550 		multixri_pool = kzalloc_obj(*multixri_pool);
3551 
3552 		if (!multixri_pool) {
3553 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3554 					"1238 Failed to allocate memory for "
3555 					"multixri_pool\n");
3556 
3557 			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3558 				lpfc_destroy_expedite_pool(phba);
3559 
3560 			j = 0;
3561 			while (j < i) {
3562 				qp = &phba->sli4_hba.hdwq[j];
3563 				kfree(qp->p_multixri_pool);
3564 				j++;
3565 			}
3566 			phba->cfg_xri_rebalancing = 0;
3567 			return;
3568 		}
3569 
3570 		qp = &phba->sli4_hba.hdwq[i];
3571 		qp->p_multixri_pool = multixri_pool;
3572 
3573 		multixri_pool->xri_limit = count_per_hwq;
3574 		multixri_pool->rrb_next_hwqid = i;
3575 
3576 		/* Deal with public free xri pool */
3577 		pbl_pool = &multixri_pool->pbl_pool;
3578 		spin_lock_init(&pbl_pool->lock);
3579 		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3580 		spin_lock(&pbl_pool->lock);
3581 		INIT_LIST_HEAD(&pbl_pool->list);
3582 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3583 					 &qp->lpfc_io_buf_list_put, list) {
3584 			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3585 			qp->put_io_bufs--;
3586 			pbl_pool->count++;
3587 		}
3588 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3589 				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3590 				pbl_pool->count, i);
3591 		spin_unlock(&pbl_pool->lock);
3592 		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3593 
3594 		/* Deal with private free xri pool */
3595 		pvt_pool = &multixri_pool->pvt_pool;
3596 		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3597 		pvt_pool->low_watermark = XRI_BATCH;
3598 		spin_lock_init(&pvt_pool->lock);
3599 		spin_lock_irqsave(&pvt_pool->lock, iflag);
3600 		INIT_LIST_HEAD(&pvt_pool->list);
3601 		pvt_pool->count = 0;
3602 		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3603 	}
3604 }
3605 
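/*
 * Editor's note: a worked example of the pool sizing above, with
 * hypothetical numbers.  Given io_xri_cnt = 2048 and cfg_hdw_queue = 8:
 *
 *	count_per_hwq  = 2048 / 8 = 256		(xri_limit per HWQ)
 *	high_watermark = 256 / 2  = 128		(private pool ceiling)
 *	low_watermark  = XRI_BATCH		(private pool refill point)
 *
 * XRIs beyond the private pool's high watermark remain in the shared
 * pbl_pool, which is what allows XRI rebalancing between HWQs.
 */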
3606 /**
3607  * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3608  * @phba: pointer to lpfc hba data structure.
3609  *
3610  * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3611  **/
3612 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3613 {
3614 	u32 i;
3615 	u32 hwq_count;
3616 	struct lpfc_io_buf *lpfc_ncmd;
3617 	struct lpfc_io_buf *lpfc_ncmd_next;
3618 	unsigned long iflag;
3619 	struct lpfc_sli4_hdw_queue *qp;
3620 	struct lpfc_multixri_pool *multixri_pool;
3621 	struct lpfc_pbl_pool *pbl_pool;
3622 	struct lpfc_pvt_pool *pvt_pool;
3623 
3624 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3625 		lpfc_destroy_expedite_pool(phba);
3626 
3627 	if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
3628 		lpfc_sli_flush_io_rings(phba);
3629 
3630 	hwq_count = phba->cfg_hdw_queue;
3631 
3632 	for (i = 0; i < hwq_count; i++) {
3633 		qp = &phba->sli4_hba.hdwq[i];
3634 		multixri_pool = qp->p_multixri_pool;
3635 		if (!multixri_pool)
3636 			continue;
3637 
3638 		qp->p_multixri_pool = NULL;
3639 
3640 		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3641 
3642 		/* Deal with public free xri pool */
3643 		pbl_pool = &multixri_pool->pbl_pool;
3644 		spin_lock(&pbl_pool->lock);
3645 
3646 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3647 				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3648 				pbl_pool->count, i);
3649 
3650 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3651 					 &pbl_pool->list, list) {
3652 			list_move_tail(&lpfc_ncmd->list,
3653 				       &qp->lpfc_io_buf_list_put);
3654 			qp->put_io_bufs++;
3655 			pbl_pool->count--;
3656 		}
3657 
3658 		INIT_LIST_HEAD(&pbl_pool->list);
3659 		pbl_pool->count = 0;
3660 
3661 		spin_unlock(&pbl_pool->lock);
3662 
3663 		/* Deal with private free xri pool */
3664 		pvt_pool = &multixri_pool->pvt_pool;
3665 		spin_lock(&pvt_pool->lock);
3666 
3667 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3668 				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3669 				pvt_pool->count, i);
3670 
3671 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3672 					 &pvt_pool->list, list) {
3673 			list_move_tail(&lpfc_ncmd->list,
3674 				       &qp->lpfc_io_buf_list_put);
3675 			qp->put_io_bufs++;
3676 			pvt_pool->count--;
3677 		}
3678 
3679 		INIT_LIST_HEAD(&pvt_pool->list);
3680 		pvt_pool->count = 0;
3681 
3682 		spin_unlock(&pvt_pool->lock);
3683 		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3684 
3685 		kfree(multixri_pool);
3686 	}
3687 }
3688 
3689 /**
3690  * lpfc_online - Initialize and bring an HBA online
3691  * @phba: pointer to lpfc hba data structure.
3692  *
3693  * This routine initializes the HBA and brings it online. During this
3694  * process, the management interface is blocked to prevent user space access
3695  * to the HBA from interfering with the driver initialization.
3696  *
3697  * Return codes
3698  *   0 - successful
3699  *   1 - failed
3700  **/
3701 int
3702 lpfc_online(struct lpfc_hba *phba)
3703 {
3704 	struct lpfc_vport *vport;
3705 	struct lpfc_vport **vports;
3706 	int i, error = 0;
3707 	bool vpis_cleared = false;
3708 
3709 	if (!phba)
3710 		return 0;
3711 	vport = phba->pport;
3712 
3713 	if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
3714 		return 0;
3715 
3716 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3717 			"0458 Bring Adapter online\n");
3718 
3719 	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3720 
3721 	if (phba->sli_rev == LPFC_SLI_REV4) {
3722 		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3723 			lpfc_unblock_mgmt_io(phba);
3724 			return 1;
3725 		}
3726 		spin_lock_irq(&phba->hbalock);
3727 		if (!phba->sli4_hba.max_cfg_param.vpi_used)
3728 			vpis_cleared = true;
3729 		spin_unlock_irq(&phba->hbalock);
3730 
3731 		/* Reestablish the local initiator port.
3732 		 * The offline process destroyed the previous lport.
3733 		 */
3734 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3735 				!phba->nvmet_support) {
3736 			error = lpfc_nvme_create_localport(phba->pport);
3737 			if (error)
3738 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3739 					"6132 NVME restore reg failed "
3740 					"on nvmei error x%x\n", error);
3741 		}
3742 	} else {
3743 		lpfc_sli_queue_init(phba);
3744 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
3745 			lpfc_unblock_mgmt_io(phba);
3746 			return 1;
3747 		}
3748 	}
3749 
3750 	vports = lpfc_create_vport_work_array(phba);
3751 	if (vports != NULL) {
3752 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3753 			clear_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
3754 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3755 				set_bit(FC_VPORT_NEEDS_REG_VPI,
3756 					&vports[i]->fc_flag);
3757 			if (phba->sli_rev == LPFC_SLI_REV4) {
3758 				set_bit(FC_VPORT_NEEDS_INIT_VPI,
3759 					&vports[i]->fc_flag);
3760 				if ((vpis_cleared) &&
3761 				    (vports[i]->port_type !=
3762 					LPFC_PHYSICAL_PORT))
3763 					vports[i]->vpi = 0;
3764 			}
3765 		}
3766 	}
3767 	lpfc_destroy_vport_work_array(phba, vports);
3768 
3769 	if (phba->cfg_xri_rebalancing)
3770 		lpfc_create_multixri_pools(phba);
3771 
3772 	lpfc_cpuhp_add(phba);
3773 
3774 	lpfc_unblock_mgmt_io(phba);
3775 	return 0;
3776 }
3777 
3778 /**
3779  * lpfc_unblock_mgmt_io - Mark an HBA's management interface as unblocked
3780  * @phba: pointer to lpfc hba data structure.
3781  *
3782  * This routine marks an HBA's management interface as unblocked. Once the
3783  * HBA's management interface is marked as unblocked, all user space access
3784  * to the HBA, whether through the sysfs or the libdfc interface, is
3785  * allowed. The HBA blocks the management interface while the driver
3786  * prepares the HBA interface for going online or offline, and unblocks
3787  * it afterwards.
3788  **/
3789 void
3790 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3791 {
3792 	unsigned long iflag;
3793 
3794 	spin_lock_irqsave(&phba->hbalock, iflag);
3795 	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3796 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3797 }
3798 
3799 /**
3800  * lpfc_offline_prep - Prepare a HBA to be brought offline
3801  * lpfc_offline_prep - Prepare an HBA to be brought offline
3802  * @mbx_action: flag for mailbox shutdown action.
3803  *
3804  * This routine is invoked to prepare an HBA to be brought offline. It issues
3805  * an unreg_login to all the nodes on all vports and flushes the mailbox
3806  * queue to make the HBA ready to be brought offline.
3807  **/
3808 void
3809 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3810 {
3811 	struct lpfc_vport *vport = phba->pport;
3812 	struct lpfc_nodelist  *ndlp, *next_ndlp;
3813 	struct lpfc_vport **vports;
3814 	struct Scsi_Host *shost;
3815 	int i;
3816 	int offline;
3817 	bool hba_pci_err;
3818 
3819 	if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
3820 		return;
3821 
3822 	lpfc_block_mgmt_io(phba, mbx_action);
3823 
3824 	lpfc_linkdown(phba);
3825 
3826 	offline = pci_channel_offline(phba->pcidev);
3827 	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3828 
3829 	/* Issue an unreg_login to all nodes on all vports */
3830 	vports = lpfc_create_vport_work_array(phba);
3831 	if (vports != NULL) {
3832 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3833 			if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
3834 				continue;
3835 			shost = lpfc_shost_from_vport(vports[i]);
3836 			spin_lock_irq(shost->host_lock);
3837 			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3838 			spin_unlock_irq(shost->host_lock);
3839 			set_bit(FC_VPORT_NEEDS_REG_VPI, &vports[i]->fc_flag);
3840 			clear_bit(FC_VFI_REGISTERED, &vports[i]->fc_flag);
3841 
3842 			list_for_each_entry_safe(ndlp, next_ndlp,
3843 						 &vports[i]->fc_nodes,
3844 						 nlp_listp) {
3845 
3846 				clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
3847 				if (offline || hba_pci_err) {
3848 					clear_bit(NLP_UNREG_INP,
3849 						  &ndlp->nlp_flag);
3850 					clear_bit(NLP_RPI_REGISTERED,
3851 						  &ndlp->nlp_flag);
3852 				}
3853 
3854 				if (ndlp->nlp_type & NLP_FABRIC) {
3855 					lpfc_disc_state_machine(vports[i], ndlp,
3856 						NULL, NLP_EVT_DEVICE_RECOVERY);
3857 
3858 					/* Don't remove the node unless the node
3859 					 * has been unregistered with the
3860 					 * transport, and we're not in recovery
3861 					 * before dev_loss_tmo triggered.
3862 					 * Otherwise, let dev_loss take care of
3863 					 * the node.
3864 					 */
3865 					if (!test_bit(NLP_IN_RECOV_POST_DEV_LOSS,
3866 						      &ndlp->save_flags) &&
3867 					    !(ndlp->fc4_xpt_flags &
3868 					      (NVME_XPT_REGD | SCSI_XPT_REGD)))
3869 						lpfc_disc_state_machine
3870 							(vports[i], ndlp,
3871 							 NULL,
3872 							 NLP_EVT_DEVICE_RM);
3873 				}
3874 			}
3875 		}
3876 	}
3877 	lpfc_destroy_vport_work_array(phba, vports);
3878 
3879 	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3880 
3881 	if (phba->wq)
3882 		flush_workqueue(phba->wq);
3883 }
3884 
3885 /**
3886  * lpfc_offline - Bring an HBA offline
3887  * @phba: pointer to lpfc hba data structure.
3888  *
3889  * This routine actually brings an HBA offline. It stops all the timers
3890  * associated with the HBA, brings down the SLI layer, and eventually
3891  * marks the HBA as in offline state for the upper layer protocol.
3892  **/
3893 void
3894 lpfc_offline(struct lpfc_hba *phba)
3895 {
3896 	struct Scsi_Host  *shost;
3897 	struct lpfc_vport **vports;
3898 	int i;
3899 
3900 	if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
3901 		return;
3902 
3903 	/* stop port and all timers associated with this hba */
3904 	lpfc_stop_port(phba);
3905 
3906 	/* Tear down the local and target port registrations.  The
3907 	 * nvme transports need to cleanup.
3908 	 */
3909 	lpfc_nvmet_destroy_targetport(phba);
3910 	lpfc_nvme_destroy_localport(phba->pport);
3911 
3912 	vports = lpfc_create_vport_work_array(phba);
3913 	if (vports != NULL)
3914 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3915 			lpfc_stop_vport_timers(vports[i]);
3916 	lpfc_destroy_vport_work_array(phba, vports);
3917 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3918 			"0460 Bring Adapter offline\n");
3919 	/* Bring down the SLI Layer and cleanup. The HBA is offline
3920 	 * now. */
3921 	lpfc_sli_hba_down(phba);
3922 	spin_lock_irq(&phba->hbalock);
3923 	phba->work_ha = 0;
3924 	spin_unlock_irq(&phba->hbalock);
3925 	vports = lpfc_create_vport_work_array(phba);
3926 	if (vports != NULL)
3927 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3928 			shost = lpfc_shost_from_vport(vports[i]);
3929 			spin_lock_irq(shost->host_lock);
3930 			vports[i]->work_port_events = 0;
3931 			spin_unlock_irq(shost->host_lock);
3932 			set_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
3933 		}
3934 	lpfc_destroy_vport_work_array(phba, vports);
3935 	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3936 	 * in hba_unset
3937 	 */
3938 	if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
3939 		__lpfc_cpuhp_remove(phba);
3940 
3941 	if (phba->cfg_xri_rebalancing)
3942 		lpfc_destroy_multixri_pools(phba);
3943 }
3944 
3945 /**
3946  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3947  * @phba: pointer to lpfc hba data structure.
3948  *
3949  * This routine is to free all the SCSI buffers and IOCBs from the driver
3950  * list back to kernel. It is called from lpfc_pci_remove_one to free
3951  * list back to the kernel. It is called from lpfc_pci_remove_one to free
3952  **/
3953 static void
3954 lpfc_scsi_free(struct lpfc_hba *phba)
3955 {
3956 	struct lpfc_io_buf *sb, *sb_next;
3957 
3958 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3959 		return;
3960 
3961 	spin_lock_irq(&phba->hbalock);
3962 
3963 	/* Release all the lpfc_scsi_bufs maintained by this host. */
3964 
3965 	spin_lock(&phba->scsi_buf_list_put_lock);
3966 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3967 				 list) {
3968 		list_del(&sb->list);
3969 		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3970 			      sb->dma_handle);
3971 		kfree(sb);
3972 		phba->total_scsi_bufs--;
3973 	}
3974 	spin_unlock(&phba->scsi_buf_list_put_lock);
3975 
3976 	spin_lock(&phba->scsi_buf_list_get_lock);
3977 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3978 				 list) {
3979 		list_del(&sb->list);
3980 		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3981 			      sb->dma_handle);
3982 		kfree(sb);
3983 		phba->total_scsi_bufs--;
3984 	}
3985 	spin_unlock(&phba->scsi_buf_list_get_lock);
3986 	spin_unlock_irq(&phba->hbalock);
3987 }
3988 
3989 /**
3990  * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3991  * @phba: pointer to lpfc hba data structure.
3992  *
3993  * This routine is to free all the IO buffers and IOCBs from the driver
3994  * list back to the kernel. It is called from lpfc_pci_remove_one to free
3995  * the internal resources before the device is removed from the system.
3996  **/
3997 void
3998 lpfc_io_free(struct lpfc_hba *phba)
3999 {
4000 	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4001 	struct lpfc_sli4_hdw_queue *qp;
4002 	int idx;
4003 
4004 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4005 		qp = &phba->sli4_hba.hdwq[idx];
4006 		/* Release all the lpfc_nvme_bufs maintained by this host. */
4007 		spin_lock(&qp->io_buf_list_put_lock);
4008 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4009 					 &qp->lpfc_io_buf_list_put,
4010 					 list) {
4011 			list_del(&lpfc_ncmd->list);
4012 			qp->put_io_bufs--;
4013 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4014 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4015 			if (phba->cfg_xpsgl && !phba->nvmet_support)
4016 				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4017 			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4018 			kfree(lpfc_ncmd);
4019 			qp->total_io_bufs--;
4020 		}
4021 		spin_unlock(&qp->io_buf_list_put_lock);
4022 
4023 		spin_lock(&qp->io_buf_list_get_lock);
4024 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4025 					 &qp->lpfc_io_buf_list_get,
4026 					 list) {
4027 			list_del(&lpfc_ncmd->list);
4028 			qp->get_io_bufs--;
4029 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4030 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4031 			if (phba->cfg_xpsgl && !phba->nvmet_support)
4032 				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4033 			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4034 			kfree(lpfc_ncmd);
4035 			qp->total_io_bufs--;
4036 		}
4037 		spin_unlock(&qp->io_buf_list_get_lock);
4038 	}
4039 }
4040 
4041 /**
4042  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4043  * @phba: pointer to lpfc hba data structure.
4044  *
4045  * This routine first calculates the sizes of the current els and allocated
4046  * scsi sgl lists, and then goes through all sgls to updates the physical
4047  * scsi sgl lists, and then goes through all sgls to update the physical
4048  * current els and allocated scsi sgl lists are 0s.
4049  *
4050  * Return codes
4051  *   0 - successful (for now, it always returns 0)
4052  **/
4053 int
4054 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4055 {
4056 	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4057 	uint16_t i, lxri, xri_cnt, els_xri_cnt;
4058 	LIST_HEAD(els_sgl_list);
4059 	int rc;
4060 
4061 	/*
4062 	 * update on pci function's els xri-sgl list
4063 	 */
4064 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4065 
4066 	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4067 		/* els xri-sgl expanded */
4068 		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4069 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4070 				"3157 ELS xri-sgl count increased from "
4071 				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
4072 				els_xri_cnt);
4073 		/* allocate the additional els sgls */
4074 		for (i = 0; i < xri_cnt; i++) {
4075 			sglq_entry = kzalloc_obj(struct lpfc_sglq);
4076 			if (sglq_entry == NULL) {
4077 				lpfc_printf_log(phba, KERN_ERR,
4078 						LOG_TRACE_EVENT,
4079 						"2562 Failure to allocate an "
4080 						"ELS sgl entry:%d\n", i);
4081 				rc = -ENOMEM;
4082 				goto out_free_mem;
4083 			}
4084 			sglq_entry->buff_type = GEN_BUFF_TYPE;
4085 			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4086 							   &sglq_entry->phys);
4087 			if (sglq_entry->virt == NULL) {
4088 				kfree(sglq_entry);
4089 				lpfc_printf_log(phba, KERN_ERR,
4090 						LOG_TRACE_EVENT,
4091 						"2563 Failure to allocate an "
4092 						"ELS mbuf:%d\n", i);
4093 				rc = -ENOMEM;
4094 				goto out_free_mem;
4095 			}
4096 			sglq_entry->sgl = sglq_entry->virt;
4097 			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4098 			sglq_entry->state = SGL_FREED;
4099 			list_add_tail(&sglq_entry->list, &els_sgl_list);
4100 		}
4101 		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4102 		list_splice_init(&els_sgl_list,
4103 				 &phba->sli4_hba.lpfc_els_sgl_list);
4104 		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4105 	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4106 		/* els xri-sgl shrunk */
4107 		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4108 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4109 				"3158 ELS xri-sgl count decreased from "
4110 				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
4111 				els_xri_cnt);
4112 		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4113 		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4114 				 &els_sgl_list);
4115 		/* release extra els sgls from list */
4116 		for (i = 0; i < xri_cnt; i++) {
4117 			list_remove_head(&els_sgl_list,
4118 					 sglq_entry, struct lpfc_sglq, list);
4119 			if (sglq_entry) {
4120 				__lpfc_mbuf_free(phba, sglq_entry->virt,
4121 						 sglq_entry->phys);
4122 				kfree(sglq_entry);
4123 			}
4124 		}
4125 		list_splice_init(&els_sgl_list,
4126 				 &phba->sli4_hba.lpfc_els_sgl_list);
4127 		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4128 	} else
4129 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4130 				"3163 ELS xri-sgl count unchanged: %d\n",
4131 				els_xri_cnt);
4132 	phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4133 
4134 	/* update xris to els sgls on the list */
4135 	sglq_entry = NULL;
4136 	sglq_entry_next = NULL;
4137 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4138 				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4139 		lxri = lpfc_sli4_next_xritag(phba);
4140 		if (lxri == NO_XRI) {
4141 			lpfc_printf_log(phba, KERN_ERR,
4142 					LOG_TRACE_EVENT,
4143 					"2400 Failed to allocate xri for "
4144 					"ELS sgl\n");
4145 			rc = -ENOMEM;
4146 			goto out_free_mem;
4147 		}
4148 		sglq_entry->sli4_lxritag = lxri;
4149 		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4150 	}
4151 	return 0;
4152 
4153 out_free_mem:
4154 	lpfc_free_els_sgl_list(phba);
4155 	return rc;
4156 }
4157 
4158 /**
4159  * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4160  * @phba: pointer to lpfc hba data structure.
4161  *
4162  * This routine first calculates the sizes of the current els and allocated
4163  * scsi sgl lists, and then goes through all sgls to update the physical
4164  * XRIs assigned due to port function reset. During port initialization, the
4165  * current els and allocated scsi sgl lists are 0s.
4166  *
4167  * Return codes
4168  *   0 - successful (for now, it always returns 0)
4169  **/
4170 int
4171 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4172 {
4173 	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4174 	uint16_t i, lxri, xri_cnt, els_xri_cnt;
4175 	uint16_t nvmet_xri_cnt;
4176 	LIST_HEAD(nvmet_sgl_list);
4177 	int rc;
4178 
4179 	/*
4180 	 * update on pci function's nvmet xri-sgl list
4181 	 */
4182 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4183 
4184 	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4185 	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4186 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4187 		/* nvmet xri-sgl expanded */
4188 		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4189 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4190 				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
4191 				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4192 		/* allocate the additional nvmet sgls */
4193 		for (i = 0; i < xri_cnt; i++) {
4194 			sglq_entry = kzalloc_obj(struct lpfc_sglq);
4195 			if (sglq_entry == NULL) {
4196 				lpfc_printf_log(phba, KERN_ERR,
4197 						LOG_TRACE_EVENT,
4198 						"6303 Failure to allocate an "
4199 						"NVMET sgl entry:%d\n", i);
4200 				rc = -ENOMEM;
4201 				goto out_free_mem;
4202 			}
4203 			sglq_entry->buff_type = NVMET_BUFF_TYPE;
4204 			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4205 							   &sglq_entry->phys);
4206 			if (sglq_entry->virt == NULL) {
4207 				kfree(sglq_entry);
4208 				lpfc_printf_log(phba, KERN_ERR,
4209 						LOG_TRACE_EVENT,
4210 						"6304 Failure to allocate an "
4211 						"NVMET buf:%d\n", i);
4212 				rc = -ENOMEM;
4213 				goto out_free_mem;
4214 			}
4215 			sglq_entry->sgl = sglq_entry->virt;
4216 			memset(sglq_entry->sgl, 0,
4217 			       phba->cfg_sg_dma_buf_size);
4218 			sglq_entry->state = SGL_FREED;
4219 			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4220 		}
4221 		spin_lock_irq(&phba->hbalock);
4222 		spin_lock(&phba->sli4_hba.sgl_list_lock);
4223 		list_splice_init(&nvmet_sgl_list,
4224 				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4225 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
4226 		spin_unlock_irq(&phba->hbalock);
4227 	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4228 		/* nvmet xri-sgl shrunk */
4229 		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4230 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4231 				"6305 NVMET xri-sgl count decreased from "
4232 				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4233 				nvmet_xri_cnt);
4234 		spin_lock_irq(&phba->hbalock);
4235 		spin_lock(&phba->sli4_hba.sgl_list_lock);
4236 		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4237 				 &nvmet_sgl_list);
4238 		/* release extra nvmet sgls from list */
4239 		for (i = 0; i < xri_cnt; i++) {
4240 			list_remove_head(&nvmet_sgl_list,
4241 					 sglq_entry, struct lpfc_sglq, list);
4242 			if (sglq_entry) {
4243 				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4244 						    sglq_entry->phys);
4245 				kfree(sglq_entry);
4246 			}
4247 		}
4248 		list_splice_init(&nvmet_sgl_list,
4249 				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4250 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
4251 		spin_unlock_irq(&phba->hbalock);
4252 	} else
4253 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4254 				"6306 NVMET xri-sgl count unchanged: %d\n",
4255 				nvmet_xri_cnt);
4256 	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4257 
4258 	/* update xris to nvmet sgls on the list */
4259 	sglq_entry = NULL;
4260 	sglq_entry_next = NULL;
4261 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4262 				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4263 		lxri = lpfc_sli4_next_xritag(phba);
4264 		if (lxri == NO_XRI) {
4265 			lpfc_printf_log(phba, KERN_ERR,
4266 					LOG_TRACE_EVENT,
4267 					"6307 Failed to allocate xri for "
4268 					"NVMET sgl\n");
4269 			rc = -ENOMEM;
4270 			goto out_free_mem;
4271 		}
4272 		sglq_entry->sli4_lxritag = lxri;
4273 		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4274 	}
4275 	return 0;
4276 
4277 out_free_mem:
4278 	lpfc_free_nvmet_sgl_list(phba);
4279 	return rc;
4280 }
4281 
4282 int
4283 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4284 {
4285 	LIST_HEAD(blist);
4286 	struct lpfc_sli4_hdw_queue *qp;
4287 	struct lpfc_io_buf *lpfc_cmd;
4288 	struct lpfc_io_buf *iobufp, *prev_iobufp;
4289 	int idx, cnt, xri, inserted;
4290 
4291 	cnt = 0;
4292 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4293 		qp = &phba->sli4_hba.hdwq[idx];
4294 		spin_lock_irq(&qp->io_buf_list_get_lock);
4295 		spin_lock(&qp->io_buf_list_put_lock);
4296 
4297 		/* Take everything off the get and put lists */
4298 		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4299 		list_splice(&qp->lpfc_io_buf_list_put, &blist);
4300 		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4301 		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4302 		cnt += qp->get_io_bufs + qp->put_io_bufs;
4303 		qp->get_io_bufs = 0;
4304 		qp->put_io_bufs = 0;
4305 		qp->total_io_bufs = 0;
4306 		spin_unlock(&qp->io_buf_list_put_lock);
4307 		spin_unlock_irq(&qp->io_buf_list_get_lock);
4308 	}
4309 
4310 	/*
4311 	 * Take IO buffers off blist and put on cbuf sorted by XRI.
4312 	 * This is because POST_SGL takes a sequential range of XRIs
4313 	 * to post to the firmware.
4314 	 */
4315 	for (idx = 0; idx < cnt; idx++) {
4316 		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4317 		if (!lpfc_cmd)
4318 			return cnt;
4319 		if (idx == 0) {
4320 			list_add_tail(&lpfc_cmd->list, cbuf);
4321 			continue;
4322 		}
4323 		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4324 		inserted = 0;
4325 		prev_iobufp = NULL;
4326 		list_for_each_entry(iobufp, cbuf, list) {
4327 			if (xri < iobufp->cur_iocbq.sli4_xritag) {
4328 				if (prev_iobufp)
4329 					list_add(&lpfc_cmd->list,
4330 						 &prev_iobufp->list);
4331 				else
4332 					list_add(&lpfc_cmd->list, cbuf);
4333 				inserted = 1;
4334 				break;
4335 			}
4336 			prev_iobufp = iobufp;
4337 		}
4338 		if (!inserted)
4339 			list_add_tail(&lpfc_cmd->list, cbuf);
4340 	}
4341 	return cnt;
4342 }
4343 
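/*
 * Editor's note: the second loop above is an insertion sort into a
 * linked list, keyed on sli4_xritag, so that the XRIs handed to the SGL
 * block post arrive in ascending order.  The generic shape of the
 * pattern (hypothetical element type), using the fact that
 * list_add_tail() on a member inserts the new entry just before it:
 */
#if 0	/* illustrative sketch only, not driver code */
struct item {
	struct list_head list;
	u16 key;
};

static void sorted_insert(struct list_head *head, struct item *new)
{
	struct item *cur;

	list_for_each_entry(cur, head, list) {
		if (new->key < cur->key) {
			list_add_tail(&new->list, &cur->list);
			return;
		}
	}
	list_add_tail(&new->list, head);	/* largest key so far */
}
#endif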
4344 int
4345 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4346 {
4347 	struct lpfc_sli4_hdw_queue *qp;
4348 	struct lpfc_io_buf *lpfc_cmd;
4349 	int idx, cnt;
4350 	unsigned long iflags;
4351 
4352 	qp = phba->sli4_hba.hdwq;
4353 	cnt = 0;
4354 	while (!list_empty(cbuf)) {
4355 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4356 			list_remove_head(cbuf, lpfc_cmd,
4357 					 struct lpfc_io_buf, list);
4358 			if (!lpfc_cmd)
4359 				return cnt;
4360 			cnt++;
4361 			qp = &phba->sli4_hba.hdwq[idx];
4362 			lpfc_cmd->hdwq_no = idx;
4363 			lpfc_cmd->hdwq = qp;
4364 			lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4365 			spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
4366 			list_add_tail(&lpfc_cmd->list,
4367 				      &qp->lpfc_io_buf_list_put);
4368 			qp->put_io_bufs++;
4369 			qp->total_io_bufs++;
4370 			spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
4371 					       iflags);
4372 		}
4373 	}
4374 	return cnt;
4375 }
4376 
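/*
 * Editor's note: the replenish loop above deals buffers out round-robin,
 * one per hardware queue per pass.  Worked example with hypothetical
 * numbers: replenishing 100 buffers across cfg_hdw_queue = 8 gives 12
 * full passes (96 buffers) plus 4 leftovers, so HWQs 0-3 end up with 13
 * buffers each and HWQs 4-7 with 12.
 */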
4377 /**
4378  * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4379  * @phba: pointer to lpfc hba data structure.
4380  *
4381  * This routine first calculates the sizes of the current els and allocated
4382  * scsi sgl lists, and then goes through all sgls to update the physical
4383  * XRIs assigned due to port function reset. During port initialization, the
4384  * current els and allocated scsi sgl lists are 0s.
4385  *
4386  * Return codes
4387  *   0 - successful (for now, it always returns 0)
4388  **/
4389 int
4390 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4391 {
4392 	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4393 	uint16_t i, lxri, els_xri_cnt;
4394 	uint16_t io_xri_cnt, io_xri_max;
4395 	LIST_HEAD(io_sgl_list);
4396 	int rc, cnt;
4397 
4398 	/*
4399 	 * update on pci function's allocated nvme xri-sgl list
4400 	 */
4401 
4402 	/* maximum number of xris available for nvme buffers */
4403 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4404 	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4405 	phba->sli4_hba.io_xri_max = io_xri_max;
4406 
4407 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4408 			"6074 Current allocated XRI sgl count:%d, "
4409 			"maximum XRI count:%d els_xri_cnt:%d\n",
4410 			phba->sli4_hba.io_xri_cnt,
4411 			phba->sli4_hba.io_xri_max,
4412 			els_xri_cnt);
4413 
4414 	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4415 
4416 	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4417 		/* max nvme xri shrunk below the allocated nvme buffers */
4418 		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4419 					phba->sli4_hba.io_xri_max;
4420 		/* release the extra allocated nvme buffers */
4421 		for (i = 0; i < io_xri_cnt; i++) {
4422 			list_remove_head(&io_sgl_list, lpfc_ncmd,
4423 					 struct lpfc_io_buf, list);
4424 			if (lpfc_ncmd) {
4425 				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4426 					      lpfc_ncmd->data,
4427 					      lpfc_ncmd->dma_handle);
4428 				kfree(lpfc_ncmd);
4429 			}
4430 		}
4431 		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4432 	}
4433 
4434 	/* update xris associated to remaining allocated nvme buffers */
4435 	lpfc_ncmd = NULL;
4436 	lpfc_ncmd_next = NULL;
4437 	phba->sli4_hba.io_xri_cnt = cnt;
4438 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4439 				 &io_sgl_list, list) {
4440 		lxri = lpfc_sli4_next_xritag(phba);
4441 		if (lxri == NO_XRI) {
4442 			lpfc_printf_log(phba, KERN_ERR,
4443 					LOG_TRACE_EVENT,
4444 					"6075 Failed to allocate xri for "
4445 					"nvme buffer\n");
4446 			rc = -ENOMEM;
4447 			goto out_free_mem;
4448 		}
4449 		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4450 		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4451 	}
4452 	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4453 	return 0;
4454 
4455 out_free_mem:
4456 	lpfc_io_free(phba);
4457 	return rc;
4458 }
4459 
4460 /**
4461  * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4462  * @phba: Pointer to lpfc hba data structure.
4463  * @num_to_alloc: The requested number of buffers to allocate.
4464  *
4465  * This routine allocates nvme buffers for a device with the SLI-4 interface
4466  * spec. The nvme buffer contains all the information necessary to initiate
4467  * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4468  * them on a list, it posts them to the port by using SGL block post.
4469  *
4470  * Return codes:
4471  *   int - number of IO buffers that were allocated and posted.
4472  *   0 = failure, less than num_to_alloc is a partial failure.
4473  **/
4474 int
4475 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4476 {
4477 	struct lpfc_io_buf *lpfc_ncmd;
4478 	struct lpfc_iocbq *pwqeq;
4479 	uint16_t iotag, lxri = 0;
4480 	int bcnt, num_posted;
4481 	LIST_HEAD(prep_nblist);
4482 	LIST_HEAD(post_nblist);
4483 	LIST_HEAD(nvme_nblist);
4484 
4485 	phba->sli4_hba.io_xri_cnt = 0;
4486 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4487 		lpfc_ncmd = kzalloc_obj(*lpfc_ncmd);
4488 		if (!lpfc_ncmd)
4489 			break;
4490 		/*
4491 		 * Get memory from the pci pool to map the virt space to
4492 		 * pci bus space for an I/O. The DMA buffer includes the
4493 		 * number of SGE's necessary to support the sg_tablesize.
4494 		 */
4495 		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4496 						  GFP_KERNEL,
4497 						  &lpfc_ncmd->dma_handle);
4498 		if (!lpfc_ncmd->data) {
4499 			kfree(lpfc_ncmd);
4500 			break;
4501 		}
4502 
4503 		if (phba->cfg_xpsgl && !phba->nvmet_support) {
4504 			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4505 		} else {
4506 			/*
4507 			 * 4K Page alignment is CRITICAL to BlockGuard, double
4508 			 * check to be sure.
4509 			 */
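			/*
			 * Editor's note: this is the usual power-of-two
			 * alignment test -- with SLI4_PAGE_SIZE == 4096,
			 * (addr & 4095) is non-zero iff addr is not 4K
			 * aligned.
			 */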
4510 			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4511 			    (((unsigned long)(lpfc_ncmd->data) &
4512 			    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4513 				lpfc_printf_log(phba, KERN_ERR,
4514 						LOG_TRACE_EVENT,
4515 						"3369 Memory alignment err: "
4516 						"addr=%lx\n",
4517 						(unsigned long)lpfc_ncmd->data);
4518 				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4519 					      lpfc_ncmd->data,
4520 					      lpfc_ncmd->dma_handle);
4521 				kfree(lpfc_ncmd);
4522 				break;
4523 			}
4524 		}
4525 
4526 		INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4527 
4528 		lxri = lpfc_sli4_next_xritag(phba);
4529 		if (lxri == NO_XRI) {
4530 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4531 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4532 			kfree(lpfc_ncmd);
4533 			break;
4534 		}
4535 		pwqeq = &lpfc_ncmd->cur_iocbq;
4536 
4537 		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4538 		iotag = lpfc_sli_next_iotag(phba, pwqeq);
4539 		if (iotag == 0) {
4540 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4541 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4542 			kfree(lpfc_ncmd);
4543 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4544 					"6121 Failed to allocate IOTAG for"
4545 					" XRI:0x%x\n", lxri);
4546 			lpfc_sli4_free_xri(phba, lxri);
4547 			break;
4548 		}
4549 		pwqeq->sli4_lxritag = lxri;
4550 		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4551 
4552 		/* Initialize local short-hand pointers. */
4553 		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4554 		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4555 		lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4556 		spin_lock_init(&lpfc_ncmd->buf_lock);
4557 
4558 		/* add the nvme buffer to a post list */
4559 		list_add_tail(&lpfc_ncmd->list, &post_nblist);
4560 		phba->sli4_hba.io_xri_cnt++;
4561 	}
4562 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4563 			"6114 Allocate %d out of %d requested new NVME "
4564 			"buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4565 			sizeof(*lpfc_ncmd));
4566 
4567 
4568 	/* post the list of nvme buffer sgls to port if available */
4569 	if (!list_empty(&post_nblist))
4570 		num_posted = lpfc_sli4_post_io_sgl_list(
4571 				phba, &post_nblist, bcnt);
4572 	else
4573 		num_posted = 0;
4574 
4575 	return num_posted;
4576 }
4577 
4578 static uint64_t
4579 lpfc_get_wwpn(struct lpfc_hba *phba)
4580 {
4581 	uint64_t wwn;
4582 	int rc;
4583 	LPFC_MBOXQ_t *mboxq;
4584 	MAILBOX_t *mb;
4585 
4586 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4587 						GFP_KERNEL);
4588 	if (!mboxq)
4589 		return (uint64_t)-1;
4590 
4591 	/* First get WWN of HBA instance */
4592 	lpfc_read_nv(phba, mboxq);
4593 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4594 	if (rc != MBX_SUCCESS) {
4595 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4596 				"6019 Mailbox failed, mbxCmd x%x "
4597 				"READ_NV, mbxStatus x%x\n",
4598 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4599 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4600 		mempool_free(mboxq, phba->mbox_mem_pool);
4601 		return (uint64_t) -1;
4602 	}
4603 	mb = &mboxq->u.mb;
4604 	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4605 	/* wwn is WWPN of HBA instance */
4606 	mempool_free(mboxq, phba->mbox_mem_pool);
4607 	if (phba->sli_rev == LPFC_SLI_REV4)
4608 		return be64_to_cpu(wwn);
4609 	else
4610 		return rol64(wwn, 32);
4611 }
4612 
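/*
 * Editor's note: the two return paths above differ only in byte
 * handling.  On SLI4 the port name is read big-endian, hence
 * be64_to_cpu(); on SLI3 the two 32-bit words of the name arrive
 * swapped relative to each other, so rol64(wwn, 32) exchanges the
 * halves:
 *
 *	rol64(0x1111111122222222ULL, 32) == 0x2222222211111111ULL
 */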
4613 static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
4614 {
4615 	if (phba->sli_rev == LPFC_SLI_REV4)
4616 		if (phba->cfg_xpsgl && !phba->nvmet_support)
4617 			return LPFC_MAX_SG_TABLESIZE;
4618 		else
4619 			return phba->cfg_scsi_seg_cnt;
4620 	else
4621 		return phba->cfg_sg_seg_cnt;
4622 }
4623 
4624 /**
4625  * lpfc_vmid_res_alloc - Allocates resources for VMID
4626  * @phba: pointer to lpfc hba data structure.
4627  * @vport: pointer to vport data structure
4628  *
4629  * This routine allocates the resources needed for the VMID.
4630  *
4631  * Return codes
4632  *	0 on Success
4633  *	Non-0 on Failure
4634  */
4635 static int
4636 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4637 {
4638 	/* VMID feature is supported only on SLI4 */
4639 	if (phba->sli_rev == LPFC_SLI_REV3) {
4640 		phba->cfg_vmid_app_header = 0;
4641 		phba->cfg_vmid_priority_tagging = 0;
4642 	}
4643 
4644 	if (lpfc_is_vmid_enabled(phba)) {
4645 		vport->vmid =
4646 		    kzalloc_objs(struct lpfc_vmid, phba->cfg_max_vmid);
4647 		if (!vport->vmid)
4648 			return -ENOMEM;
4649 
4650 		rwlock_init(&vport->vmid_lock);
4651 
4652 		/* Set the VMID parameters for the vport */
4653 		vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4654 		vport->vmid_inactivity_timeout =
4655 		    phba->cfg_vmid_inactivity_timeout;
4656 		vport->max_vmid = phba->cfg_max_vmid;
4657 		vport->cur_vmid_cnt = 0;
4658 
4659 		vport->vmid_priority_range = bitmap_zalloc
4660 			(LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4661 
4662 		if (!vport->vmid_priority_range) {
4663 			kfree(vport->vmid);
4664 			return -ENOMEM;
4665 		}
4666 
4667 		hash_init(vport->hash_table);
4668 	}
4669 	return 0;
4670 }
4671 
4672 /**
4673  * lpfc_create_port - Create an FC port
4674  * @phba: pointer to lpfc hba data structure.
4675  * @instance: a unique integer ID to this FC port.
4676  * @dev: pointer to the device data structure.
4677  *
4678  * This routine creates an FC port for the upper layer protocol. The FC port
4679  * can be created on top of either a physical port or a virtual port provided
4680  * by the HBA. This routine also allocates a SCSI host data structure (shost)
4681  * and associates it with the FC port before adding the shost to the SCSI
4682  * layer.
4683  *
4684  * Return codes
4685  *   @vport - pointer to the virtual N_Port data structure.
4686  *   NULL - port create failed.
4687  **/
4688 struct lpfc_vport *
4689 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4690 {
4691 	struct lpfc_vport *vport;
4692 	struct Scsi_Host  *shost = NULL;
4693 	struct scsi_host_template *template;
4694 	int error = 0;
4695 	int i;
4696 	uint64_t wwn;
4697 	bool use_no_reset_hba = false;
4698 	int rc;
4699 	u8 if_type;
4700 
4701 	if (lpfc_no_hba_reset_cnt) {
4702 		if (phba->sli_rev < LPFC_SLI_REV4 &&
4703 		    dev == &phba->pcidev->dev) {
4704 			/* Reset the port first */
4705 			lpfc_sli_brdrestart(phba);
4706 			rc = lpfc_sli_chipset_init(phba);
4707 			if (rc)
4708 				return NULL;
4709 		}
4710 		wwn = lpfc_get_wwpn(phba);
4711 	}
4712 
4713 	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4714 		if (wwn == lpfc_no_hba_reset[i]) {
4715 			lpfc_printf_log(phba, KERN_ERR,
4716 					LOG_TRACE_EVENT,
4717 					"6020 Setting use_no_reset port=%llx\n",
4718 					wwn);
4719 			use_no_reset_hba = true;
4720 			break;
4721 		}
4722 	}
4723 
4724 	/* Seed template for SCSI host registration */
4725 	if (dev == &phba->pcidev->dev) {
4726 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4727 			/* Seed physical port template */
4728 			template = &lpfc_template;
4729 
4730 			if (use_no_reset_hba)
4731 				/* template is for a no reset SCSI Host */
4732 				template->eh_host_reset_handler = NULL;
4733 
4734 			/* Seed updated value of sg_tablesize */
4735 			template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4736 		} else {
4737 			/* NVMET is for physical port only */
4738 			template = &lpfc_template_nvme;
4739 		}
4740 	} else {
4741 		/* Seed vport template */
4742 		template = &lpfc_vport_template;
4743 
4744 		/* Seed updated value of sg_tablesize */
4745 		template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4746 	}
4747 
4748 	shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4749 	if (!shost)
4750 		goto out;
4751 
4752 	vport = (struct lpfc_vport *) shost->hostdata;
4753 	vport->phba = phba;
4754 	set_bit(FC_LOADING, &vport->load_flag);
4755 	set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
4756 	vport->fc_rscn_flush = 0;
4757 	atomic_set(&vport->fc_plogi_cnt, 0);
4758 	atomic_set(&vport->fc_adisc_cnt, 0);
4759 	atomic_set(&vport->fc_reglogin_cnt, 0);
4760 	atomic_set(&vport->fc_prli_cnt, 0);
4761 	atomic_set(&vport->fc_unmap_cnt, 0);
4762 	atomic_set(&vport->fc_map_cnt, 0);
4763 	atomic_set(&vport->fc_npr_cnt, 0);
4764 	atomic_set(&vport->fc_unused_cnt, 0);
4765 	lpfc_get_vport_cfgparam(vport);
4766 
4767 	/* Adjust value in vport */
4768 	vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4769 
4770 	shost->unique_id = instance;
4771 	shost->max_id = LPFC_MAX_TARGET;
4772 	shost->max_lun = vport->cfg_max_luns;
4773 	shost->this_id = -1;
4774 
4775 	/* Set max_cmd_len applicable to ASIC support */
4776 	if (phba->sli_rev == LPFC_SLI_REV4) {
4777 		if_type = bf_get(lpfc_sli_intf_if_type,
4778 				 &phba->sli4_hba.sli_intf);
4779 		switch (if_type) {
4780 		case LPFC_SLI_INTF_IF_TYPE_2:
4781 			fallthrough;
4782 		case LPFC_SLI_INTF_IF_TYPE_6:
4783 			shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
4784 			break;
4785 		default:
4786 			shost->max_cmd_len = LPFC_FCP_CDB_LEN;
4787 			break;
4788 		}
4789 	} else {
4790 		shost->max_cmd_len = LPFC_FCP_CDB_LEN;
4791 	}
4792 
4793 	if (phba->sli_rev == LPFC_SLI_REV4) {
4794 		if (!phba->cfg_fcp_mq_threshold ||
4795 		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4796 			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4797 
4798 		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4799 					    phba->cfg_fcp_mq_threshold);
4800 
4801 		shost->dma_boundary =
4802 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4803 	} else
4804 		/* SLI-3 has a limited number of hardware queues (3),
4805 		 * thus there is only one for FCP processing.
4806 		 */
4807 		shost->nr_hw_queues = 1;
4808 
4809 	/*
4810 	 * Set initial can_queue value since 0 is no longer supported and
4811 	 * scsi_add_host will fail. This will be adjusted later based on the
4812 	 * max xri value determined in hba setup.
4813 	 */
4814 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
4815 	if (dev != &phba->pcidev->dev) {
4816 		shost->transportt = lpfc_vport_transport_template;
4817 		vport->port_type = LPFC_NPIV_PORT;
4818 	} else {
4819 		shost->transportt = lpfc_transport_template;
4820 		vport->port_type = LPFC_PHYSICAL_PORT;
4821 	}
4822 
4823 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4824 			"9081 CreatePort TMPLATE type %x TBLsize %d "
4825 			"SEGcnt %d/%d\n",
4826 			vport->port_type, shost->sg_tablesize,
4827 			phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4828 
4829 	/* Allocate the resources for VMID */
4830 	rc = lpfc_vmid_res_alloc(phba, vport);
4831 
4832 	if (rc)
4833 		goto out_put_shost;
4834 
4835 	/* Initialize all internally managed lists. */
4836 	INIT_LIST_HEAD(&vport->fc_nodes);
4837 	spin_lock_init(&vport->fc_nodes_list_lock);
4838 	INIT_LIST_HEAD(&vport->rcv_buffer_list);
4839 	spin_lock_init(&vport->work_port_lock);
4840 
4841 	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4842 
4843 	timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4844 
4845 	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4846 
4847 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4848 		lpfc_setup_bg(phba, shost);
4849 
4850 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4851 	if (error)
4852 		goto out_free_vmid;
4853 
4854 	spin_lock_irq(&phba->port_list_lock);
4855 	list_add_tail(&vport->listentry, &phba->port_list);
4856 	spin_unlock_irq(&phba->port_list_lock);
4857 	return vport;
4858 
4859 out_free_vmid:
4860 	kfree(vport->vmid);
4861 	bitmap_free(vport->vmid_priority_range);
4862 out_put_shost:
4863 	scsi_host_put(shost);
4864 out:
4865 	return NULL;
4866 }
4867 
4868 /**
4869  * destroy_port -  destroy an FC port
4870  * @vport: pointer to an lpfc virtual N_Port data structure.
4871  *
4872  * This routine destroys an FC port from the upper layer protocol. All the
4873  * resources associated with the port are released.
4874  **/
4875 void
4876 destroy_port(struct lpfc_vport *vport)
4877 {
4878 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4879 	struct lpfc_hba  *phba = vport->phba;
4880 
4881 	lpfc_debugfs_terminate(vport);
4882 	fc_remove_host(shost);
4883 	scsi_remove_host(shost);
4884 
4885 	spin_lock_irq(&phba->port_list_lock);
4886 	list_del_init(&vport->listentry);
4887 	spin_unlock_irq(&phba->port_list_lock);
4888 
4889 	lpfc_cleanup(vport);
4890 	return;
4891 }
4892 
4893 /**
4894  * lpfc_get_instance - Get a unique integer ID
4895  *
4896  * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4897  * uses the kernel idr facility to perform the task.
4898  *
4899  * Return codes:
4900  *   instance - a unique integer ID allocated as the new instance.
4901  *   -1 - lpfc get instance failed.
4902  **/
4903 int
4904 lpfc_get_instance(void)
4905 {
4906 	int ret;
4907 
4908 	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4909 	return ret < 0 ? -1 : ret;
4910 }
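/* Usage sketch (assumed caller, mirroring the shost->unique_id assignment
 * in the port-creation path above):
 *
 *	instance = lpfc_get_instance();
 *	if (instance == -1)
 *		return NULL;
 *	shost->unique_id = instance;
 *
 * The ID is typically released back to lpfc_hba_index with idr_remove()
 * when the host is torn down.
 */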
4911 
4912 /**
4913  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4914  * @shost: pointer to SCSI host data structure.
4915  * @time: elapsed time of the scan in jiffies.
4916  *
4917  * This routine is called by the SCSI layer with a SCSI host to determine
4918  * whether the host scan has finished.
4919  *
4920  * Note: there is no scan_start function as adapter initialization will have
4921  * asynchronously kicked off the link initialization.
4922  *
4923  * Return codes
4924  *   0 - SCSI host scan is not over yet.
4925  *   1 - SCSI host scan is over.
4926  **/
4927 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4928 {
4929 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4930 	struct lpfc_hba   *phba = vport->phba;
4931 	int stat = 0;
4932 
4933 	spin_lock_irq(shost->host_lock);
4934 
4935 	if (test_bit(FC_UNLOADING, &vport->load_flag)) {
4936 		stat = 1;
4937 		goto finished;
4938 	}
4939 	if (time >= secs_to_jiffies(30)) {
4940 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4941 				"0461 Scanning longer than 30 "
4942 				"seconds.  Continuing initialization\n");
4943 		stat = 1;
4944 		goto finished;
4945 	}
4946 	if (time >= secs_to_jiffies(15) &&
4947 	    phba->link_state <= LPFC_LINK_DOWN) {
4948 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4949 				"0465 Link down longer than 15 "
4950 				"seconds.  Continuing initialization\n");
4951 		stat = 1;
4952 		goto finished;
4953 	}
4954 
4955 	if (vport->port_state != LPFC_VPORT_READY)
4956 		goto finished;
4957 	if (vport->num_disc_nodes || vport->fc_prli_sent)
4958 		goto finished;
4959 	if (!atomic_read(&vport->fc_map_cnt) &&
4960 	    time < secs_to_jiffies(2))
4961 		goto finished;
4962 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4963 		goto finished;
4964 
4965 	stat = 1;
4966 
4967 finished:
4968 	spin_unlock_irq(shost->host_lock);
4969 	return stat;
4970 }
4971 
4972 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4973 {
4974 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4975 	struct lpfc_hba   *phba = vport->phba;
4976 
4977 	fc_host_supported_speeds(shost) = 0;
4978 	/*
4979 	 * Avoid reporting supported link speed for FCoE as it can't be
4980 	 * controlled via FCoE.
4981 	 */
4982 	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag))
4983 		return;
4984 
4985 	if (phba->lmt & LMT_256Gb)
4986 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4987 	if (phba->lmt & LMT_128Gb)
4988 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4989 	if (phba->lmt & LMT_64Gb)
4990 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4991 	if (phba->lmt & LMT_32Gb)
4992 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4993 	if (phba->lmt & LMT_16Gb)
4994 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4995 	if (phba->lmt & LMT_10Gb)
4996 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4997 	if (phba->lmt & LMT_8Gb)
4998 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4999 	if (phba->lmt & LMT_4Gb)
5000 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
5001 	if (phba->lmt & LMT_2Gb)
5002 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
5003 	if (phba->lmt & LMT_1Gb)
5004 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
5005 }
5006 
5007 /**
5008  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
5009  * @shost: pointer to SCSI host data structure.
5010  *
5011  * This routine initializes the SCSI host attributes for a given FC port. The
5012  * SCSI host can be either on top of a physical port or a virtual port.
5013  **/
5014 void lpfc_host_attrib_init(struct Scsi_Host *shost)
5015 {
5016 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5017 	struct lpfc_hba   *phba = vport->phba;
5018 	/*
5019 	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
5020 	 */
5021 
5022 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5023 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5024 	fc_host_supported_classes(shost) = FC_COS_CLASS3;
5025 
5026 	memset(fc_host_supported_fc4s(shost), 0,
5027 	       sizeof(fc_host_supported_fc4s(shost)));
5028 	fc_host_supported_fc4s(shost)[2] = 1;
5029 	fc_host_supported_fc4s(shost)[7] = 1;
5030 
5031 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5032 				 sizeof fc_host_symbolic_name(shost));
5033 
5034 	lpfc_host_supported_speeds_set(shost);
5035 
5036 	fc_host_maxframe_size(shost) =
5037 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5038 		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
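	/* The BB receive data field size is a 12-bit value split across two
	 * service-parameter bytes: the low 4 bits of the Msb byte and all 8
	 * bits of the Lsb byte. For example, Msb = 0x08 and Lsb = 0x00
	 * yields (0x8 << 8) | 0x00 = 2048 bytes, the usual FC maximum.
	 */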
5039 
5040 	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5041 
5042 	/* This value is also unchanging */
5043 	memset(fc_host_active_fc4s(shost), 0,
5044 	       sizeof(fc_host_active_fc4s(shost)));
5045 	fc_host_active_fc4s(shost)[2] = 1;
5046 	fc_host_active_fc4s(shost)[7] = 1;
5047 
5048 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
5049 	clear_bit(FC_LOADING, &vport->load_flag);
5050 }
5051 
5052 /**
5053  * lpfc_stop_port_s3 - Stop SLI3 device port
5054  * @phba: pointer to lpfc hba data structure.
5055  *
5056  * This routine is invoked to stop an SLI3 device port; it stops the device
5057  * from generating interrupts and stops the device driver's timers for the
5058  * device.
5059  **/
5060 static void
5061 lpfc_stop_port_s3(struct lpfc_hba *phba)
5062 {
5063 	/* Clear all interrupt enable conditions */
5064 	writel(0, phba->HCregaddr);
5065 	readl(phba->HCregaddr); /* flush */
5066 	/* Clear all pending interrupts */
5067 	writel(0xffffffff, phba->HAregaddr);
5068 	readl(phba->HAregaddr); /* flush */
5069 
5070 	/* Reset some HBA SLI setup states */
5071 	lpfc_stop_hba_timers(phba);
5072 	phba->pport->work_port_events = 0;
5073 }
5074 
5075 /**
5076  * lpfc_stop_port_s4 - Stop SLI4 device port
5077  * @phba: pointer to lpfc hba data structure.
5078  *
5079  * This routine is invoked to stop an SLI4 device port; it stops the device
5080  * from generating interrupts and stops the device driver's timers for the
5081  * device.
5082  **/
5083 static void
5084 lpfc_stop_port_s4(struct lpfc_hba *phba)
5085 {
5086 	/* Reset some HBA SLI4 setup states */
5087 	lpfc_stop_hba_timers(phba);
5088 	if (phba->pport)
5089 		phba->pport->work_port_events = 0;
5090 	phba->sli4_hba.intr_enable = 0;
5091 }
5092 
5093 /**
5094  * lpfc_stop_port - Wrapper function for stopping hba port
5095  * @phba: Pointer to HBA context object.
5096  *
5097  * This routine wraps the actual SLI3 or SLI4 hba stop port routine,
5098  * invoked via the API jump table function pointer in the lpfc_hba struct.
5099  **/
5100 void
5101 lpfc_stop_port(struct lpfc_hba *phba)
5102 {
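	/* Dispatch through the SLI-rev specific handler installed in the
	 * hba's API jump table; this resolves to lpfc_stop_port_s3() or
	 * lpfc_stop_port_s4() depending on the interface type probed at
	 * setup.
	 */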
5103 	phba->lpfc_stop_port(phba);
5104 
5105 	if (phba->wq)
5106 		flush_workqueue(phba->wq);
5107 }
5108 
5109 /**
5110  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5111  * @phba: Pointer to hba for which this call is being executed.
5112  *
5113  * This routine starts the timer waiting for the FCF rediscovery to complete.
5114  **/
5115 void
5116 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5117 {
5118 	unsigned long fcf_redisc_wait_tmo =
5119 		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5120 	/* Start fcf rediscovery wait period timer */
5121 	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5122 	spin_lock_irq(&phba->hbalock);
5123 	/* Allow action on a new FCF asynchronous event */
5124 	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5125 	/* Mark the FCF rediscovery pending state */
5126 	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5127 	spin_unlock_irq(&phba->hbalock);
5128 }
5129 
5130 /**
5131  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5132  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5133  *
5134  * This routine is invoked when the wait for FCF table rediscovery times
5135  * out. If new FCF records have been discovered during the wait period,
5136  * a new FCF event is added to the FCOE async event list, and the
5137  * worker thread is then woken up to process the event from the worker
5138  * thread context.
5139  **/
5140 static void
5141 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5142 {
5143 	struct lpfc_hba *phba = timer_container_of(phba, t, fcf.redisc_wait);
5144 
5145 	/* Don't send FCF rediscovery event if timer cancelled */
5146 	spin_lock_irq(&phba->hbalock);
5147 	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5148 		spin_unlock_irq(&phba->hbalock);
5149 		return;
5150 	}
5151 	/* Clear FCF rediscovery timer pending flag */
5152 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5153 	/* FCF rediscovery event to worker thread */
5154 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5155 	spin_unlock_irq(&phba->hbalock);
5156 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5157 			"2776 FCF rediscover quiescent timer expired\n");
5158 	/* wake up worker thread */
5159 	lpfc_worker_wake_up(phba);
5160 }
5161 
5162 /**
5163  * lpfc_vmid_poll - VMID timeout detection
5164  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5165  *
5166  * This routine is invoked when no I/O has been issued by a VM for the
5167  * specified amount of time. When this is detected, the VMID has to be
5168  * deregistered from the switch and all the local resources freed. The VMID
5169  * will be reassigned to the VM once the I/O begins.
5170  **/
5171 static void
5172 lpfc_vmid_poll(struct timer_list *t)
5173 {
5174 	struct lpfc_hba *phba = timer_container_of(phba, t,
5175 						   inactive_vmid_poll);
5176 	u32 wake_up = 0;
5177 
5178 	/* check if there is a need to issue QFPA */
5179 	if (phba->pport->vmid_priority_tagging) {
5180 		wake_up = 1;
5181 		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5182 	}
5183 
5184 	/* Is the vmid inactivity timer enabled */
5185 	if (phba->pport->vmid_inactivity_timeout ||
5186 	    test_bit(FC_DEREGISTER_ALL_APP_ID, &phba->pport->load_flag)) {
5187 		wake_up = 1;
5188 		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5189 	}
5190 
5191 	if (wake_up)
5192 		lpfc_worker_wake_up(phba);
5193 
5194 	/* restart the timer for the next iteration */
5195 	mod_timer(&phba->inactive_vmid_poll,
5196 		  jiffies + secs_to_jiffies(LPFC_VMID_TIMER));
5197 }
5198 
5199 /**
5200  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5201  * @phba: pointer to lpfc hba data structure.
5202  * @acqe_link: pointer to the async link completion queue entry.
5203  *
5204  * This routine is to parse the SLI4 link-attention link fault code.
5205  **/
5206 static void
5207 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5208 			   struct lpfc_acqe_link *acqe_link)
5209 {
5210 	switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) {
5211 	case LPFC_FC_LA_TYPE_LINK_DOWN:
5212 	case LPFC_FC_LA_TYPE_TRUNKING_EVENT:
5213 	case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
5214 	case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
5215 		break;
5216 	default:
5217 		switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5218 		case LPFC_ASYNC_LINK_FAULT_NONE:
5219 		case LPFC_ASYNC_LINK_FAULT_LOCAL:
5220 		case LPFC_ASYNC_LINK_FAULT_REMOTE:
5221 		case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5222 			break;
5223 		default:
5224 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5225 					"0398 Unknown link fault code: x%x\n",
5226 					bf_get(lpfc_acqe_link_fault, acqe_link));
5227 			break;
5228 		}
5229 		break;
5230 	}
5231 }
5232 
5233 /**
5234  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5235  * @phba: pointer to lpfc hba data structure.
5236  * @acqe_link: pointer to the async link completion queue entry.
5237  *
5238  * This routine is to parse the SLI4 link attention type and translate it
5239  * into the base driver's link attention type coding.
5240  *
5241  * Return: Link attention type in terms of base driver's coding.
5242  **/
5243 static uint8_t
5244 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5245 			  struct lpfc_acqe_link *acqe_link)
5246 {
5247 	uint8_t att_type;
5248 
5249 	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5250 	case LPFC_ASYNC_LINK_STATUS_DOWN:
5251 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5252 		att_type = LPFC_ATT_LINK_DOWN;
5253 		break;
5254 	case LPFC_ASYNC_LINK_STATUS_UP:
5255 		/* Ignore physical link up events - wait for logical link up */
5256 		att_type = LPFC_ATT_RESERVED;
5257 		break;
5258 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5259 		att_type = LPFC_ATT_LINK_UP;
5260 		break;
5261 	default:
5262 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5263 				"0399 Invalid link attention type: x%x\n",
5264 				bf_get(lpfc_acqe_link_status, acqe_link));
5265 		att_type = LPFC_ATT_RESERVED;
5266 		break;
5267 	}
5268 	return att_type;
5269 }
5270 
5271 /**
5272  * lpfc_sli_port_speed_get - Get the current FC port link speed
5273  * @phba: pointer to lpfc hba data structure.
5274  *
5275  * This routine is to get an SLI3 or SLI4 FC port's link speed in Mbps.
5276  *
5277  * Return: link speed in terms of Mbps.
5278  **/
5279 uint32_t
5280 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5281 {
5282 	uint32_t link_speed;
5283 
5284 	if (!lpfc_is_link_up(phba))
5285 		return 0;
5286 
5287 	if (phba->sli_rev <= LPFC_SLI_REV3) {
5288 		switch (phba->fc_linkspeed) {
5289 		case LPFC_LINK_SPEED_1GHZ:
5290 			link_speed = 1000;
5291 			break;
5292 		case LPFC_LINK_SPEED_2GHZ:
5293 			link_speed = 2000;
5294 			break;
5295 		case LPFC_LINK_SPEED_4GHZ:
5296 			link_speed = 4000;
5297 			break;
5298 		case LPFC_LINK_SPEED_8GHZ:
5299 			link_speed = 8000;
5300 			break;
5301 		case LPFC_LINK_SPEED_10GHZ:
5302 			link_speed = 10000;
5303 			break;
5304 		case LPFC_LINK_SPEED_16GHZ:
5305 			link_speed = 16000;
5306 			break;
5307 		default:
5308 			link_speed = 0;
5309 		}
5310 	} else {
5311 		if (phba->sli4_hba.link_state.logical_speed)
5312 			link_speed =
5313 			      phba->sli4_hba.link_state.logical_speed;
5314 		else
5315 			link_speed = phba->sli4_hba.link_state.speed;
5316 	}
5317 	return link_speed;
5318 }
5319 
5320 /**
5321  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5322  * @phba: pointer to lpfc hba data structure.
5323  * @evt_code: asynchronous event code.
5324  * @speed_code: asynchronous event link speed code.
5325  *
5326  * This routine is to parse the given SLI4 async event link speed code into
5327  * a link speed value in Mbps.
5328  *
5329  * Return: link speed in terms of Mbps.
5330  **/
5331 static uint32_t
5332 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5333 			   uint8_t speed_code)
5334 {
5335 	uint32_t port_speed;
5336 
5337 	switch (evt_code) {
5338 	case LPFC_TRAILER_CODE_LINK:
5339 		switch (speed_code) {
5340 		case LPFC_ASYNC_LINK_SPEED_ZERO:
5341 			port_speed = 0;
5342 			break;
5343 		case LPFC_ASYNC_LINK_SPEED_10MBPS:
5344 			port_speed = 10;
5345 			break;
5346 		case LPFC_ASYNC_LINK_SPEED_100MBPS:
5347 			port_speed = 100;
5348 			break;
5349 		case LPFC_ASYNC_LINK_SPEED_1GBPS:
5350 			port_speed = 1000;
5351 			break;
5352 		case LPFC_ASYNC_LINK_SPEED_10GBPS:
5353 			port_speed = 10000;
5354 			break;
5355 		case LPFC_ASYNC_LINK_SPEED_20GBPS:
5356 			port_speed = 20000;
5357 			break;
5358 		case LPFC_ASYNC_LINK_SPEED_25GBPS:
5359 			port_speed = 25000;
5360 			break;
5361 		case LPFC_ASYNC_LINK_SPEED_40GBPS:
5362 			port_speed = 40000;
5363 			break;
5364 		case LPFC_ASYNC_LINK_SPEED_100GBPS:
5365 			port_speed = 100000;
5366 			break;
5367 		default:
5368 			port_speed = 0;
5369 		}
5370 		break;
5371 	case LPFC_TRAILER_CODE_FC:
5372 		switch (speed_code) {
5373 		case LPFC_FC_LA_SPEED_UNKNOWN:
5374 			port_speed = 0;
5375 			break;
5376 		case LPFC_FC_LA_SPEED_1G:
5377 			port_speed = 1000;
5378 			break;
5379 		case LPFC_FC_LA_SPEED_2G:
5380 			port_speed = 2000;
5381 			break;
5382 		case LPFC_FC_LA_SPEED_4G:
5383 			port_speed = 4000;
5384 			break;
5385 		case LPFC_FC_LA_SPEED_8G:
5386 			port_speed = 8000;
5387 			break;
5388 		case LPFC_FC_LA_SPEED_10G:
5389 			port_speed = 10000;
5390 			break;
5391 		case LPFC_FC_LA_SPEED_16G:
5392 			port_speed = 16000;
5393 			break;
5394 		case LPFC_FC_LA_SPEED_32G:
5395 			port_speed = 32000;
5396 			break;
5397 		case LPFC_FC_LA_SPEED_64G:
5398 			port_speed = 64000;
5399 			break;
5400 		case LPFC_FC_LA_SPEED_128G:
5401 			port_speed = 128000;
5402 			break;
5403 		case LPFC_FC_LA_SPEED_256G:
5404 			port_speed = 256000;
5405 			break;
5406 		default:
5407 			port_speed = 0;
5408 		}
5409 		break;
5410 	default:
5411 		port_speed = 0;
5412 	}
5413 	return port_speed;
5414 }
5415 
5416 /**
5417  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5418  * @phba: pointer to lpfc hba data structure.
5419  * @acqe_link: pointer to the async link completion queue entry.
5420  *
5421  * This routine is to handle the SLI4 asynchronous FCoE link event.
5422  **/
5423 static void
5424 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5425 			 struct lpfc_acqe_link *acqe_link)
5426 {
5427 	LPFC_MBOXQ_t *pmb;
5428 	MAILBOX_t *mb;
5429 	struct lpfc_mbx_read_top *la;
5430 	uint8_t att_type;
5431 	int rc;
5432 
5433 	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5434 	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5435 		return;
5436 	phba->fcoe_eventtag = acqe_link->event_tag;
5437 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5438 	if (!pmb) {
5439 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5440 				"0395 The mboxq allocation failed\n");
5441 		return;
5442 	}
5443 
5444 	rc = lpfc_mbox_rsrc_prep(phba, pmb);
5445 	if (rc) {
5446 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5447 				"0396 mailbox allocation failed\n");
5448 		goto out_free_pmb;
5449 	}
5450 
5451 	/* Cleanup any outstanding ELS commands */
5452 	lpfc_els_flush_all_cmd(phba);
5453 
5454 	/* Block ELS IOCBs until we have done process link event */
5455 	/* Block ELS IOCBs until we have finished processing the link event */
5456 
5457 	/* Update link event statistics */
5458 	phba->sli.slistat.link_event++;
5459 
5460 	/* Create lpfc_handle_latt mailbox command from link ACQE */
5461 	lpfc_read_topology(phba, pmb, pmb->ctx_buf);
5462 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5463 	pmb->vport = phba->pport;
5464 
5465 	/* Keep the link status for extra SLI4 state machine reference */
5466 	phba->sli4_hba.link_state.speed =
5467 			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5468 				bf_get(lpfc_acqe_link_speed, acqe_link));
5469 	phba->sli4_hba.link_state.duplex =
5470 				bf_get(lpfc_acqe_link_duplex, acqe_link);
5471 	phba->sli4_hba.link_state.status =
5472 				bf_get(lpfc_acqe_link_status, acqe_link);
5473 	phba->sli4_hba.link_state.type =
5474 				bf_get(lpfc_acqe_link_type, acqe_link);
5475 	phba->sli4_hba.link_state.number =
5476 				bf_get(lpfc_acqe_link_number, acqe_link);
5477 	phba->sli4_hba.link_state.fault =
5478 				bf_get(lpfc_acqe_link_fault, acqe_link);
5479 	phba->sli4_hba.link_state.logical_speed =
5480 			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5481 
5482 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5483 			"2900 Async FC/FCoE Link event - Speed:%dGBit "
5484 			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5485 			"Logical speed:%dMbps Fault:%d\n",
5486 			phba->sli4_hba.link_state.speed,
5487 			phba->sli4_hba.link_state.topology,
5488 			phba->sli4_hba.link_state.status,
5489 			phba->sli4_hba.link_state.type,
5490 			phba->sli4_hba.link_state.number,
5491 			phba->sli4_hba.link_state.logical_speed,
5492 			phba->sli4_hba.link_state.fault);
5493 	/*
5494 	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5495 	 * topology info. Note: Optional for non FC-AL ports.
5496 	 */
5497 	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
5498 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5499 		if (rc == MBX_NOT_FINISHED)
5500 			goto out_free_pmb;
5501 		return;
5502 	}
5503 	/*
5504 	 * For FCoE Mode: fill in all the topology information we need and call
5505 	 * the READ_TOPOLOGY completion routine to continue without actually
5506 	 * sending the READ_TOPOLOGY mailbox command to the port.
5507 	 */
5508 	/* Initialize completion status */
5509 	mb = &pmb->u.mb;
5510 	mb->mbxStatus = MBX_SUCCESS;
5511 
5512 	/* Parse port fault information field */
5513 	lpfc_sli4_parse_latt_fault(phba, acqe_link);
5514 
5515 	/* Parse and translate link attention fields */
5516 	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5517 	la->eventTag = acqe_link->event_tag;
5518 	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5519 	bf_set(lpfc_mbx_read_top_link_spd, la,
5520 	       (bf_get(lpfc_acqe_link_speed, acqe_link)));
5521 
5522 	/* Fake the following irrelevant fields */
5523 	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5524 	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5525 	bf_set(lpfc_mbx_read_top_il, la, 0);
5526 	bf_set(lpfc_mbx_read_top_pb, la, 0);
5527 	bf_set(lpfc_mbx_read_top_fa, la, 0);
5528 	bf_set(lpfc_mbx_read_top_mm, la, 0);
5529 
5530 	/* Invoke the lpfc_handle_latt mailbox command callback function */
5531 	lpfc_mbx_cmpl_read_topology(phba, pmb);
5532 
5533 	return;
5534 
5535 out_free_pmb:
5536 	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5537 }
5538 
5539 /**
5540  * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5541  * topology.
5542  * @phba: pointer to lpfc hba data structure.
5543  * @speed_code: asynchronous event link speed code.
5544  *
5545  * This routine is to parse the given SLI4 async event link speed code into
5546  * a Read topology link speed value.
5547  *
5548  * Return: link speed in terms of Read topology.
5549  **/
5550 static uint8_t
5551 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5552 {
5553 	uint8_t port_speed;
5554 
5555 	switch (speed_code) {
5556 	case LPFC_FC_LA_SPEED_1G:
5557 		port_speed = LPFC_LINK_SPEED_1GHZ;
5558 		break;
5559 	case LPFC_FC_LA_SPEED_2G:
5560 		port_speed = LPFC_LINK_SPEED_2GHZ;
5561 		break;
5562 	case LPFC_FC_LA_SPEED_4G:
5563 		port_speed = LPFC_LINK_SPEED_4GHZ;
5564 		break;
5565 	case LPFC_FC_LA_SPEED_8G:
5566 		port_speed = LPFC_LINK_SPEED_8GHZ;
5567 		break;
5568 	case LPFC_FC_LA_SPEED_16G:
5569 		port_speed = LPFC_LINK_SPEED_16GHZ;
5570 		break;
5571 	case LPFC_FC_LA_SPEED_32G:
5572 		port_speed = LPFC_LINK_SPEED_32GHZ;
5573 		break;
5574 	case LPFC_FC_LA_SPEED_64G:
5575 		port_speed = LPFC_LINK_SPEED_64GHZ;
5576 		break;
5577 	case LPFC_FC_LA_SPEED_128G:
5578 		port_speed = LPFC_LINK_SPEED_128GHZ;
5579 		break;
5580 	case LPFC_FC_LA_SPEED_256G:
5581 		port_speed = LPFC_LINK_SPEED_256GHZ;
5582 		break;
5583 	default:
5584 		port_speed = 0;
5585 		break;
5586 	}
5587 
5588 	return port_speed;
5589 }
5590 
5591 void
5592 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5593 {
5594 	if (!phba->rx_monitor) {
5595 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5596 				"4411 Rx Monitor Info is empty.\n");
5597 	} else {
5598 		lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
5599 				       LPFC_MAX_RXMONITOR_DUMP);
5600 	}
5601 }
5602 
5603 /**
5604  * lpfc_cgn_update_stat - Save data into congestion stats buffer
5605  * @phba: pointer to lpfc hba data structure.
5606  * @dtag: FPIN descriptor received
5607  *
5608  * Increment the FPIN received counter and update its timestamp on receipt.
5609  */
5610 void
5611 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5612 {
5613 	struct lpfc_cgn_info *cp;
5614 	u32 value;
5615 
5616 	/* Make sure we have a congestion info buffer */
5617 	if (!phba->cgn_i)
5618 		return;
5619 	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5620 
5621 	/* Update congestion statistics */
5622 	switch (dtag) {
5623 	case ELS_DTAG_LNK_INTEGRITY:
5624 		le32_add_cpu(&cp->link_integ_notification, 1);
5625 		lpfc_cgn_update_tstamp(phba, &cp->stat_lnk);
5626 		break;
5627 	case ELS_DTAG_DELIVERY:
5628 		le32_add_cpu(&cp->delivery_notification, 1);
5629 		lpfc_cgn_update_tstamp(phba, &cp->stat_delivery);
5630 		break;
5631 	case ELS_DTAG_PEER_CONGEST:
5632 		le32_add_cpu(&cp->cgn_peer_notification, 1);
5633 		lpfc_cgn_update_tstamp(phba, &cp->stat_peer);
5634 		break;
5635 	case ELS_DTAG_CONGESTION:
5636 		le32_add_cpu(&cp->cgn_notification, 1);
5637 		lpfc_cgn_update_tstamp(phba, &cp->stat_fpin);
5638 	}
5639 	if (phba->cgn_fpin_frequency &&
5640 	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5641 		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5642 		cp->cgn_stat_npm = value;
5643 	}
5644 
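	/* Every update of the congestion info block ends with a CRC
	 * recompute over the whole block so it stays self-consistent;
	 * lpfc_cmf_stats_timer() follows the same pattern.
	 */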
5645 	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
5646 	cp->cgn_info_crc = cpu_to_le32(value);
5647 }
5648 
5649 /**
5650  * lpfc_cgn_update_tstamp - Update cmf timestamp
5651  * @phba: pointer to lpfc hba data structure.
5652  * @ts: structure to write the timestamp to.
5653  */
5654 void
5655 lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts)
5656 {
5657 	struct timespec64 cur_time;
5658 	struct tm tm_val;
5659 
5660 	ktime_get_real_ts64(&cur_time);
5661 	time64_to_tm(cur_time.tv_sec, 0, &tm_val);
5662 
5663 	ts->month = tm_val.tm_mon + 1;
5664 	ts->day	= tm_val.tm_mday;
5665 	ts->year = tm_val.tm_year - 100;
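	/* tm_year counts years since 1900, so subtracting 100 stores years
	 * since 2000 (e.g. 2024 is recorded as 24).
	 */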
5666 	ts->hour = tm_val.tm_hour;
5667 	ts->minute = tm_val.tm_min;
5668 	ts->second = tm_val.tm_sec;
5669 
5670 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5671 			"2646 Updated CMF timestamp : "
5672 			"%u/%u/%u %u:%u:%u\n",
5673 			ts->day, ts->month,
5674 			ts->year, ts->hour,
5675 			ts->minute, ts->second);
5676 }
5677 
5678 /**
5679  * lpfc_cmf_stats_timer - Save data into registered congestion buffer
5680  * @timer: Timer cookie to access lpfc private data
5681  *
5682  * Save the congestion event data every minute.
5683  * On the hour, collapse all the minute data into hour data. Every day,
5684  * collapse all the hour data into daily data. Driver and fabric
5685  * congestion event counters are kept separately and are saved out to
5686  * the registered congestion buffer every minute.
5687  */
5688 static enum hrtimer_restart
5689 lpfc_cmf_stats_timer(struct hrtimer *timer)
5690 {
5691 	struct lpfc_hba *phba;
5692 	struct lpfc_cgn_info *cp;
5693 	uint32_t i, index;
5694 	uint16_t value, mvalue;
5695 	uint64_t bps;
5696 	uint32_t mbps;
5697 	uint32_t dvalue, wvalue, lvalue, avalue;
5698 	uint64_t latsum;
5699 	__le16 *ptr;
5700 	__le32 *lptr;
5701 	__le16 *mptr;
5702 
5703 	phba = container_of(timer, struct lpfc_hba, cmf_stats_timer);
5704 	/* Make sure we have a congestion info buffer */
5705 	if (!phba->cgn_i)
5706 		return HRTIMER_NORESTART;
5707 	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5708 
5709 	phba->cgn_evt_timestamp = jiffies +
5710 			msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5711 	phba->cgn_evt_minute++;
5712 
5713 	/* We should get to this point in the routine at 1-minute intervals */
5714 	lpfc_cgn_update_tstamp(phba, &cp->base_time);
5715 
5716 	if (phba->cgn_fpin_frequency &&
5717 	    phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5718 		value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5719 		cp->cgn_stat_npm = value;
5720 	}
5721 
5722 	/* Read and clear the latency counters for this minute */
5723 	lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5724 	latsum = atomic64_read(&phba->cgn_latency_evt);
5725 	atomic_set(&phba->cgn_latency_evt_cnt, 0);
5726 	atomic64_set(&phba->cgn_latency_evt, 0);
5727 
5728 	/* We need to store MB/sec bandwidth in the congestion information.
5729 	 * block_cnt is count of 512 byte blocks for the entire minute,
5730 	 * bps will get bytes per sec before finally converting to MB/sec.
5731 	 */
5732 	bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5733 	phba->rx_block_cnt = 0;
5734 	mvalue = bps / (1024 * 1024); /* convert to MB/sec */
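	/* Worked example (hypothetical traffic): 1 GiB received in the
	 * minute gives rx_block_cnt = 2097152 blocks, so bps =
	 * (2097152 / 60) * 512 = 17895424 bytes/sec and mvalue = 17 MB/sec.
	 */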
5735 
5736 	/* Every minute */
5737 	/* cgn parameters */
5738 	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5739 	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5740 	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5741 	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5742 
5743 	/* Fill in default LUN qdepth */
5744 	value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5745 	cp->cgn_lunq = cpu_to_le16(value);
5746 
5747 	/* Record congestion buffer info - every minute
5748 	 * cgn_driver_evt_cnt (Driver events)
5749 	 * cgn_fabric_warn_cnt (Congestion Warnings)
5750 	 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5751 	 * cgn_fabric_alarm_cnt (Congestion Alarms)
5752 	 */
5753 	index = ++cp->cgn_index_minute;
5754 	if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5755 		cp->cgn_index_minute = 0;
5756 		index = 0;
5757 	}
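	/* The per-minute arrays (cgn_drvr_min[], cgn_warn_min[], etc.) are
	 * ring buffers with one slot per minute of the hour; the hour and
	 * day histories below wrap the same way at LPFC_HOUR_DAY and
	 * LPFC_MAX_CGN_DAYS respectively.
	 */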
5758 
5759 	/* Get the number of driver events in this sample and reset counter */
5760 	dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5761 	atomic_set(&phba->cgn_driver_evt_cnt, 0);
5762 
5763 	/* Get the number of warning events - FPIN and Signal for this minute */
5764 	wvalue = 0;
5765 	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5766 	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5767 	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5768 		wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5769 	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5770 
5771 	/* Get the number of alarm events - FPIN and Signal for this minute */
5772 	avalue = 0;
5773 	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5774 	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5775 		avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5776 	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5777 
5778 	/* Collect the driver, warning, alarm and latency counts for this
5779 	 * minute into the driver congestion buffer.
5780 	 */
5781 	ptr = &cp->cgn_drvr_min[index];
5782 	value = (uint16_t)dvalue;
5783 	*ptr = cpu_to_le16(value);
5784 
5785 	ptr = &cp->cgn_warn_min[index];
5786 	value = (uint16_t)wvalue;
5787 	*ptr = cpu_to_le16(value);
5788 
5789 	ptr = &cp->cgn_alarm_min[index];
5790 	value = (uint16_t)avalue;
5791 	*ptr = cpu_to_le16(value);
5792 
5793 	lptr = &cp->cgn_latency_min[index];
5794 	if (lvalue) {
5795 		lvalue = (uint32_t)div_u64(latsum, lvalue);
5796 		*lptr = cpu_to_le32(lvalue);
5797 	} else {
5798 		*lptr = 0;
5799 	}
5800 
5801 	/* Collect the bandwidth value into the driver's congestion buffer. */
5802 	mptr = &cp->cgn_bw_min[index];
5803 	*mptr = cpu_to_le16(mvalue);
5804 
5805 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5806 			"2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5807 			index, dvalue, wvalue, *lptr, mvalue, avalue);
5808 
5809 	/* Every hour */
5810 	if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5811 		/* Record congestion buffer info - every hour
5812 		 * Collapse all minutes into an hour
5813 		 */
5814 		index = ++cp->cgn_index_hour;
5815 		if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5816 			cp->cgn_index_hour = 0;
5817 			index = 0;
5818 		}
5819 
5820 		dvalue = 0;
5821 		wvalue = 0;
5822 		lvalue = 0;
5823 		avalue = 0;
5824 		mvalue = 0;
5825 		mbps = 0;
5826 		for (i = 0; i < LPFC_MIN_HOUR; i++) {
5827 			dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5828 			wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5829 			lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5830 			mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5831 			avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5832 		}
5833 		if (lvalue)		/* Avg of latency averages */
5834 			lvalue /= LPFC_MIN_HOUR;
5835 		if (mbps)		/* Avg of Bandwidth averages */
5836 			mvalue = mbps / LPFC_MIN_HOUR;
5837 
5838 		lptr = &cp->cgn_drvr_hr[index];
5839 		*lptr = cpu_to_le32(dvalue);
5840 		lptr = &cp->cgn_warn_hr[index];
5841 		*lptr = cpu_to_le32(wvalue);
5842 		lptr = &cp->cgn_latency_hr[index];
5843 		*lptr = cpu_to_le32(lvalue);
5844 		mptr = &cp->cgn_bw_hr[index];
5845 		*mptr = cpu_to_le16(mvalue);
5846 		lptr = &cp->cgn_alarm_hr[index];
5847 		*lptr = cpu_to_le32(avalue);
5848 
5849 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5850 				"2419 Congestion Info - hour "
5851 				"(%d): %d %d %d %d %d\n",
5852 				index, dvalue, wvalue, lvalue, mvalue, avalue);
5853 	}
5854 
5855 	/* Every day */
5856 	if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5857 		/* Record congestion buffer info - every hour
5858 		 * Collapse all hours into a day. Rotate days
5859 		 * after LPFC_MAX_CGN_DAYS.
5860 		 */
5861 		index = ++cp->cgn_index_day;
5862 		if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5863 			cp->cgn_index_day = 0;
5864 			index = 0;
5865 		}
5866 
5867 		dvalue = 0;
5868 		wvalue = 0;
5869 		lvalue = 0;
5870 		mvalue = 0;
5871 		mbps = 0;
5872 		avalue = 0;
5873 		for (i = 0; i < LPFC_HOUR_DAY; i++) {
5874 			dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5875 			wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5876 			lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5877 			mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5878 			avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5879 		}
5880 		if (lvalue)		/* Avg of latency averages */
5881 			lvalue /= LPFC_HOUR_DAY;
5882 		if (mbps)		/* Avg of Bandwidth averages */
5883 			mvalue = mbps / LPFC_HOUR_DAY;
5884 
5885 		lptr = &cp->cgn_drvr_day[index];
5886 		*lptr = cpu_to_le32(dvalue);
5887 		lptr = &cp->cgn_warn_day[index];
5888 		*lptr = cpu_to_le32(wvalue);
5889 		lptr = &cp->cgn_latency_day[index];
5890 		*lptr = cpu_to_le32(lvalue);
5891 		mptr = &cp->cgn_bw_day[index];
5892 		*mptr = cpu_to_le16(mvalue);
5893 		lptr = &cp->cgn_alarm_day[index];
5894 		*lptr = cpu_to_le32(avalue);
5895 
5896 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5897 				"2420 Congestion Info - daily (%d): "
5898 				"%d %d %d %d %d\n",
5899 				index, dvalue, wvalue, lvalue, mvalue, avalue);
5900 	}
5901 
5902 	/* Use the frequency found in the last rcv'ed FPIN */
5903 	value = phba->cgn_fpin_frequency;
5904 	cp->cgn_warn_freq = cpu_to_le16(value);
5905 	cp->cgn_alarm_freq = cpu_to_le16(value);
5906 
5907 	lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
5908 	cp->cgn_info_crc = cpu_to_le32(lvalue);
5909 
5910 	hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC));
5911 
5912 	return HRTIMER_RESTART;
5913 }
5914 
5915 /**
5916  * lpfc_calc_cmf_latency - latency from start of the CMF timer interval
5917  * @phba: The Hba for which this call is being executed.
5918  *
5919  * The routine calculates the latency from the beginning of the CMF timer
5920  * interval to the current point in time. It is called from IO completion
5921  * when we exceed our Bandwidth limitation for the time interval.
5922  */
5923 uint32_t
5924 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5925 {
5926 	struct timespec64 cmpl_time;
5927 	uint32_t msec = 0;
5928 
5929 	ktime_get_real_ts64(&cmpl_time);
5930 
5931 	/* This routine works on a ms granularity so sec and nsec are
5932 	 * converted accordingly.
5933 	 */
5934 	if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5935 		msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5936 			NSEC_PER_MSEC;
5937 	} else {
5938 		if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5939 			msec = (cmpl_time.tv_sec -
5940 				phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5941 			msec += ((cmpl_time.tv_nsec -
5942 				  phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5943 		} else {
5944 			msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5945 				1) * MSEC_PER_SEC;
5946 			msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5947 				 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5948 		}
5949 	}
5950 	return msec;
5951 }
5952 
5953 /**
5954  * lpfc_cmf_timer -  This is the timer function for one congestion
5955  * rate interval.
5956  * @timer: Pointer to the high resolution timer that expired
5957  */
5958 static enum hrtimer_restart
5959 lpfc_cmf_timer(struct hrtimer *timer)
5960 {
5961 	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5962 					     cmf_timer);
5963 	struct rx_info_entry entry;
5964 	uint32_t io_cnt;
5965 	uint32_t busy, max_read;
5966 	uint64_t total, rcv, lat, mbpi, extra, cnt;
5967 	int timer_interval = LPFC_CMF_INTERVAL;
5968 	uint32_t ms;
5969 	struct lpfc_cgn_stat *cgs;
5970 	int cpu;
5971 
5972 	/* Only restart the timer if congestion mgmt is on */
5973 	if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5974 	    !phba->cmf_latency.tv_sec) {
5975 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5976 				"6224 CMF timer exit: %d %lld\n",
5977 				phba->cmf_active_mode,
5978 				(uint64_t)phba->cmf_latency.tv_sec);
5979 		return HRTIMER_NORESTART;
5980 	}
5981 
5982 	/* If pport is not ready yet, just exit and wait for
5983 	 * the next timer cycle to hit.
5984 	 */
5985 	if (!phba->pport)
5986 		goto skip;
5987 
5988 	/* Tell the IO path not to block SCSI IO on bandwidth while in this
5989 	 * timer routine, since total_bytes will be cleared
5990 	 */
5991 	atomic_set(&phba->cmf_stop_io, 1);
5992 
5993 	/* First we need to calculate the actual ms between
5994 	 * the last timer interrupt and this one. We ask for
5995 	 * LPFC_CMF_INTERVAL, however the actual time may
5996 	 * vary depending on system overhead.
5997 	 */
5998 	ms = lpfc_calc_cmf_latency(phba);
5999 
6000 
6001 	/* Immediately after we calculate the time since the last
6002 	 * timer interrupt, set the start time for the next
6003 	 * interrupt
6004 	 */
6005 	ktime_get_real_ts64(&phba->cmf_latency);
6006 
6007 	phba->cmf_link_byte_count =
6008 		div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6009 
6010 	/* Collect all the stats from the prior timer interval */
6011 	total = 0;
6012 	io_cnt = 0;
6013 	lat = 0;
6014 	rcv = 0;
6015 	for_each_present_cpu(cpu) {
6016 		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6017 		total += atomic64_xchg(&cgs->total_bytes, 0);
6018 		io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6019 		lat += atomic64_xchg(&cgs->rx_latency, 0);
6020 		rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6021 	}
6022 
6023 	/* Before we issue another CMF_SYNC_WQE, retrieve the BW
6024 	 * returned from the last CMF_SYNC_WQE issued, from
6025 	 * cmf_last_sync_bw. This will be the target BW for
6026 	 * this next timer interval.
6027 	 */
6028 	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6029 	    phba->link_state != LPFC_LINK_DOWN &&
6030 	    test_bit(HBA_SETUP, &phba->hba_flag)) {
6031 		mbpi = phba->cmf_last_sync_bw;
6032 		phba->cmf_last_sync_bw = 0;
6033 		extra = 0;
6034 
6035 		/* Calculate any extra bytes needed to account for the
6036 		 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
6037 		 * calculate the adjustment needed for total to reflect
6038 		 * a full LPFC_CMF_INTERVAL.
6039 		 */
6040 		if (ms && ms < LPFC_CMF_INTERVAL) {
6041 			cnt = div_u64(total, ms); /* bytes per ms */
6042 			cnt *= LPFC_CMF_INTERVAL; /* what total should be */
6043 			extra = cnt - total;
6044 		}
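		/* Illustrative numbers (hypothetical; the real interval is
		 * LPFC_CMF_INTERVAL ms): if total = 800 bytes arrived in
		 * ms = 80 of a nominal 100 ms interval, the rate is 10
		 * bytes/ms, cnt = 1000, and extra = 200 bytes are credited
		 * so the sample reflects a full interval.
		 */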
6045 		lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6046 	} else {
6047 		/* For Monitor mode or link down we want mbpi
6048 		 * to be the full link speed
6049 		 */
6050 		mbpi = phba->cmf_link_byte_count;
6051 		extra = 0;
6052 	}
6053 	phba->cmf_timer_cnt++;
6054 
6055 	if (io_cnt) {
6056 		/* Update congestion info buffer latency in us */
6057 		atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6058 		atomic64_add(lat, &phba->cgn_latency_evt);
6059 	}
6060 	busy = atomic_xchg(&phba->cmf_busy, 0);
6061 	max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6062 
6063 	/* Calculate MBPI for the next timer interval */
6064 	if (mbpi) {
6065 		if (mbpi > phba->cmf_link_byte_count ||
6066 		    phba->cmf_active_mode == LPFC_CFG_MONITOR)
6067 			mbpi = phba->cmf_link_byte_count;
6068 
6069 		/* Change max_bytes_per_interval to what the prior
6070 		 * CMF_SYNC_WQE cmpl indicated.
6071 		 */
6072 		if (mbpi != phba->cmf_max_bytes_per_interval)
6073 			phba->cmf_max_bytes_per_interval = mbpi;
6074 	}
6075 
6076 	/* Save rxmonitor information for debug */
6077 	if (phba->rx_monitor) {
6078 		entry.total_bytes = total;
6079 		entry.cmf_bytes = total + extra;
6080 		entry.rcv_bytes = rcv;
6081 		entry.cmf_busy = busy;
6082 		entry.cmf_info = phba->cmf_active_info;
6083 		if (io_cnt) {
6084 			entry.avg_io_latency = div_u64(lat, io_cnt);
6085 			entry.avg_io_size = div_u64(rcv, io_cnt);
6086 		} else {
6087 			entry.avg_io_latency = 0;
6088 			entry.avg_io_size = 0;
6089 		}
6090 		entry.max_read_cnt = max_read;
6091 		entry.io_cnt = io_cnt;
6092 		entry.max_bytes_per_interval = mbpi;
6093 		if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6094 			entry.timer_utilization = phba->cmf_last_ts;
6095 		else
6096 			entry.timer_utilization = ms;
6097 		entry.timer_interval = ms;
6098 		phba->cmf_last_ts = 0;
6099 
6100 		lpfc_rx_monitor_record(phba->rx_monitor, &entry);
6101 	}
6102 
6103 	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6104 		/* If Monitor mode, check if we are oversubscribed
6105 		 * against the full line rate.
6106 		 */
6107 		if (mbpi && total > mbpi)
6108 			atomic_inc(&phba->cgn_driver_evt_cnt);
6109 	}
6110 	phba->rx_block_cnt += div_u64(rcv, 512);  /* save 512 byte block cnt */
6111 
6112 	/* Since total_bytes has already been zeroed, it's okay to unblock
6113 	 * after max_bytes_per_interval is setup.
6114 	 */
6115 	if (atomic_xchg(&phba->cmf_bw_wait, 0))
6116 		queue_work(phba->wq, &phba->unblock_request_work);
6117 
6118 	/* SCSI IO is now unblocked */
6119 	atomic_set(&phba->cmf_stop_io, 0);
6120 
6121 skip:
6122 	hrtimer_forward_now(timer,
6123 			    ktime_set(0, timer_interval * NSEC_PER_MSEC));
6124 	return HRTIMER_RESTART;
6125 }
6126 
6127 #define trunk_link_status(__idx)\
6128 	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6129 	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6130 		"Link up" : "Link down") : "NA"
6131 /* Did port __idx report an error? */
6132 #define trunk_port_fault(__idx)\
6133 	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6134 	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6135 
6136 static void
6137 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6138 			      struct lpfc_acqe_fc_la *acqe_fc)
6139 {
6140 	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6141 	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6142 	u8 cnt = 0;
6143 
6144 	phba->sli4_hba.link_state.speed =
6145 		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6146 				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6147 
6148 	phba->sli4_hba.link_state.logical_speed =
6149 				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6150 	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6151 	phba->fc_linkspeed =
6152 		 lpfc_async_link_speed_to_read_top(
6153 				phba,
6154 				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6155 
6156 	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6157 		phba->trunk_link.link0.state =
6158 			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6159 			? LPFC_LINK_UP : LPFC_LINK_DOWN;
6160 		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6161 		cnt++;
6162 	}
6163 	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6164 		phba->trunk_link.link1.state =
6165 			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6166 			? LPFC_LINK_UP : LPFC_LINK_DOWN;
6167 		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6168 		cnt++;
6169 	}
6170 	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6171 		phba->trunk_link.link2.state =
6172 			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6173 			? LPFC_LINK_UP : LPFC_LINK_DOWN;
6174 		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6175 		cnt++;
6176 	}
6177 	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6178 		phba->trunk_link.link3.state =
6179 			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6180 			? LPFC_LINK_UP : LPFC_LINK_DOWN;
6181 		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6182 		cnt++;
6183 	}
6184 
6185 	if (cnt)
6186 		phba->trunk_link.phy_lnk_speed =
6187 			phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
6188 	else
6189 		phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
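	/* Example (hypothetical values): four configured trunk links with
	 * an aggregate logical speed of 128000 Mbps give a per-port
	 * physical link speed of 128000 / (4 * 1000) = 32 Gb/s.
	 */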
6190 
6191 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6192 			"2910 Async FC Trunking Event - Speed:%d\n"
6193 			"\tLogical speed:%d "
6194 			"port0: %s port1: %s port2: %s port3: %s\n",
6195 			phba->sli4_hba.link_state.speed,
6196 			phba->sli4_hba.link_state.logical_speed,
6197 			trunk_link_status(0), trunk_link_status(1),
6198 			trunk_link_status(2), trunk_link_status(3));
6199 
6200 	if (phba->cmf_active_mode != LPFC_CFG_OFF)
6201 		lpfc_cmf_signal_init(phba);
6202 
6203 	if (port_fault)
6204 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6205 				"3202 trunk error:0x%x (%s) seen on port0:%s "
6206 				/*
6207 				 * SLI-4: We have only 0xA error codes
6208 				 * defined as of now. print an appropriate
6209 				 * message in case driver needs to be updated.
6210 				 */
6211 				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6212 				"UNDEFINED. update driver." : trunk_errmsg[err],
6213 				trunk_port_fault(0), trunk_port_fault(1),
6214 				trunk_port_fault(2), trunk_port_fault(3));
6215 }
6216 
6217 
6218 /**
6219  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6220  * @phba: pointer to lpfc hba data structure.
6221  * @acqe_fc: pointer to the async fc completion queue entry.
6222  *
6223  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6224  * that the event was received and then issue a read_topology mailbox command so
6225  * that the rest of the driver will treat it the same as SLI3.
6226  **/
6227 static void
6228 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6229 {
6230 	LPFC_MBOXQ_t *pmb;
6231 	MAILBOX_t *mb;
6232 	struct lpfc_mbx_read_top *la;
6233 	char *log_level;
6234 	int rc;
6235 
6236 	if (bf_get(lpfc_trailer_type, acqe_fc) !=
6237 	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6238 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6239 				"2895 Non FC link Event detected.(%d)\n",
6240 				bf_get(lpfc_trailer_type, acqe_fc));
6241 		return;
6242 	}
6243 
6244 	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6245 	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6246 		lpfc_update_trunk_link_status(phba, acqe_fc);
6247 		return;
6248 	}
6249 
6250 	/* Keep the link status for extra SLI4 state machine reference */
6251 	phba->sli4_hba.link_state.speed =
6252 			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6253 				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6254 	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6255 	phba->sli4_hba.link_state.topology =
6256 				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6257 	phba->sli4_hba.link_state.status =
6258 				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6259 	phba->sli4_hba.link_state.type =
6260 				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6261 	phba->sli4_hba.link_state.number =
6262 				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6263 	phba->sli4_hba.link_state.fault =
6264 				bf_get(lpfc_acqe_link_fault, acqe_fc);
6265 	phba->sli4_hba.link_state.link_status =
6266 				bf_get(lpfc_acqe_fc_la_link_status, acqe_fc);
6267 
6268 	/*
6269 	 * Only select attention types need logical speed modification to what
6270 	 * was previously set.
6271 	 */
6272 	if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
6273 	    phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6274 		if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6275 		    LPFC_FC_LA_TYPE_LINK_DOWN)
6276 			phba->sli4_hba.link_state.logical_speed = 0;
6277 		else if (!phba->sli4_hba.conf_trunk)
6278 			phba->sli4_hba.link_state.logical_speed =
6279 				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6280 	}
6281 
6282 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6283 			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
6284 			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6285 			"%dMbps Fault:x%x Link Status:x%x\n",
6286 			phba->sli4_hba.link_state.speed,
6287 			phba->sli4_hba.link_state.topology,
6288 			phba->sli4_hba.link_state.status,
6289 			phba->sli4_hba.link_state.type,
6290 			phba->sli4_hba.link_state.number,
6291 			phba->sli4_hba.link_state.logical_speed,
6292 			phba->sli4_hba.link_state.fault,
6293 			phba->sli4_hba.link_state.link_status);
6294 
6295 	/*
6296 	 * The following attention types are informational only, providing
6297 	 * further details about link status.  Overwrite the value of
6298 	 * link_state.status appropriately.  No further action is required.
6299 	 */
6300 	if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6301 		switch (phba->sli4_hba.link_state.status) {
6302 		case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
6303 			log_level = KERN_WARNING;
6304 			phba->sli4_hba.link_state.status =
6305 					LPFC_FC_LA_TYPE_LINK_DOWN;
6306 			break;
6307 		case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
6308 			/*
6309 			 * During bb credit recovery establishment, receiving
6310 			 * this attention type is normal.  Link Up attention
6311 			 * type is expected to occur before this informational
6312 			 * attention type so keep the Link Up status.
6313 			 */
6314 			log_level = KERN_INFO;
6315 			phba->sli4_hba.link_state.status =
6316 					LPFC_FC_LA_TYPE_LINK_UP;
6317 			break;
6318 		default:
6319 			log_level = KERN_INFO;
6320 			break;
6321 		}
6322 		lpfc_log_msg(phba, log_level, LOG_SLI,
6323 			     "2992 Async FC event - Informational Link "
6324 			     "Attention Type x%x\n",
6325 			     bf_get(lpfc_acqe_fc_la_att_type, acqe_fc));
6326 		return;
6327 	}
6328 
6329 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6330 	if (!pmb) {
6331 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6332 				"2897 The mboxq allocation failed\n");
6333 		return;
6334 	}
6335 	rc = lpfc_mbox_rsrc_prep(phba, pmb);
6336 	if (rc) {
6337 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6338 				"2898 The mboxq prep failed\n");
6339 		goto out_free_pmb;
6340 	}
6341 
6342 	/* Cleanup any outstanding ELS commands */
6343 	lpfc_els_flush_all_cmd(phba);
6344 
6345 	/* Block ELS IOCBs until we have done process link event */
6346 	/* Block ELS IOCBs until we have finished processing the link event */
6347 
6348 	/* Update link event statistics */
6349 	phba->sli.slistat.link_event++;
6350 
6351 	/* Create lpfc_handle_latt mailbox command from link ACQE */
6352 	lpfc_read_topology(phba, pmb, pmb->ctx_buf);
6353 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6354 	pmb->vport = phba->pport;
6355 
6356 	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6357 		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6358 
6359 		switch (phba->sli4_hba.link_state.status) {
6360 		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6361 			phba->link_flag |= LS_MDS_LINK_DOWN;
6362 			break;
6363 		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6364 			phba->link_flag |= LS_MDS_LOOPBACK;
6365 			break;
6366 		default:
6367 			break;
6368 		}
6369 
6370 		/* Initialize completion status */
6371 		mb = &pmb->u.mb;
6372 		mb->mbxStatus = MBX_SUCCESS;
6373 
6374 		/* Parse port fault information field */
6375 		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6376 
6377 		/* Parse and translate link attention fields */
6378 		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6379 		la->eventTag = acqe_fc->event_tag;
6380 
6381 		if (phba->sli4_hba.link_state.status ==
6382 		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6383 			bf_set(lpfc_mbx_read_top_att_type, la,
6384 			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
6385 		} else {
6386 			bf_set(lpfc_mbx_read_top_att_type, la,
6387 			       LPFC_FC_LA_TYPE_LINK_DOWN);
6388 		}
6389 		/* Invoke the mailbox command callback function */
6390 		lpfc_mbx_cmpl_read_topology(phba, pmb);
6391 
6392 		return;
6393 	}
6394 
6395 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6396 	if (rc == MBX_NOT_FINISHED)
6397 		goto out_free_pmb;
6398 	return;
6399 
6400 out_free_pmb:
6401 	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6402 }
6403 
6404 /**
6405  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6406  * @phba: pointer to lpfc hba data structure.
6407  * @acqe_sli: pointer to the async SLI completion queue entry.
6408  *
6409  * This routine is to handle the SLI4 asynchronous SLI events.
6410  **/
6411 static void
6412 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6413 {
6414 	char port_name;
6415 	char message[128];
6416 	uint8_t status;
6417 	uint8_t evt_type;
6418 	uint8_t operational = 0;
6419 	struct temp_event temp_event_data;
6420 	struct lpfc_acqe_misconfigured_event *misconfigured;
6421 	struct lpfc_acqe_cgn_signal *cgn_signal;
6422 	struct Scsi_Host  *shost;
6423 	struct lpfc_vport **vports;
6424 	int rc, i, cnt;
6425 
6426 	evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6427 
6428 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6429 			"2901 Async SLI event - Type:%d, Event Data: x%08x "
6430 			"x%08x x%08x x%08x\n", evt_type,
6431 			acqe_sli->event_data1, acqe_sli->event_data2,
6432 			acqe_sli->event_data3, acqe_sli->trailer);
6433 
6434 	port_name = phba->Port[0];
6435 	if (port_name == 0x00)
6436 		port_name = '?'; /* port name is empty */
6437 
6438 	switch (evt_type) {
6439 	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6440 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6441 		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6442 		temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6443 
6444 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6445 				"3190 Over Temperature:%d Celsius- Port Name %c\n",
6446 				acqe_sli->event_data1, port_name);
6447 
6448 		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6449 		shost = lpfc_shost_from_vport(phba->pport);
6450 		fc_host_post_vendor_event(shost, fc_get_event_number(),
6451 					  sizeof(temp_event_data),
6452 					  (char *)&temp_event_data,
6453 					  SCSI_NL_VID_TYPE_PCI
6454 					  | PCI_VENDOR_ID_EMULEX);
6455 		break;
6456 	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6457 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6458 		temp_event_data.event_code = LPFC_NORMAL_TEMP;
6459 		temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6460 
6461 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
6462 				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
6463 				acqe_sli->event_data1, port_name);
6464 
6465 		shost = lpfc_shost_from_vport(phba->pport);
6466 		fc_host_post_vendor_event(shost, fc_get_event_number(),
6467 					  sizeof(temp_event_data),
6468 					  (char *)&temp_event_data,
6469 					  SCSI_NL_VID_TYPE_PCI
6470 					  | PCI_VENDOR_ID_EMULEX);
6471 		break;
6472 	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6473 		misconfigured = (struct lpfc_acqe_misconfigured_event *)
6474 					&acqe_sli->event_data1;
6475 
6476 		/* fetch the status for this port */
6477 		switch (phba->sli4_hba.lnk_info.lnk_no) {
6478 		case LPFC_LINK_NUMBER_0:
6479 			status = bf_get(lpfc_sli_misconfigured_port0_state,
6480 					&misconfigured->theEvent);
6481 			operational = bf_get(lpfc_sli_misconfigured_port0_op,
6482 					&misconfigured->theEvent);
6483 			break;
6484 		case LPFC_LINK_NUMBER_1:
6485 			status = bf_get(lpfc_sli_misconfigured_port1_state,
6486 					&misconfigured->theEvent);
6487 			operational = bf_get(lpfc_sli_misconfigured_port1_op,
6488 					&misconfigured->theEvent);
6489 			break;
6490 		case LPFC_LINK_NUMBER_2:
6491 			status = bf_get(lpfc_sli_misconfigured_port2_state,
6492 					&misconfigured->theEvent);
6493 			operational = bf_get(lpfc_sli_misconfigured_port2_op,
6494 					&misconfigured->theEvent);
6495 			break;
6496 		case LPFC_LINK_NUMBER_3:
6497 			status = bf_get(lpfc_sli_misconfigured_port3_state,
6498 					&misconfigured->theEvent);
6499 			operational = bf_get(lpfc_sli_misconfigured_port3_op,
6500 					&misconfigured->theEvent);
6501 			break;
6502 		default:
6503 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6504 					"3296 "
6505 					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6506 					"event: Invalid link %d",
6507 					phba->sli4_hba.lnk_info.lnk_no);
6508 			return;
6509 		}
6510 
6511 		/* Skip if optic state unchanged */
6512 		if (phba->sli4_hba.lnk_info.optic_state == status)
6513 			return;
6514 
6515 		switch (status) {
6516 		case LPFC_SLI_EVENT_STATUS_VALID:
6517 			sprintf(message, "Physical Link is functional");
6518 			break;
6519 		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6520 			sprintf(message, "Optics faulted/incorrectly "
6521 				"installed/not installed - Reseat optics, "
6522 				"if issue not resolved, replace.");
6523 			break;
6524 		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6525 			sprintf(message,
6526 				"Optics of two types installed - Remove one "
6527 				"optic or install matching pair of optics.");
6528 			break;
6529 		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6530 			sprintf(message, "Incompatible optics - Replace with "
6531 				"compatible optics for card to function.");
6532 			break;
6533 		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6534 			sprintf(message, "Unqualified optics - Replace with "
6535 				"Avago optics for Warranty and Technical "
6536 				"Support - Link is%s operational",
6537 				(operational) ? " not" : "");
6538 			break;
6539 		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6540 			sprintf(message, "Uncertified optics - Replace with "
6541 				"Avago-certified optics to enable link "
6542 				"operation - Link is%s operational",
6543 				(operational) ? " not" : "");
6544 			break;
6545 		default:
6546 			/* firmware is reporting a status we don't know about */
6547 			sprintf(message, "Unknown event status x%02x", status);
6548 			break;
6549 		}
6550 
6551 		/* Issue READ_CONFIG mbox command to refresh supported speeds */
6552 		rc = lpfc_sli4_read_config(phba);
6553 		if (rc) {
6554 			phba->lmt = 0;
6555 			lpfc_printf_log(phba, KERN_ERR,
6556 					LOG_TRACE_EVENT,
6557 					"3194 Unable to retrieve supported "
6558 					"speeds, rc = 0x%x\n", rc);
6559 		}
6560 		rc = lpfc_sli4_refresh_params(phba);
6561 		if (rc) {
6562 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6563 					"3174 Unable to update pls support, "
6564 					"rc x%x\n", rc);
6565 		}
6566 		vports = lpfc_create_vport_work_array(phba);
6567 		if (vports != NULL) {
6568 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6569 					i++) {
6570 				shost = lpfc_shost_from_vport(vports[i]);
6571 				lpfc_host_supported_speeds_set(shost);
6572 			}
6573 		}
6574 		lpfc_destroy_vport_work_array(phba, vports);
6575 
6576 		phba->sli4_hba.lnk_info.optic_state = status;
6577 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6578 				"3176 Port Name %c %s\n", port_name, message);
6579 		break;
6580 	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6581 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6582 				"3192 Remote DPort Test Initiated - "
6583 				"Event Data1:x%08x Event Data2: x%08x\n",
6584 				acqe_sli->event_data1, acqe_sli->event_data2);
6585 		break;
6586 	case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6587 		/* Call FW to obtain active parms */
6588 		lpfc_sli4_cgn_parm_chg_evt(phba);
6589 		break;
6590 	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6591 		/* Misconfigured WWN. Reports that the SLI Port is configured
6592 		 * to use FA-WWN, but the attached device doesn't support it.
6593 		 * Event Data1 - N.A, Event Data2 - N.A
6594 		 * This event only happens on the physical port.
6595 		 */
6596 		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6597 			     "2699 Misconfigured FA-PWWN - Attached device "
6598 			     "does not support FA-PWWN\n");
6599 		phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6600 		memset(phba->pport->fc_portname.u.wwn, 0,
6601 		       sizeof(struct lpfc_name));
6602 		break;
6603 	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6604 		/* EEPROM failure. No driver action is required */
6605 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6606 			     "2518 EEPROM failure - "
6607 			     "Event Data1: x%08x Event Data2: x%08x\n",
6608 			     acqe_sli->event_data1, acqe_sli->event_data2);
6609 		break;
6610 	case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6611 		if (phba->cmf_active_mode == LPFC_CFG_OFF)
6612 			break;
6613 		cgn_signal = (struct lpfc_acqe_cgn_signal *)
6614 					&acqe_sli->event_data1;
6615 		phba->cgn_acqe_cnt++;
6616 
6617 		cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6618 		atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6619 		atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6620 
6621 		/* no threshold for CMF, even 1 signal will trigger an event */
6622 
6623 		/* Alarm overrides warning, so check that first */
6624 		if (cgn_signal->alarm_cnt) {
6625 			if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6626 				/* Keep track of alarm cnt for CMF_SYNC_WQE */
6627 				atomic_add(cgn_signal->alarm_cnt,
6628 					   &phba->cgn_sync_alarm_cnt);
6629 			}
6630 		} else if (cnt) {
6631 			/* signal action needs to be taken */
6632 			if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6633 			    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6634 				/* Keep track of warning cnt for CMF_SYNC_WQE */
6635 				atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6636 			}
6637 		}
6638 		break;
6639 	case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
6640 		/* May be accompanied by a temperature event */
6641 		lpfc_printf_log(phba, KERN_INFO,
6642 				LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
6643 				"2902 Remote Degrade Signaling: x%08x x%08x "
6644 				"x%08x\n",
6645 				acqe_sli->event_data1, acqe_sli->event_data2,
6646 				acqe_sli->event_data3);
6647 		break;
6648 	case LPFC_SLI_EVENT_TYPE_RESET_CM_STATS:
6649 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6650 				"2905 Reset CM statistics\n");
6651 		lpfc_sli4_async_cmstat_evt(phba);
6652 		break;
6653 	default:
6654 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6655 				"3193 Unrecognized SLI event, type: 0x%x",
6656 				evt_type);
6657 		break;
6658 	}
6659 }
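/*
 * Both temperature cases above follow one reporting pattern: fill a
 * struct temp_event and post it as a vendor-unique FC transport event.
 * A minimal sketch of that shared shape (lpfc_post_temp_event is a
 * hypothetical name, not a driver function):
 *
 *	static void lpfc_post_temp_event(struct lpfc_hba *phba,
 *					 uint32_t event_code, uint32_t temp)
 *	{
 *		struct temp_event ev;
 *		struct Scsi_Host *shost;
 *
 *		ev.event_type = FC_REG_TEMPERATURE_EVENT;
 *		ev.event_code = event_code;
 *		ev.data = temp;
 *		shost = lpfc_shost_from_vport(phba->pport);
 *		fc_host_post_vendor_event(shost, fc_get_event_number(),
 *					  sizeof(ev), (char *)&ev,
 *					  SCSI_NL_VID_TYPE_PCI |
 *					  PCI_VENDOR_ID_EMULEX);
 *	}
 */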
6660 
6661 /**
6662  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6663  * @vport: pointer to vport data structure.
6664  *
6665  * This routine is to perform Clear Virtual Link (CVL) on a vport in
6666  * response to a CVL event.
6667  *
6668  * Return the pointer to the ndlp with the vport if successful, otherwise
6669  * return NULL.
6670  **/
6671 static struct lpfc_nodelist *
6672 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6673 {
6674 	struct lpfc_nodelist *ndlp;
6675 	struct Scsi_Host *shost;
6676 	struct lpfc_hba *phba;
6677 
6678 	if (!vport)
6679 		return NULL;
6680 	phba = vport->phba;
6681 	if (!phba)
6682 		return NULL;
6683 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
6684 	if (!ndlp) {
6685 		/* Cannot find existing Fabric ndlp, so allocate a new one */
6686 		ndlp = lpfc_nlp_init(vport, Fabric_DID);
6687 		if (!ndlp)
6688 			return NULL;
6689 		/* Set the node type */
6690 		ndlp->nlp_type |= NLP_FABRIC;
6691 		/* Put ndlp onto node list */
6692 		lpfc_enqueue_node(vport, ndlp);
6693 	}
6694 	if ((phba->pport->port_state < LPFC_FLOGI) &&
6695 		(phba->pport->port_state != LPFC_VPORT_FAILED))
6696 		return NULL;
6697 	/* If virtual link is not yet instantiated ignore CVL */
6698 	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6699 		&& (vport->port_state != LPFC_VPORT_FAILED))
6700 		return NULL;
6701 	shost = lpfc_shost_from_vport(vport);
6702 	if (!shost)
6703 		return NULL;
6704 	lpfc_linkdown_port(vport);
6705 	lpfc_cleanup_pending_mbox(vport);
6706 	set_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
6707 
6708 	return ndlp;
6709 }
6710 
6711 /**
6712  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6713  * @phba: pointer to lpfc hba data structure.
6714  *
6715  * This routine is to perform Clear Virtual Link (CVL) on all vports in
6716  * response to a FCF dead event.
6717  **/
6718 static void
6719 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6720 {
6721 	struct lpfc_vport **vports;
6722 	int i;
6723 
6724 	vports = lpfc_create_vport_work_array(phba);
6725 	if (vports)
6726 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6727 			lpfc_sli4_perform_vport_cvl(vports[i]);
6728 	lpfc_destroy_vport_work_array(phba, vports);
6729 }
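/*
 * The loop above is the standard vport iteration idiom in this file:
 * lpfc_create_vport_work_array() returns a NULL-terminated snapshot of
 * the active vports (or NULL), and lpfc_destroy_vport_work_array() is
 * called unconditionally, even with a NULL array. The general shape:
 *
 *	vports = lpfc_create_vport_work_array(phba);
 *	if (vports)
 *		for (i = 0; i <= phba->max_vports && vports[i]; i++)
 *			... act on vports[i] ...
 *	lpfc_destroy_vport_work_array(phba, vports);
 */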
6730 
6731 /**
6732  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6733  * @phba: pointer to lpfc hba data structure.
6734  * @acqe_fip: pointer to the async fcoe completion queue entry.
6735  *
6736  * This routine is to handle the SLI4 asynchronous fcoe event.
6737  **/
6738 static void
6739 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6740 			struct lpfc_acqe_fip *acqe_fip)
6741 {
6742 	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6743 	int rc;
6744 	struct lpfc_vport *vport;
6745 	struct lpfc_nodelist *ndlp;
6746 	int active_vlink_present;
6747 	struct lpfc_vport **vports;
6748 	int i;
6749 
6750 	phba->fc_eventTag = acqe_fip->event_tag;
6751 	phba->fcoe_eventtag = acqe_fip->event_tag;
6752 	switch (event_type) {
6753 	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6754 	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6755 		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6756 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6757 					"2546 New FCF event, evt_tag:x%x, "
6758 					"index:x%x\n",
6759 					acqe_fip->event_tag,
6760 					acqe_fip->index);
6761 		else
6762 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6763 					LOG_DISCOVERY,
6764 					"2788 FCF param modified event, "
6765 					"evt_tag:x%x, index:x%x\n",
6766 					acqe_fip->event_tag,
6767 					acqe_fip->index);
6768 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6769 			/*
6770 			 * During period of FCF discovery, read the FCF
6771 			 * table record indexed by the event to update
6772 			 * FCF roundrobin failover eligible FCF bmask.
6773 			 */
6774 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6775 					LOG_DISCOVERY,
6776 					"2779 Read FCF (x%x) for updating "
6777 					"roundrobin FCF failover bmask\n",
6778 					acqe_fip->index);
6779 			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6780 		}
6781 
6782 		/* If the FCF discovery is in progress, do nothing. */
6783 		if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
6784 			break;
6785 		spin_lock_irq(&phba->hbalock);
6786 		/* If fast FCF failover rescan event is pending, do nothing */
6787 		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6788 			spin_unlock_irq(&phba->hbalock);
6789 			break;
6790 		}
6791 
6792 		/* If the FCF has been in discovered state, do nothing. */
6793 		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6794 			spin_unlock_irq(&phba->hbalock);
6795 			break;
6796 		}
6797 		spin_unlock_irq(&phba->hbalock);
6798 
6799 		/* Otherwise, scan the entire FCF table and re-discover SAN */
6800 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6801 				"2770 Start FCF table scan per async FCF "
6802 				"event, evt_tag:x%x, index:x%x\n",
6803 				acqe_fip->event_tag, acqe_fip->index);
6804 		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6805 						     LPFC_FCOE_FCF_GET_FIRST);
6806 		if (rc)
6807 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6808 					"2547 Issue FCF scan read FCF mailbox "
6809 					"command failed (x%x)\n", rc);
6810 		break;
6811 
6812 	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6813 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6814 				"2548 FCF Table full count 0x%x tag 0x%x\n",
6815 				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6816 				acqe_fip->event_tag);
6817 		break;
6818 
6819 	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6820 		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6821 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6822 				"2549 FCF (x%x) disconnected from network, "
6823 				 "tag:x%x\n", acqe_fip->index,
6824 				 acqe_fip->event_tag);
6825 		/*
6826 		 * If we are in the middle of FCF failover process, clear
6827 		 * the corresponding FCF bit in the roundrobin bitmap.
6828 		 */
6829 		spin_lock_irq(&phba->hbalock);
6830 		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6831 		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6832 			spin_unlock_irq(&phba->hbalock);
6833 			/* Update FLOGI FCF failover eligible FCF bmask */
6834 			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6835 			break;
6836 		}
6837 		spin_unlock_irq(&phba->hbalock);
6838 
6839 		/* If the event is not for currently used fcf do nothing */
6840 		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6841 			break;
6842 
6843 		/*
6844 		 * Otherwise, request the port to rediscover the entire FCF
6845 		 * table for a fast recovery in case the current FCF is no
6846 		 * longer valid, as we are not already in the middle of the
6847 		 * FCF failover process.
6848 		 */
6849 		spin_lock_irq(&phba->hbalock);
6850 		/* Mark the fast failover process in progress */
6851 		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6852 		spin_unlock_irq(&phba->hbalock);
6853 
6854 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6855 				"2771 Start FCF fast failover process due to "
6856 				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6857 				"\n", acqe_fip->event_tag, acqe_fip->index);
6858 		rc = lpfc_sli4_redisc_fcf_table(phba);
6859 		if (rc) {
6860 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6861 					LOG_TRACE_EVENT,
6862 					"2772 Issue FCF rediscover mailbox "
6863 					"command failed, fail through to FCF "
6864 					"dead event\n");
6865 			spin_lock_irq(&phba->hbalock);
6866 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6867 			spin_unlock_irq(&phba->hbalock);
6868 			/*
6869 			 * Last resort will fail over by treating this
6870 			 * as a link down to FCF registration.
6871 			 */
6872 			lpfc_sli4_fcf_dead_failthrough(phba);
6873 		} else {
6874 			/* Reset FCF roundrobin bmask for new discovery */
6875 			lpfc_sli4_clear_fcf_rr_bmask(phba);
6876 			/*
6877 			 * Handling fast FCF failover to a DEAD FCF event is
6878 			 * considered equivalent to receiving CVL on all vports.
6879 			 */
6880 			lpfc_sli4_perform_all_vport_cvl(phba);
6881 		}
6882 		break;
6883 	case LPFC_FIP_EVENT_TYPE_CVL:
6884 		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6885 		lpfc_printf_log(phba, KERN_ERR,
6886 				LOG_TRACE_EVENT,
6887 			"2718 Clear Virtual Link Received for VPI 0x%x"
6888 			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6889 
6890 		vport = lpfc_find_vport_by_vpid(phba,
6891 						acqe_fip->index);
6892 		ndlp = lpfc_sli4_perform_vport_cvl(vport);
6893 		if (!ndlp)
6894 			break;
6895 		active_vlink_present = 0;
6896 
6897 		vports = lpfc_create_vport_work_array(phba);
6898 		if (vports) {
6899 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6900 					i++) {
6901 				if (!test_bit(FC_VPORT_CVL_RCVD,
6902 					      &vports[i]->fc_flag) &&
6903 				    vports[i]->port_state > LPFC_FDISC) {
6904 					active_vlink_present = 1;
6905 					break;
6906 				}
6907 			}
6908 			lpfc_destroy_vport_work_array(phba, vports);
6909 		}
6910 
6911 		/*
6912 		 * Don't re-instantiate if vport is marked for deletion.
6913 		 * If we are here first then vport_delete is going to wait
6914 		 * for discovery to complete.
6915 		 */
6916 		if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
6917 		    active_vlink_present) {
6918 			/*
6919 			 * If there are other active VLinks present,
6920 			 * re-instantiate the Vlink using FDISC.
6921 			 */
6922 			mod_timer(&ndlp->nlp_delayfunc,
6923 				  jiffies + secs_to_jiffies(1));
6924 			set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
6925 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6926 			vport->port_state = LPFC_FDISC;
6927 		} else {
6928 			/*
6929 			 * Otherwise, we request the port to rediscover
6930 			 * the entire FCF table for a fast recovery in
6931 			 * case the current FCF is no longer valid, if
6932 			 * we are not already in the FCF failover
6933 			 * process.
6934 			 */
6935 			spin_lock_irq(&phba->hbalock);
6936 			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6937 				spin_unlock_irq(&phba->hbalock);
6938 				break;
6939 			}
6940 			/* Mark the fast failover process in progress */
6941 			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6942 			spin_unlock_irq(&phba->hbalock);
6943 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6944 					LOG_DISCOVERY,
6945 					"2773 Start FCF failover per CVL, "
6946 					"evt_tag:x%x\n", acqe_fip->event_tag);
6947 			rc = lpfc_sli4_redisc_fcf_table(phba);
6948 			if (rc) {
6949 				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6950 						LOG_TRACE_EVENT,
6951 						"2774 Issue FCF rediscover "
6952 						"mailbox command failed, "
6953 						"fail through to CVL event\n");
6954 				spin_lock_irq(&phba->hbalock);
6955 				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6956 				spin_unlock_irq(&phba->hbalock);
6957 				/*
6958 				 * Last resort will be re-try on the
6959 				 * current registered FCF entry.
6960 				 */
6961 				lpfc_retry_pport_discovery(phba);
6962 			} else
6963 				/*
6964 				 * Reset FCF roundrobin bmask for new
6965 				 * discovery.
6966 				 */
6967 				lpfc_sli4_clear_fcf_rr_bmask(phba);
6968 		}
6969 		break;
6970 	default:
6971 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6972 				"0288 Unknown FCoE event type 0x%x event tag "
6973 				"0x%x\n", event_type, acqe_fip->event_tag);
6974 		break;
6975 	}
6976 }
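/*
 * fcf_flag handling in this handler keeps to one locking shape: test or
 * update the flags only under hbalock, and drop the lock before any
 * mailbox or rediscovery work. A generic sketch (FLAG_A and FLAG_B are
 * placeholders, not driver flags):
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (phba->fcf.fcf_flag & FLAG_A) {
 *		spin_unlock_irq(&phba->hbalock);
 *		return;
 *	}
 *	phba->fcf.fcf_flag |= FLAG_B;
 *	spin_unlock_irq(&phba->hbalock);
 *	rc = lpfc_sli4_redisc_fcf_table(phba);
 */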
6977 
6978 /**
6979  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6980  * @phba: pointer to lpfc hba data structure.
6981  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6982  *
6983  * This routine is to handle the SLI4 asynchronous dcbx event.
6984  **/
6985 static void
6986 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6987 			 struct lpfc_acqe_dcbx *acqe_dcbx)
6988 {
6989 	phba->fc_eventTag = acqe_dcbx->event_tag;
6990 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6991 			"0290 The SLI4 DCBX asynchronous event is not "
6992 			"handled yet\n");
6993 }
6994 
6995 /**
6996  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6997  * @phba: pointer to lpfc hba data structure.
6998  * @acqe_grp5: pointer to the async grp5 completion queue entry.
6999  *
7000  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
7001  * is an asynchronous notification of a logical link speed change.  The Port
7002  * reports the logical link speed in units of 10Mbps.
7003  **/
7004 static void
7005 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7006 			 struct lpfc_acqe_grp5 *acqe_grp5)
7007 {
7008 	uint16_t prev_ll_spd;
7009 
7010 	phba->fc_eventTag = acqe_grp5->event_tag;
7011 	phba->fcoe_eventtag = acqe_grp5->event_tag;
7012 	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7013 	phba->sli4_hba.link_state.logical_speed =
7014 		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
7015 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7016 			"2789 GRP5 Async Event: Updating logical link speed "
7017 			"from %dMbps to %dMbps\n", prev_ll_spd,
7018 			phba->sli4_hba.link_state.logical_speed);
7019 }
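/*
 * Unit conversion example for the GRP5 event: the firmware reports the
 * logical link speed in 10 Mbps units, so an lpfc_acqe_grp5_llink_spd
 * value of 1000 becomes 1000 * 10 = 10000 Mbps (10 Gbps) in
 * link_state.logical_speed.
 */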
7020 
7021 /**
7022  * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7023  * @phba: pointer to lpfc hba data structure.
7024  *
7025  * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
7026  * is an asynchronous notification of a request to reset CM stats.
7027  **/
7028 static void
7029 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7030 {
7031 	if (!phba->cgn_i)
7032 		return;
7033 	lpfc_init_congestion_stat(phba);
7034 }
7035 
7036 /**
7037  * lpfc_cgn_params_val - Validate FW congestion parameters.
7038  * @phba: pointer to lpfc hba data structure.
7039  * @p_cfg_param: pointer to FW provided congestion parameters.
7040  *
7041  * This routine validates the congestion parameters passed
7042  * by the FW to the driver via an ACQE event.
7043  **/
7044 static void
7045 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7046 {
7047 	spin_lock_irq(&phba->hbalock);
7048 
7049 	if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7050 			     LPFC_CFG_MONITOR)) {
7051 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7052 				"6225 CMF mode param out of range: %d\n",
7053 				 p_cfg_param->cgn_param_mode);
7054 		p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7055 	}
7056 
7057 	spin_unlock_irq(&phba->hbalock);
7058 }
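/*
 * Validation example: an out-of-range mode is corrected rather than
 * rejected. With an assumed (illustrative) firmware value of 7:
 *
 *	p_cfg_param->cgn_param_mode = 7;
 *	lpfc_cgn_params_val(phba, p_cfg_param);
 *	-> message 6225 is logged and cgn_param_mode is forced to
 *	   LPFC_CFG_OFF
 */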
7059 
7060 static const char * const lpfc_cmf_mode_to_str[] = {
7061 	"OFF",
7062 	"MANAGED",
7063 	"MONITOR",
7064 };
7065 
7066 /**
7067  * lpfc_cgn_params_parse - Process a FW cong parm change event
7068  * @phba: pointer to lpfc hba data structure.
7069  * @p_cgn_param: pointer to a data buffer with the FW cong params.
7070  * @len: the size of pdata in bytes.
7071  *
7072  * This routine validates the congestion management buffer signature
7073  * from the FW, validates the contents and makes corrections for
7074  * valid, in-range values.  If the signature magic is correct and
7075  * after parameter validation, the contents are copied to the driver's
7076  * @phba structure. If the magic is incorrect, an error message is
7077  * logged.
7078  **/
7079 static void
7080 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7081 		      struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7082 {
7083 	struct lpfc_cgn_info *cp;
7084 	uint32_t crc, oldmode;
7085 	char acr_string[4] = {0};
7086 
7087 	/* Make sure the FW has encoded the correct magic number to
7088 	 * validate the congestion parameter in FW memory.
7089 	 */
7090 	if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7091 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7092 				"4668 FW cgn parm buffer data: "
7093 				"magic 0x%x version %d mode %d "
7094 				"level0 %d level1 %d "
7095 				"level2 %d byte13 %d "
7096 				"byte14 %d byte15 %d "
7097 				"byte11 %d byte12 %d activeMode %d\n",
7098 				p_cgn_param->cgn_param_magic,
7099 				p_cgn_param->cgn_param_version,
7100 				p_cgn_param->cgn_param_mode,
7101 				p_cgn_param->cgn_param_level0,
7102 				p_cgn_param->cgn_param_level1,
7103 				p_cgn_param->cgn_param_level2,
7104 				p_cgn_param->byte13,
7105 				p_cgn_param->byte14,
7106 				p_cgn_param->byte15,
7107 				p_cgn_param->byte11,
7108 				p_cgn_param->byte12,
7109 				phba->cmf_active_mode);
7110 
7111 		oldmode = phba->cmf_active_mode;
7112 
7113 		/* Any parameters out of range are corrected to defaults
7114 		 * by this routine.  No need to fail.
7115 		 */
7116 		lpfc_cgn_params_val(phba, p_cgn_param);
7117 
7118 		/* Parameters are verified, move them into driver storage */
7119 		spin_lock_irq(&phba->hbalock);
7120 		memcpy(&phba->cgn_p, p_cgn_param,
7121 		       sizeof(struct lpfc_cgn_param));
7122 
7123 		/* Update parameters in congestion info buffer now */
7124 		if (phba->cgn_i) {
7125 			cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7126 			cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7127 			cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7128 			cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7129 			cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7130 			crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
7131 			cp->cgn_info_crc = cpu_to_le32(crc);
7132 		}
7133 		spin_unlock_irq(&phba->hbalock);
7134 
7135 		phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7136 
7137 		switch (oldmode) {
7138 		case LPFC_CFG_OFF:
7139 			if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7140 				/* Turning CMF on */
7141 				lpfc_cmf_start(phba);
7142 
7143 				if (phba->link_state >= LPFC_LINK_UP) {
7144 					phba->cgn_reg_fpin =
7145 						phba->cgn_init_reg_fpin;
7146 					phba->cgn_reg_signal =
7147 						phba->cgn_init_reg_signal;
7148 					lpfc_issue_els_edc(phba->pport, 0);
7149 				}
7150 			}
7151 			break;
7152 		case LPFC_CFG_MANAGED:
7153 			switch (phba->cgn_p.cgn_param_mode) {
7154 			case LPFC_CFG_OFF:
7155 				/* Turning CMF off */
7156 				lpfc_cmf_stop(phba);
7157 				if (phba->link_state >= LPFC_LINK_UP)
7158 					lpfc_issue_els_edc(phba->pport, 0);
7159 				break;
7160 			case LPFC_CFG_MONITOR:
7161 				phba->cmf_max_bytes_per_interval =
7162 					phba->cmf_link_byte_count;
7163 
7164 				/* Resume blocked IO - unblock on workqueue */
7165 				queue_work(phba->wq,
7166 					   &phba->unblock_request_work);
7167 				break;
7168 			}
7169 			break;
7170 		case LPFC_CFG_MONITOR:
7171 			switch (phba->cgn_p.cgn_param_mode) {
7172 			case LPFC_CFG_OFF:
7173 				/* Turning CMF off */
7174 				lpfc_cmf_stop(phba);
7175 				if (phba->link_state >= LPFC_LINK_UP)
7176 					lpfc_issue_els_edc(phba->pport, 0);
7177 				break;
7178 			case LPFC_CFG_MANAGED:
7179 				lpfc_cmf_signal_init(phba);
7180 				break;
7181 			}
7182 			break;
7183 		}
7184 		if (oldmode != LPFC_CFG_OFF ||
7185 		    oldmode != phba->cgn_p.cgn_param_mode) {
7186 			if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
7187 				scnprintf(acr_string, sizeof(acr_string), "%u",
7188 					  phba->cgn_p.cgn_param_level0);
7189 			else
7190 				scnprintf(acr_string, sizeof(acr_string), "NA");
7191 
7192 			dev_info(&phba->pcidev->dev, "%d: "
7193 				 "4663 CMF: Mode %s acr %s\n",
7194 				 phba->brd_no,
7195 				 lpfc_cmf_mode_to_str
7196 				 [phba->cgn_p.cgn_param_mode],
7197 				 acr_string);
7198 		}
7199 	} else {
7200 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7201 				"4669 FW cgn parm buf wrong magic 0x%x "
7202 				"version %d\n", p_cgn_param->cgn_param_magic,
7203 				p_cgn_param->cgn_param_version);
7204 	}
7205 }
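/*
 * The congestion info buffer update above is CRC-protected: after any
 * field change the CRC must be recomputed over the full
 * LPFC_CGN_INFO_SZ region and stored little-endian, exactly as done
 * under hbalock:
 *
 *	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
 *	cp->cgn_info_crc = cpu_to_le32(crc);
 */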
7206 
7207 /**
7208  * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7209  * @phba: pointer to lpfc hba data structure.
7210  *
7211  * This routine issues a read_object mailbox command to
7212  * get the congestion management parameters from the FW,
7213  * parses them, and updates the driver-maintained values.
7214  *
7215  * Returns
7216  *  0     if the object was empty
7217  *  negative errno if an error was encountered
7218  *  Count if bytes were read from object
7219  **/
7220 int
7221 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7222 {
7223 	int ret = 0;
7224 	struct lpfc_cgn_param *p_cgn_param = NULL;
7225 	u32 *pdata = NULL;
7226 	u32 len = 0;
7227 
7228 	/* Find out if the FW has a new set of congestion parameters. */
7229 	len = sizeof(struct lpfc_cgn_param);
7230 	pdata = kzalloc(len, GFP_KERNEL);
7231 	if (!pdata)
7232 		return -ENOMEM;
7233 	ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7234 			       pdata, len);
7235 
7236 	/* 0 means no data.  A negative means error.  A positive means
7237 	 * bytes were copied.
7238 	 */
7239 	if (!ret) {
7240 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7241 				"4670 CGN RD OBJ returns no data\n");
7242 		goto rd_obj_err;
7243 	} else if (ret < 0) {
7244 		/* Some error.  Just exit and return it to the caller. */
7245 		goto rd_obj_err;
7246 	}
7247 
7248 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7249 			"6234 READ CGN PARAMS Successful %d\n", len);
7250 
7251 	/* Parse data pointer over len and update the phba congestion
7252 	 * parameters with values passed back.  The receive rate values
7253 	 * may have been altered in FW, but take no action here.
7254 	 */
7255 	p_cgn_param = (struct lpfc_cgn_param *)pdata;
7256 	lpfc_cgn_params_parse(phba, p_cgn_param, len);
7257 
7258  rd_obj_err:
7259 	kfree(pdata);
7260 	return ret;
7261 }
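/*
 * Caller's view of the tri-state return (a sketch; the real consumer
 * is lpfc_sli4_cgn_parm_chg_evt() below):
 *
 *	ret = lpfc_sli4_cgn_params_read(phba);
 *	if (ret < 0)
 *		... read failed, keep the current parameters ...
 *	else if (ret == 0)
 *		... object empty, nothing to parse ...
 *	else
 *		... ret bytes were read and parsed ...
 */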
7262 
7263 /**
7264  * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7265  * @phba: pointer to lpfc hba data structure.
7266  *
7267  * The FW generated Async ACQE SLI event calls this routine when
7268  * the event type is an SLI Internal Port Event and the Event Code
7269  * indicates a change to the FW maintained congestion parameters.
7270  *
7271  * This routine executes a Read_Object mailbox call to obtain the
7272  * current congestion parameters maintained in FW and corrects
7273  * the driver's active congestion parameters.
7274  *
7275  * The acqe event is not passed because there is no further data
7276  * required.
7277  *
7278  * Returns nonzero error if event processing encountered an error.
7279  * Zero otherwise for success.
7280  **/
7281 static int
7282 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7283 {
7284 	int ret = 0;
7285 
7286 	if (!phba->sli4_hba.pc_sli4_params.cmf) {
7287 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7288 				"4664 Cgn Evt when E2E off. Drop event\n");
7289 		return -EACCES;
7290 	}
7291 
7292 	/* If the event is claiming an empty object, it's ok.  A write
7293 	 * could have cleared it.  Only error is a negative return
7294 	 * status.
7295 	 */
7296 	ret = lpfc_sli4_cgn_params_read(phba);
7297 	if (ret < 0) {
7298 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7299 				"4667 Error reading Cgn Params (%d)\n",
7300 				ret);
7301 	} else if (!ret) {
7302 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7303 				"4673 CGN Event empty object.\n");
7304 	}
7305 	return ret;
7306 }
7307 
7308 /**
7309  * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7310  * @phba: pointer to lpfc hba data structure.
7311  *
7312  * This routine is invoked by the worker thread to process all the pending
7313  * SLI4 asynchronous events.
7314  **/
7315 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7316 {
7317 	struct lpfc_cq_event *cq_event;
7318 	unsigned long iflags;
7319 
7320 	/* First, declare the async event has been handled */
7321 	clear_bit(ASYNC_EVENT, &phba->hba_flag);
7322 
7323 	/* Now, handle all the async events */
7324 	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7325 	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7326 		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7327 				 cq_event, struct lpfc_cq_event, list);
7328 		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7329 				       iflags);
7330 
7331 		/* Process the asynchronous event */
7332 		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7333 		case LPFC_TRAILER_CODE_LINK:
7334 			lpfc_sli4_async_link_evt(phba,
7335 						 &cq_event->cqe.acqe_link);
7336 			break;
7337 		case LPFC_TRAILER_CODE_FCOE:
7338 			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7339 			break;
7340 		case LPFC_TRAILER_CODE_DCBX:
7341 			lpfc_sli4_async_dcbx_evt(phba,
7342 						 &cq_event->cqe.acqe_dcbx);
7343 			break;
7344 		case LPFC_TRAILER_CODE_GRP5:
7345 			lpfc_sli4_async_grp5_evt(phba,
7346 						 &cq_event->cqe.acqe_grp5);
7347 			break;
7348 		case LPFC_TRAILER_CODE_FC:
7349 			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7350 			break;
7351 		case LPFC_TRAILER_CODE_SLI:
7352 			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7353 			break;
7354 		default:
7355 			lpfc_printf_log(phba, KERN_ERR,
7356 					LOG_TRACE_EVENT,
7357 					"1804 Invalid asynchronous event code: "
7358 					"x%x\n", bf_get(lpfc_trailer_code,
7359 					&cq_event->cqe.mcqe_cmpl));
7360 			break;
7361 		}
7362 
7363 		/* Free the completion event processed to the free pool */
7364 		lpfc_sli4_cq_event_release(phba, cq_event);
7365 		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7366 	}
7367 	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7368 }
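/*
 * The drain loop above uses the usual drop-lock-while-processing shape:
 * the list lock covers only list_remove_head(), is released while the
 * event is dispatched (handlers may block), and is re-taken before the
 * next emptiness test. Generic form (queue, item_type and process() are
 * placeholders):
 *
 *	spin_lock_irqsave(&lock, iflags);
 *	while (!list_empty(&queue)) {
 *		list_remove_head(&queue, item, struct item_type, list);
 *		spin_unlock_irqrestore(&lock, iflags);
 *		process(item);
 *		spin_lock_irqsave(&lock, iflags);
 *	}
 *	spin_unlock_irqrestore(&lock, iflags);
 */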
7369 
7370 /**
7371  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7372  * @phba: pointer to lpfc hba data structure.
7373  *
7374  * This routine is invoked by the worker thread to process FCF table
7375  * rediscovery pending completion event.
7376  **/
7377 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7378 {
7379 	int rc;
7380 
7381 	spin_lock_irq(&phba->hbalock);
7382 	/* Clear FCF rediscovery timeout event */
7383 	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7384 	/* Clear driver fast failover FCF record flag */
7385 	phba->fcf.failover_rec.flag = 0;
7386 	/* Set state for FCF fast failover */
7387 	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7388 	spin_unlock_irq(&phba->hbalock);
7389 
7390 	/* Scan FCF table from the first entry to re-discover SAN */
7391 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7392 			"2777 Start post-quiescent FCF table scan\n");
7393 	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7394 	if (rc)
7395 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7396 				"2747 Issue FCF scan read FCF mailbox "
7397 				"command failed 0x%x\n", rc);
7398 }
7399 
7400 /**
7401  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7402  * @phba: pointer to lpfc hba data structure.
7403  * @dev_grp: The HBA PCI-Device group number.
7404  *
7405  * This routine is invoked to set up the per HBA PCI-Device group function
7406  * API jump table entries.
7407  *
7408  * Return: 0 if success, otherwise -ENODEV
7409  **/
7410 int
7411 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7412 {
7413 	int rc;
7414 
7415 	/* Set up lpfc PCI-device group */
7416 	phba->pci_dev_grp = dev_grp;
7417 
7418 	/* The LPFC_PCI_DEV_OC uses SLI4 */
7419 	if (dev_grp == LPFC_PCI_DEV_OC)
7420 		phba->sli_rev = LPFC_SLI_REV4;
7421 
7422 	/* Set up device INIT API function jump table */
7423 	rc = lpfc_init_api_table_setup(phba, dev_grp);
7424 	if (rc)
7425 		return -ENODEV;
7426 	/* Set up SCSI API function jump table */
7427 	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7428 	if (rc)
7429 		return -ENODEV;
7430 	/* Set up SLI API function jump table */
7431 	rc = lpfc_sli_api_table_setup(phba, dev_grp);
7432 	if (rc)
7433 		return -ENODEV;
7434 	/* Set up MBOX API function jump table */
7435 	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7436 	if (rc)
7437 		return -ENODEV;
7438 
7439 	return 0;
7440 }
7441 
7442 /**
7443  * lpfc_log_intr_mode - Log the active interrupt mode
7444  * @phba: pointer to lpfc hba data structure.
7445  * @intr_mode: active interrupt mode adopted.
7446  *
7447  * This routine is invoked to log the currently used active interrupt mode
7448  * to the device.
7449  **/
7450 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7451 {
7452 	switch (intr_mode) {
7453 	case 0:
7454 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7455 				"0470 Enable INTx interrupt mode.\n");
7456 		break;
7457 	case 1:
7458 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7459 				"0481 Enabled MSI interrupt mode.\n");
7460 		break;
7461 	case 2:
7462 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7463 				"0480 Enabled MSI-X interrupt mode.\n");
7464 		break;
7465 	default:
7466 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7467 				"0482 Illegal interrupt mode.\n");
7468 		break;
7469 	}
7470 	return;
7471 }
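/*
 * The intr_mode values logged above (0 = INTx, 1 = MSI, 2 = MSI-X)
 * mirror what the driver's interrupt-enable paths return, so a typical
 * caller pairs the two (sketch):
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 *	lpfc_log_intr_mode(phba, intr_mode);
 */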
7472 
7473 /**
7474  * lpfc_enable_pci_dev - Enable a generic PCI device.
7475  * @phba: pointer to lpfc hba data structure.
7476  *
7477  * This routine is invoked to enable the PCI device that is common to all
7478  * PCI devices.
7479  *
7480  * Return codes
7481  * 	0 - successful
7482  * 	other values - error
7483  **/
7484 static int
7485 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7486 {
7487 	struct pci_dev *pdev;
7488 
7489 	/* Obtain PCI device reference */
7490 	if (!phba->pcidev)
7491 		goto out_error;
7492 	else
7493 		pdev = phba->pcidev;
7494 	/* Enable PCI device */
7495 	if (pci_enable_device_mem(pdev))
7496 		goto out_error;
7497 	/* Request PCI resource for the device */
7498 	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7499 		goto out_disable_device;
7500 	/* Set up device as PCI master and save state for EEH */
7501 	pci_set_master(pdev);
7502 	pci_try_set_mwi(pdev);
7503 	pci_save_state(pdev);
7504 
7505 	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7506 	if (pci_is_pcie(pdev))
7507 		pdev->needs_freset = 1;
7508 
7509 	return 0;
7510 
7511 out_disable_device:
7512 	pci_disable_device(pdev);
7513 out_error:
7514 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7515 			"1401 Failed to enable pci device\n");
7516 	return -ENODEV;
7517 }
7518 
7519 /**
7520  * lpfc_disable_pci_dev - Disable a generic PCI device.
7521  * @phba: pointer to lpfc hba data structure.
7522  *
7523  * This routine is invoked to disable the PCI device that is common to all
7524  * PCI devices.
7525  **/
7526 static void
7527 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7528 {
7529 	struct pci_dev *pdev;
7530 
7531 	/* Obtain PCI device reference */
7532 	if (!phba->pcidev)
7533 		return;
7534 	else
7535 		pdev = phba->pcidev;
7536 	/* Release PCI resource and disable PCI device */
7537 	pci_release_mem_regions(pdev);
7538 	pci_disable_device(pdev);
7539 
7540 	return;
7541 }
7542 
7543 /**
7544  * lpfc_reset_hba - Reset a hba
7545  * @phba: pointer to lpfc hba data structure.
7546  *
7547  * This routine is invoked to reset a hba device. It brings the HBA
7548  * offline, performs a board restart, and then brings the board back
7549  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7550  * outstanding mailbox commands.
7551  **/
7552 void
7553 lpfc_reset_hba(struct lpfc_hba *phba)
7554 {
7555 	int rc = 0;
7556 
7557 	/* If resets are disabled then set error state and return. */
7558 	if (!phba->cfg_enable_hba_reset) {
7559 		phba->link_state = LPFC_HBA_ERROR;
7560 		return;
7561 	}
7562 
7563 	/* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7564 	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7565 		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7566 	} else {
7567 		if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
7568 			/* Perform a PCI function reset to start from clean */
7569 			rc = lpfc_pci_function_reset(phba);
7570 			lpfc_els_flush_all_cmd(phba);
7571 		}
7572 		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7573 		lpfc_sli_flush_io_rings(phba);
7574 	}
7575 	lpfc_offline(phba);
7576 	clear_bit(MBX_TMO_ERR, &phba->bit_flags);
7577 	if (unlikely(rc)) {
7578 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7579 				"8888 PCI function reset failed rc %x\n",
7580 				rc);
7581 	} else {
7582 		lpfc_sli_brdrestart(phba);
7583 		lpfc_online(phba);
7584 		lpfc_unblock_mgmt_io(phba);
7585 	}
7586 }
7587 
7588 /**
7589  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7590  * @phba: pointer to lpfc hba data structure.
7591  *
7592  * This function reads the PCI SR-IOV extended capability of the physical
7593  * function to obtain the total number of virtual functions the device
7594  * supports.
7595  *
7596  * Return: number of supported virtual functions, or 0 if none/unsupported.
7597  **/
7598 uint16_t
7599 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7600 {
7601 	struct pci_dev *pdev = phba->pcidev;
7602 	uint16_t nr_virtfn;
7603 	int pos;
7604 
7605 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7606 	if (pos == 0)
7607 		return 0;
7608 
7609 	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7610 	return nr_virtfn;
7611 }
7612 
7613 /**
7614  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7615  * @phba: pointer to lpfc hba data structure.
7616  * @nr_vfn: number of virtual functions to be enabled.
7617  *
7618  * This function enables the PCI SR-IOV virtual functions to a physical
7619  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7620  * enable the number of virtual functions to the physical function. As
7621  * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7622  * API call is not considered an error condition for most devices.
7623  **/
7624 int
7625 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7626 {
7627 	struct pci_dev *pdev = phba->pcidev;
7628 	uint16_t max_nr_vfn;
7629 	int rc;
7630 
7631 	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7632 	if (nr_vfn > max_nr_vfn) {
7633 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7634 				"3057 Requested vfs (%d) greater than "
7635 				"supported vfs (%d)", nr_vfn, max_nr_vfn);
7636 		return -EINVAL;
7637 	}
7638 
7639 	rc = pci_enable_sriov(pdev, nr_vfn);
7640 	if (rc) {
7641 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7642 				"2806 Failed to enable sriov on this device "
7643 				"with vfn number nr_vf:%d, rc:%d\n",
7644 				nr_vfn, rc);
7645 	} else
7646 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7647 				"2807 Successful enable sriov on this device "
7648 				"with vfn number nr_vf:%d\n", nr_vfn);
7649 	return rc;
7650 }
7651 
7652 static void
7653 lpfc_unblock_requests_work(struct work_struct *work)
7654 {
7655 	struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7656 					     unblock_request_work);
7657 
7658 	lpfc_unblock_requests(phba);
7659 }
7660 
7661 /**
7662  * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7663  * @phba: pointer to lpfc hba data structure.
7664  *
7665  * This routine is invoked to set up the driver internal resources before the
7666  * device specific resource setup to support the HBA device it attached to.
7667  *
7668  * Return codes
7669  *	0 - successful
7670  *	other values - error
7671  **/
7672 static int
7673 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7674 {
7675 	struct lpfc_sli *psli = &phba->sli;
7676 
7677 	/*
7678 	 * Driver resources common to all SLI revisions
7679 	 */
7680 	atomic_set(&phba->fast_event_count, 0);
7681 	atomic_set(&phba->dbg_log_idx, 0);
7682 	atomic_set(&phba->dbg_log_cnt, 0);
7683 	atomic_set(&phba->dbg_log_dmping, 0);
7684 	spin_lock_init(&phba->hbalock);
7685 
7686 	/* Initialize port_list spinlock */
7687 	spin_lock_init(&phba->port_list_lock);
7688 	INIT_LIST_HEAD(&phba->port_list);
7689 
7690 	INIT_LIST_HEAD(&phba->work_list);
7691 
7692 	/* Initialize the wait queue head for the kernel thread */
7693 	init_waitqueue_head(&phba->work_waitq);
7694 
7695 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7696 			"1403 Protocols supported %s %s %s\n",
7697 			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7698 				"SCSI" : " "),
7699 			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7700 				"NVME" : " "),
7701 			(phba->nvmet_support ? "NVMET" : " "));
7702 
7703 	/* ras_fwlog state */
7704 	spin_lock_init(&phba->ras_fwlog_lock);
7705 
7706 	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
7707 	spin_lock_init(&phba->scsi_buf_list_get_lock);
7708 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7709 	spin_lock_init(&phba->scsi_buf_list_put_lock);
7710 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7711 
7712 	/* Initialize the fabric iocb list */
7713 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
7714 
7715 	/* Initialize list to save ELS buffers */
7716 	INIT_LIST_HEAD(&phba->elsbuf);
7717 
7718 	/* Initialize FCF connection rec list */
7719 	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7720 
7721 	/* Initialize OAS configuration list */
7722 	spin_lock_init(&phba->devicelock);
7723 	INIT_LIST_HEAD(&phba->luns);
7724 
7725 	/* MBOX heartbeat timer */
7726 	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7727 	/* Fabric block timer */
7728 	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7729 	/* EA polling mode timer */
7730 	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7731 	/* Heartbeat timer */
7732 	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7733 
7734 	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7735 
7736 	INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7737 			  lpfc_idle_stat_delay_work);
7738 	INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7739 	return 0;
7740 }
7741 
7742 /**
7743  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7744  * @phba: pointer to lpfc hba data structure.
7745  *
7746  * This routine is invoked to set up the driver internal resources specific to
7747  * support the SLI-3 HBA device it attached to.
7748  *
7749  * Return codes
7750  * 0 - successful
7751  * other values - error
7752  **/
7753 static int
7754 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7755 {
7756 	int rc, entry_sz;
7757 
7758 	/*
7759 	 * Initialize timers used by driver
7760 	 */
7761 
7762 	/* FCP polling mode timer */
7763 	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7764 
7765 	/* Host attention work mask setup */
7766 	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7767 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7768 
7769 	/* Get all the module params for configuring this host */
7770 	lpfc_get_cfgparam(phba);
7771 	/* Set up phase-1 common device driver resources */
7772 
7773 	rc = lpfc_setup_driver_resource_phase1(phba);
7774 	if (rc)
7775 		return -ENODEV;
7776 
7777 	if (!phba->sli.sli3_ring)
7778 		phba->sli.sli3_ring = kzalloc_objs(struct lpfc_sli_ring,
7779 						   LPFC_SLI3_MAX_RING);
7780 	if (!phba->sli.sli3_ring)
7781 		return -ENOMEM;
7782 
7783 	/*
7784 	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
7785 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
7786 	 */
7787 
7788 	if (phba->sli_rev == LPFC_SLI_REV4)
7789 		entry_sz = sizeof(struct sli4_sge);
7790 	else
7791 		entry_sz = sizeof(struct ulp_bde64);
7792 
7793 	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7794 	if (phba->cfg_enable_bg) {
7795 		/*
7796 		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7797 		 * the FCP rsp, and a BDE for each. Since we have no control
7798 		 * over how many protection data segments the SCSI Layer
7799 		 * will hand us (i.e. there could be one for every block
7800 		 * in the IO), we just allocate enough BDEs to accommodate
7801 		 * our max amount and we need to limit lpfc_sg_seg_cnt to
7802 		 * minimize the risk of running out.
7803 		 */
7804 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7805 			sizeof(struct fcp_rsp) +
7806 			(LPFC_MAX_SG_SEG_CNT * entry_sz);
7807 
7808 		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7809 			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7810 
7811 		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7812 		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7813 	} else {
7814 		/*
7815 		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7816 		 * the FCP rsp, a BDE for each, and a BDE for up to
7817 		 * cfg_sg_seg_cnt data segments.
7818 		 */
7819 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7820 			sizeof(struct fcp_rsp) +
7821 			((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7822 
7823 		/* Total BDEs in BPL for scsi_sg_list */
7824 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7825 	}
7826 
7827 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7828 			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7829 			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7830 			phba->cfg_total_seg_cnt);
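/*
 * Worked example for the non-T10-DIF branch, with illustrative sizes
 * (32-byte fcp_cmnd, 32-byte fcp_rsp, 12-byte ulp_bde64) and
 * cfg_sg_seg_cnt == 64:
 *
 *	cfg_sg_dma_buf_size = 32 + 32 + ((64 + 2) * 12) = 856 bytes
 *	cfg_total_seg_cnt   = 64 + 2 = 66 BDEs
 *
 * The two extra BDEs are the reserved ones for the FCP cmnd and FCP
 * rsp buffers noted above.
 */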
7831 
7832 	phba->max_vpi = LPFC_MAX_VPI;
7833 	/* This will be set to correct value after config_port mbox */
7834 	phba->max_vports = 0;
7835 
7836 	/*
7837 	 * Initialize the SLI Layer to run with lpfc HBAs.
7838 	 */
7839 	lpfc_sli_setup(phba);
7840 	lpfc_sli_queue_init(phba);
7841 
7842 	/* Allocate device driver memory */
7843 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7844 		return -ENOMEM;
7845 
7846 	phba->lpfc_sg_dma_buf_pool =
7847 		dma_pool_create("lpfc_sg_dma_buf_pool",
7848 				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7849 				BPL_ALIGN_SZ, 0);
7850 
7851 	if (!phba->lpfc_sg_dma_buf_pool)
7852 		goto fail_free_mem;
7853 
7854 	phba->lpfc_cmd_rsp_buf_pool =
7855 			dma_pool_create("lpfc_cmd_rsp_buf_pool",
7856 					&phba->pcidev->dev,
7857 					sizeof(struct fcp_cmnd) +
7858 					sizeof(struct fcp_rsp),
7859 					BPL_ALIGN_SZ, 0);
7860 
7861 	if (!phba->lpfc_cmd_rsp_buf_pool)
7862 		goto fail_free_dma_buf_pool;
7863 
7864 	/*
7865 	 * Enable sr-iov virtual functions if supported and configured
7866 	 * through the module parameter.
7867 	 */
7868 	if (phba->cfg_sriov_nr_virtfn > 0) {
7869 		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7870 						 phba->cfg_sriov_nr_virtfn);
7871 		if (rc) {
7872 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7873 					"2808 Requested number of SR-IOV "
7874 					"virtual functions (%d) is not "
7875 					"supported\n",
7876 					phba->cfg_sriov_nr_virtfn);
7877 			phba->cfg_sriov_nr_virtfn = 0;
7878 		}
7879 	}
7880 
7881 	return 0;
7882 
7883 fail_free_dma_buf_pool:
7884 	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7885 	phba->lpfc_sg_dma_buf_pool = NULL;
7886 fail_free_mem:
7887 	lpfc_mem_free(phba);
7888 	return -ENOMEM;
7889 }
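/*
 * The unwind above follows the kernel's goto-based cleanup convention:
 * each failure label releases exactly what was acquired before the
 * failing step, in reverse order of allocation. In miniature:
 *
 *	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
 *		return -ENOMEM;
 *	pool = dma_pool_create(...);
 *	if (!pool)
 *		goto fail_free_mem;
 *	...
 *	fail_free_mem:
 *		lpfc_mem_free(phba);
 *		return -ENOMEM;
 */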
7890 
7891 /**
7892  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7893  * @phba: pointer to lpfc hba data structure.
7894  *
7895  * This routine is invoked to unset the driver internal resources set up
7896  * specific for supporting the SLI-3 HBA device it attached to.
7897  **/
7898 static void
7899 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7900 {
7901 	/* Free device driver memory allocated */
7902 	lpfc_mem_free_all(phba);
7903 
7904 	return;
7905 }
7906 
7907 /**
7908  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7909  * @phba: pointer to lpfc hba data structure.
7910  *
7911  * This routine is invoked to set up the driver internal resources specific to
7912  * support the SLI-4 HBA device it attached to.
7913  *
7914  * Return codes
7915  * 	0 - successful
7916  * 	other values - error
7917  **/
7918 static int
7919 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7920 {
7921 	LPFC_MBOXQ_t *mboxq;
7922 	MAILBOX_t *mb;
7923 	int rc, i, max_buf_size;
7924 	int longs;
7925 	int extra;
7926 	uint64_t wwn;
7927 
7928 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7929 	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7930 	phba->sli4_hba.curr_disp_cpu = 0;
7931 
7932 	/* Get all the module params for configuring this host */
7933 	lpfc_get_cfgparam(phba);
7934 
7935 	/* Set up phase-1 common device driver resources */
7936 	rc = lpfc_setup_driver_resource_phase1(phba);
7937 	if (rc)
7938 		return -ENODEV;
7939 
7940 	/* Before proceeding, wait for POST done and device ready */
7941 	rc = lpfc_sli4_post_status_check(phba);
7942 	if (rc)
7943 		return -ENODEV;
7944 
7945 	/* Allocate all driver workqueues here */
7946 
7947 	/* The lpfc_wq workqueue for deferred irq use */
7948 	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
7949 	if (!phba->wq)
7950 		return -ENOMEM;
7951 
7952 	/*
7953 	 * Initialize timers used by driver
7954 	 */
7955 
7956 	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7957 
7958 	/* FCF rediscover timer */
7959 	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7960 
7961 	/* CMF congestion timer */
7962 	hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7963 	/* CMF 1 minute stats collection timer */
7964 	hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
7965 		      HRTIMER_MODE_REL);
7966 
7967 	/*
7968 	 * Control structure for handling external multi-buffer mailbox
7969 	 * command pass-through.
7970 	 */
7971 	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7972 		sizeof(struct lpfc_mbox_ext_buf_ctx));
7973 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7974 
7975 	phba->max_vpi = LPFC_MAX_VPI;
7976 
7977 	/* This will be set to correct value after the read_config mbox */
7978 	phba->max_vports = 0;
7979 
7980 	/* Program the default value of vlan_id and fc_map */
7981 	phba->valid_vlan = 0;
7982 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7983 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7984 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7985 
7986 	/*
7987 	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7988 	 * we will associate a new ring for each EQ/CQ/WQ tuple.
7989 	 * The WQ create will allocate the ring.
7990 	 */
7991 
7992 	/* Initialize buffer queue management fields */
7993 	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7994 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7995 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7996 
7997 	/* for VMID idle timeout if VMID is enabled */
7998 	if (lpfc_is_vmid_enabled(phba))
7999 		timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8000 
8001 	/*
8002 	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
8003 	 */
8004 	/* Initialize the Abort buffer list used by driver */
8005 	spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8006 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8007 
8008 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8009 		/* Initialize the Abort nvme buffer list used by driver */
8010 		spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8011 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8012 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8013 		spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8014 		INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8015 	}
8016 
8017 	/* This abort list used by worker thread */
8018 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8019 	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8020 	spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8021 	spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8022 
8023 	/*
8024 	 * Initialize driver internal slow-path work queues
8025 	 */
8026 
8027 	/* Driver internal slow-path CQ Event pool */
8028 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8029 	/* Response IOCB work queue list */
8030 	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8031 	/* Asynchronous event CQ Event work queue list */
8032 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8033 	/* Slow-path XRI aborted CQ Event work queue list */
8034 	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8035 	/* Receive queue CQ Event work queue list */
8036 	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8037 
8038 	/* Initialize extent block lists. */
8039 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8040 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8041 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8042 	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8043 
8044 	/* Initialize mboxq lists now so that, if the early init routines
8045 	 * fail, the failure paths find them correctly initialized.
8046 	 */
8047 	INIT_LIST_HEAD(&phba->sli.mboxq);
8048 	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8049 
8050 	/* initialize optic_state to 0xFF */
8051 	phba->sli4_hba.lnk_info.optic_state = 0xff;
8052 
8053 	/* Allocate device driver memory */
8054 	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8055 	if (rc)
8056 		goto out_destroy_workqueue;
8057 
8058 	/* IF Type 2 ports get initialized now. */
8059 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8060 	    LPFC_SLI_INTF_IF_TYPE_2) {
8061 		rc = lpfc_pci_function_reset(phba);
8062 		if (unlikely(rc)) {
8063 			rc = -ENODEV;
8064 			goto out_free_mem;
8065 		}
8066 		phba->temp_sensor_support = 1;
8067 	}
8068 
8069 	/* Create the bootstrap mailbox command */
8070 	rc = lpfc_create_bootstrap_mbox(phba);
8071 	if (unlikely(rc))
8072 		goto out_free_mem;
8073 
8074 	/* Set up the host's endian order with the device. */
8075 	rc = lpfc_setup_endian_order(phba);
8076 	if (unlikely(rc))
8077 		goto out_free_bsmbx;
8078 
8079 	/* Set up the hba's configuration parameters. */
8080 	rc = lpfc_sli4_read_config(phba);
8081 	if (unlikely(rc))
8082 		goto out_free_bsmbx;
8083 
8084 	if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8085 	/* Right now the link is down; if FA-PWWN is configured, the
8086 	 * firmware will try FLOGI before the driver gets a link up.
8087 	 * If it fails, the driver should get a MISCONFIGURED async
8088 	 * event which will clear this flag. The only notification
8089 	 * the driver gets is on failure; on success nothing is
8090 	 * reported. Assume success.
8091 		 */
8092 		phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8093 	}
8094 
8095 	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8096 	if (unlikely(rc))
8097 		goto out_free_bsmbx;
8098 
8099 	/* IF Type 0 ports get initialized now. */
8100 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8101 	    LPFC_SLI_INTF_IF_TYPE_0) {
8102 		rc = lpfc_pci_function_reset(phba);
8103 		if (unlikely(rc))
8104 			goto out_free_bsmbx;
8105 	}
8106 
8107 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8108 						       GFP_KERNEL);
8109 	if (!mboxq) {
8110 		rc = -ENOMEM;
8111 		goto out_free_bsmbx;
8112 	}
8113 
8114 	/* Check for NVMET being configured */
8115 	phba->nvmet_support = 0;
8116 	if (lpfc_enable_nvmet_cnt) {
8117 
8118 		/* First get WWN of HBA instance */
8119 		lpfc_read_nv(phba, mboxq);
8120 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8121 		if (rc != MBX_SUCCESS) {
8122 			lpfc_printf_log(phba, KERN_ERR,
8123 					LOG_TRACE_EVENT,
8124 					"6016 Mailbox failed , mbxCmd x%x "
8125 					"READ_NV, mbxStatus x%x\n",
8126 					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8127 					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8128 			mempool_free(mboxq, phba->mbox_mem_pool);
8129 			rc = -EIO;
8130 			goto out_free_bsmbx;
8131 		}
8132 		mb = &mboxq->u.mb;
8133 		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8134 		       sizeof(uint64_t));
8135 		wwn = cpu_to_be64(wwn);
8136 		phba->sli4_hba.wwnn.u.name = wwn;
8137 		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8138 		       sizeof(uint64_t));
8139 		/* wwn is WWPN of HBA instance */
8140 		wwn = cpu_to_be64(wwn);
8141 		phba->sli4_hba.wwpn.u.name = wwn;
8142 
8143 		/* Check to see if it matches any module parameter */
8144 		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8145 			if (wwn == lpfc_enable_nvmet[i]) {
8146 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8147 				if (lpfc_nvmet_mem_alloc(phba))
8148 					break;
8149 
8150 				phba->nvmet_support = 1; /* a match */
8151 
8152 				lpfc_printf_log(phba, KERN_ERR,
8153 						LOG_TRACE_EVENT,
8154 						"6017 NVME Target %016llx\n",
8155 						wwn);
8156 #else
8157 				lpfc_printf_log(phba, KERN_ERR,
8158 						LOG_TRACE_EVENT,
8159 						"6021 Can't enable NVME Target."
8160 						" NVME_TARGET_FC infrastructure"
8161 						" is not in kernel\n");
8162 #endif
8163 				/* Not supported for NVMET */
8164 				phba->cfg_xri_rebalancing = 0;
8165 				if (phba->irq_chann_mode == NHT_MODE) {
8166 					phba->cfg_irq_chann =
8167 						phba->sli4_hba.num_present_cpu;
8168 					phba->cfg_hdw_queue =
8169 						phba->sli4_hba.num_present_cpu;
8170 					phba->irq_chann_mode = NORMAL_MODE;
8171 				}
8172 				break;
8173 			}
8174 		}
8175 	}
8176 
8177 	lpfc_nvme_mod_param_dep(phba);
8178 
8179 	/*
8180 	 * Get sli4 parameters that override parameters from Port capabilities.
8181 	 * If this call fails, it isn't critical unless the SLI4 parameters come
8182 	 * back in conflict.
8183 	 */
8184 	rc = lpfc_get_sli4_parameters(phba, mboxq);
8185 	if (rc) {
8186 		lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
8187 			     "2999 Could not get SLI4 parameters\n");
8188 		rc = -EIO;
8189 		mempool_free(mboxq, phba->mbox_mem_pool);
8190 		goto out_free_bsmbx;
8191 	}
8192 
8193 	/*
8194 	 * 1 for cmd, 1 for rsp, NVME adds an extra one
8195 	 * for boundary conditions in its max_sgl_segment template.
8196 	 */
8197 	extra = 2;
8198 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8199 		extra++;
8200 
8201 	/*
8202 	 * Regardless of what family our adapter is in, we are
8203 	 * limited to 2 pages (512 SGEs) for our SGL.
8204 	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8205 	 */
8206 	max_buf_size = (2 * SLI4_PAGE_SIZE);
8207 
8208 	/*
8209 	 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
8210 	 * used to create the sg_dma_buf_pool must be calculated.
8211 	 */
8212 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8213 		/* Both cfg_enable_bg and cfg_external_dif code paths */
8214 
8215 		/*
8216 		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8217 	 * the FCP rsp, and an SGE. Since we have no control
8218 	 * over how many protection segments the SCSI layer
8219 	 * will hand us (i.e., there could be one for every block
8220 	 * in the I/O), allocate enough SGEs to accommodate the
8221 	 * maximum, and limit lpfc_sg_seg_cnt to minimize the
8222 	 * risk of running out.
8223 		 */
8224 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) +
8225 				sizeof(struct fcp_rsp) + max_buf_size;
8226 
8227 		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8228 		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8229 
8230 		/*
8231 		 * If supporting DIF, reduce the seg count for scsi to
8232 		 * allow room for the DIF sges.
8233 		 */
8234 		if (phba->cfg_enable_bg &&
8235 		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8236 			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8237 		else
8238 			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8239 
8240 	} else {
8241 		/*
8242 		 * The scsi_buf for a regular I/O holds the FCP cmnd,
8243 		 * the FCP rsp, a SGE for each, and a SGE for up to
8244 		 * cfg_sg_seg_cnt data segments.
8245 		 */
8246 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) +
8247 				sizeof(struct fcp_rsp) +
8248 				((phba->cfg_sg_seg_cnt + extra) *
8249 				sizeof(struct sli4_sge));
8250 
8251 		/* Total SGEs for scsi_sg_list */
8252 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8253 		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8254 
8255 		/*
8256 		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8257 		 * need to post 1 page for the SGL.
8258 		 */
8259 	}
8260 
8261 	if (phba->cfg_xpsgl && !phba->nvmet_support)
8262 		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8263 	else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
8264 		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8265 	else
8266 		phba->cfg_sg_dma_buf_size =
8267 				SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8268 
8269 	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8270 			       sizeof(struct sli4_sge);
8271 
8272 	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8273 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8274 		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8275 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8276 					"6300 Reducing NVME sg segment "
8277 					"cnt to %d\n",
8278 					LPFC_MAX_NVME_SEG_CNT);
8279 			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8280 		} else
8281 			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8282 	}
8283 
8284 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8285 			"9087 sg_seg_cnt:%d dmabuf_size:%d "
8286 			"total:%d scsi:%d nvme:%d\n",
8287 			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8288 			phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
8289 			phba->cfg_nvme_seg_cnt);
8290 
8291 	i = min_t(u32, phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
8292 
8293 	phba->lpfc_sg_dma_buf_pool =
8294 			dma_pool_create("lpfc_sg_dma_buf_pool",
8295 					&phba->pcidev->dev,
8296 					phba->cfg_sg_dma_buf_size,
8297 					i, 0);
8298 	if (!phba->lpfc_sg_dma_buf_pool) {
8299 		rc = -ENOMEM;
8300 		goto out_free_bsmbx;
8301 	}
8302 
8303 	phba->lpfc_cmd_rsp_buf_pool =
8304 			dma_pool_create("lpfc_cmd_rsp_buf_pool",
8305 					&phba->pcidev->dev,
8306 					sizeof(struct fcp_cmnd32) +
8307 					sizeof(struct fcp_rsp),
8308 					i, 0);
8309 	if (!phba->lpfc_cmd_rsp_buf_pool) {
8310 		rc = -ENOMEM;
8311 		goto out_free_sg_dma_buf;
8312 	}
8313 
8314 	mempool_free(mboxq, phba->mbox_mem_pool);
8315 
8316 	/* Verify OAS is supported */
8317 	lpfc_sli4_oas_verify(phba);
8318 
8319 	/* Verify RAS support on adapter */
8320 	lpfc_sli4_ras_init(phba);
8321 
8322 	/* Verify all the SLI4 queues */
8323 	rc = lpfc_sli4_queue_verify(phba);
8324 	if (rc)
8325 		goto out_free_cmd_rsp_buf;
8326 
8327 	/* Create driver internal CQE event pool */
8328 	rc = lpfc_sli4_cq_event_pool_create(phba);
8329 	if (rc)
8330 		goto out_free_cmd_rsp_buf;
8331 
8332 	/* Initialize sgl lists per host */
8333 	lpfc_init_sgl_list(phba);
8334 
8335 	/* Allocate and initialize active sgl array */
8336 	rc = lpfc_init_active_sgl_array(phba);
8337 	if (rc) {
8338 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8339 				"1430 Failed to initialize sgl list.\n");
8340 		goto out_destroy_cq_event_pool;
8341 	}
8342 	rc = lpfc_sli4_init_rpi_hdrs(phba);
8343 	if (rc) {
8344 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8345 				"1432 Failed to initialize rpi headers.\n");
8346 		goto out_free_active_sgl;
8347 	}
8348 
8349 	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8350 	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8351 	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8352 					 GFP_KERNEL);
8353 	if (!phba->fcf.fcf_rr_bmask) {
8354 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8355 				"2759 Failed allocate memory for FCF round "
8356 				"robin failover bmask\n");
8357 		rc = -ENOMEM;
8358 		goto out_remove_rpi_hdrs;
8359 	}
8360 
8361 	phba->sli4_hba.hba_eq_hdl = kzalloc_objs(struct lpfc_hba_eq_hdl,
8362 						 phba->cfg_irq_chann);
8363 	if (!phba->sli4_hba.hba_eq_hdl) {
8364 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8365 				"2572 Failed allocate memory for "
8366 				"fast-path per-EQ handle array\n");
8367 		rc = -ENOMEM;
8368 		goto out_free_fcf_rr_bmask;
8369 	}
8370 
8371 	phba->sli4_hba.cpu_map = kzalloc_objs(struct lpfc_vector_map_info,
8372 					      phba->sli4_hba.num_possible_cpu);
8373 	if (!phba->sli4_hba.cpu_map) {
8374 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8375 				"3327 Failed allocate memory for msi-x "
8376 				"interrupt vector mapping\n");
8377 		rc = -ENOMEM;
8378 		goto out_free_hba_eq_hdl;
8379 	}
8380 
8381 	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8382 	if (!phba->sli4_hba.eq_info) {
8383 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8384 				"3321 Failed allocation for per_cpu stats\n");
8385 		rc = -ENOMEM;
8386 		goto out_free_hba_cpu_map;
8387 	}
8388 
8389 	phba->sli4_hba.idle_stat = kzalloc_objs(*phba->sli4_hba.idle_stat,
8390 						phba->sli4_hba.num_possible_cpu);
8391 	if (!phba->sli4_hba.idle_stat) {
8392 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8393 				"3390 Failed allocation for idle_stat\n");
8394 		rc = -ENOMEM;
8395 		goto out_free_hba_eq_info;
8396 	}
8397 
8398 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8399 	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8400 	if (!phba->sli4_hba.c_stat) {
8401 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8402 				"3332 Failed allocating per cpu hdwq stats\n");
8403 		rc = -ENOMEM;
8404 		goto out_free_hba_idle_stat;
8405 	}
8406 #endif
8407 
8408 	phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8409 	if (!phba->cmf_stat) {
8410 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8411 				"3331 Failed allocating per cpu cgn stats\n");
8412 		rc = -ENOMEM;
8413 		goto out_free_hba_hdwq_info;
8414 	}
8415 
8416 	/*
8417 	 * Enable sr-iov virtual functions if supported and configured
8418 	 * through the module parameter.
8419 	 */
8420 	if (phba->cfg_sriov_nr_virtfn > 0) {
8421 		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8422 						 phba->cfg_sriov_nr_virtfn);
8423 		if (rc) {
8424 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8425 					"3020 Requested number of SR-IOV "
8426 					"virtual functions (%d) is not "
8427 					"supported\n",
8428 					phba->cfg_sriov_nr_virtfn);
8429 			phba->cfg_sriov_nr_virtfn = 0;
8430 		}
8431 	}
8432 
8433 	return 0;
8434 
8435 out_free_hba_hdwq_info:
8436 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8437 	free_percpu(phba->sli4_hba.c_stat);
8438 out_free_hba_idle_stat:
8439 #endif
8440 	kfree(phba->sli4_hba.idle_stat);
8441 out_free_hba_eq_info:
8442 	free_percpu(phba->sli4_hba.eq_info);
8443 out_free_hba_cpu_map:
8444 	kfree(phba->sli4_hba.cpu_map);
8445 out_free_hba_eq_hdl:
8446 	kfree(phba->sli4_hba.hba_eq_hdl);
8447 out_free_fcf_rr_bmask:
8448 	kfree(phba->fcf.fcf_rr_bmask);
8449 out_remove_rpi_hdrs:
8450 	lpfc_sli4_remove_rpi_hdrs(phba);
8451 out_free_active_sgl:
8452 	lpfc_free_active_sgl(phba);
8453 out_destroy_cq_event_pool:
8454 	lpfc_sli4_cq_event_pool_destroy(phba);
8455 out_free_cmd_rsp_buf:
8456 	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8457 	phba->lpfc_cmd_rsp_buf_pool = NULL;
8458 out_free_sg_dma_buf:
8459 	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8460 	phba->lpfc_sg_dma_buf_pool = NULL;
8461 out_free_bsmbx:
8462 	lpfc_destroy_bootstrap_mbox(phba);
8463 out_free_mem:
8464 	lpfc_mem_free(phba);
8465 out_destroy_workqueue:
8466 	destroy_workqueue(phba->wq);
8467 	phba->wq = NULL;
8468 	return rc;
8469 }
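
/*
 * Editorial sketch (not part of the driver): the routine above uses the
 * canonical kernel "goto unwind" pattern -- resources are acquired in
 * order and, on failure, released in exactly the reverse order.  A
 * minimal stand-alone illustration, with hypothetical acquire_a/b/c()
 * and release_a/b() helpers:
 *
 *	rc = acquire_a(phba);
 *	if (rc)
 *		return rc;
 *	rc = acquire_b(phba);
 *	if (rc)
 *		goto out_release_a;
 *	rc = acquire_c(phba);
 *	if (rc)
 *		goto out_release_b;
 *	return 0;
 *
 * out_release_b:
 *	release_b(phba);
 * out_release_a:
 *	release_a(phba);
 *	return rc;
 */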
8470 
8471 /**
8472  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8473  * @phba: pointer to lpfc hba data structure.
8474  *
8475  * This routine is invoked to unset the driver internal resources set up
8476  * specifically to support the SLI-4 HBA device it is attached to.
8477  **/
8478 static void
8479 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8480 {
8481 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8482 
8483 	free_percpu(phba->sli4_hba.eq_info);
8484 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8485 	free_percpu(phba->sli4_hba.c_stat);
8486 #endif
8487 	free_percpu(phba->cmf_stat);
8488 	kfree(phba->sli4_hba.idle_stat);
8489 
8490 	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
8491 	kfree(phba->sli4_hba.cpu_map);
8492 	phba->sli4_hba.num_possible_cpu = 0;
8493 	phba->sli4_hba.num_present_cpu = 0;
8494 	phba->sli4_hba.curr_disp_cpu = 0;
8495 	cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8496 
8497 	/* Free memory allocated for fast-path work queue handles */
8498 	kfree(phba->sli4_hba.hba_eq_hdl);
8499 
8500 	/* Free the allocated rpi headers. */
8501 	lpfc_sli4_remove_rpi_hdrs(phba);
8502 	lpfc_sli4_remove_rpis(phba);
8503 
8504 	/* Free eligible FCF index bmask */
8505 	kfree(phba->fcf.fcf_rr_bmask);
8506 
8507 	/* Free the ELS sgl list */
8508 	lpfc_free_active_sgl(phba);
8509 	lpfc_free_els_sgl_list(phba);
8510 	lpfc_free_nvmet_sgl_list(phba);
8511 
8512 	/* Free the completion queue EQ event pool */
8513 	lpfc_sli4_cq_event_release_all(phba);
8514 	lpfc_sli4_cq_event_pool_destroy(phba);
8515 
8516 	/* Release resource identifiers. */
8517 	lpfc_sli4_dealloc_resource_identifiers(phba);
8518 
8519 	/* Free the bsmbx region. */
8520 	lpfc_destroy_bootstrap_mbox(phba);
8521 
8522 	/* Free the SLI Layer memory with SLI4 HBAs */
8523 	lpfc_mem_free_all(phba);
8524 
8525 	/* Free the current connect table */
8526 	list_for_each_entry_safe(conn_entry, next_conn_entry,
8527 		&phba->fcf_conn_rec_list, list) {
8528 		list_del_init(&conn_entry->list);
8529 		kfree(conn_entry);
8530 	}
8531 
8532 	return;
8533 }
8534 
8535 /**
8536  * lpfc_init_api_table_setup - Set up init api function jump table
8537  * @phba: The hba struct for which this call is being executed.
8538  * @dev_grp: The HBA PCI-Device group number.
8539  *
8540  * This routine sets up the device INIT interface API function jump table
8541  * in @phba struct.
8542  *
8543  * Returns: 0 - success, -ENODEV - failure.
8544  **/
8545 int
8546 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8547 {
8548 	phba->lpfc_hba_init_link = lpfc_hba_init_link;
8549 	phba->lpfc_hba_down_link = lpfc_hba_down_link;
8550 	phba->lpfc_selective_reset = lpfc_selective_reset;
8551 	switch (dev_grp) {
8552 	case LPFC_PCI_DEV_LP:
8553 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8554 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8555 		phba->lpfc_stop_port = lpfc_stop_port_s3;
8556 		break;
8557 	case LPFC_PCI_DEV_OC:
8558 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8559 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8560 		phba->lpfc_stop_port = lpfc_stop_port_s4;
8561 		break;
8562 	default:
8563 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8564 				"1431 Invalid HBA PCI-device group: 0x%x\n",
8565 				dev_grp);
8566 		return -ENODEV;
8567 	}
8568 	return 0;
8569 }
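
/*
 * Editorial note: once lpfc_init_api_table_setup() has run, the rest of
 * the driver dispatches through this jump table and stays agnostic of
 * the PCI device group, e.g. (illustrative call sites only):
 *
 *	phba->lpfc_stop_port(phba);	resolves to _s3 or _s4 variant
 *	phba->lpfc_handle_eratt(phba);	SLI-3 or SLI-4 error attention
 */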
8570 
8571 /**
8572  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8573  * @phba: pointer to lpfc hba data structure.
8574  *
8575  * This routine is invoked to set up the driver internal resources after the
8576  * device specific resource setup to support the HBA device it is attached to.
8577  *
8578  * Return codes
8579  * 	0 - successful
8580  * 	other values - error
8581  **/
8582 static int
8583 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8584 {
8585 	int error;
8586 
8587 	/* Startup the kernel thread for this host adapter. */
8588 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
8589 					  "lpfc_worker_%d", phba->brd_no);
8590 	if (IS_ERR(phba->worker_thread)) {
8591 		error = PTR_ERR(phba->worker_thread);
8592 		return error;
8593 	}
8594 
8595 	return 0;
8596 }
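
/*
 * Editorial note: kthread_run() reports failure with an ERR_PTR-encoded
 * pointer rather than NULL, hence the IS_ERR()/PTR_ERR() pair above.
 * The general shape of the idiom:
 *
 *	task = kthread_run(fn, data, "name_%d", id);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);	e.g. -ENOMEM or -EINTR
 */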
8597 
8598 /**
8599  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8600  * @phba: pointer to lpfc hba data structure.
8601  *
8602  * This routine is invoked to unset the driver internal resources set up after
8603  * the device specific resource setup for supporting the HBA device it is
8604  * attached to.
8605  **/
8606 static void
8607 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8608 {
8609 	if (phba->wq) {
8610 		destroy_workqueue(phba->wq);
8611 		phba->wq = NULL;
8612 	}
8613 
8614 	/* Stop kernel worker thread */
8615 	if (phba->worker_thread)
8616 		kthread_stop(phba->worker_thread);
8617 }
8618 
8619 /**
8620  * lpfc_free_iocb_list - Free iocb list.
8621  * @phba: pointer to lpfc hba data structure.
8622  *
8623  * This routine is invoked to free the driver's IOCB list and memory.
8624  **/
8625 void
8626 lpfc_free_iocb_list(struct lpfc_hba *phba)
8627 {
8628 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8629 
8630 	spin_lock_irq(&phba->hbalock);
8631 	list_for_each_entry_safe(iocbq_entry, iocbq_next,
8632 				 &phba->lpfc_iocb_list, list) {
8633 		list_del(&iocbq_entry->list);
8634 		kfree(iocbq_entry);
8635 		phba->total_iocbq_bufs--;
8636 	}
8637 	spin_unlock_irq(&phba->hbalock);
8638 
8639 	return;
8640 }
8641 
8642 /**
8643  * lpfc_init_iocb_list - Allocate and initialize iocb list.
8644  * @phba: pointer to lpfc hba data structure.
8645  * @iocb_count: number of requested iocbs
8646  *
8647  * This routine is invoked to allocate and initialize the driver's IOCB
8648  * list and set up the IOCB tag array accordingly.
8649  *
8650  * Return codes
8651  *	0 - successful
8652  *	other values - error
8653  **/
8654 int
8655 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8656 {
8657 	struct lpfc_iocbq *iocbq_entry = NULL;
8658 	uint16_t iotag;
8659 	int i;
8660 
8661 	/* Initialize and populate the iocb list per host.  */
8662 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8663 	for (i = 0; i < iocb_count; i++) {
8664 		iocbq_entry = kzalloc_obj(struct lpfc_iocbq);
8665 		if (iocbq_entry == NULL) {
8666 			printk(KERN_ERR "%s: only allocated %d iocbs of "
8667 				"expected %d count. Unloading driver.\n",
8668 				__func__, i, iocb_count);
8669 			goto out_free_iocbq;
8670 		}
8671 
8672 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8673 		if (iotag == 0) {
8674 			kfree(iocbq_entry);
8675 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
8676 				"Unloading driver.\n", __func__);
8677 			goto out_free_iocbq;
8678 		}
8679 		iocbq_entry->sli4_lxritag = NO_XRI;
8680 		iocbq_entry->sli4_xritag = NO_XRI;
8681 
8682 		spin_lock_irq(&phba->hbalock);
8683 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8684 		phba->total_iocbq_bufs++;
8685 		spin_unlock_irq(&phba->hbalock);
8686 	}
8687 
8688 	return 0;
8689 
8690 out_free_iocbq:
8691 	lpfc_free_iocb_list(phba);
8692 
8693 	return -ENOMEM;
8694 }
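
/*
 * Editorial sketch: lpfc_init_iocb_list() and lpfc_free_iocb_list() form
 * an init/teardown pair.  A hedged usage outline (the count below is
 * illustrative only):
 *
 *	if (lpfc_init_iocb_list(phba, 1024))
 *		return -ENOMEM;
 *	...
 *	lpfc_free_iocb_list(phba);
 *
 * Note that the failure path of lpfc_init_iocb_list() already calls
 * lpfc_free_iocb_list(), so a partially built list never leaks.
 */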
8695 
8696 /**
8697  * lpfc_free_sgl_list - Free a given sgl list.
8698  * @phba: pointer to lpfc hba data structure.
8699  * @sglq_list: pointer to the head of sgl list.
8700  *
8701  * This routine is invoked to free a given sgl list and its memory.
8702  **/
8703 void
8704 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8705 {
8706 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8707 
8708 	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8709 		list_del(&sglq_entry->list);
8710 		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8711 		kfree(sglq_entry);
8712 	}
8713 }
8714 
8715 /**
8716  * lpfc_free_els_sgl_list - Free els sgl list.
8717  * @phba: pointer to lpfc hba data structure.
8718  *
8719  * This routine is invoked to free the driver's els sgl list and memory.
8720  **/
8721 static void
8722 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8723 {
8724 	LIST_HEAD(sglq_list);
8725 
8726 	/* Retrieve all els sgls from driver list */
8727 	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8728 	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8729 	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8730 
8731 	/* Now free the sgl list */
8732 	lpfc_free_sgl_list(phba, &sglq_list);
8733 }
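
/*
 * Editorial note: the routine above uses the common "splice under lock,
 * free outside the lock" idiom, which keeps the lock hold time constant
 * no matter how long the list is.  The shape of the pattern, with a
 * hypothetical shared list and lock:
 *
 *	LIST_HEAD(tmp);
 *
 *	spin_lock_irq(&lock);
 *	list_splice_init(&shared_list, &tmp);
 *	spin_unlock_irq(&lock);
 *
 *	(free entries from 'tmp' here, lock no longer held)
 */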
8734 
8735 /**
8736  * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8737  * @phba: pointer to lpfc hba data structure.
8738  *
8739  * This routine is invoked to free the driver's nvmet sgl list and memory.
8740  **/
8741 static void
8742 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8743 {
8744 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8745 	LIST_HEAD(sglq_list);
8746 
8747 	/* Retrieve all nvmet sgls from driver list */
8748 	spin_lock_irq(&phba->hbalock);
8749 	spin_lock(&phba->sli4_hba.sgl_list_lock);
8750 	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8751 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
8752 	spin_unlock_irq(&phba->hbalock);
8753 
8754 	/* Now free the sgl list */
8755 	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8756 		list_del(&sglq_entry->list);
8757 		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8758 		kfree(sglq_entry);
8759 	}
8760 
8761 	/* Update the nvmet_xri_cnt to reflect no current sgls.
8762 	 * The next initialization cycle sets the count and allocates
8763 	 * the sgls over again.
8764 	 */
8765 	phba->sli4_hba.nvmet_xri_cnt = 0;
8766 }
8767 
8768 /**
8769  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8770  * @phba: pointer to lpfc hba data structure.
8771  *
8772  * This routine is invoked to allocate the driver's active sgl memory.
8773  * This array will hold the sglq_entry's for active IOs.
8774  **/
8775 static int
8776 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8777 {
8778 	int size;
8779 	size = sizeof(struct lpfc_sglq *);
8780 	size *= phba->sli4_hba.max_cfg_param.max_xri;
8781 
8782 	phba->sli4_hba.lpfc_sglq_active_list =
8783 		kzalloc(size, GFP_KERNEL);
8784 	if (!phba->sli4_hba.lpfc_sglq_active_list)
8785 		return -ENOMEM;
8786 	return 0;
8787 }
8788 
8789 /**
8790  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8791  * @phba: pointer to lpfc hba data structure.
8792  *
8793  * This routine is invoked to walk through the array of active sglq entries
8794  * and free all of the resources.
8795  * This is just a placeholder for now.
8796  **/
8797 static void
8798 lpfc_free_active_sgl(struct lpfc_hba *phba)
8799 {
8800 	kfree(phba->sli4_hba.lpfc_sglq_active_list);
8801 }
8802 
8803 /**
8804  * lpfc_init_sgl_list - Allocate and initialize sgl list.
8805  * @phba: pointer to lpfc hba data structure.
8806  *
8807  * This routine is invoked to allocate and initialize the driver's sgl
8808  * list and set up the sgl xritag tag array accordingly.
8809  *
8810  **/
8811 static void
8812 lpfc_init_sgl_list(struct lpfc_hba *phba)
8813 {
8814 	/* Initialize and populate the sglq list per host/VF. */
8815 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8816 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8817 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8818 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8819 
8820 	/* els xri-sgl book keeping */
8821 	phba->sli4_hba.els_xri_cnt = 0;
8822 
8823 	/* nvme xri-buffer book keeping */
8824 	phba->sli4_hba.io_xri_cnt = 0;
8825 }
8826 
8827 /**
8828  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8829  * @phba: pointer to lpfc hba data structure.
8830  *
8831  * This routine is invoked to post rpi header templates to the
8832  * port for those SLI4 ports that do not support extents.  This routine
8833  * posts a PAGE_SIZE memory region to the port to hold up to
8834  * 64 rpi context headers.  This is an initialization routine
8835  * and should be called only when interrupts are disabled.
8836  *
8837  * Return codes
8838  * 	0 - successful
8839  *	-ERROR - otherwise.
8840  **/
8841 int
8842 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8843 {
8844 	int rc = 0;
8845 	struct lpfc_rpi_hdr *rpi_hdr;
8846 
8847 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8848 	if (!phba->sli4_hba.rpi_hdrs_in_use)
8849 		return rc;
8850 	if (phba->sli4_hba.extents_in_use)
8851 		return -EIO;
8852 
8853 	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8854 	if (!rpi_hdr) {
8855 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8856 				"0391 Error during rpi post operation\n");
8857 		lpfc_sli4_remove_rpis(phba);
8858 		rc = -ENODEV;
8859 	}
8860 
8861 	return rc;
8862 }
8863 
8864 /**
8865  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8866  * @phba: pointer to lpfc hba data structure.
8867  *
8868  * This routine is invoked to allocate a single 4KB memory region to
8869  * support rpis and stores them in the phba.  This single region
8870  * provides support for up to 64 rpis.  The region is used globally
8871  * by the device.
8872  *
8873  * Returns:
8874  *   A valid rpi hdr on success.
8875  *   A NULL pointer on any failure.
8876  **/
8877 struct lpfc_rpi_hdr *
8878 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8879 {
8880 	uint16_t rpi_limit, curr_rpi_range;
8881 	struct lpfc_dmabuf *dmabuf;
8882 	struct lpfc_rpi_hdr *rpi_hdr;
8883 
8884 	/*
8885 	 * If the SLI4 port supports extents, posting the rpi header isn't
8886 	 * required.  Set the expected maximum count and let the actual value
8887 	 * get set when extents are fully allocated.
8888 	 */
8889 	if (!phba->sli4_hba.rpi_hdrs_in_use)
8890 		return NULL;
8891 	if (phba->sli4_hba.extents_in_use)
8892 		return NULL;
8893 
8894 	/* The limit on the logical index is just the max_rpi count. */
8895 	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8896 
8897 	spin_lock_irq(&phba->hbalock);
8898 	/*
8899 	 * Establish the starting RPI in this header block.  The starting
8900 	 * rpi is normalized to a zero base because the physical rpi is
8901 	 * port based.
8902 	 */
8903 	curr_rpi_range = phba->sli4_hba.next_rpi;
8904 	spin_unlock_irq(&phba->hbalock);
8905 
8906 	/* Reached full RPI range */
8907 	if (curr_rpi_range == rpi_limit)
8908 		return NULL;
8909 
8910 	/*
8911 	 * First allocate the protocol header region for the port.  The
8912 	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8913 	 */
8914 	dmabuf = kzalloc_obj(struct lpfc_dmabuf);
8915 	if (!dmabuf)
8916 		return NULL;
8917 
8918 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8919 					  LPFC_HDR_TEMPLATE_SIZE,
8920 					  &dmabuf->phys, GFP_KERNEL);
8921 	if (!dmabuf->virt) {
8922 		rpi_hdr = NULL;
8923 		goto err_free_dmabuf;
8924 	}
8925 
8926 	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8927 		rpi_hdr = NULL;
8928 		goto err_free_coherent;
8929 	}
8930 
8931 	/* Save the rpi header data for cleanup later. */
8932 	rpi_hdr = kzalloc_obj(struct lpfc_rpi_hdr);
8933 	if (!rpi_hdr)
8934 		goto err_free_coherent;
8935 
8936 	rpi_hdr->dmabuf = dmabuf;
8937 	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8938 	rpi_hdr->page_count = 1;
8939 	spin_lock_irq(&phba->hbalock);
8940 
8941 	/* The rpi_hdr stores the logical index only. */
8942 	rpi_hdr->start_rpi = curr_rpi_range;
8943 	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8944 	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8945 
8946 	spin_unlock_irq(&phba->hbalock);
8947 	return rpi_hdr;
8948 
8949  err_free_coherent:
8950 	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8951 			  dmabuf->virt, dmabuf->phys);
8952  err_free_dmabuf:
8953 	kfree(dmabuf);
8954 	return NULL;
8955 }
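
/*
 * Editorial sketch: lpfc_sli4_create_rpi_hdr() depends on the bus address
 * returned by dma_alloc_coherent() meeting the port's alignment rule and
 * verifies it with IS_ALIGNED().  The minimal form of that pattern, with
 * SIZE standing in for LPFC_HDR_TEMPLATE_SIZE:
 *
 *	virt = dma_alloc_coherent(dev, SIZE, &phys, GFP_KERNEL);
 *	if (!virt)
 *		return NULL;
 *	if (!IS_ALIGNED(phys, SIZE)) {
 *		dma_free_coherent(dev, SIZE, virt, phys);
 *		return NULL;
 *	}
 */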
8956 
8957 /**
8958  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8959  * @phba: pointer to lpfc hba data structure.
8960  *
8961  * This routine is invoked to remove all memory resources allocated
8962  * to support rpis for SLI4 ports not supporting extents. This routine
8963  * presumes the caller has released all rpis consumed by fabric or port
8964  * logins and is prepared to have the header pages removed.
8965  **/
8966 void
8967 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8968 {
8969 	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8970 
8971 	if (!phba->sli4_hba.rpi_hdrs_in_use)
8972 		goto exit;
8973 
8974 	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8975 				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8976 		list_del(&rpi_hdr->list);
8977 		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8978 				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8979 		kfree(rpi_hdr->dmabuf);
8980 		kfree(rpi_hdr);
8981 	}
8982  exit:
8983 	/* There are no rpis available to the port now. */
8984 	phba->sli4_hba.next_rpi = 0;
8985 }
8986 
8987 /**
8988  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
8989  * @pdev: pointer to pci device data structure.
8990  *
8991  * This routine is invoked to allocate the driver hba data structure for an
8992  * HBA device. If the allocation is successful, the phba reference to the
8993  * PCI device data structure is set.
8994  *
8995  * Return codes
8996  *      pointer to @phba - successful
8997  *      NULL - error
8998  **/
8999 static struct lpfc_hba *
9000 lpfc_hba_alloc(struct pci_dev *pdev)
9001 {
9002 	struct lpfc_hba *phba;
9003 
9004 	/* Allocate memory for HBA structure */
9005 	phba = kzalloc_obj(struct lpfc_hba);
9006 	if (!phba) {
9007 		dev_err(&pdev->dev, "failed to allocate hba struct\n");
9008 		return NULL;
9009 	}
9010 
9011 	/* Set reference to PCI device in HBA structure */
9012 	phba->pcidev = pdev;
9013 
9014 	/* Assign an unused board number */
9015 	phba->brd_no = lpfc_get_instance();
9016 	if (phba->brd_no < 0) {
9017 		kfree(phba);
9018 		return NULL;
9019 	}
9020 	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9021 
9022 	spin_lock_init(&phba->ct_ev_lock);
9023 	INIT_LIST_HEAD(&phba->ct_ev_waiters);
9024 
9025 	return phba;
9026 }
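
/*
 * Editorial note: lpfc_hba_alloc() and lpfc_hba_free() are strictly
 * paired; the free side also returns the board number to the IDR.
 * Illustrative pairing in a probe/remove path:
 *
 *	phba = lpfc_hba_alloc(pdev);
 *	if (!phba)
 *		return -ENOMEM;
 *	...
 *	lpfc_hba_free(phba);
 */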
9027 
9028 /**
9029  * lpfc_hba_free - Free driver hba data structure with a device.
9030  * @phba: pointer to lpfc hba data structure.
9031  *
9032  * This routine is invoked to free the driver hba data structure with an
9033  * HBA device.
9034  **/
9035 static void
9036 lpfc_hba_free(struct lpfc_hba *phba)
9037 {
9038 	if (phba->sli_rev == LPFC_SLI_REV4)
9039 		kfree(phba->sli4_hba.hdwq);
9040 
9041 	/* Release the driver assigned board number */
9042 	idr_remove(&lpfc_hba_index, phba->brd_no);
9043 
9044 	/* Free memory allocated with sli3 rings */
9045 	kfree(phba->sli.sli3_ring);
9046 	phba->sli.sli3_ring = NULL;
9047 
9048 	kfree(phba);
9049 	return;
9050 }
9051 
9052 /**
9053  * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9054  * @vport: pointer to lpfc vport data structure.
9055  *
9056  * This routine sets up the initial FDMI attribute masks for
9057  * FDMI2 or SmartSAN, depending on module parameters. The driver attempts
9058  * to get these attributes first before falling back; the attribute
9059  * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
9060  **/
9061 void
9062 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9063 {
9064 	struct lpfc_hba *phba = vport->phba;
9065 
9066 	set_bit(FC_ALLOW_FDMI, &vport->load_flag);
9067 	if (phba->cfg_enable_SmartSAN ||
9068 	    phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9069 		/* Setup appropriate attribute masks */
9070 		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9071 		if (phba->cfg_enable_SmartSAN)
9072 			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9073 		else
9074 			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9075 	}
9076 
9077 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
9078 			 "6077 Setup FDMI mask: hba x%x port x%x\n",
9079 			 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9080 }
9081 
9082 /**
9083  * lpfc_create_shost - Create hba physical port with associated scsi host.
9084  * @phba: pointer to lpfc hba data structure.
9085  *
9086  * This routine is invoked to create HBA physical port and associate a SCSI
9087  * host with it.
9088  *
9089  * Return codes
9090  *      0 - successful
9091  *      other values - error
9092  **/
9093 static int
9094 lpfc_create_shost(struct lpfc_hba *phba)
9095 {
9096 	struct lpfc_vport *vport;
9097 	struct Scsi_Host  *shost;
9098 
9099 	/* Initialize HBA FC structure */
9100 	phba->fc_edtov = FF_DEF_EDTOV;
9101 	phba->fc_ratov = FF_DEF_RATOV;
9102 	phba->fc_altov = FF_DEF_ALTOV;
9103 	phba->fc_arbtov = FF_DEF_ARBTOV;
9104 
9105 	atomic_set(&phba->sdev_cnt, 0);
9106 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9107 	if (!vport)
9108 		return -ENODEV;
9109 
9110 	shost = lpfc_shost_from_vport(vport);
9111 	phba->pport = vport;
9112 
9113 	if (phba->nvmet_support) {
9114 		/* Only 1 vport (pport) will support NVME target */
9115 		phba->targetport = NULL;
9116 		phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9117 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9118 				"6076 NVME Target Found\n");
9119 	}
9120 
9121 	lpfc_debugfs_initialize(vport);
9122 	/* Put reference to SCSI host to driver's device private data */
9123 	pci_set_drvdata(phba->pcidev, shost);
9124 
9125 	lpfc_setup_fdmi_mask(vport);
9126 
9127 	/*
9128 	 * At this point we are fully registered with PSA. In addition,
9129 	 * any initial discovery should be completed.
9130 	 */
9131 	return 0;
9132 }
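
/*
 * Editorial note: the pci_set_drvdata(phba->pcidev, shost) call above is
 * what later lets code holding only the pci_dev recover the SCSI host,
 * as lpfc_post_init_setup() does below:
 *
 *	shost = pci_get_drvdata(phba->pcidev);
 */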
9133 
9134 /**
9135  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9136  * @phba: pointer to lpfc hba data structure.
9137  *
9138  * This routine is invoked to destroy HBA physical port and the associated
9139  * SCSI host.
9140  **/
9141 static void
9142 lpfc_destroy_shost(struct lpfc_hba *phba)
9143 {
9144 	struct lpfc_vport *vport = phba->pport;
9145 
9146 	/* Destroy physical port that associated with the SCSI host */
9147 	destroy_port(vport);
9148 
9149 	return;
9150 }
9151 
9152 /**
9153  * lpfc_setup_bg - Setup Block guard structures and debug areas.
9154  * @phba: pointer to lpfc hba data structure.
9155  * @shost: the shost to be used to detect Block guard settings.
9156  *
9157  * This routine sets up the local Block guard protocol settings for @shost.
9158  * This routine also allocates memory for debugging bg buffers.
9159  **/
9160 static void
9161 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9162 {
9163 	uint32_t old_mask;
9164 	uint32_t old_guard;
9165 
9166 	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9167 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9168 				"1478 Registering BlockGuard with the "
9169 				"SCSI layer\n");
9170 
9171 		old_mask = phba->cfg_prot_mask;
9172 		old_guard = phba->cfg_prot_guard;
9173 
9174 		/* Only allow supported values */
9175 		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9176 			SHOST_DIX_TYPE0_PROTECTION |
9177 			SHOST_DIX_TYPE1_PROTECTION);
9178 		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9179 					 SHOST_DIX_GUARD_CRC);
9180 
9181 		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
9182 		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9183 			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9184 
9185 		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9186 			if ((old_mask != phba->cfg_prot_mask) ||
9187 				(old_guard != phba->cfg_prot_guard))
9188 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9189 					"1475 Registering BlockGuard with the "
9190 					"SCSI layer: mask %d  guard %d\n",
9191 					phba->cfg_prot_mask,
9192 					phba->cfg_prot_guard);
9193 
9194 			scsi_host_set_prot(shost, phba->cfg_prot_mask);
9195 			scsi_host_set_guard(shost, phba->cfg_prot_guard);
9196 		} else
9197 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9198 				"1479 Not Registering BlockGuard with the SCSI "
9199 				"layer, Bad protection parameters: %d %d\n",
9200 				old_mask, old_guard);
9201 	}
9202 }
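
/*
 * Editorial worked example: with cfg_prot_mask = SHOST_DIX_TYPE1_PROTECTION
 * and cfg_prot_guard = SHOST_DIX_GUARD_IP, the filtering above keeps the
 * DIX bit and then ORs in SHOST_DIF_TYPE1_PROTECTION, so the SCSI layer
 * is offered end-to-end Type 1 protection:
 *
 *	scsi_host_set_prot(shost, SHOST_DIX_TYPE1_PROTECTION |
 *				  SHOST_DIF_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
 */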
9203 
9204 /**
9205  * lpfc_post_init_setup - Perform necessary device post initialization setup.
9206  * @phba: pointer to lpfc hba data structure.
9207  *
9208  * This routine is invoked to perform all the necessary post initialization
9209  * setup for the device.
9210  **/
9211 static void
9212 lpfc_post_init_setup(struct lpfc_hba *phba)
9213 {
9214 	struct Scsi_Host  *shost;
9215 	struct lpfc_adapter_event_header adapter_event;
9216 
9217 	/* Get the default values for Model Name and Description */
9218 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9219 
9220 	/*
9221 	 * hba setup may have changed the hba_queue_depth so we need to
9222 	 * adjust the value of can_queue.
9223 	 */
9224 	shost = pci_get_drvdata(phba->pcidev);
9225 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
9226 
9227 	lpfc_host_attrib_init(shost);
9228 
9229 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9230 		spin_lock_irq(shost->host_lock);
9231 		lpfc_poll_start_timer(phba);
9232 		spin_unlock_irq(shost->host_lock);
9233 	}
9234 
9235 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9236 			"0428 Perform SCSI scan\n");
9237 	/* Send board arrival event to upper layer */
9238 	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9239 	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9240 	fc_host_post_vendor_event(shost, fc_get_event_number(),
9241 				  sizeof(adapter_event),
9242 				  (char *) &adapter_event,
9243 				  LPFC_NL_VENDOR_ID);
9244 	return;
9245 }
9246 
9247 /**
9248  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9249  * @phba: pointer to lpfc hba data structure.
9250  *
9251  * This routine is invoked to set up the PCI device memory space for device
9252  * with SLI-3 interface spec.
9253  *
9254  * Return codes
9255  * 	0 - successful
9256  * 	other values - error
9257  **/
9258 static int
9259 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9260 {
9261 	struct pci_dev *pdev = phba->pcidev;
9262 	unsigned long bar0map_len, bar2map_len;
9263 	int i, hbq_count;
9264 	void *ptr;
9265 	int error;
9266 
9267 	if (!pdev)
9268 		return -ENODEV;
9269 
9270 	/* Set the device DMA mask size */
9271 	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9272 	if (error)
9273 		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9274 	if (error)
9275 		return error;
9276 	error = -ENODEV;
9277 
9278 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
9279 	 * required by each mapping.
9280 	 */
9281 	phba->pci_bar0_map = pci_resource_start(pdev, 0);
9282 	bar0map_len = pci_resource_len(pdev, 0);
9283 
9284 	phba->pci_bar2_map = pci_resource_start(pdev, 2);
9285 	bar2map_len = pci_resource_len(pdev, 2);
9286 
9287 	/* Map HBA SLIM to a kernel virtual address. */
9288 	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9289 	if (!phba->slim_memmap_p) {
9290 		dev_printk(KERN_ERR, &pdev->dev,
9291 			   "ioremap failed for SLIM memory.\n");
9292 		goto out;
9293 	}
9294 
9295 	/* Map HBA Control Registers to a kernel virtual address. */
9296 	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9297 	if (!phba->ctrl_regs_memmap_p) {
9298 		dev_printk(KERN_ERR, &pdev->dev,
9299 			   "ioremap failed for HBA control registers.\n");
9300 		goto out_iounmap_slim;
9301 	}
9302 
9303 	/* Allocate memory for SLI-2 structures */
9304 	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9305 					       &phba->slim2p.phys, GFP_KERNEL);
9306 	if (!phba->slim2p.virt)
9307 		goto out_iounmap;
9308 
9309 	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9310 	phba->mbox_ext = (phba->slim2p.virt +
9311 		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9312 	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9313 	phba->IOCBs = (phba->slim2p.virt +
9314 		       offsetof(struct lpfc_sli2_slim, IOCBs));
9315 
9316 	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9317 						 lpfc_sli_hbq_size(),
9318 						 &phba->hbqslimp.phys,
9319 						 GFP_KERNEL);
9320 	if (!phba->hbqslimp.virt)
9321 		goto out_free_slim;
9322 
9323 	hbq_count = lpfc_sli_hbq_count();
9324 	ptr = phba->hbqslimp.virt;
9325 	for (i = 0; i < hbq_count; ++i) {
9326 		phba->hbqs[i].hbq_virt = ptr;
9327 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9328 		ptr += (lpfc_hbq_defs[i]->entry_count *
9329 			sizeof(struct lpfc_hbq_entry));
9330 	}
9331 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9332 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9333 
9334 	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9335 
9336 	phba->MBslimaddr = phba->slim_memmap_p;
9337 	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9338 	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9339 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9340 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9341 
9342 	return 0;
9343 
9344 out_free_slim:
9345 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9346 			  phba->slim2p.virt, phba->slim2p.phys);
9347 out_iounmap:
9348 	iounmap(phba->ctrl_regs_memmap_p);
9349 out_iounmap_slim:
9350 	iounmap(phba->slim_memmap_p);
9351 out:
9352 	return error;
9353 }
9354 
9355 /**
9356  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9357  * @phba: pointer to lpfc hba data structure.
9358  *
9359  * This routine is invoked to unset the PCI device memory space for device
9360  * with SLI-3 interface spec.
9361  **/
9362 static void
9363 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9364 {
9365 	struct pci_dev *pdev;
9366 
9367 	/* Obtain PCI device reference */
9368 	if (!phba->pcidev)
9369 		return;
9370 	else
9371 		pdev = phba->pcidev;
9372 
9373 	/* Free coherent DMA memory allocated */
9374 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9375 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
9376 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9377 			  phba->slim2p.virt, phba->slim2p.phys);
9378 
9379 	/* I/O memory unmap */
9380 	iounmap(phba->ctrl_regs_memmap_p);
9381 	iounmap(phba->slim_memmap_p);
9382 
9383 	return;
9384 }
9385 
9386 /**
9387  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9388  * @phba: pointer to lpfc hba data structure.
9389  *
9390  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9391  * done and check status.
9392  *
9393  * Return 0 if successful, otherwise -ENODEV.
9394  **/
9395 int
9396 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9397 {
9398 	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9399 	struct lpfc_register reg_data;
9400 	int i, port_error = 0;
9401 	uint32_t if_type;
9402 
9403 	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9404 	memset(&reg_data, 0, sizeof(reg_data));
9405 	if (!phba->sli4_hba.PSMPHRregaddr)
9406 		return -ENODEV;
9407 
9408 	/* Wait up to 30 seconds for the SLI Port POST done and ready */
9409 	for (i = 0; i < 3000; i++) {
9410 		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9411 			&portsmphr_reg.word0) ||
9412 			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9413 			/* Port has a fatal POST error, break out */
9414 			port_error = -ENODEV;
9415 			break;
9416 		}
9417 		if (LPFC_POST_STAGE_PORT_READY ==
9418 		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9419 			break;
9420 		msleep(10);
9421 	}
9422 
9423 	/*
9424 	 * If there was a port error during POST, then don't proceed with
9425 	 * other register reads as the data may not be valid.  Just exit.
9426 	 */
9427 	if (port_error) {
9428 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9429 			"1408 Port Failed POST - portsmphr=0x%x, "
9430 			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9431 			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9432 			portsmphr_reg.word0,
9433 			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9434 			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9435 			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9436 			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9437 			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9438 			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9439 			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9440 			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9441 	} else {
9442 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9443 				"2534 Device Info: SLIFamily=0x%x, "
9444 				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9445 				"SLIHint_2=0x%x, FT=0x%x\n",
9446 				bf_get(lpfc_sli_intf_sli_family,
9447 				       &phba->sli4_hba.sli_intf),
9448 				bf_get(lpfc_sli_intf_slirev,
9449 				       &phba->sli4_hba.sli_intf),
9450 				bf_get(lpfc_sli_intf_if_type,
9451 				       &phba->sli4_hba.sli_intf),
9452 				bf_get(lpfc_sli_intf_sli_hint1,
9453 				       &phba->sli4_hba.sli_intf),
9454 				bf_get(lpfc_sli_intf_sli_hint2,
9455 				       &phba->sli4_hba.sli_intf),
9456 				bf_get(lpfc_sli_intf_func_type,
9457 				       &phba->sli4_hba.sli_intf));
9458 		/*
9459 		 * Check for other Port errors during the initialization
9460 		 * process.  Fail the load if the port did not come up
9461 		 * correctly.
9462 		 */
9463 		if_type = bf_get(lpfc_sli_intf_if_type,
9464 				 &phba->sli4_hba.sli_intf);
9465 		switch (if_type) {
9466 		case LPFC_SLI_INTF_IF_TYPE_0:
9467 			phba->sli4_hba.ue_mask_lo =
9468 			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9469 			phba->sli4_hba.ue_mask_hi =
9470 			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9471 			uerrlo_reg.word0 =
9472 			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9473 			uerrhi_reg.word0 =
9474 				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9475 			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9476 			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9477 				lpfc_printf_log(phba, KERN_ERR,
9478 						LOG_TRACE_EVENT,
9479 						"1422 Unrecoverable Error "
9480 						"Detected during POST "
9481 						"uerr_lo_reg=0x%x, "
9482 						"uerr_hi_reg=0x%x, "
9483 						"ue_mask_lo_reg=0x%x, "
9484 						"ue_mask_hi_reg=0x%x\n",
9485 						uerrlo_reg.word0,
9486 						uerrhi_reg.word0,
9487 						phba->sli4_hba.ue_mask_lo,
9488 						phba->sli4_hba.ue_mask_hi);
9489 				port_error = -ENODEV;
9490 			}
9491 			break;
9492 		case LPFC_SLI_INTF_IF_TYPE_2:
9493 		case LPFC_SLI_INTF_IF_TYPE_6:
9494 			/* Final checks.  The port status should be clean. */
9495 			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9496 				&reg_data.word0) ||
9497 				lpfc_sli4_unrecoverable_port(&reg_data)) {
9498 				phba->work_status[0] =
9499 					readl(phba->sli4_hba.u.if_type2.
9500 					      ERR1regaddr);
9501 				phba->work_status[1] =
9502 					readl(phba->sli4_hba.u.if_type2.
9503 					      ERR2regaddr);
9504 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9505 					"2888 Unrecoverable port error "
9506 					"following POST: port status reg "
9507 					"0x%x, port_smphr reg 0x%x, "
9508 					"error 1=0x%x, error 2=0x%x\n",
9509 					reg_data.word0,
9510 					portsmphr_reg.word0,
9511 					phba->work_status[0],
9512 					phba->work_status[1]);
9513 				port_error = -ENODEV;
9514 				break;
9515 			}
9516 
9517 			if (lpfc_pldv_detect &&
9518 			    bf_get(lpfc_sli_intf_sli_family,
9519 				   &phba->sli4_hba.sli_intf) ==
9520 					LPFC_SLI_INTF_FAMILY_G6)
9521 				pci_write_config_byte(phba->pcidev,
9522 						      LPFC_SLI_INTF, CFG_PLD);
9523 			break;
9524 		case LPFC_SLI_INTF_IF_TYPE_1:
9525 		default:
9526 			break;
9527 		}
9528 	}
9529 	return port_error;
9530 }
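
/*
 * Editorial note: the 3000 x 10ms wait above is an open-coded register
 * poll.  A hedged alternative (not the driver's code) could use the
 * generic helper from <linux/iopoll.h>:
 *
 *	rc = readl_poll_timeout(phba->sli4_hba.PSMPHRregaddr,
 *				portsmphr_reg.word0,
 *				bf_get(lpfc_port_smphr_port_status,
 *				       &portsmphr_reg) ==
 *				       LPFC_POST_STAGE_PORT_READY,
 *				10000, 30000000);
 *
 * The open-coded loop is kept because it also checks the fatal POST
 * error bit on every pass and distinguishes it from a plain timeout.
 */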
9531 
9532 /**
9533  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9534  * @phba: pointer to lpfc hba data structure.
9535  * @if_type:  The SLI4 interface type getting configured.
9536  *
9537  * This routine is invoked to set up SLI4 BAR0 PCI config space register
9538  * memory map.
9539  **/
9540 static void
9541 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9542 {
9543 	switch (if_type) {
9544 	case LPFC_SLI_INTF_IF_TYPE_0:
9545 		phba->sli4_hba.u.if_type0.UERRLOregaddr =
9546 			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9547 		phba->sli4_hba.u.if_type0.UERRHIregaddr =
9548 			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9549 		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9550 			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9551 		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9552 			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9553 		phba->sli4_hba.SLIINTFregaddr =
9554 			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9555 		break;
9556 	case LPFC_SLI_INTF_IF_TYPE_2:
9557 		phba->sli4_hba.u.if_type2.EQDregaddr =
9558 			phba->sli4_hba.conf_regs_memmap_p +
9559 						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9560 		phba->sli4_hba.u.if_type2.ERR1regaddr =
9561 			phba->sli4_hba.conf_regs_memmap_p +
9562 						LPFC_CTL_PORT_ER1_OFFSET;
9563 		phba->sli4_hba.u.if_type2.ERR2regaddr =
9564 			phba->sli4_hba.conf_regs_memmap_p +
9565 						LPFC_CTL_PORT_ER2_OFFSET;
9566 		phba->sli4_hba.u.if_type2.CTRLregaddr =
9567 			phba->sli4_hba.conf_regs_memmap_p +
9568 						LPFC_CTL_PORT_CTL_OFFSET;
9569 		phba->sli4_hba.u.if_type2.STATUSregaddr =
9570 			phba->sli4_hba.conf_regs_memmap_p +
9571 						LPFC_CTL_PORT_STA_OFFSET;
9572 		phba->sli4_hba.SLIINTFregaddr =
9573 			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9574 		phba->sli4_hba.PSMPHRregaddr =
9575 			phba->sli4_hba.conf_regs_memmap_p +
9576 						LPFC_CTL_PORT_SEM_OFFSET;
9577 		phba->sli4_hba.RQDBregaddr =
9578 			phba->sli4_hba.conf_regs_memmap_p +
9579 						LPFC_ULP0_RQ_DOORBELL;
9580 		phba->sli4_hba.WQDBregaddr =
9581 			phba->sli4_hba.conf_regs_memmap_p +
9582 						LPFC_ULP0_WQ_DOORBELL;
9583 		phba->sli4_hba.CQDBregaddr =
9584 			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9585 		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9586 		phba->sli4_hba.MQDBregaddr =
9587 			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9588 		phba->sli4_hba.BMBXregaddr =
9589 			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9590 		break;
9591 	case LPFC_SLI_INTF_IF_TYPE_6:
9592 		phba->sli4_hba.u.if_type2.EQDregaddr =
9593 			phba->sli4_hba.conf_regs_memmap_p +
9594 						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9595 		phba->sli4_hba.u.if_type2.ERR1regaddr =
9596 			phba->sli4_hba.conf_regs_memmap_p +
9597 						LPFC_CTL_PORT_ER1_OFFSET;
9598 		phba->sli4_hba.u.if_type2.ERR2regaddr =
9599 			phba->sli4_hba.conf_regs_memmap_p +
9600 						LPFC_CTL_PORT_ER2_OFFSET;
9601 		phba->sli4_hba.u.if_type2.CTRLregaddr =
9602 			phba->sli4_hba.conf_regs_memmap_p +
9603 						LPFC_CTL_PORT_CTL_OFFSET;
9604 		phba->sli4_hba.u.if_type2.STATUSregaddr =
9605 			phba->sli4_hba.conf_regs_memmap_p +
9606 						LPFC_CTL_PORT_STA_OFFSET;
9607 		phba->sli4_hba.PSMPHRregaddr =
9608 			phba->sli4_hba.conf_regs_memmap_p +
9609 						LPFC_CTL_PORT_SEM_OFFSET;
9610 		phba->sli4_hba.BMBXregaddr =
9611 			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9612 		break;
9613 	case LPFC_SLI_INTF_IF_TYPE_1:
9614 	default:
9615 		dev_printk(KERN_ERR, &phba->pcidev->dev,
9616 			   "FATAL - unsupported SLI4 interface type - %d\n",
9617 			   if_type);
9618 		break;
9619 	}
9620 }
9621 
9622 /**
9623  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9624  * @phba: pointer to lpfc hba data structure.
9625  * @if_type: sli if type to operate on.
9626  *
9627  * This routine is invoked to set up SLI4 BAR1 register memory map.
9628  **/
9629 static void
9630 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9631 {
9632 	switch (if_type) {
9633 	case LPFC_SLI_INTF_IF_TYPE_0:
9634 		phba->sli4_hba.PSMPHRregaddr =
9635 			phba->sli4_hba.ctrl_regs_memmap_p +
9636 			LPFC_SLIPORT_IF0_SMPHR;
9637 		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9638 			LPFC_HST_ISR0;
9639 		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9640 			LPFC_HST_IMR0;
9641 		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9642 			LPFC_HST_ISCR0;
9643 		break;
9644 	case LPFC_SLI_INTF_IF_TYPE_6:
9645 		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9646 			LPFC_IF6_RQ_DOORBELL;
9647 		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9648 			LPFC_IF6_WQ_DOORBELL;
9649 		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9650 			LPFC_IF6_CQ_DOORBELL;
9651 		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9652 			LPFC_IF6_EQ_DOORBELL;
9653 		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9654 			LPFC_IF6_MQ_DOORBELL;
9655 		break;
9656 	case LPFC_SLI_INTF_IF_TYPE_2:
9657 	case LPFC_SLI_INTF_IF_TYPE_1:
9658 	default:
9659 		dev_err(&phba->pcidev->dev,
9660 			   "FATAL - unsupported SLI4 interface type - %d\n",
9661 			   if_type);
9662 		break;
9663 	}
9664 }
9665 
9666 /**
9667  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9668  * @phba: pointer to lpfc hba data structure.
9669  * @vf: virtual function number
9670  *
9671  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9672  * based on the given virtual function number, @vf.
9673  *
9674  * Return 0 if successful, otherwise -ENODEV.
9675  **/
9676 static int
9677 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9678 {
9679 	if (vf > LPFC_VIR_FUNC_MAX)
9680 		return -ENODEV;
9681 
9682 	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9683 				vf * LPFC_VFR_PAGE_SIZE +
9684 					LPFC_ULP0_RQ_DOORBELL);
9685 	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9686 				vf * LPFC_VFR_PAGE_SIZE +
9687 					LPFC_ULP0_WQ_DOORBELL);
9688 	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9689 				vf * LPFC_VFR_PAGE_SIZE +
9690 					LPFC_EQCQ_DOORBELL);
9691 	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9692 	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9693 				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9694 	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9695 				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9696 	return 0;
9697 }
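
/*
 * Illustrative sketch (not driver code): the arithmetic used above.
 * Each virtual function owns one doorbell page of LPFC_VFR_PAGE_SIZE
 * bytes, so any doorbell register resolves to
 * base + vf * page_size + register_offset.  The helper name is
 * hypothetical.
 */
static inline void __iomem *
example_vf_doorbell(void __iomem *drbl_base, uint32_t vf, uint32_t reg_offset)
{
	return drbl_base + (vf * LPFC_VFR_PAGE_SIZE) + reg_offset;
}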
9698 
9699 /**
9700  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9701  * @phba: pointer to lpfc hba data structure.
9702  *
9703  * This routine is invoked to create the bootstrap mailbox
9704  * region consistent with the SLI-4 interface spec.  This
9705  * routine allocates all memory necessary to communicate
9706  * mailbox commands to the port and sets up all alignment
9707  * needs.  No locks are expected to be held when calling
9708  * this routine.
9709  *
9710  * Return codes
9711  * 	0 - successful
9712  * 	-ENOMEM - could not allocate memory.
9713  **/
9714 static int
9715 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9716 {
9717 	uint32_t bmbx_size;
9718 	struct lpfc_dmabuf *dmabuf;
9719 	struct dma_address *dma_address;
9720 	uint32_t pa_addr;
9721 	uint64_t phys_addr;
9722 
9723 	dmabuf = kzalloc_obj(struct lpfc_dmabuf);
9724 	if (!dmabuf)
9725 		return -ENOMEM;
9726 
9727 	/*
9728 	 * The bootstrap mailbox region is comprised of 2 parts
9729 	 * plus an alignment restriction of 16 bytes.
9730 	 */
9731 	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9732 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9733 					  &dmabuf->phys, GFP_KERNEL);
9734 	if (!dmabuf->virt) {
9735 		kfree(dmabuf);
9736 		return -ENOMEM;
9737 	}
9738 
9739 	/*
9740 	 * Initialize the bootstrap mailbox pointers now so that the register
9741 	 * operations are simple later.  The mailbox dma address is required
9742 	 * to be 16-byte aligned.  Also align the virtual memory as each
9743 	 * mailbox is copied into the bmbx mailbox region before issuing the
9744 	 * command to the port.
9745 	 */
9746 	phba->sli4_hba.bmbx.dmabuf = dmabuf;
9747 	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9748 
9749 	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9750 					      LPFC_ALIGN_16_BYTE);
9751 	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9752 					      LPFC_ALIGN_16_BYTE);
9753 
9754 	/*
9755 	 * Set the high and low physical addresses now.  The SLI4 alignment
9756 	 * requirement is 16 bytes and the mailbox is posted to the port
9757 	 * as two 30-bit addresses.  The other data is a bit marking whether
9758 	 * the 30-bit address is the high or low address.
9759 	 * Upcast bmbx aphys to 64bits so shift instruction compiles
9760 	 * clean on 32 bit machines.
9761 	 */
9762 	dma_address = &phba->sli4_hba.bmbx.dma_address;
9763 	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9764 	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9765 	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9766 					   LPFC_BMBX_BIT1_ADDR_HI);
9767 
9768 	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9769 	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9770 					   LPFC_BMBX_BIT1_ADDR_LO);
9771 	return 0;
9772 }
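
/*
 * Illustrative sketch (not driver code): the address split performed
 * above.  Because aphys is 16-byte aligned, bits 3:0 are zero; bits
 * 33:4 form the low 30-bit half and bits 63:34 the high half, each
 * shifted left two bits to leave room for the hi/lo marker bit.  The
 * helper name is hypothetical; the macros are the driver's own.
 */
static inline void
example_bmbx_split(uint64_t aphys, uint32_t *addr_hi, uint32_t *addr_lo)
{
	*addr_hi = (uint32_t)(((aphys >> 34) & 0x3fffffff) << 2) |
		   LPFC_BMBX_BIT1_ADDR_HI;
	*addr_lo = (uint32_t)(((aphys >> 4) & 0x3fffffff) << 2) |
		   LPFC_BMBX_BIT1_ADDR_LO;
}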
9773 
9774 /**
9775  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9776  * @phba: pointer to lpfc hba data structure.
9777  *
9778  * This routine is invoked to teardown the bootstrap mailbox
9779  * region and release all host resources. This routine requires
9780  * the caller to ensure all mailbox commands are recovered, no
9781  * additional mailbox commands are sent, and interrupts are disabled
9782  * before calling this routine.
9783  *
9784  **/
9785 static void
9786 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9787 {
9788 	dma_free_coherent(&phba->pcidev->dev,
9789 			  phba->sli4_hba.bmbx.bmbx_size,
9790 			  phba->sli4_hba.bmbx.dmabuf->virt,
9791 			  phba->sli4_hba.bmbx.dmabuf->phys);
9792 
9793 	kfree(phba->sli4_hba.bmbx.dmabuf);
9794 	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9795 }
9796 
9797 static const char * const lpfc_topo_to_str[] = {
9798 	"Loop then P2P",
9799 	"Loopback",
9800 	"P2P Only",
9801 	"Unsupported",
9802 	"Loop Only",
9803 	"Unsupported",
9804 	"P2P then Loop",
9805 };
9806 
9807 #define	LINK_FLAGS_DEF	0x0
9808 #define	LINK_FLAGS_P2P	0x1
9809 #define	LINK_FLAGS_LOOP	0x2
9810 /**
9811  * lpfc_map_topology - Map the topology read from READ_CONFIG
9812  * @phba: pointer to lpfc hba data structure.
9813  * @rd_config: pointer to read config data
9814  *
9815  * This routine is invoked to map the topology values as read
9816  * from the read config mailbox command. If the persistent
9817  * topology feature is supported, the firmware will provide the
9818  * saved topology information to be used in INIT_LINK
9819  **/
9820 static void
9821 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9822 {
9823 	u8 ptv, tf, pt;
9824 
9825 	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9826 	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9827 	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9828 
9829 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9830 			"2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9831 			 ptv, tf, pt);
9832 	if (!ptv) {
9833 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9834 				"2019 FW does not support persistent topology "
9835 				"Using driver parameter defined value [%s]",
9836 				lpfc_topo_to_str[phba->cfg_topology]);
9837 		return;
9838 	}
9839 	/* FW supports persistent topology - override module parameter value */
9840 	set_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
9841 
9842 	/* if ASIC_GEN_NUM >= 0xC */
9843 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9844 		    LPFC_SLI_INTF_IF_TYPE_6) ||
9845 	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9846 		    LPFC_SLI_INTF_FAMILY_G6)) {
9847 		if (!tf)
9848 			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9849 					? FLAGS_TOPOLOGY_MODE_LOOP
9850 					: FLAGS_TOPOLOGY_MODE_PT_PT);
9851 		else
9852 			clear_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
9853 	} else { /* G5 */
9854 		if (tf)
9855 			/* If topology failover set - pt is '0' or '1' */
9856 			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9857 					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
9858 		else
9859 			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9860 					? FLAGS_TOPOLOGY_MODE_PT_PT
9861 					: FLAGS_TOPOLOGY_MODE_LOOP);
9862 	}
9863 	if (test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag))
9864 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9865 				"2020 Using persistent topology value [%s]",
9866 				lpfc_topo_to_str[phba->cfg_topology]);
9867 	else
9868 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9869 				"2021 Invalid topology values from FW "
9870 				"Using driver parameter defined value [%s]",
9871 				lpfc_topo_to_str[phba->cfg_topology]);
9872 }
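
/*
 * Summary of the mapping above (restated from the code, not from the
 * firmware spec):
 *
 *   ptv=0       : no persistent value; keep the module parameter.
 *   ptv=1, tf=0 : G6/if_type 6: pt==LOOP -> LOOP, otherwise PT_PT.
 *                 G5:           pt==P2P  -> PT_PT, otherwise LOOP.
 *   ptv=1, tf=1 : G6/if_type 6: failover unsupported; the persistent
 *                 topology flag is cleared again.
 *                 G5:           pt=1 -> PT_LOOP, pt=0 -> LOOP_PT.
 */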
9873 
9874 /**
9875  * lpfc_sli4_read_config - Get the config parameters.
9876  * @phba: pointer to lpfc hba data structure.
9877  *
9878  * This routine is invoked to read the configuration parameters from the HBA.
9879  * The configuration parameters are used to set the base and maximum values
9880  * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
9881  * allocation for the port.
9882  *
9883  * Return codes
9884  * 	0 - successful
9885  * 	-ENOMEM - No available memory
9886  *      -EIO - The mailbox failed to complete successfully.
9887  **/
9888 int
9889 lpfc_sli4_read_config(struct lpfc_hba *phba)
9890 {
9891 	LPFC_MBOXQ_t *pmb;
9892 	struct lpfc_mbx_read_config *rd_config;
9893 	union  lpfc_sli4_cfg_shdr *shdr;
9894 	uint32_t shdr_status, shdr_add_status;
9895 	struct lpfc_mbx_get_func_cfg *get_func_cfg;
9896 	struct lpfc_rsrc_desc_fcfcoe *desc;
9897 	char *pdesc_0;
9898 	uint16_t forced_link_speed;
9899 	uint32_t if_type, qmin, fawwpn;
9900 	int length, i, rc = 0, rc2;
9901 
9902 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9903 	if (!pmb) {
9904 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9905 				"2011 Unable to allocate memory for issuing "
9906 				"SLI_CONFIG_SPECIAL mailbox command\n");
9907 		return -ENOMEM;
9908 	}
9909 
9910 	lpfc_read_config(phba, pmb);
9911 
9912 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9913 	if (rc != MBX_SUCCESS) {
9914 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9915 				"2012 Mailbox failed, mbxCmd x%x "
9916 				"READ_CONFIG, mbxStatus x%x\n",
9917 				bf_get(lpfc_mqe_command, &pmb->u.mqe),
9918 				bf_get(lpfc_mqe_status, &pmb->u.mqe));
9919 		rc = -EIO;
9920 	} else {
9921 		rd_config = &pmb->u.mqe.un.rd_config;
9922 		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9923 			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9924 			phba->sli4_hba.lnk_info.lnk_tp =
9925 				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9926 			phba->sli4_hba.lnk_info.lnk_no =
9927 				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9928 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9929 					"3081 lnk_type:%d, lnk_numb:%d\n",
9930 					phba->sli4_hba.lnk_info.lnk_tp,
9931 					phba->sli4_hba.lnk_info.lnk_no);
9932 		} else
9933 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9934 					"3082 Mailbox (x%x) returned ldv:x0\n",
9935 					bf_get(lpfc_mqe_command, &pmb->u.mqe));
9936 		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9937 			phba->bbcredit_support = 1;
9938 			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9939 		}
9940 
9941 		fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9942 
9943 		if (fawwpn) {
9944 			lpfc_printf_log(phba, KERN_INFO,
9945 					LOG_INIT | LOG_DISCOVERY,
9946 					"2702 READ_CONFIG: FA-PWWN is "
9947 					"configured on\n");
9948 			phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9949 		} else {
9950 			/* Clear FW configured flag, preserve driver flag */
9951 			phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
9952 		}
9953 
9954 		phba->sli4_hba.conf_trunk =
9955 			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9956 		phba->sli4_hba.extents_in_use =
9957 			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9958 
9959 		phba->sli4_hba.max_cfg_param.max_xri =
9960 			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9961 		/* Reduce resource usage in kdump environment */
9962 		if (is_kdump_kernel() &&
9963 		    phba->sli4_hba.max_cfg_param.max_xri > 512)
9964 			phba->sli4_hba.max_cfg_param.max_xri = 512;
9965 		phba->sli4_hba.max_cfg_param.xri_base =
9966 			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9967 		phba->sli4_hba.max_cfg_param.max_vpi =
9968 			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9969 		/* Limit the max we support */
9970 		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9971 			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9972 		phba->sli4_hba.max_cfg_param.vpi_base =
9973 			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
9974 		phba->sli4_hba.max_cfg_param.max_rpi =
9975 			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
9976 		phba->sli4_hba.max_cfg_param.rpi_base =
9977 			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
9978 		phba->sli4_hba.max_cfg_param.max_vfi =
9979 			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
9980 		phba->sli4_hba.max_cfg_param.vfi_base =
9981 			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
9982 		phba->sli4_hba.max_cfg_param.max_fcfi =
9983 			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
9984 		phba->sli4_hba.max_cfg_param.max_eq =
9985 			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
9986 		phba->sli4_hba.max_cfg_param.max_rq =
9987 			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
9988 		phba->sli4_hba.max_cfg_param.max_wq =
9989 			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
9990 		phba->sli4_hba.max_cfg_param.max_cq =
9991 			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
9992 		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
9993 		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
9994 		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
9995 		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
9996 		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
9997 				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
9998 		phba->max_vports = phba->max_vpi;
9999 
10000 		if (bf_get(lpfc_mbx_rd_conf_fedif, rd_config))
10001 			phba->sli4_hba.encryption_support = true;
10002 		else
10003 			phba->sli4_hba.encryption_support = false;
10004 
10005 		/* Next decide on FPIN or Signal E2E CGN support
10006 		 * For congestion alarms and warnings the valid combinations are:
10007 		 * 1. FPIN alarms / FPIN warnings
10008 		 * 2. Signal alarms / Signal warnings
10009 		 * 3. FPIN alarms / Signal warnings
10010 		 * 4. Signal alarms / FPIN warnings
10011 		 *
10012 		 * Initialize the adapter frequency to 100 mSecs
10013 		 */
10014 		phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10015 		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10016 		phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10017 
10018 		if (lpfc_use_cgn_signal) {
10019 			if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10020 				phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10021 				phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10022 			}
10023 			if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10024 				/* MUST support both alarm and warning
10025 				 * because EDC does not support alarm alone.
10026 				 */
10027 				if (phba->cgn_reg_signal !=
10028 				    EDC_CG_SIG_WARN_ONLY) {
10029 					/* Must support both or none */
10030 					phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10031 					phba->cgn_reg_signal =
10032 						EDC_CG_SIG_NOTSUPPORTED;
10033 				} else {
10034 					phba->cgn_reg_signal =
10035 						EDC_CG_SIG_WARN_ALARM;
10036 					phba->cgn_reg_fpin =
10037 						LPFC_CGN_FPIN_NONE;
10038 				}
10039 			}
10040 		}
10041 
10042 		/* Set the congestion initial signal and fpin values. */
10043 		phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10044 		phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10045 
10046 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10047 				"6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10048 				phba->cgn_reg_signal, phba->cgn_reg_fpin);
10049 
10050 		lpfc_map_topology(phba, rd_config);
10051 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10052 				"2003 cfg params Extents? %d "
10053 				"XRI(B:%d M:%d), "
10054 				"VPI(B:%d M:%d) "
10055 				"VFI(B:%d M:%d) "
10056 				"RPI(B:%d M:%d) "
10057 				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10058 				phba->sli4_hba.extents_in_use,
10059 				phba->sli4_hba.max_cfg_param.xri_base,
10060 				phba->sli4_hba.max_cfg_param.max_xri,
10061 				phba->sli4_hba.max_cfg_param.vpi_base,
10062 				phba->sli4_hba.max_cfg_param.max_vpi,
10063 				phba->sli4_hba.max_cfg_param.vfi_base,
10064 				phba->sli4_hba.max_cfg_param.max_vfi,
10065 				phba->sli4_hba.max_cfg_param.rpi_base,
10066 				phba->sli4_hba.max_cfg_param.max_rpi,
10067 				phba->sli4_hba.max_cfg_param.max_fcfi,
10068 				phba->sli4_hba.max_cfg_param.max_eq,
10069 				phba->sli4_hba.max_cfg_param.max_cq,
10070 				phba->sli4_hba.max_cfg_param.max_wq,
10071 				phba->sli4_hba.max_cfg_param.max_rq,
10072 				phba->lmt);
10073 
10074 		/*
10075 		 * Calculate queue resources based on how
10076 		 * many WQ/CQ/EQs are available.
10077 		 */
10078 		qmin = phba->sli4_hba.max_cfg_param.max_wq;
10079 		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10080 			qmin = phba->sli4_hba.max_cfg_param.max_cq;
10081 		/*
10082 		 * Reserve 4 (ELS, NVME LS, MBOX, plus one extra);
10083 		 * the remainder can be used for NVME / FCP.
10084 		 */
10085 		qmin -= 4;
10086 		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10087 			qmin = phba->sli4_hba.max_cfg_param.max_eq;
10088 
10089 		/* Check to see if there is enough for default cfg */
10090 		if ((phba->cfg_irq_chann > qmin) ||
10091 		    (phba->cfg_hdw_queue > qmin)) {
10092 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10093 					"2005 Reducing Queues - "
10094 					"FW resource limitation: "
10095 					"WQ %d CQ %d EQ %d: min %d: "
10096 					"IRQ %d HDWQ %d\n",
10097 					phba->sli4_hba.max_cfg_param.max_wq,
10098 					phba->sli4_hba.max_cfg_param.max_cq,
10099 					phba->sli4_hba.max_cfg_param.max_eq,
10100 					qmin, phba->cfg_irq_chann,
10101 					phba->cfg_hdw_queue);
10102 
10103 			if (phba->cfg_irq_chann > qmin)
10104 				phba->cfg_irq_chann = qmin;
10105 			if (phba->cfg_hdw_queue > qmin)
10106 				phba->cfg_hdw_queue = qmin;
10107 		}
10108 	}
10109 
10110 	if (rc)
10111 		goto read_cfg_out;
10112 
10113 	/* Update link speed if forced link speed is supported */
10114 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10115 	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10116 		forced_link_speed =
10117 			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10118 		if (forced_link_speed) {
10119 			set_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag);
10120 
10121 			switch (forced_link_speed) {
10122 			case LINK_SPEED_1G:
10123 				phba->cfg_link_speed =
10124 					LPFC_USER_LINK_SPEED_1G;
10125 				break;
10126 			case LINK_SPEED_2G:
10127 				phba->cfg_link_speed =
10128 					LPFC_USER_LINK_SPEED_2G;
10129 				break;
10130 			case LINK_SPEED_4G:
10131 				phba->cfg_link_speed =
10132 					LPFC_USER_LINK_SPEED_4G;
10133 				break;
10134 			case LINK_SPEED_8G:
10135 				phba->cfg_link_speed =
10136 					LPFC_USER_LINK_SPEED_8G;
10137 				break;
10138 			case LINK_SPEED_10G:
10139 				phba->cfg_link_speed =
10140 					LPFC_USER_LINK_SPEED_10G;
10141 				break;
10142 			case LINK_SPEED_16G:
10143 				phba->cfg_link_speed =
10144 					LPFC_USER_LINK_SPEED_16G;
10145 				break;
10146 			case LINK_SPEED_32G:
10147 				phba->cfg_link_speed =
10148 					LPFC_USER_LINK_SPEED_32G;
10149 				break;
10150 			case LINK_SPEED_64G:
10151 				phba->cfg_link_speed =
10152 					LPFC_USER_LINK_SPEED_64G;
10153 				break;
10154 			case LINK_SPEED_128G:
10155 				phba->cfg_link_speed =
10156 					LPFC_USER_LINK_SPEED_128G;
10157 				break;
10158 			case 0xffff:
10159 				phba->cfg_link_speed =
10160 					LPFC_USER_LINK_SPEED_AUTO;
10161 				break;
10162 			default:
10163 				lpfc_printf_log(phba, KERN_ERR,
10164 						LOG_TRACE_EVENT,
10165 						"0047 Unrecognized link "
10166 						"speed : %d\n",
10167 						forced_link_speed);
10168 				phba->cfg_link_speed =
10169 					LPFC_USER_LINK_SPEED_AUTO;
10170 			}
10171 		}
10172 	}
10173 
10174 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
10175 	length = phba->sli4_hba.max_cfg_param.max_xri -
10176 			lpfc_sli4_get_els_iocb_cnt(phba);
10177 	if (phba->cfg_hba_queue_depth > length) {
10178 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10179 				"3361 HBA queue depth changed from %d to %d\n",
10180 				phba->cfg_hba_queue_depth, length);
10181 		phba->cfg_hba_queue_depth = length;
10182 	}
10183 
10184 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10185 	    LPFC_SLI_INTF_IF_TYPE_2)
10186 		goto read_cfg_out;
10187 
10188 	/* get the pf# and vf# for SLI4 if_type 2 port */
10189 	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10190 		  sizeof(struct lpfc_sli4_cfg_mhdr));
10191 	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10192 			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10193 			 length, LPFC_SLI4_MBX_EMBED);
10194 
10195 	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10196 	shdr = (union lpfc_sli4_cfg_shdr *)
10197 				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10198 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10199 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10200 	if (rc2 || shdr_status || shdr_add_status) {
10201 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10202 				"3026 Mailbox failed, mbxCmd x%x "
10203 				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10204 				bf_get(lpfc_mqe_command, &pmb->u.mqe),
10205 				bf_get(lpfc_mqe_status, &pmb->u.mqe));
10206 		goto read_cfg_out;
10207 	}
10208 
10209 	/* search for fc_fcoe resource descriptor */
10210 	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10211 
10212 	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10213 	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10214 	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10215 	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10216 		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10217 	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10218 		goto read_cfg_out;
10219 
10220 	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10221 		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10222 		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10223 		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10224 			phba->sli4_hba.iov.pf_number =
10225 				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10226 			phba->sli4_hba.iov.vf_number =
10227 				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10228 			break;
10229 		}
10230 	}
10231 
10232 	if (i < LPFC_RSRC_DESC_MAX_NUM)
10233 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10234 				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10235 				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10236 				phba->sli4_hba.iov.vf_number);
10237 	else
10238 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10239 				"3028 GET_FUNCTION_CONFIG: failed to find "
10240 				"Resource Descriptor:x%x\n",
10241 				LPFC_RSRC_DESC_TYPE_FCFCOE);
10242 
10243 read_cfg_out:
10244 	mempool_free(pmb, phba->mbox_mem_pool);
10245 	return rc;
10246 }
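
/*
 * Illustrative sketch (not driver code): the queue-resource clamp
 * applied inside lpfc_sli4_read_config() above, restated as a
 * hypothetical helper.  Take the smaller of the firmware's WQ and CQ
 * counts, reserve four queues (ELS, NVME LS, MBOX, one spare), bound
 * the result by the EQ count, and shrink the requested IRQ and HDWQ
 * counts to fit.
 */
static inline uint32_t
example_clamp_queues(uint32_t max_wq, uint32_t max_cq, uint32_t max_eq,
		     uint32_t *irq_chann, uint32_t *hdw_queue)
{
	uint32_t qmin = min(max_wq, max_cq);

	qmin -= 4;	/* ELS, NVME LS, MBOX, plus one extra */
	qmin = min(qmin, max_eq);
	*irq_chann = min(*irq_chann, qmin);
	*hdw_queue = min(*hdw_queue, qmin);
	return qmin;
}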
10247 
10248 /**
10249  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10250  * @phba: pointer to lpfc hba data structure.
10251  *
10252  * This routine is invoked to set up the port-side endian order when
10253  * the port if_type is 0.  This routine has no function for other
10254  * if_types.
10255  *
10256  * Return codes
10257  * 	0 - successful
10258  * 	-ENOMEM - No available memory
10259  *      -EIO - The mailbox failed to complete successfully.
10260  **/
10261 static int
10262 lpfc_setup_endian_order(struct lpfc_hba *phba)
10263 {
10264 	LPFC_MBOXQ_t *mboxq;
10265 	uint32_t if_type, rc = 0;
10266 	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10267 				      HOST_ENDIAN_HIGH_WORD1};
10268 
10269 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10270 	switch (if_type) {
10271 	case LPFC_SLI_INTF_IF_TYPE_0:
10272 		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10273 						       GFP_KERNEL);
10274 		if (!mboxq) {
10275 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10276 					"0492 Unable to allocate memory for "
10277 					"issuing SLI_CONFIG_SPECIAL mailbox "
10278 					"command\n");
10279 			return -ENOMEM;
10280 		}
10281 
10282 		/*
10283 		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10284 		 * two words to contain special data values and no other data.
10285 		 */
10286 		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10287 		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10288 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10289 		if (rc != MBX_SUCCESS) {
10290 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10291 					"0493 SLI_CONFIG_SPECIAL mailbox "
10292 					"failed with status x%x\n",
10293 					rc);
10294 			rc = -EIO;
10295 		}
10296 		mempool_free(mboxq, phba->mbox_mem_pool);
10297 		break;
10298 	case LPFC_SLI_INTF_IF_TYPE_6:
10299 	case LPFC_SLI_INTF_IF_TYPE_2:
10300 	case LPFC_SLI_INTF_IF_TYPE_1:
10301 	default:
10302 		break;
10303 	}
10304 	return rc;
10305 }
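
/*
 * Illustrative sketch (not driver code): why two known words suffice.
 * The host writes constants whose byte sequences differ between the
 * two byte orders; the port compares the bytes received against the
 * pattern it expects and latches any required swap.  A hypothetical
 * host-side check of the same idea:
 */
static inline bool example_is_swapped(uint32_t wire_word, uint32_t expect)
{
	return wire_word != expect && swab32(wire_word) == expect;
}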
10306 
10307 /**
10308  * lpfc_sli4_queue_verify - Verify and update EQ counts
10309  * @phba: pointer to lpfc hba data structure.
10310  *
10311  * This routine is invoked to check the user settable queue counts for EQs.
10312  * After this routine is called the counts will be set to valid values that
10313  * adhere to the constraints of the system's interrupt vectors and the port's
10314  * queue resources.
10315  *
10316  * Return codes
10317  *      0 - successful
10318  *      -ENOMEM - No available memory
10319  **/
10320 static int
10321 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10322 {
10323 	/*
10324 	 * Sanity check for configured queue parameters against the run-time
10325 	 * device parameters
10326 	 */
10327 
10328 	if (phba->nvmet_support) {
10329 		if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10330 			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10331 		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10332 			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10333 	}
10334 
10335 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10336 			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10337 			phba->cfg_hdw_queue, phba->cfg_irq_chann,
10338 			phba->cfg_nvmet_mrq);
10339 
10340 	/* Get EQ depth from module parameter, fake the default for now */
10341 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10342 	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10343 
10344 	/* Get CQ depth from module parameter, fake the default for now */
10345 	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10346 	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10347 	return 0;
10348 }
10349 
10350 static int
10351 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10352 {
10353 	struct lpfc_queue *qdesc;
10354 	u32 wqesize;
10355 	int cpu;
10356 
10357 	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10358 	/* Create Fast Path IO CQs */
10359 	if (phba->enab_exp_wqcq_pages)
10360 		/* Increase the CQ size when WQEs contain an embedded cdb */
10361 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10362 					      phba->sli4_hba.cq_esize,
10363 					      LPFC_CQE_EXP_COUNT, cpu);
10364 
10365 	else
10366 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10367 					      phba->sli4_hba.cq_esize,
10368 					      phba->sli4_hba.cq_ecount, cpu);
10369 	if (!qdesc) {
10370 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10371 				"0499 Failed allocate fast-path IO CQ (%d)\n",
10372 				idx);
10373 		return 1;
10374 	}
10375 	qdesc->qe_valid = 1;
10376 	qdesc->hdwq = idx;
10377 	qdesc->chann = cpu;
10378 	phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10379 
10380 	/* Create Fast Path IO WQs */
10381 	if (phba->enab_exp_wqcq_pages) {
10382 		/* Increase the WQ size when WQEs contain an embedded cdb */
10383 		wqesize = (phba->fcp_embed_io) ?
10384 			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10385 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10386 					      wqesize,
10387 					      LPFC_WQE_EXP_COUNT, cpu);
10388 	} else
10389 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10390 					      phba->sli4_hba.wq_esize,
10391 					      phba->sli4_hba.wq_ecount, cpu);
10392 
10393 	if (!qdesc) {
10394 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10395 				"0503 Failed allocate fast-path IO WQ (%d)\n",
10396 				idx);
10397 		return 1;
10398 	}
10399 	qdesc->hdwq = idx;
10400 	qdesc->chann = cpu;
10401 	phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10402 	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10403 	return 0;
10404 }
10405 
10406 /**
10407  * lpfc_sli4_queue_create - Create all the SLI4 queues
10408  * @phba: pointer to lpfc hba data structure.
10409  *
10410  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10411  * operation. For each SLI4 queue type, the parameters such as queue entry
10412  * count (queue depth) shall be taken from the module parameter. For now,
10413  * we just use some constant number as a placeholder.
10414  *
10415  * Return codes
10416  *      0 - successful
10417  *      -ENOMEM - No available memory
10418  *      -EIO - The mailbox failed to complete successfully.
10419  **/
10420 int
10421 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10422 {
10423 	struct lpfc_queue *qdesc;
10424 	int idx, cpu, eqcpu;
10425 	struct lpfc_sli4_hdw_queue *qp;
10426 	struct lpfc_vector_map_info *cpup;
10427 	struct lpfc_vector_map_info *eqcpup;
10428 	struct lpfc_eq_intr_info *eqi;
10429 	u32 wqesize;
10430 
10431 	/*
10432 	 * Create HBA Record arrays.
10433 	 * Both NVME and FCP will share that same vectors / EQs
10434 	 */
10435 	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10436 	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10437 	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10438 	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10439 	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10440 	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10441 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10442 	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10443 	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10444 	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10445 
10446 	if (!phba->sli4_hba.hdwq) {
10447 		phba->sli4_hba.hdwq = kzalloc_objs(struct lpfc_sli4_hdw_queue,
10448 						   phba->cfg_hdw_queue);
10449 		if (!phba->sli4_hba.hdwq) {
10450 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10451 					"6427 Failed allocate memory for "
10452 					"fast-path Hardware Queue array\n");
10453 			goto out_error;
10454 		}
10455 		/* Prepare hardware queues to take IO buffers */
10456 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10457 			qp = &phba->sli4_hba.hdwq[idx];
10458 			spin_lock_init(&qp->io_buf_list_get_lock);
10459 			spin_lock_init(&qp->io_buf_list_put_lock);
10460 			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10461 			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10462 			qp->get_io_bufs = 0;
10463 			qp->put_io_bufs = 0;
10464 			qp->total_io_bufs = 0;
10465 			spin_lock_init(&qp->abts_io_buf_list_lock);
10466 			INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10467 			qp->abts_scsi_io_bufs = 0;
10468 			qp->abts_nvme_io_bufs = 0;
10469 			INIT_LIST_HEAD(&qp->sgl_list);
10470 			INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10471 			spin_lock_init(&qp->hdwq_lock);
10472 		}
10473 	}
10474 
10475 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10476 		if (phba->nvmet_support) {
10477 			phba->sli4_hba.nvmet_cqset = kzalloc_objs(struct lpfc_queue *,
10478 								  phba->cfg_nvmet_mrq);
10479 			if (!phba->sli4_hba.nvmet_cqset) {
10480 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10481 					"3121 Fail allocate memory for "
10482 					"fast-path CQ set array\n");
10483 				goto out_error;
10484 			}
10485 			phba->sli4_hba.nvmet_mrq_hdr = kzalloc_objs(struct lpfc_queue *,
10486 								    phba->cfg_nvmet_mrq);
10487 			if (!phba->sli4_hba.nvmet_mrq_hdr) {
10488 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10489 					"3122 Fail allocate memory for "
10490 					"fast-path RQ set hdr array\n");
10491 				goto out_error;
10492 			}
10493 			phba->sli4_hba.nvmet_mrq_data = kzalloc_objs(struct lpfc_queue *,
10494 								     phba->cfg_nvmet_mrq);
10495 			if (!phba->sli4_hba.nvmet_mrq_data) {
10496 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10497 					"3124 Fail allocate memory for "
10498 					"fast-path RQ set data array\n");
10499 				goto out_error;
10500 			}
10501 		}
10502 	}
10503 
10504 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10505 
10506 	/* Create HBA Event Queues (EQs) */
10507 	for_each_present_cpu(cpu) {
10508 		/* We only want to create 1 EQ per vector, even though
10509 		 * multiple CPUs might be using that vector, so only
10510 		 * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
10511 		 */
10512 		cpup = &phba->sli4_hba.cpu_map[cpu];
10513 		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10514 			continue;
10515 
10516 		/* Get a ptr to the Hardware Queue associated with this CPU */
10517 		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10518 
10519 		/* Allocate an EQ */
10520 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10521 					      phba->sli4_hba.eq_esize,
10522 					      phba->sli4_hba.eq_ecount, cpu);
10523 		if (!qdesc) {
10524 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10525 					"0497 Failed allocate EQ (%d)\n",
10526 					cpup->hdwq);
10527 			goto out_error;
10528 		}
10529 		qdesc->qe_valid = 1;
10530 		qdesc->hdwq = cpup->hdwq;
10531 		qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10532 		qdesc->last_cpu = qdesc->chann;
10533 
10534 		/* Save the allocated EQ in the Hardware Queue */
10535 		qp->hba_eq = qdesc;
10536 
10537 		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10538 		list_add(&qdesc->cpu_list, &eqi->list);
10539 	}
10540 
10541 	/* Now we need to populate the other Hardware Queues that share
10542 	 * an IRQ vector with the associated EQ ptr.
10543 	 */
10544 	for_each_present_cpu(cpu) {
10545 		cpup = &phba->sli4_hba.cpu_map[cpu];
10546 
10547 		/* Check for EQ already allocated in previous loop */
10548 		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10549 			continue;
10550 
10551 		/* Check for multiple CPUs per hdwq */
10552 		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10553 		if (qp->hba_eq)
10554 			continue;
10555 
10556 		/* We need to share an EQ for this hdwq */
10557 		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10558 		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10559 		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10560 	}
10561 
10562 	/* Allocate IO Path SLI4 CQ/WQs */
10563 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10564 		if (lpfc_alloc_io_wq_cq(phba, idx))
10565 			goto out_error;
10566 	}
10567 
10568 	if (phba->nvmet_support) {
10569 		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10570 			cpu = lpfc_find_cpu_handle(phba, idx,
10571 						   LPFC_FIND_BY_HDWQ);
10572 			qdesc = lpfc_sli4_queue_alloc(phba,
10573 						      LPFC_DEFAULT_PAGE_SIZE,
10574 						      phba->sli4_hba.cq_esize,
10575 						      phba->sli4_hba.cq_ecount,
10576 						      cpu);
10577 			if (!qdesc) {
10578 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10579 						"3142 Failed allocate NVME "
10580 						"CQ Set (%d)\n", idx);
10581 				goto out_error;
10582 			}
10583 			qdesc->qe_valid = 1;
10584 			qdesc->hdwq = idx;
10585 			qdesc->chann = cpu;
10586 			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10587 		}
10588 	}
10589 
10590 	/*
10591 	 * Create Slow Path Completion Queues (CQs)
10592 	 */
10593 
10594 	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10595 	/* Create slow-path Mailbox Command Complete Queue */
10596 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10597 				      phba->sli4_hba.cq_esize,
10598 				      phba->sli4_hba.cq_ecount, cpu);
10599 	if (!qdesc) {
10600 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10601 				"0500 Failed allocate slow-path mailbox CQ\n");
10602 		goto out_error;
10603 	}
10604 	qdesc->qe_valid = 1;
10605 	phba->sli4_hba.mbx_cq = qdesc;
10606 
10607 	/* Create slow-path ELS Complete Queue */
10608 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10609 				      phba->sli4_hba.cq_esize,
10610 				      phba->sli4_hba.cq_ecount, cpu);
10611 	if (!qdesc) {
10612 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10613 				"0501 Failed allocate slow-path ELS CQ\n");
10614 		goto out_error;
10615 	}
10616 	qdesc->qe_valid = 1;
10617 	qdesc->chann = cpu;
10618 	phba->sli4_hba.els_cq = qdesc;
10619 
10620 
10621 	/*
10622 	 * Create Slow Path Work Queues (WQs)
10623 	 */
10624 
10625 	/* Create Mailbox Command Queue */
10626 
10627 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10628 				      phba->sli4_hba.mq_esize,
10629 				      phba->sli4_hba.mq_ecount, cpu);
10630 	if (!qdesc) {
10631 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10632 				"0505 Failed allocate slow-path MQ\n");
10633 		goto out_error;
10634 	}
10635 	qdesc->chann = cpu;
10636 	phba->sli4_hba.mbx_wq = qdesc;
10637 
10638 	/*
10639 	 * Create ELS Work Queues
10640 	 */
10641 
10642 	/*
10643 	 * Create slow-path ELS Work Queue.
10644 	 * Increase the ELS WQ size when WQEs contain an embedded cdb
10645 	 */
10646 	wqesize = (phba->fcp_embed_io) ?
10647 			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10648 
10649 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10650 				      wqesize,
10651 				      phba->sli4_hba.wq_ecount, cpu);
10652 	if (!qdesc) {
10653 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10654 				"0504 Failed allocate slow-path ELS WQ\n");
10655 		goto out_error;
10656 	}
10657 	qdesc->chann = cpu;
10658 	phba->sli4_hba.els_wq = qdesc;
10659 	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10660 
10661 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10662 		/* Create NVME LS Complete Queue */
10663 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10664 					      phba->sli4_hba.cq_esize,
10665 					      phba->sli4_hba.cq_ecount, cpu);
10666 		if (!qdesc) {
10667 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10668 					"6079 Failed allocate NVME LS CQ\n");
10669 			goto out_error;
10670 		}
10671 		qdesc->chann = cpu;
10672 		qdesc->qe_valid = 1;
10673 		phba->sli4_hba.nvmels_cq = qdesc;
10674 
10675 		/* Create NVME LS Work Queue */
10676 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10677 					      phba->sli4_hba.wq_esize,
10678 					      phba->sli4_hba.wq_ecount, cpu);
10679 		if (!qdesc) {
10680 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10681 					"6080 Failed allocate NVME LS WQ\n");
10682 			goto out_error;
10683 		}
10684 		qdesc->chann = cpu;
10685 		phba->sli4_hba.nvmels_wq = qdesc;
10686 		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10687 	}
10688 
10689 	/*
10690 	 * Create Receive Queue (RQ)
10691 	 */
10692 
10693 	/* Create Receive Queue for header */
10694 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10695 				      phba->sli4_hba.rq_esize,
10696 				      phba->sli4_hba.rq_ecount, cpu);
10697 	if (!qdesc) {
10698 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10699 				"0506 Failed allocate receive HRQ\n");
10700 		goto out_error;
10701 	}
10702 	phba->sli4_hba.hdr_rq = qdesc;
10703 
10704 	/* Create Receive Queue for data */
10705 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10706 				      phba->sli4_hba.rq_esize,
10707 				      phba->sli4_hba.rq_ecount, cpu);
10708 	if (!qdesc) {
10709 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10710 				"0507 Failed allocate receive DRQ\n");
10711 		goto out_error;
10712 	}
10713 	phba->sli4_hba.dat_rq = qdesc;
10714 
10715 	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10716 	    phba->nvmet_support) {
10717 		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10718 			cpu = lpfc_find_cpu_handle(phba, idx,
10719 						   LPFC_FIND_BY_HDWQ);
10720 			/* Create NVMET Receive Queue for header */
10721 			qdesc = lpfc_sli4_queue_alloc(phba,
10722 						      LPFC_DEFAULT_PAGE_SIZE,
10723 						      phba->sli4_hba.rq_esize,
10724 						      LPFC_NVMET_RQE_DEF_COUNT,
10725 						      cpu);
10726 			if (!qdesc) {
10727 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10728 						"3146 Failed allocate "
10729 						"receive HRQ\n");
10730 				goto out_error;
10731 			}
10732 			qdesc->hdwq = idx;
10733 			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10734 
10735 			/* Only needed for header of RQ pair */
10736 			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10737 						   GFP_KERNEL,
10738 						   cpu_to_node(cpu));
10739 			if (qdesc->rqbp == NULL) {
10740 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10741 						"6131 Failed allocate "
10742 						"Header RQBP\n");
10743 				goto out_error;
10744 			}
10745 
10746 			/* Put list in known state in case driver load fails. */
10747 			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10748 
10749 			/* Create NVMET Receive Queue for data */
10750 			qdesc = lpfc_sli4_queue_alloc(phba,
10751 						      LPFC_DEFAULT_PAGE_SIZE,
10752 						      phba->sli4_hba.rq_esize,
10753 						      LPFC_NVMET_RQE_DEF_COUNT,
10754 						      cpu);
10755 			if (!qdesc) {
10756 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10757 						"3156 Failed allocate "
10758 						"receive DRQ\n");
10759 				goto out_error;
10760 			}
10761 			qdesc->hdwq = idx;
10762 			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10763 		}
10764 	}
10765 
10766 	/* Clear NVME stats */
10767 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10768 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10769 			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10770 			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10771 		}
10772 	}
10773 
10774 	/* Clear SCSI stats */
10775 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10776 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10777 			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10778 			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10779 		}
10780 	}
10781 
10782 	return 0;
10783 
10784 out_error:
10785 	lpfc_sli4_queue_destroy(phba);
10786 	return -ENOMEM;
10787 }
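
/*
 * Illustrative sketch (not driver code): the EQ-sharing step from the
 * second loop above, restated for a single hardware queue.  A CPU
 * without LPFC_CPU_FIRST_IRQ borrows the EQ created by the first-IRQ
 * CPU on the same vector.  The helper name is hypothetical.
 */
static inline void
example_share_eq(struct lpfc_hba *phba, struct lpfc_vector_map_info *cpup,
		 struct lpfc_sli4_hdw_queue *qp)
{
	int eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
	struct lpfc_vector_map_info *eqcpup = &phba->sli4_hba.cpu_map[eqcpu];

	qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
}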
10788 
10789 static inline void
10790 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10791 {
10792 	if (*qp != NULL) {
10793 		lpfc_sli4_queue_free(*qp);
10794 		*qp = NULL;
10795 	}
10796 }
10797 
10798 static inline void
10799 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10800 {
10801 	int idx;
10802 
10803 	if (*qs == NULL)
10804 		return;
10805 
10806 	for (idx = 0; idx < max; idx++)
10807 		__lpfc_sli4_release_queue(&(*qs)[idx]);
10808 
10809 	kfree(*qs);
10810 	*qs = NULL;
10811 }
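
/*
 * Note on the two helpers above: passing struct lpfc_queue ** (and
 * ***) lets each helper free the queue and NULL the caller's pointer
 * in one step, so a repeated destroy pass cannot double-free.
 */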
10812 
10813 static inline void
10814 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10815 {
10816 	struct lpfc_sli4_hdw_queue *hdwq;
10817 	struct lpfc_queue *eq;
10818 	uint32_t idx;
10819 
10820 	hdwq = phba->sli4_hba.hdwq;
10821 
10822 	/* Loop thru all Hardware Queues */
10823 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10824 		/* Free the CQ/WQ corresponding to the Hardware Queue */
10825 		lpfc_sli4_queue_free(hdwq[idx].io_cq);
10826 		lpfc_sli4_queue_free(hdwq[idx].io_wq);
10827 		hdwq[idx].hba_eq = NULL;
10828 		hdwq[idx].io_cq = NULL;
10829 		hdwq[idx].io_wq = NULL;
10830 		if (phba->cfg_xpsgl && !phba->nvmet_support)
10831 			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10832 		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10833 	}
10834 	/* Loop thru all IRQ vectors */
10835 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10836 		/* Free the EQ corresponding to the IRQ vector */
10837 		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10838 		lpfc_sli4_queue_free(eq);
10839 		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10840 	}
10841 }
10842 
10843 /**
10844  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10845  * @phba: pointer to lpfc hba data structure.
10846  *
10847  * This routine is invoked to release all the SLI4 queues for the FCoE HBA
10848  * operation. This routine returns void.
10854  **/
10855 void
10856 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10857 {
10858 	/*
10859 	 * Set FREE_INIT before beginning to free the queues.
10860 	 * Wait until the users of the queues acknowledge the
10861 	 * release by clearing FREE_WAIT.
10862 	 */
10863 	spin_lock_irq(&phba->hbalock);
10864 	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10865 	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10866 		spin_unlock_irq(&phba->hbalock);
10867 		msleep(20);
10868 		spin_lock_irq(&phba->hbalock);
10869 	}
10870 	spin_unlock_irq(&phba->hbalock);
10871 
10872 	lpfc_sli4_cleanup_poll_list(phba);
10873 
10874 	/* Release HBA eqs */
10875 	if (phba->sli4_hba.hdwq)
10876 		lpfc_sli4_release_hdwq(phba);
10877 
10878 	if (phba->nvmet_support) {
10879 		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10880 					 phba->cfg_nvmet_mrq);
10881 
10882 		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10883 					 phba->cfg_nvmet_mrq);
10884 		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10885 					 phba->cfg_nvmet_mrq);
10886 	}
10887 
10888 	/* Release mailbox command work queue */
10889 	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10890 
10891 	/* Release ELS work queue */
10892 	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10893 
10894 	/* Release ELS work queue */
10895 	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10896 
10897 	/* Release unsolicited receive queue */
10898 	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10899 	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10900 
10901 	/* Release ELS complete queue */
10902 	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10903 
10904 	/* Release NVME LS complete queue */
10905 	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10906 
10907 	/* Release mailbox command complete queue */
10908 	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10909 
10910 	/* Everything on this list has been freed */
10911 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10912 
10913 	/* Done with freeing the queues */
10914 	spin_lock_irq(&phba->hbalock);
10915 	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10916 	spin_unlock_irq(&phba->hbalock);
10917 }
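
/*
 * Illustrative sketch (not driver code): the user side of the
 * FREE_INIT/FREE_WAIT handshake assumed by the loop above.  A queue
 * user would set LPFC_QUEUE_FREE_WAIT under hbalock before touching a
 * queue, refuse to start once LPFC_QUEUE_FREE_INIT is set, and clear
 * FREE_WAIT when done.  The helper name and exact protocol details
 * here are assumptions for illustration.
 */
static inline bool example_queue_user_enter(struct lpfc_hba *phba)
{
	bool ok;

	spin_lock_irq(&phba->hbalock);
	ok = !(phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT);
	if (ok)
		phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);
	return ok;	/* caller clears LPFC_QUEUE_FREE_WAIT when done */
}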
10918 
10919 int
10920 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10921 {
10922 	struct lpfc_rqb *rqbp;
10923 	struct lpfc_dmabuf *h_buf;
10924 	struct rqb_dmabuf *rqb_buffer;
10925 
10926 	rqbp = rq->rqbp;
10927 	while (!list_empty(&rqbp->rqb_buffer_list)) {
10928 		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10929 				 struct lpfc_dmabuf, list);
10930 
10931 		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10932 		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
10933 		rqbp->buffer_count--;
10934 	}
10935 	return 1;
10936 }
10937 
10938 static int
10939 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10940 	struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10941 	int qidx, uint32_t qtype)
10942 {
10943 	struct lpfc_sli_ring *pring;
10944 	int rc;
10945 
10946 	if (!eq || !cq || !wq) {
10947 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10948 			"6085 Fast-path %s (%d) not allocated\n",
10949 			((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10950 		return -ENOMEM;
10951 	}
10952 
10953 	/* create the CQ first */
10954 	rc = lpfc_cq_create(phba, cq, eq,
10955 			(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10956 	if (rc) {
10957 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10958 				"6086 Failed setup of CQ (%d), rc = 0x%x\n",
10959 				qidx, (uint32_t)rc);
10960 		return rc;
10961 	}
10962 
10963 	if (qtype != LPFC_MBOX) {
10964 		/* Setup cq_map for fast lookup */
10965 		if (cq_map)
10966 			*cq_map = cq->queue_id;
10967 
10968 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10969 			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10970 			qidx, cq->queue_id, qidx, eq->queue_id);
10971 
10972 		/* create the wq */
10973 		rc = lpfc_wq_create(phba, wq, cq, qtype);
10974 		if (rc) {
10975 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10976 				"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10977 				qidx, (uint32_t)rc);
10978 			/* no need to tear down cq - caller will do so */
10979 			return rc;
10980 		}
10981 
10982 		/* Bind this CQ/WQ to the NVME ring */
10983 		pring = wq->pring;
10984 		pring->sli.sli4.wqp = (void *)wq;
10985 		cq->pring = pring;
10986 
10987 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10988 			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
10989 			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
10990 	} else {
10991 		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
10992 		if (rc) {
10993 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10994 					"0539 Failed setup of slow-path MQ: "
10995 					"rc = 0x%x\n", rc);
10996 			/* no need to tear down cq - caller will do so */
10997 			return rc;
10998 		}
10999 
11000 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11001 			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11002 			phba->sli4_hba.mbx_wq->queue_id,
11003 			phba->sli4_hba.mbx_cq->queue_id);
11004 	}
11005 
11006 	return 0;
11007 }
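
/*
 * Ordering note for the helper above: the CQ is always created first
 * because WQ and MQ creation must name their parent CQ.  On a WQ or MQ
 * failure the CQ is deliberately left standing; as the comments note,
 * the caller tears it down.
 */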
11008 
11009 /**
11010  * lpfc_setup_cq_lookup - Setup the CQ lookup table
11011  * @phba: pointer to lpfc hba data structure.
11012  *
11013  * This routine will populate the cq_lookup table by all
11014  * available CQ queue_id's.
11015  **/
11016 static void
11017 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11018 {
11019 	struct lpfc_queue *eq, *childq;
11020 	int qidx;
11021 
11022 	memset(phba->sli4_hba.cq_lookup, 0,
11023 	       (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11024 	/* Loop thru all IRQ vectors */
11025 	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11026 		/* Get the EQ corresponding to the IRQ vector */
11027 		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11028 		if (!eq)
11029 			continue;
11030 		/* Loop through all CQs associated with that EQ */
11031 		list_for_each_entry(childq, &eq->child_list, list) {
11032 			if (childq->queue_id > phba->sli4_hba.cq_max)
11033 				continue;
11034 			if (childq->subtype == LPFC_IO)
11035 				phba->sli4_hba.cq_lookup[childq->queue_id] =
11036 					childq;
11037 		}
11038 	}
11039 }
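
/*
 * Illustrative sketch (not driver code): what the table above buys the
 * fast path.  With cq_lookup populated, a CQ id taken from an EQE
 * resolves to its queue in O(1) instead of walking the EQ's child
 * list.  The helper name is hypothetical.
 */
static inline struct lpfc_queue *
example_cqid_to_cq(struct lpfc_hba *phba, uint16_t cqid)
{
	if (cqid > phba->sli4_hba.cq_max)
		return NULL;
	return phba->sli4_hba.cq_lookup[cqid];
}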
11040 
11041 /**
11042  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11043  * @phba: pointer to lpfc hba data structure.
11044  *
11045  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
11046  * operation.
11047  *
11048  * Return codes
11049  *      0 - successful
11050  *      -ENOMEM - No available memory
11051  *      -EIO - The mailbox failed to complete successfully.
11052  **/
11053 int
11054 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11055 {
11056 	uint32_t shdr_status, shdr_add_status;
11057 	union lpfc_sli4_cfg_shdr *shdr;
11058 	struct lpfc_vector_map_info *cpup;
11059 	struct lpfc_sli4_hdw_queue *qp;
11060 	LPFC_MBOXQ_t *mboxq;
11061 	int qidx, cpu;
11062 	uint32_t length, usdelay;
11063 	int rc = -ENOMEM;
11064 
11065 	/* Check for dual-ULP support */
11066 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11067 	if (!mboxq) {
11068 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11069 				"3249 Unable to allocate memory for "
11070 				"QUERY_FW_CFG mailbox command\n");
11071 		return -ENOMEM;
11072 	}
11073 	length = (sizeof(struct lpfc_mbx_query_fw_config) -
11074 		  sizeof(struct lpfc_sli4_cfg_mhdr));
11075 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11076 			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11077 			 length, LPFC_SLI4_MBX_EMBED);
11078 
11079 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11080 
11081 	shdr = (union lpfc_sli4_cfg_shdr *)
11082 			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11083 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11084 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11085 	if (shdr_status || shdr_add_status || rc) {
11086 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11087 				"3250 QUERY_FW_CFG mailbox failed with status "
11088 				"x%x add_status x%x, mbx status x%x\n",
11089 				shdr_status, shdr_add_status, rc);
11090 		mempool_free(mboxq, phba->mbox_mem_pool);
11091 		rc = -ENXIO;
11092 		goto out_error;
11093 	}
11094 
11095 	phba->sli4_hba.fw_func_mode =
11096 			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11097 	phba->sli4_hba.physical_port =
11098 			mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11099 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11100 			"3251 QUERY_FW_CFG: func_mode:x%x\n",
11101 			phba->sli4_hba.fw_func_mode);
11102 
11103 	mempool_free(mboxq, phba->mbox_mem_pool);
11104 
11105 	/*
11106 	 * Set up HBA Event Queues (EQs)
11107 	 */
11108 	qp = phba->sli4_hba.hdwq;
11109 
11110 	/* Set up HBA event queue */
11111 	if (!qp) {
11112 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11113 				"3147 Fast-path EQs not allocated\n");
11114 		rc = -ENOMEM;
11115 		goto out_error;
11116 	}
11117 
11118 	/* Loop thru all IRQ vectors */
11119 	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11120 		/* Create HBA Event Queues (EQs) in order */
11121 		for_each_present_cpu(cpu) {
11122 			cpup = &phba->sli4_hba.cpu_map[cpu];
11123 
11124 			/* Look for the CPU that's using that vector with
11125 			 * LPFC_CPU_FIRST_IRQ set.
11126 			 */
11127 			if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11128 				continue;
11129 			if (qidx != cpup->eq)
11130 				continue;
11131 
11132 			/* Create an EQ for that vector */
11133 			rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11134 					    phba->cfg_fcp_imax);
11135 			if (rc) {
11136 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11137 						"0523 Failed setup of fast-path"
11138 						" EQ (%d), rc = 0x%x\n",
11139 						cpup->eq, (uint32_t)rc);
11140 				goto out_destroy;
11141 			}
11142 
11143 			/* Save the EQ for that vector in the hba_eq_hdl */
11144 			phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11145 				qp[cpup->hdwq].hba_eq;
11146 
11147 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11148 					"2584 HBA EQ setup: queue[%d]-id=%d\n",
11149 					cpup->eq,
11150 					qp[cpup->hdwq].hba_eq->queue_id);
11151 		}
11152 	}
11153 
11154 	/* Loop thru all Hardware Queues */
11155 	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11156 		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11157 		cpup = &phba->sli4_hba.cpu_map[cpu];
11158 
11159 		/* Create the CQ/WQ corresponding to the Hardware Queue */
11160 		rc = lpfc_create_wq_cq(phba,
11161 				       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11162 				       qp[qidx].io_cq,
11163 				       qp[qidx].io_wq,
11164 				       &phba->sli4_hba.hdwq[qidx].io_cq_map,
11165 				       qidx,
11166 				       LPFC_IO);
11167 		if (rc) {
11168 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11169 					"0535 Failed to setup fastpath "
11170 					"IO WQ/CQ (%d), rc = 0x%x\n",
11171 					qidx, (uint32_t)rc);
11172 			goto out_destroy;
11173 		}
11174 	}
11175 
11176 	/*
11177 	 * Set up Slow Path Complete Queues (CQs)
11178 	 */
11179 
11180 	/* Set up slow-path MBOX CQ/MQ */
11181 
11182 	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11183 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11184 				"0528 %s not allocated\n",
11185 				phba->sli4_hba.mbx_cq ?
11186 				"Mailbox WQ" : "Mailbox CQ");
11187 		rc = -ENOMEM;
11188 		goto out_destroy;
11189 	}
11190 
11191 	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11192 			       phba->sli4_hba.mbx_cq,
11193 			       phba->sli4_hba.mbx_wq,
11194 			       NULL, 0, LPFC_MBOX);
11195 	if (rc) {
11196 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11197 			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11198 			(uint32_t)rc);
11199 		goto out_destroy;
11200 	}
11201 	if (phba->nvmet_support) {
11202 		if (!phba->sli4_hba.nvmet_cqset) {
11203 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11204 					"3165 Fast-path NVME CQ Set "
11205 					"array not allocated\n");
11206 			rc = -ENOMEM;
11207 			goto out_destroy;
11208 		}
11209 		if (phba->cfg_nvmet_mrq > 1) {
11210 			rc = lpfc_cq_create_set(phba,
11211 					phba->sli4_hba.nvmet_cqset,
11212 					qp,
11213 					LPFC_WCQ, LPFC_NVMET);
11214 			if (rc) {
11215 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11216 						"3164 Failed setup of NVME CQ "
11217 						"Set, rc = 0x%x\n",
11218 						(uint32_t)rc);
11219 				goto out_destroy;
11220 			}
11221 		} else {
11222 			/* Set up NVMET Receive Complete Queue */
11223 			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11224 					    qp[0].hba_eq,
11225 					    LPFC_WCQ, LPFC_NVMET);
11226 			if (rc) {
11227 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11228 						"6089 Failed setup NVMET CQ: "
11229 						"rc = 0x%x\n", (uint32_t)rc);
11230 				goto out_destroy;
11231 			}
11232 			phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11233 
11234 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11235 					"6090 NVMET CQ setup: cq-id=%d, "
11236 					"parent eq-id=%d\n",
11237 					phba->sli4_hba.nvmet_cqset[0]->queue_id,
11238 					qp[0].hba_eq->queue_id);
11239 		}
11240 	}
11241 
11242 	/* Set up slow-path ELS WQ/CQ */
11243 	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11244 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11245 				"0530 ELS %s not allocated\n",
11246 				phba->sli4_hba.els_cq ? "WQ" : "CQ");
11247 		rc = -ENOMEM;
11248 		goto out_destroy;
11249 	}
11250 	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11251 			       phba->sli4_hba.els_cq,
11252 			       phba->sli4_hba.els_wq,
11253 			       NULL, 0, LPFC_ELS);
11254 	if (rc) {
11255 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11256 				"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11257 				(uint32_t)rc);
11258 		goto out_destroy;
11259 	}
11260 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11261 			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11262 			phba->sli4_hba.els_wq->queue_id,
11263 			phba->sli4_hba.els_cq->queue_id);
11264 
11265 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11266 		/* Set up NVME LS Complete Queue */
11267 		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11268 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11269 					"6091 LS %s not allocated\n",
11270 					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11271 			rc = -ENOMEM;
11272 			goto out_destroy;
11273 		}
11274 		rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11275 				       phba->sli4_hba.nvmels_cq,
11276 				       phba->sli4_hba.nvmels_wq,
11277 				       NULL, 0, LPFC_NVME_LS);
11278 		if (rc) {
11279 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11280 					"0526 Failed setup of NVME LS WQ/CQ: "
11281 					"rc = 0x%x\n", (uint32_t)rc);
11282 			goto out_destroy;
11283 		}
11284 
11285 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11286 				"6096 NVME LS WQ setup: wq-id=%d, "
11287 				"parent cq-id=%d\n",
11288 				phba->sli4_hba.nvmels_wq->queue_id,
11289 				phba->sli4_hba.nvmels_cq->queue_id);
11290 	}
11291 
11292 	/*
11293 	 * Create NVMET Receive Queue (RQ)
11294 	 */
11295 	if (phba->nvmet_support) {
11296 		if ((!phba->sli4_hba.nvmet_cqset) ||
11297 		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
11298 		    (!phba->sli4_hba.nvmet_mrq_data)) {
11299 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11300 					"6130 MRQ CQ Queues not "
11301 					"allocated\n");
11302 			rc = -ENOMEM;
11303 			goto out_destroy;
11304 		}
11305 		if (phba->cfg_nvmet_mrq > 1) {
11306 			rc = lpfc_mrq_create(phba,
11307 					     phba->sli4_hba.nvmet_mrq_hdr,
11308 					     phba->sli4_hba.nvmet_mrq_data,
11309 					     phba->sli4_hba.nvmet_cqset,
11310 					     LPFC_NVMET);
11311 			if (rc) {
11312 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11313 						"6098 Failed setup of NVMET "
11314 						"MRQ: rc = 0x%x\n",
11315 						(uint32_t)rc);
11316 				goto out_destroy;
11317 			}
11318 
11319 		} else {
11320 			rc = lpfc_rq_create(phba,
11321 					    phba->sli4_hba.nvmet_mrq_hdr[0],
11322 					    phba->sli4_hba.nvmet_mrq_data[0],
11323 					    phba->sli4_hba.nvmet_cqset[0],
11324 					    LPFC_NVMET);
11325 			if (rc) {
11326 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11327 						"6057 Failed setup of NVMET "
11328 						"Receive Queue: rc = 0x%x\n",
11329 						(uint32_t)rc);
11330 				goto out_destroy;
11331 			}
11332 
11333 			lpfc_printf_log(
11334 				phba, KERN_INFO, LOG_INIT,
11335 				"6099 NVMET RQ setup: hdr-rq-id=%d, "
11336 				"dat-rq-id=%d parent cq-id=%d\n",
11337 				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11338 				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11339 				phba->sli4_hba.nvmet_cqset[0]->queue_id);
11340 
11341 		}
11342 	}
11343 
11344 	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11345 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11346 				"0540 Receive Queue not allocated\n");
11347 		rc = -ENOMEM;
11348 		goto out_destroy;
11349 	}
11350 
11351 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11352 			    phba->sli4_hba.els_cq, LPFC_USOL);
11353 	if (rc) {
11354 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11355 				"0541 Failed setup of Receive Queue: "
11356 				"rc = 0x%x\n", (uint32_t)rc);
11357 		goto out_destroy;
11358 	}
11359 
11360 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11361 			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11362 			"parent cq-id=%d\n",
11363 			phba->sli4_hba.hdr_rq->queue_id,
11364 			phba->sli4_hba.dat_rq->queue_id,
11365 			phba->sli4_hba.els_cq->queue_id);
11366 
11367 	if (phba->cfg_fcp_imax)
11368 		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11369 	else
11370 		usdelay = 0;
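
	/*
	 * Worked example (cfg_fcp_imax value assumed for illustration,
	 * LPFC_SEC_TO_USEC taken as 1000000): cfg_fcp_imax = 50000
	 * interrupts/sec gives usdelay = 1000000 / 50000 = 20 us of
	 * EQ coalescing delay per vector.
	 */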
11371 
11372 	for (qidx = 0; qidx < phba->cfg_irq_chann;
11373 	     qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11374 		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11375 					 usdelay);
11376 
11377 	if (phba->sli4_hba.cq_max) {
11378 		kfree(phba->sli4_hba.cq_lookup);
11379 		phba->sli4_hba.cq_lookup = kzalloc_objs(struct lpfc_queue *,
11380 							(phba->sli4_hba.cq_max + 1));
11381 		if (!phba->sli4_hba.cq_lookup) {
11382 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11383 					"0549 Failed setup of CQ Lookup table: "
11384 					"size 0x%x\n", phba->sli4_hba.cq_max);
11385 			rc = -ENOMEM;
11386 			goto out_destroy;
11387 		}
11388 		lpfc_setup_cq_lookup(phba);
11389 	}
11390 	return 0;
11391 
11392 out_destroy:
11393 	lpfc_sli4_queue_unset(phba);
11394 out_error:
11395 	return rc;
11396 }
11397 
11398 /**
11399  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11400  * @phba: pointer to lpfc hba data structure.
11401  *
11402  * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11403  * operation.
11409  **/
11410 void
11411 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11412 {
11413 	struct lpfc_sli4_hdw_queue *qp;
11414 	struct lpfc_queue *eq;
11415 	int qidx;
11416 
11417 	/* Unset mailbox command work queue */
11418 	if (phba->sli4_hba.mbx_wq)
11419 		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11420 
11421 	/* Unset NVME LS work queue */
11422 	if (phba->sli4_hba.nvmels_wq)
11423 		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11424 
11425 	/* Unset ELS work queue */
11426 	if (phba->sli4_hba.els_wq)
11427 		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11428 
11429 	/* Unset unsolicited receive queue */
11430 	if (phba->sli4_hba.hdr_rq)
11431 		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11432 				phba->sli4_hba.dat_rq);
11433 
11434 	/* Unset mailbox command complete queue */
11435 	if (phba->sli4_hba.mbx_cq)
11436 		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11437 
11438 	/* Unset ELS complete queue */
11439 	if (phba->sli4_hba.els_cq)
11440 		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11441 
11442 	/* Unset NVME LS complete queue */
11443 	if (phba->sli4_hba.nvmels_cq)
11444 		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11445 
11446 	if (phba->nvmet_support) {
11447 		/* Unset NVMET MRQ queue */
11448 		if (phba->sli4_hba.nvmet_mrq_hdr) {
11449 			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11450 				lpfc_rq_destroy(
11451 					phba,
11452 					phba->sli4_hba.nvmet_mrq_hdr[qidx],
11453 					phba->sli4_hba.nvmet_mrq_data[qidx]);
11454 		}
11455 
11456 		/* Unset NVMET CQ Set complete queue */
11457 		if (phba->sli4_hba.nvmet_cqset) {
11458 			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11459 				lpfc_cq_destroy(
11460 					phba, phba->sli4_hba.nvmet_cqset[qidx]);
11461 		}
11462 	}
11463 
11464 	/* Unset fast-path SLI4 queues */
11465 	if (phba->sli4_hba.hdwq) {
11466 		/* Loop thru all Hardware Queues */
11467 		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11468 			/* Destroy the CQ/WQ corresponding to Hardware Queue */
11469 			qp = &phba->sli4_hba.hdwq[qidx];
11470 			lpfc_wq_destroy(phba, qp->io_wq);
11471 			lpfc_cq_destroy(phba, qp->io_cq);
11472 		}
11473 		/* Loop thru all IRQ vectors */
11474 		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11475 			/* Destroy the EQ corresponding to the IRQ vector */
11476 			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11477 			lpfc_eq_destroy(phba, eq);
11478 		}
11479 	}
11480 
11481 	kfree(phba->sli4_hba.cq_lookup);
11482 	phba->sli4_hba.cq_lookup = NULL;
11483 	phba->sli4_hba.cq_max = 0;
11484 }
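
/*
 * Teardown sketch: the destroys above run child queues before their
 * parents (WQs/RQs ahead of the CQs they post to, CQs ahead of their
 * EQs), mirroring the create order of the setup routine above. A
 * hypothetical caller pairing the two (function names here are
 * illustrative, not from this driver):
 *
 *	rc = setup_port_queues(phba);		// e.g. the setup path above
 *	if (rc)
 *		return rc;			// setup unwinds on failure
 *	rc = later_init_step(phba);		// hypothetical next step
 *	if (rc)
 *		lpfc_sli4_queue_unset(phba);	// full queue unwind
 */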
11485 
11486 /**
11487  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11488  * @phba: pointer to lpfc hba data structure.
11489  *
11490  * This routine is invoked to allocate and set up a pool of completion queue
11491  * events. The body of the completion queue event is a completion queue entry
11492  * (CQE). For now, this pool is used for the interrupt service routine to queue
11493  * the following HBA completion queue events for the worker thread to process:
11494  *   - Mailbox asynchronous events
11495  *   - Receive queue completion unsolicited events
11496  * Later, this can be used for all the slow-path events.
11497  *
11498  * Return codes
11499  *      0 - successful
11500  *      -ENOMEM - No available memory
11501  **/
11502 static int
11503 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11504 {
11505 	struct lpfc_cq_event *cq_event;
11506 	int i;
11507 
11508 	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11509 		cq_event = kmalloc_obj(struct lpfc_cq_event);
11510 		if (!cq_event)
11511 			goto out_pool_create_fail;
11512 		list_add_tail(&cq_event->list,
11513 			      &phba->sli4_hba.sp_cqe_event_pool);
11514 	}
11515 	return 0;
11516 
11517 out_pool_create_fail:
11518 	lpfc_sli4_cq_event_pool_destroy(phba);
11519 	return -ENOMEM;
11520 }
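
/*
 * Sizing example (cq_ecount value assumed for illustration): the loop
 * above allocates 4 * cq_ecount events, so a CQ entry count of 256
 * would pre-allocate 1024 slow-path events.
 */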
11521 
11522 /**
11523  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11524  * @phba: pointer to lpfc hba data structure.
11525  *
11526  * This routine is invoked to free the pool of completion queue events at
11527  * driver unload time. Note that it is the responsibility of the driver
11528  * cleanup routine to free all the outstanding completion-queue events
11529  * allocated from this pool back into the pool before invoking this routine
11530  * to destroy the pool.
11531  **/
11532 static void
11533 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11534 {
11535 	struct lpfc_cq_event *cq_event, *next_cq_event;
11536 
11537 	list_for_each_entry_safe(cq_event, next_cq_event,
11538 				 &phba->sli4_hba.sp_cqe_event_pool, list) {
11539 		list_del(&cq_event->list);
11540 		kfree(cq_event);
11541 	}
11542 }
11543 
11544 /**
11545  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11546  * @phba: pointer to lpfc hba data structure.
11547  *
11548  * This routine is the lock free version of the API invoked to allocate a
11549  * completion-queue event from the free pool.
11550  *
11551  * Return: Pointer to the newly allocated completion-queue event if successful
11552  *         NULL otherwise.
11553  **/
11554 struct lpfc_cq_event *
11555 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11556 {
11557 	struct lpfc_cq_event *cq_event = NULL;
11558 
11559 	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11560 			 struct lpfc_cq_event, list);
11561 	return cq_event;
11562 }
11563 
11564 /**
11565  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11566  * @phba: pointer to lpfc hba data structure.
11567  *
11568  * This routine is the locked version of the API invoked to allocate a
11569  * completion-queue event from the free pool.
11570  *
11571  * Return: Pointer to the newly allocated completion-queue event if successful
11572  *         NULL otherwise.
11573  **/
11574 struct lpfc_cq_event *
11575 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11576 {
11577 	struct lpfc_cq_event *cq_event;
11578 	unsigned long iflags;
11579 
11580 	spin_lock_irqsave(&phba->hbalock, iflags);
11581 	cq_event = __lpfc_sli4_cq_event_alloc(phba);
11582 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11583 	return cq_event;
11584 }
11585 
11586 /**
11587  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11588  * @phba: pointer to lpfc hba data structure.
11589  * @cq_event: pointer to the completion queue event to be freed.
11590  *
11591  * This routine is the lock free version of the API invoked to release a
11592  * completion-queue event back into the free pool.
11593  **/
11594 void
11595 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11596 			     struct lpfc_cq_event *cq_event)
11597 {
11598 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11599 }
11600 
11601 /**
11602  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11603  * @phba: pointer to lpfc hba data structure.
11604  * @cq_event: pointer to the completion queue event to be freed.
11605  *
11606  * This routine is the locked version of the API invoked to release a
11607  * completion-queue event back into the free pool.
11608  **/
11609 void
11610 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11611 			   struct lpfc_cq_event *cq_event)
11612 {
11613 	unsigned long iflags;
11614 	spin_lock_irqsave(&phba->hbalock, iflags);
11615 	__lpfc_sli4_cq_event_release(phba, cq_event);
11616 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11617 }
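
/*
 * Usage sketch (assumed flow, pieced together from the comments above,
 * not a verbatim caller): the interrupt path draws an event from the
 * pool and hands it to the worker thread, which returns it when done:
 *
 *	struct lpfc_cq_event *evt;
 *
 *	evt = lpfc_sli4_cq_event_alloc(phba);	// ISR side; may be NULL
 *	if (evt) {
 *		// copy the CQE payload into evt, queue for the worker
 *	}
 *	...
 *	lpfc_sli4_cq_event_release(phba, evt);	// worker side, when done
 */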
11618 
11619 /**
11620  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11621  * @phba: pointer to lpfc hba data structure.
11622  *
11623  * This routine frees all the pending completion-queue events back into
11624  * the free pool for device reset.
11625  **/
11626 static void
11627 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11628 {
11629 	LIST_HEAD(cq_event_list);
11630 	struct lpfc_cq_event *cq_event;
11631 	unsigned long iflags;
11632 
11633 	/* Retrieve all the pending WCQEs from pending WCQE lists */
11634 
11635 	/* Pending ELS XRI abort events */
11636 	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11637 	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11638 			 &cq_event_list);
11639 	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11640 
11641 	/* Pending async events */
11642 	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11643 	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11644 			 &cq_event_list);
11645 	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11646 
11647 	while (!list_empty(&cq_event_list)) {
11648 		list_remove_head(&cq_event_list, cq_event,
11649 				 struct lpfc_cq_event, list);
11650 		lpfc_sli4_cq_event_release(phba, cq_event);
11651 	}
11652 }
11653 
11654 /**
11655  * lpfc_pci_function_reset - Reset pci function.
11656  * @phba: pointer to lpfc hba data structure.
11657  *
11658  * This routine is invoked to request a PCI function reset. It destroys
11659  * all resources assigned to the PCI function that originates this request.
11660  *
11661  * Return codes
11662  *      0 - successful
11663  *      -ENOMEM - No available memory
11664  *      -EIO - The mailbox failed to complete successfully.
11665  **/
11666 int
11667 lpfc_pci_function_reset(struct lpfc_hba *phba)
11668 {
11669 	LPFC_MBOXQ_t *mboxq;
11670 	uint32_t rc = 0, if_type;
11671 	uint32_t shdr_status, shdr_add_status;
11672 	uint32_t rdy_chk;
11673 	uint32_t port_reset = 0;
11674 	union lpfc_sli4_cfg_shdr *shdr;
11675 	struct lpfc_register reg_data;
11676 	uint16_t devid;
11677 
11678 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11679 	switch (if_type) {
11680 	case LPFC_SLI_INTF_IF_TYPE_0:
11681 		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11682 						       GFP_KERNEL);
11683 		if (!mboxq) {
11684 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11685 					"0494 Unable to allocate memory for "
11686 					"issuing SLI_FUNCTION_RESET mailbox "
11687 					"command\n");
11688 			return -ENOMEM;
11689 		}
11690 
11691 		/* Setup PCI function reset mailbox-ioctl command */
11692 		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11693 				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11694 				 LPFC_SLI4_MBX_EMBED);
11695 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11696 		shdr = (union lpfc_sli4_cfg_shdr *)
11697 			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11698 		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11699 		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11700 					 &shdr->response);
11701 		mempool_free(mboxq, phba->mbox_mem_pool);
11702 		if (shdr_status || shdr_add_status || rc) {
11703 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11704 					"0495 SLI_FUNCTION_RESET mailbox "
11705 					"failed with status x%x add_status x%x,"
11706 					" mbx status x%x\n",
11707 					shdr_status, shdr_add_status, rc);
11708 			rc = -ENXIO;
11709 		}
11710 		break;
11711 	case LPFC_SLI_INTF_IF_TYPE_2:
11712 	case LPFC_SLI_INTF_IF_TYPE_6:
11713 wait:
11714 		/*
11715 		 * Poll the Port Status Register and wait for RDY for
11716 		 * up to 30 seconds. If the port doesn't respond, treat
11717 		 * it as an error.
11718 		 */
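		/* 1500 polls with a 20 ms sleep between reads gives the 30s bound */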
11719 		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11720 			if (lpfc_readl(phba->sli4_hba.u.if_type2.
11721 				STATUSregaddr, &reg_data.word0)) {
11722 				rc = -ENODEV;
11723 				goto out;
11724 			}
11725 			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11726 				break;
11727 			msleep(20);
11728 		}
11729 
11730 		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11731 			phba->work_status[0] = readl(
11732 				phba->sli4_hba.u.if_type2.ERR1regaddr);
11733 			phba->work_status[1] = readl(
11734 				phba->sli4_hba.u.if_type2.ERR2regaddr);
11735 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11736 					"2890 Port not ready, port status reg "
11737 					"0x%x error 1=0x%x, error 2=0x%x\n",
11738 					reg_data.word0,
11739 					phba->work_status[0],
11740 					phba->work_status[1]);
11741 			rc = -ENODEV;
11742 			goto out;
11743 		}
11744 
11745 		if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11746 			lpfc_pldv_detect = true;
11747 
11748 		if (!port_reset) {
11749 			/*
11750 			 * Reset the port now
11751 			 */
11752 			reg_data.word0 = 0;
11753 			bf_set(lpfc_sliport_ctrl_end, &reg_data,
11754 			       LPFC_SLIPORT_LITTLE_ENDIAN);
11755 			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11756 			       LPFC_SLIPORT_INIT_PORT);
11757 			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11758 			       CTRLregaddr);
11759 			/* flush */
11760 			pci_read_config_word(phba->pcidev,
11761 					     PCI_DEVICE_ID, &devid);
11762 
11763 			port_reset = 1;
11764 			msleep(20);
11765 			goto wait;
11766 		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11767 			rc = -ENODEV;
11768 			goto out;
11769 		}
11770 		break;
11771 
11772 	case LPFC_SLI_INTF_IF_TYPE_1:
11773 	default:
11774 		break;
11775 	}
11776 
11777 out:
11778 	/* Catch the not-ready port failure after a port reset. */
11779 	if (rc) {
11780 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11781 				"3317 HBA not functional: IP Reset Failed "
11782 				"try: echo fw_reset > board_mode\n");
11783 		rc = -ENODEV;
11784 	}
11785 
11786 	return rc;
11787 }
11788 
11789 /**
11790  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11791  * @phba: pointer to lpfc hba data structure.
11792  *
11793  * This routine is invoked to set up the PCI device memory space for device
11794  * with SLI-4 interface spec.
11795  *
11796  * Return codes
11797  * 	0 - successful
11798  * 	other values - error
11799  **/
11800 static int
11801 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11802 {
11803 	struct pci_dev *pdev = phba->pcidev;
11804 	unsigned long bar0map_len, bar1map_len, bar2map_len;
11805 	int error;
11806 	uint32_t if_type;
11807 	u8 sli_family;
11808 
11809 	if (!pdev)
11810 		return -ENODEV;
11811 
11812 	/* Set the device DMA mask size */
11813 	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11814 	if (error)
11815 		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11816 	if (error)
11817 		return error;
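
	/*
	 * Example: on a platform that cannot address 64 bits of DMA the
	 * first call above fails and both the streaming and coherent
	 * masks fall back to 32 bits (a 4 GB addressing limit).
	 */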
11818 
11819 	/*
11820 	 * The BARs and register set definitions and offset locations are
11821 	 * dependent on the if_type.
11822 	 */
11823 	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11824 				  &phba->sli4_hba.sli_intf.word0)) {
11825 		return -ENODEV;
11826 	}
11827 
11828 	/* There is no SLI3 fallback for SLI4 devices. */
11829 	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11830 	    LPFC_SLI_INTF_VALID) {
11831 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11832 				"2894 SLI_INTF reg contents invalid "
11833 				"sli_intf reg 0x%x\n",
11834 				phba->sli4_hba.sli_intf.word0);
11835 		return -ENODEV;
11836 	}
11837 
11838 	/* Check if ASIC_ID register should be read */
11839 	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
11840 	if (sli_family == LPFC_SLI_INTF_ASIC_ID) {
11841 		if (pci_read_config_dword(pdev, LPFC_ASIC_ID_OFFSET,
11842 					  &phba->sli4_hba.asic_id.word0))
11843 			return -ENODEV;
11844 	}
11845 
11846 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11847 	/*
11848 	 * Get the bus address of SLI4 device Bar regions and the
11849 	 * number of bytes required by each mapping. The mapping of the
11850 	 * particular PCI BARs regions is dependent on the type of
11851 	 * SLI4 device.
11852 	 */
11853 	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11854 		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11855 		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11856 
11857 		/*
11858 		 * Map SLI4 PCI Config Space Register base to a kernel virtual
11859 		 * addr
11860 		 */
11861 		phba->sli4_hba.conf_regs_memmap_p =
11862 			ioremap(phba->pci_bar0_map, bar0map_len);
11863 		if (!phba->sli4_hba.conf_regs_memmap_p) {
11864 			dev_printk(KERN_ERR, &pdev->dev,
11865 				   "ioremap failed for SLI4 PCI config "
11866 				   "registers.\n");
11867 			return -ENODEV;
11868 		}
11869 		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11870 		/* Set up BAR0 PCI config space register memory map */
11871 		lpfc_sli4_bar0_register_memmap(phba, if_type);
11872 	} else {
11873 		phba->pci_bar0_map = pci_resource_start(pdev, 1);
11874 		bar0map_len = pci_resource_len(pdev, 1);
11875 		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11876 			dev_printk(KERN_ERR, &pdev->dev,
11877 			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11878 			return -ENODEV;
11879 		}
11880 		phba->sli4_hba.conf_regs_memmap_p =
11881 				ioremap(phba->pci_bar0_map, bar0map_len);
11882 		if (!phba->sli4_hba.conf_regs_memmap_p) {
11883 			dev_printk(KERN_ERR, &pdev->dev,
11884 				"ioremap failed for SLI4 PCI config "
11885 				"registers.\n");
11886 			return -ENODEV;
11887 		}
11888 		lpfc_sli4_bar0_register_memmap(phba, if_type);
11889 	}
11890 
11891 	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11892 		if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11893 			/*
11894 			 * Map SLI4 if type 0 HBA Control Register base to a
11895 			 * kernel virtual address and setup the registers.
11896 			 */
11897 			phba->pci_bar1_map = pci_resource_start(pdev,
11898 								PCI_64BIT_BAR2);
11899 			bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11900 			phba->sli4_hba.ctrl_regs_memmap_p =
11901 					ioremap(phba->pci_bar1_map,
11902 						bar1map_len);
11903 			if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11904 				dev_err(&pdev->dev,
11905 					   "ioremap failed for SLI4 HBA "
11906 					    "control registers.\n");
11907 				error = -ENOMEM;
11908 				goto out_iounmap_conf;
11909 			}
11910 			phba->pci_bar2_memmap_p =
11911 					 phba->sli4_hba.ctrl_regs_memmap_p;
11912 			lpfc_sli4_bar1_register_memmap(phba, if_type);
11913 		} else {
11914 			error = -ENOMEM;
11915 			goto out_iounmap_conf;
11916 		}
11917 	}
11918 
11919 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11920 	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11921 		/*
11922 		 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11923 		 * virtual address and setup the registers.
11924 		 */
11925 		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11926 		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11927 		phba->sli4_hba.drbl_regs_memmap_p =
11928 				ioremap(phba->pci_bar1_map, bar1map_len);
11929 		if (!phba->sli4_hba.drbl_regs_memmap_p) {
11930 			dev_err(&pdev->dev,
11931 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
11932 			error = -ENOMEM;
11933 			goto out_iounmap_conf;
11934 		}
11935 		phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11936 		lpfc_sli4_bar1_register_memmap(phba, if_type);
11937 	}
11938 
11939 	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11940 		if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11941 			/*
11942 			 * Map SLI4 if type 0 HBA Doorbell Register base to
11943 			 * a kernel virtual address and setup the registers.
11944 			 */
11945 			phba->pci_bar2_map = pci_resource_start(pdev,
11946 								PCI_64BIT_BAR4);
11947 			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11948 			phba->sli4_hba.drbl_regs_memmap_p =
11949 					ioremap(phba->pci_bar2_map,
11950 						bar2map_len);
11951 			if (!phba->sli4_hba.drbl_regs_memmap_p) {
11952 				dev_err(&pdev->dev,
11953 					   "ioremap failed for SLI4 HBA"
11954 					   " doorbell registers.\n");
11955 				error = -ENOMEM;
11956 				goto out_iounmap_ctrl;
11957 			}
11958 			phba->pci_bar4_memmap_p =
11959 					phba->sli4_hba.drbl_regs_memmap_p;
11960 			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11961 			if (error)
11962 				goto out_iounmap_all;
11963 		} else {
11964 			error = -ENOMEM;
11965 			goto out_iounmap_ctrl;
11966 		}
11967 	}
11968 
11969 	if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11970 	    pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11971 		/*
11972 		 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11973 		 * virtual address and setup the registers.
11974 		 */
11975 		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11976 		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11977 		phba->sli4_hba.dpp_regs_memmap_p =
11978 				ioremap(phba->pci_bar2_map, bar2map_len);
11979 		if (!phba->sli4_hba.dpp_regs_memmap_p) {
11980 			dev_err(&pdev->dev,
11981 			   "ioremap failed for SLI4 HBA dpp registers.\n");
11982 			error = -ENOMEM;
11983 			goto out_iounmap_all;
11984 		}
11985 		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11986 	}
11987 
11988 	/* Set up the EQ/CQ register handling functions now */
11989 	switch (if_type) {
11990 	case LPFC_SLI_INTF_IF_TYPE_0:
11991 	case LPFC_SLI_INTF_IF_TYPE_2:
11992 		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11993 		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11994 		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
11995 		break;
11996 	case LPFC_SLI_INTF_IF_TYPE_6:
11997 		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
11998 		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
11999 		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12000 		break;
12001 	default:
12002 		break;
12003 	}
12004 
12005 	return 0;
12006 
12007 out_iounmap_all:
12008 	if (phba->sli4_hba.drbl_regs_memmap_p)
12009 		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12010 out_iounmap_ctrl:
12011 	if (phba->sli4_hba.ctrl_regs_memmap_p)
12012 		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12013 out_iounmap_conf:
12014 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
12015 
12016 	return error;
12017 }
12018 
12019 /**
12020  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
12021  * @phba: pointer to lpfc hba data structure.
12022  *
12023  * This routine is invoked to unset the PCI device memory space for device
12024  * with SLI-4 interface spec.
12025  **/
12026 static void
12027 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12028 {
12029 	uint32_t if_type;
12030 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12031 
12032 	switch (if_type) {
12033 	case LPFC_SLI_INTF_IF_TYPE_0:
12034 		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12035 		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12036 		iounmap(phba->sli4_hba.conf_regs_memmap_p);
12037 		break;
12038 	case LPFC_SLI_INTF_IF_TYPE_2:
12039 		iounmap(phba->sli4_hba.conf_regs_memmap_p);
12040 		break;
12041 	case LPFC_SLI_INTF_IF_TYPE_6:
12042 		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12043 		iounmap(phba->sli4_hba.conf_regs_memmap_p);
12044 		if (phba->sli4_hba.dpp_regs_memmap_p)
12045 			iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12046 		if (phba->sli4_hba.dpp_regs_memmap_wc_p)
12047 			iounmap(phba->sli4_hba.dpp_regs_memmap_wc_p);
12048 		break;
12049 	case LPFC_SLI_INTF_IF_TYPE_1:
12050 		break;
12051 	default:
12052 		dev_printk(KERN_ERR, &phba->pcidev->dev,
12053 			   "FATAL - unsupported SLI4 interface type - %d\n",
12054 			   if_type);
12055 		break;
12056 	}
12057 }
12058 
12059 /**
12060  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
12061  * @phba: pointer to lpfc hba data structure.
12062  *
12063  * This routine is invoked to enable the MSI-X interrupt vectors to device
12064  * with SLI-3 interface specs.
12065  *
12066  * Return codes
12067  *   0 - successful
12068  *   other values - error
12069  **/
12070 static int
12071 lpfc_sli_enable_msix(struct lpfc_hba *phba)
12072 {
12073 	int rc;
12074 	LPFC_MBOXQ_t *pmb;
12075 
12076 	/* Set up MSI-X multi-message vectors */
12077 	rc = pci_alloc_irq_vectors(phba->pcidev,
12078 			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12079 	if (rc < 0) {
12080 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12081 				"0420 PCI enable MSI-X failed (%d)\n", rc);
12082 		goto vec_fail_out;
12083 	}
12084 
12085 	/*
12086 	 * Assign MSI-X vectors to interrupt handlers
12087 	 */
12088 
12089 	/* vector-0 is associated to slow-path handler */
12090 	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12091 			 &lpfc_sli_sp_intr_handler, 0,
12092 			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
12093 	if (rc) {
12094 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12095 				"0421 MSI-X slow-path request_irq failed "
12096 				"(%d)\n", rc);
12097 		goto msi_fail_out;
12098 	}
12099 
12100 	/* vector-1 is associated to fast-path handler */
12101 	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12102 			 &lpfc_sli_fp_intr_handler, 0,
12103 			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12104 
12105 	if (rc) {
12106 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12107 				"0429 MSI-X fast-path request_irq failed "
12108 				"(%d)\n", rc);
12109 		goto irq_fail_out;
12110 	}
12111 
12112 	/*
12113 	 * Configure HBA MSI-X attention conditions to messages
12114 	 */
12115 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12116 
12117 	if (!pmb) {
12118 		rc = -ENOMEM;
12119 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12120 				"0474 Unable to allocate memory for issuing "
12121 				"MBOX_CONFIG_MSI command\n");
12122 		goto mem_fail_out;
12123 	}
12124 	rc = lpfc_config_msi(phba, pmb);
12125 	if (rc)
12126 		goto mbx_fail_out;
12127 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12128 	if (rc != MBX_SUCCESS) {
12129 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12130 				"0351 Config MSI mailbox command failed, "
12131 				"mbxCmd x%x, mbxStatus x%x\n",
12132 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12133 		goto mbx_fail_out;
12134 	}
12135 
12136 	/* Free memory allocated for mailbox command */
12137 	mempool_free(pmb, phba->mbox_mem_pool);
12138 	return rc;
12139 
12140 mbx_fail_out:
12141 	/* Free memory allocated for mailbox command */
12142 	mempool_free(pmb, phba->mbox_mem_pool);
12143 
12144 mem_fail_out:
12145 	/* free the irq already requested */
12146 	free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12147 
12148 irq_fail_out:
12149 	/* free the irq already requested */
12150 	free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12151 
12152 msi_fail_out:
12153 	/* Unconfigure MSI-X capability structure */
12154 	pci_free_irq_vectors(phba->pcidev);
12155 
12156 vec_fail_out:
12157 	return rc;
12158 }
12159 
12160 /**
12161  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12162  * @phba: pointer to lpfc hba data structure.
12163  *
12164  * This routine is invoked to enable the MSI interrupt mode to device with
12165  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12166  * enable the MSI vector. The device driver is responsible for calling the
12167  * enable the MSI vector. The device driver is responsible for calling
12168  * request_irq() to register the MSI vector with an interrupt handler, which
12169  *
12170  * Return codes
12171  * 	0 - successful
12172  * 	other values - error
12173  */
12174 static int
12175 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12176 {
12177 	int rc;
12178 
12179 	rc = pci_enable_msi(phba->pcidev);
12180 	if (!rc)
12181 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12182 				"0012 PCI enable MSI mode success.\n");
12183 	else {
12184 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12185 				"0471 PCI enable MSI mode failed (%d)\n", rc);
12186 		return rc;
12187 	}
12188 
12189 	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12190 			 0, LPFC_DRIVER_NAME, phba);
12191 	if (rc) {
12192 		pci_disable_msi(phba->pcidev);
12193 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12194 				"0478 MSI request_irq failed (%d)\n", rc);
12195 	}
12196 	return rc;
12197 }
12198 
12199 /**
12200  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12201  * @phba: pointer to lpfc hba data structure.
12202  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12203  *
12204  * This routine is invoked to enable device interrupt and associate driver's
12205  * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
12206  * spec. Depending on the interrupt mode configured for the driver, the driver
12207  * will try to fall back from the configured interrupt mode to an interrupt
12208  * mode which is supported by the platform, kernel, and device in the order
12209  * of:
12210  * MSI-X -> MSI -> IRQ.
12211  *
12212  * Return codes
12213  *   0 - successful
12214  *   other values - error
12215  **/
12216 static uint32_t
12217 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12218 {
12219 	uint32_t intr_mode = LPFC_INTR_ERROR;
12220 	int retval;
12221 
12222 	/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12223 	retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12224 	if (retval)
12225 		return intr_mode;
12226 	clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
12227 
12228 	if (cfg_mode == 2) {
12229 		/* Now, try to enable MSI-X interrupt mode */
12230 		retval = lpfc_sli_enable_msix(phba);
12231 		if (!retval) {
12232 			/* Indicate initialization to MSI-X mode */
12233 			phba->intr_type = MSIX;
12234 			intr_mode = 2;
12235 		}
12236 	}
12237 
12238 	/* Fallback to MSI if MSI-X initialization failed */
12239 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
12240 		retval = lpfc_sli_enable_msi(phba);
12241 		if (!retval) {
12242 			/* Indicate initialization to MSI mode */
12243 			phba->intr_type = MSI;
12244 			intr_mode = 1;
12245 		}
12246 	}
12247 
12248 	/* Fallback to INTx if both MSI-X/MSI initialization failed */
12249 	if (phba->intr_type == NONE) {
12250 		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12251 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12252 		if (!retval) {
12253 			/* Indicate initialization to INTx mode */
12254 			phba->intr_type = INTx;
12255 			intr_mode = 0;
12256 		}
12257 	}
12258 	return intr_mode;
12259 }
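
/*
 * Caller sketch (illustrative; the exact probe-time call site is not
 * shown here): a caller passes the configured mode and checks the mode
 * actually achieved:
 *
 *	uint32_t intr_mode;
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR) {
 *		// no interrupt mode could be enabled; fail bring-up
 *	}
 */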
12260 
12261 /**
12262  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12263  * @phba: pointer to lpfc hba data structure.
12264  *
12265  * This routine is invoked to disable device interrupt and disassociate the
12266  * driver's interrupt handler(s) from interrupt vector(s) to device with
12267  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12268  * release the interrupt vector(s) for the message signaled interrupt.
12269  **/
12270 static void
12271 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12272 {
12273 	int nr_irqs, i;
12274 
12275 	if (phba->intr_type == MSIX)
12276 		nr_irqs = LPFC_MSIX_VECTORS;
12277 	else
12278 		nr_irqs = 1;
12279 
12280 	for (i = 0; i < nr_irqs; i++)
12281 		free_irq(pci_irq_vector(phba->pcidev, i), phba);
12282 	pci_free_irq_vectors(phba->pcidev);
12283 
12284 	/* Reset interrupt management states */
12285 	phba->intr_type = NONE;
12286 	phba->sli.slistat.sli_intr = 0;
12287 }
12288 
12289 /**
12290  * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12291  * @phba: pointer to lpfc hba data structure.
12292  * @id: EQ vector index or Hardware Queue index
12293  * @match: LPFC_FIND_BY_EQ = match by EQ
12294  *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
12295  * Return the CPU that matches the selection criteria
12296  */
12297 static uint16_t
12298 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12299 {
12300 	struct lpfc_vector_map_info *cpup;
12301 	int cpu;
12302 
12303 	/* Loop through all CPUs */
12304 	for_each_present_cpu(cpu) {
12305 		cpup = &phba->sli4_hba.cpu_map[cpu];
12306 
12307 		/* If we are matching by EQ, there may be multiple CPUs using
12308 		 * the same vector, so select the one with
12309 		 * LPFC_CPU_FIRST_IRQ set.
12310 		 */
12311 		if ((match == LPFC_FIND_BY_EQ) &&
12312 		    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12313 		    (cpup->eq == id))
12314 			return cpu;
12315 
12316 		/* If matching by HDWQ, select the first CPU that matches */
12317 		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12318 			return cpu;
12319 	}
12320 	return 0;
12321 }
12322 
12323 #ifdef CONFIG_X86
12324 /**
12325  * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12326  * @phba: pointer to lpfc hba data structure.
12327  * @cpu: CPU map index
12328  * @phys_id: CPU package physical id
12329  * @core_id: CPU core id
12330  */
12331 static int
12332 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12333 		uint16_t phys_id, uint16_t core_id)
12334 {
12335 	struct lpfc_vector_map_info *cpup;
12336 	int idx;
12337 
12338 	for_each_present_cpu(idx) {
12339 		cpup = &phba->sli4_hba.cpu_map[idx];
12340 		/* Does the cpup match the one we are looking for */
12341 		if ((cpup->phys_id == phys_id) &&
12342 		    (cpup->core_id == core_id) &&
12343 		    (cpu != idx))
12344 			return 1;
12345 	}
12346 	return 0;
12347 }
12348 #endif
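
/*
 * Example (illustrative topology): with hyper-threading enabled,
 * logical CPUs 0 and 4 may both report phys_id 0 / core_id 0. When
 * cpu 4 is scanned, lpfc_find_hyper() sees cpu 0 with the same ids
 * and returns 1, so the caller flags cpu 4 as LPFC_CPU_MAP_HYPER.
 */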
12349 
12350 /*
12351  * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12352  * @phba: pointer to lpfc hba data structure.
12353  * @eqidx: index for eq and irq vector
12354  * @flag: flags to set for vector_map structure
12355  * @cpu: cpu used to index vector_map structure
12356  *
12357  * The routine assigns eq info into vector_map structure
12358  */
12359 static inline void
12360 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12361 			unsigned int cpu)
12362 {
12363 	struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12364 	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12365 
12366 	cpup->eq = eqidx;
12367 	cpup->flag |= flag;
12368 
12369 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12370 			"3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12371 			cpu, eqhdl->irq, cpup->eq, cpup->flag);
12372 }
12373 
12374 /**
12375  * lpfc_cpu_map_array_init - Initialize cpu_map structure
12376  * @phba: pointer to lpfc hba data structure.
12377  *
12378  * The routine initializes the cpu_map array structure
12379  */
12380 static void
12381 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12382 {
12383 	struct lpfc_vector_map_info *cpup;
12384 	struct lpfc_eq_intr_info *eqi;
12385 	int cpu;
12386 
12387 	for_each_possible_cpu(cpu) {
12388 		cpup = &phba->sli4_hba.cpu_map[cpu];
12389 		cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12390 		cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12391 		cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12392 		cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12393 		cpup->flag = 0;
12394 		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12395 		INIT_LIST_HEAD(&eqi->list);
12396 		eqi->icnt = 0;
12397 	}
12398 }
12399 
12400 /**
12401  * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12402  * @phba: pointer to lpfc hba data structure.
12403  *
12404  * The routine initializes the hba_eq_hdl array structure
12405  */
12406 static void
12407 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12408 {
12409 	struct lpfc_hba_eq_hdl *eqhdl;
12410 	int i;
12411 
12412 	for (i = 0; i < phba->cfg_irq_chann; i++) {
12413 		eqhdl = lpfc_get_eq_hdl(i);
12414 		eqhdl->irq = LPFC_IRQ_EMPTY;
12415 		eqhdl->phba = phba;
12416 	}
12417 }
12418 
12419 /**
12420  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12421  * @phba: pointer to lpfc hba data structure.
12422  * @vectors: number of msix vectors allocated.
12423  *
12424  * The routine will figure out the CPU affinity assignment for every
12425  * MSI-X vector allocated for the HBA.
12426  * In addition, the CPU to IO channel mapping will be calculated
12427  * and the phba->sli4_hba.cpu_map array will reflect this.
12428  */
12429 static void
12430 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12431 {
12432 	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12433 	int max_phys_id, min_phys_id;
12434 	int max_core_id, min_core_id;
12435 	struct lpfc_vector_map_info *cpup;
12436 	struct lpfc_vector_map_info *new_cpup;
12437 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12438 	struct lpfc_hdwq_stat *c_stat;
12439 #endif
12440 
12441 	max_phys_id = 0;
12442 	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12443 	max_core_id = 0;
12444 	min_core_id = LPFC_VECTOR_MAP_EMPTY;
12445 
12446 	/* Update CPU map with physical id and core id of each CPU */
12447 	for_each_present_cpu(cpu) {
12448 		cpup = &phba->sli4_hba.cpu_map[cpu];
12449 #ifdef CONFIG_X86
12450 		cpup->phys_id = topology_physical_package_id(cpu);
12451 		cpup->core_id = topology_core_id(cpu);
12452 		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12453 			cpup->flag |= LPFC_CPU_MAP_HYPER;
12454 #else
12455 		/* No distinction between CPUs for other platforms */
12456 		cpup->phys_id = 0;
12457 		cpup->core_id = cpu;
12458 #endif
12459 
12460 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12461 				"3328 CPU %d physid %d coreid %d flag x%x\n",
12462 				cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12463 
12464 		if (cpup->phys_id > max_phys_id)
12465 			max_phys_id = cpup->phys_id;
12466 		if (cpup->phys_id < min_phys_id)
12467 			min_phys_id = cpup->phys_id;
12468 
12469 		if (cpup->core_id > max_core_id)
12470 			max_core_id = cpup->core_id;
12471 		if (cpup->core_id < min_core_id)
12472 			min_core_id = cpup->core_id;
12473 	}
12474 
12475 	/* After looking at each irq vector assigned to this pcidev, it's
12476 	 * possible to see that not ALL CPUs have been accounted for.
12477 	 * Next we will set any unassigned (unaffinitized) cpu map
12478 	 * entries to an IRQ on the same phys_id.
12479 	 */
12480 	first_cpu = cpumask_first(cpu_present_mask);
12481 	start_cpu = first_cpu;
12482 
12483 	for_each_present_cpu(cpu) {
12484 		cpup = &phba->sli4_hba.cpu_map[cpu];
12485 
12486 		/* Is this CPU entry unassigned */
12487 		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12488 			/* Mark CPU as IRQ not assigned by the kernel */
12489 			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12490 
12491 			/* If so, find a new_cpup that is on the SAME
12492 			 * phys_id as cpup. start_cpu will start where we
12493 			 * left off so all unassigned entries don't get assigned
12494 			 * the IRQ of the first entry.
12495 			 */
12496 			new_cpu = start_cpu;
12497 			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12498 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12499 				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12500 				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12501 				    (new_cpup->phys_id == cpup->phys_id))
12502 					goto found_same;
12503 				new_cpu = lpfc_next_present_cpu(new_cpu);
12504 			}
12505 			/* At this point, we leave the CPU as unassigned */
12506 			continue;
12507 found_same:
12508 			/* We found a matching phys_id, so copy the IRQ info */
12509 			cpup->eq = new_cpup->eq;
12510 
12511 			/* Bump start_cpu to the next slot to minimize the
12512 			 * chance of having multiple unassigned CPU entries
12513 			 * selecting the same IRQ.
12514 			 */
12515 			start_cpu = lpfc_next_present_cpu(new_cpu);
12516 
12517 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12518 					"3337 Set Affinity: CPU %d "
12519 					"eq %d from peer cpu %d same "
12520 					"phys_id (%d)\n",
12521 					cpu, cpup->eq, new_cpu,
12522 					cpup->phys_id);
12523 		}
12524 	}
12525 
12526 	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
12527 	start_cpu = first_cpu;
12528 
12529 	for_each_present_cpu(cpu) {
12530 		cpup = &phba->sli4_hba.cpu_map[cpu];
12531 
12532 		/* Is this entry unassigned */
12533 		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12534 			/* Mark it as IRQ not assigned by the kernel */
12535 			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12536 
12537 			/* If so, find a new_cpup that's on ANY phys_id
12538 			 * as the cpup. start_cpu will start where we
12539 			 * left off so all unassigned entries don't get
12540 			 * assigned the IRQ of the first entry.
12541 			 */
12542 			new_cpu = start_cpu;
12543 			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12544 				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12545 				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12546 				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12547 					goto found_any;
12548 				new_cpu = lpfc_next_present_cpu(new_cpu);
12549 			}
12550 			/* We should never leave an entry unassigned */
12551 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12552 					"3339 Set Affinity: CPU %d "
12553 					"eq %d UNASSIGNED\n",
12554 					cpu, cpup->eq);
12555 			continue;
12556 found_any:
12557 			/* We found an available entry, copy the IRQ info */
12558 			cpup->eq = new_cpup->eq;
12559 
12560 			/* Bump start_cpu to the next slot to minimize the
12561 			 * chance of having multiple unassigned CPU entries
12562 			 * selecting the same IRQ.
12563 			 */
12564 			start_cpu = lpfc_next_present_cpu(new_cpu);
12565 
12566 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12567 					"3338 Set Affinity: CPU %d "
12568 					"eq %d from peer cpu %d (%d/%d)\n",
12569 					cpu, cpup->eq, new_cpu,
12570 					new_cpup->phys_id, new_cpup->core_id);
12571 		}
12572 	}
12573 
12574 	/* Assign hdwq indices that are unique across all cpus in the map
12575 	 * that are also FIRST_CPUs.
12576 	 */
12577 	idx = 0;
12578 	for_each_present_cpu(cpu) {
12579 		cpup = &phba->sli4_hba.cpu_map[cpu];
12580 
12581 		/* Only FIRST IRQs get a hdwq index assignment. */
12582 		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12583 			continue;
12584 
12585 		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12586 		cpup->hdwq = idx;
12587 		idx++;
12588 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12589 				"3333 Set Affinity: CPU %d (phys %d core %d): "
12590 				"hdwq %d eq %d flg x%x\n",
12591 				cpu, cpup->phys_id, cpup->core_id,
12592 				cpup->hdwq, cpup->eq, cpup->flag);
12593 	}
12594 	/* Associate a hdwq with each cpu_map entry
12595 	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12596 	 * hardware queues than CPUs. For that case we will just round-robin
12597 	 * the available hardware queues as they get assigned to CPUs.
12598 	 * The next_idx is the idx from the FIRST_CPU loop above to account
12599 	 * for irq_chann < hdwq.  The idx is used for round-robin assignments
12600 	 * and needs to start at 0.
12601 	 */
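	/*
	 * Worked example (illustrative counts): with cfg_hdw_queue = 4
	 * and 8 present CPUs of which 4 are FIRST_IRQ, the FIRST_IRQ
	 * CPUs took hdwq 0-3 above; next_idx now equals cfg_hdw_queue,
	 * so the remaining 4 CPUs reuse hdwq 0-3 below, preferring a
	 * donor CPU on the same phys_id/core_id.
	 */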
12602 	next_idx = idx;
12603 	start_cpu = 0;
12604 	idx = 0;
12605 	for_each_present_cpu(cpu) {
12606 		cpup = &phba->sli4_hba.cpu_map[cpu];
12607 
12608 		/* FIRST cpus are already mapped. */
12609 		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12610 			continue;
12611 
12612 		/* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12613 		 * of the unassigned cpus to the next idx so that all
12614 		 * hdw queues are fully utilized.
12615 		 */
12616 		if (next_idx < phba->cfg_hdw_queue) {
12617 			cpup->hdwq = next_idx;
12618 			next_idx++;
12619 			continue;
12620 		}
12621 
12622 		/* Not a First CPU and all hdw_queues are used.  Reuse a
12623 		 * Hardware Queue for another CPU, so be smart about it
12624 		 * and pick one that has its IRQ/EQ mapped to the same phys_id
12625 		 * (CPU package) and core_id.
12626 		 */
12627 		new_cpu = start_cpu;
12628 		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12629 			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12630 			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12631 			    new_cpup->phys_id == cpup->phys_id &&
12632 			    new_cpup->core_id == cpup->core_id) {
12633 				goto found_hdwq;
12634 			}
12635 			new_cpu = lpfc_next_present_cpu(new_cpu);
12636 		}
12637 
12638 		/* If we can't match both phys_id and core_id,
12639 		 * settle for just a phys_id match.
12640 		 */
12641 		new_cpu = start_cpu;
12642 		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12643 			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12644 			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12645 			    new_cpup->phys_id == cpup->phys_id)
12646 				goto found_hdwq;
12647 			new_cpu = lpfc_next_present_cpu(new_cpu);
12648 		}
12649 
12650 		/* Otherwise just round robin on cfg_hdw_queue */
12651 		cpup->hdwq = idx % phba->cfg_hdw_queue;
12652 		idx++;
12653 		goto logit;
12654  found_hdwq:
12655 		/* We found an available entry, copy the IRQ info */
12656 		start_cpu = lpfc_next_present_cpu(new_cpu);
12657 		cpup->hdwq = new_cpup->hdwq;
12658  logit:
12659 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12660 				"3335 Set Affinity: CPU %d (phys %d core %d): "
12661 				"hdwq %d eq %d flg x%x\n",
12662 				cpu, cpup->phys_id, cpup->core_id,
12663 				cpup->hdwq, cpup->eq, cpup->flag);
12664 	}
12665 
12666 	/*
12667 	 * Initialize the cpu_map slots for not-present cpus in case
12668 	 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12669 	 */
12670 	idx = 0;
12671 	for_each_possible_cpu(cpu) {
12672 		cpup = &phba->sli4_hba.cpu_map[cpu];
12673 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12674 		c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12675 		c_stat->hdwq_no = cpup->hdwq;
12676 #endif
12677 		if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12678 			continue;
12679 
12680 		cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12681 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12682 		c_stat->hdwq_no = cpup->hdwq;
12683 #endif
12684 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12685 				"3340 Set Affinity: not present "
12686 				"CPU %d hdwq %d\n",
12687 				cpu, cpup->hdwq);
12688 	}
12689 
12690 	/* The cpu_map array will be used later during initialization
12691 	 * when EQ / CQ / WQs are allocated and configured.
12692 	 */
12693 	return;
12694 }
12695 
12696 /**
12697  * lpfc_cpuhp_get_eq
12698  *
12699  * @phba:   pointer to lpfc hba data structure.
12700  * @cpu:    cpu going offline
12701  * @eqlist: eq list to append to
12702  */
12703 static int
12704 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12705 		  struct list_head *eqlist)
12706 {
12707 	const struct cpumask *maskp;
12708 	struct lpfc_queue *eq;
12709 	struct cpumask *tmp;
12710 	u16 idx;
12711 
12712 	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12713 	if (!tmp)
12714 		return -ENOMEM;
12715 
12716 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12717 		maskp = pci_irq_get_affinity(phba->pcidev, idx);
12718 		if (!maskp)
12719 			continue;
12720 		/*
12721 		 * if irq is not affinitized to the cpu going
12722 		 * then we don't need to poll the eq attached
12723 		 * to it.
12724 		 */
12725 		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12726 			continue;
12727 		/* Get the cpus that are online and are affinitized
12728 		 * to this irq vector.  If the count is more than 1
12729 		 * then cpuhp is not going to shut down this vector.
12730 		 * Since this cpu has not gone offline yet,
12731 		 * we need >1.
12732 		 */
12733 		cpumask_and(tmp, maskp, cpu_online_mask);
12734 		if (cpumask_weight(tmp) > 1)
12735 			continue;
12736 
12737 		/* Now that we have an irq to shut down, get the eq
12738 		 * mapped to this irq.  Note: multiple hdwq's in
12739 		 * the software can share an eq, but eventually
12740 		 * only one eq will be mapped to this vector.
12741 		 */
12742 		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12743 		list_add(&eq->_poll_list, eqlist);
12744 	}
12745 	kfree(tmp);
12746 	return 0;
12747 }
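
/*
 * Example (illustrative): if vector 3 is affinitized only to cpu 5 and
 * cpu 5 is the cpu going offline, the intersection with cpu_online_mask
 * still has weight 1 (cpu 5 has not gone offline yet), so the EQ behind
 * vector 3 is added to eqlist for the driver to poll while the vector
 * is shut down.
 */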
12748 
12749 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12750 {
12751 	if (phba->sli_rev != LPFC_SLI_REV4)
12752 		return;
12753 
12754 	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12755 					    &phba->cpuhp);
12756 	/*
12757 	 * unregistering the instance doesn't stop the polling
12758 	 * timer. Wait for the poll timer to retire.
12759 	 */
12760 	synchronize_rcu();
12761 	timer_delete_sync(&phba->cpuhp_poll_timer);
12762 }
12763 
12764 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12765 {
12766 	if (phba->pport &&
12767 	    test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
12768 		return;
12769 
12770 	__lpfc_cpuhp_remove(phba);
12771 }
12772 
12773 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12774 {
12775 	if (phba->sli_rev != LPFC_SLI_REV4)
12776 		return;
12777 
12778 	rcu_read_lock();
12779 
12780 	if (!list_empty(&phba->poll_list))
12781 		mod_timer(&phba->cpuhp_poll_timer,
12782 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12783 
12784 	rcu_read_unlock();
12785 
12786 	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12787 					 &phba->cpuhp);
12788 }
12789 
12790 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12791 {
12792 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
12793 		*retval = -EAGAIN;
12794 		return true;
12795 	}
12796 
12797 	if (phba->sli_rev != LPFC_SLI_REV4) {
12798 		*retval = 0;
12799 		return true;
12800 	}
12801 
12802 	/* proceed with the hotplug */
12803 	return false;
12804 }
12805 
12806 /**
12807  * lpfc_irq_set_aff - set IRQ affinity
12808  * @eqhdl: EQ handle
12809  * @cpu: cpu to set affinity
12810  *
12811  **/
12812 static inline void
12813 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12814 {
12815 	cpumask_clear(&eqhdl->aff_mask);
12816 	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12817 	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12818 	irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12819 }
12820 
12821 /**
12822  * lpfc_irq_clear_aff - clear IRQ affinity
12823  * @eqhdl: EQ handle
12824  *
12825  **/
12826 static inline void
12827 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12828 {
12829 	cpumask_clear(&eqhdl->aff_mask);
12830 	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12831 }
12832 
12833 /**
12834  * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12835  * @phba: pointer to HBA context object.
12836  * @cpu: cpu going offline/online
12837  * @offline: true, cpu is going offline. false, cpu is coming online.
12838  *
12839  * If cpu is going offline, we'll make a best effort to find the next
12840  * online cpu on the phba's original_mask and migrate all offlining IRQ
12841  * affinities.
12842  *
12843  * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12844  *
12845  * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12846  *	 PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12847  *
12848  **/
12849 static void
12850 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12851 {
12852 	struct lpfc_vector_map_info *cpup;
12853 	struct cpumask *aff_mask;
12854 	unsigned int cpu_select, cpu_next, idx;
12855 	const struct cpumask *orig_mask;
12856 
12857 	if (phba->irq_chann_mode == NORMAL_MODE)
12858 		return;
12859 
12860 	orig_mask = &phba->sli4_hba.irq_aff_mask;
12861 
12862 	if (!cpumask_test_cpu(cpu, orig_mask))
12863 		return;
12864 
12865 	cpup = &phba->sli4_hba.cpu_map[cpu];
12866 
12867 	if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12868 		return;
12869 
12870 	if (offline) {
12871 		/* Find next online CPU on original mask */
12872 		cpu_next = cpumask_next_wrap(cpu, orig_mask);
12873 		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12874 
12875 		/* Found a valid CPU */
12876 		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12877 			/* Go through each eqhdl and ensure offlining
12878 			 * cpu aff_mask is migrated
12879 			 */
12880 			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12881 				aff_mask = lpfc_get_aff_mask(idx);
12882 
12883 				/* Migrate affinity */
12884 				if (cpumask_test_cpu(cpu, aff_mask))
12885 					lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12886 							 cpu_select);
12887 			}
12888 		} else {
12889 			/* Rely on irqbalance if no online CPUs left on NUMA */
12890 			for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12891 				lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12892 		}
12893 	} else {
12894 		/* Migrate affinity back to this CPU */
12895 		lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12896 	}
12897 }
12898 
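/* cpuhp offline callback: before @cpu goes away, hand the EQs it was
 * servicing over to timer-driven softirq polling.
 */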
12899 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12900 {
12901 	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12902 	struct lpfc_queue *eq, *next;
12903 	LIST_HEAD(eqlist);
12904 	int retval;
12905 
12906 	if (!phba) {
12907 		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12908 		return 0;
12909 	}
12910 
12911 	if (__lpfc_cpuhp_checks(phba, &retval))
12912 		return retval;
12913 
12914 	lpfc_irq_rebalance(phba, cpu, true);
12915 
12916 	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12917 	if (retval)
12918 		return retval;
12919 
12920 	/* start polling on these eq's */
12921 	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12922 		list_del_init(&eq->_poll_list);
12923 		lpfc_sli4_start_polling(eq);
12924 	}
12925 
12926 	return 0;
12927 }
12928 
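/* cpuhp online callback: @cpu can service interrupts again, so stop
 * polling the EQs mapped to it.
 */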
12929 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12930 {
12931 	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12932 	struct lpfc_queue *eq, *next;
12933 	unsigned int n;
12934 	int retval;
12935 
12936 	if (!phba) {
12937 		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12938 		return 0;
12939 	}
12940 
12941 	if (__lpfc_cpuhp_checks(phba, &retval))
12942 		return retval;
12943 
12944 	lpfc_irq_rebalance(phba, cpu, false);
12945 
12946 	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12947 		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12948 		if (n == cpu)
12949 			lpfc_sli4_stop_polling(eq);
12950 	}
12951 
12952 	return 0;
12953 }
12954 
12955 /**
12956  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12957  * @phba: pointer to lpfc hba data structure.
12958  *
12959  * This routine is invoked to enable the MSI-X interrupt vectors to device
12960  * with SLI-4 interface spec.  It also allocates MSI-X vectors and maps them
12961  * to cpus on the system.
12962  *
12963  * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12964  * the number of cpus on the same numa node as this adapter.  The vectors are
12965  * allocated without requesting OS affinity mapping.  A vector will be
12966  * allocated and assigned to each online and offline cpu.  If the cpu is
12967  * online, then affinity will be set to that cpu.  If the cpu is offline, then
12968  * affinity will be set to the nearest peer cpu within the numa node that is
12969  * online.  If there are no online cpus within the numa node, affinity is not
12970  * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12971  * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
12972  * configured.
12973  *
12974  * If numa mode is not enabled and there is more than 1 vector allocated, then
12975  * the driver relies on the managed irq interface where the OS assigns vector to
12976  * cpu affinity.  The driver will then use that affinity mapping to setup its
12977  * cpu mapping table.
12978  *
12979  * Return codes
12980  * 0 - successful
12981  * other values - error
12982  **/
12983 static int
12984 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
12985 {
12986 	int vectors, rc, index;
12987 	char *name;
12988 	const struct cpumask *aff_mask = NULL;
12989 	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
12990 	struct lpfc_vector_map_info *cpup;
12991 	struct lpfc_hba_eq_hdl *eqhdl;
12992 	const struct cpumask *maskp;
12993 	unsigned int flags = PCI_IRQ_MSIX;
12994 
12995 	/* Set up MSI-X multi-message vectors */
12996 	vectors = phba->cfg_irq_chann;
12997 
12998 	if (phba->irq_chann_mode != NORMAL_MODE)
12999 		aff_mask = &phba->sli4_hba.irq_aff_mask;
13000 
13001 	if (aff_mask) {
13002 		cpu_cnt = cpumask_weight(aff_mask);
13003 		vectors = min(phba->cfg_irq_chann, cpu_cnt);
13004 
13005 		/* cpu: iterates over aff_mask including offline or online
13006 		 * cpu_select: iterates over online aff_mask to set affinity
13007 		 */
13008 		cpu = cpumask_first(aff_mask);
13009 		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13010 	} else {
13011 		flags |= PCI_IRQ_AFFINITY;
13012 	}
13013 
13014 	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13015 	if (rc < 0) {
13016 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13017 				"0484 PCI enable MSI-X failed (%d)\n", rc);
13018 		goto vec_fail_out;
13019 	}
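	/* pci_alloc_irq_vectors() may grant fewer vectors than requested;
	 * rc holds the count actually allocated.
	 */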
13020 	vectors = rc;
13021 
13022 	/* Assign MSI-X vectors to interrupt handlers */
13023 	for (index = 0; index < vectors; index++) {
13024 		eqhdl = lpfc_get_eq_hdl(index);
13025 		name = eqhdl->handler_name;
13026 		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13027 		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13028 			 LPFC_DRIVER_HANDLER_NAME"%d", index);
13029 
13030 		eqhdl->idx = index;
13031 		rc = pci_irq_vector(phba->pcidev, index);
13032 		if (rc < 0) {
13033 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13034 					"0489 MSI-X fast-path (%d) "
13035 					"pci_irq_vec failed (%d)\n", index, rc);
13036 			goto cfg_fail_out;
13037 		}
13038 		eqhdl->irq = rc;
13039 
13040 		rc = request_threaded_irq(eqhdl->irq,
13041 					  &lpfc_sli4_hba_intr_handler,
13042 					  &lpfc_sli4_hba_intr_handler_th,
13043 					  0, name, eqhdl);
13044 		if (rc) {
13045 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13046 					"0486 MSI-X fast-path (%d) "
13047 					"request_irq failed (%d)\n", index, rc);
13048 			goto cfg_fail_out;
13049 		}
13050 
13051 		if (aff_mask) {
13052 			/* If found a neighboring online cpu, set affinity */
13053 			if (cpu_select < nr_cpu_ids)
13054 				lpfc_irq_set_aff(eqhdl, cpu_select);
13055 
13056 			/* Assign EQ to cpu_map */
13057 			lpfc_assign_eq_map_info(phba, index,
13058 						LPFC_CPU_FIRST_IRQ,
13059 						cpu);
13060 
13061 			/* Iterate to next offline or online cpu in aff_mask */
13062 			cpu = cpumask_next(cpu, aff_mask);
13063 
13064 			/* Reached the end of the aff_mask */
13065 			if (cpu >= nr_cpu_ids)
13066 				break;
13067 
13068 			/* Find next online cpu in aff_mask to set affinity */
13069 			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13070 		} else if (vectors == 1) {
13071 			cpu = cpumask_first(cpu_present_mask);
13072 			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13073 						cpu);
13074 		} else {
13075 			maskp = pci_irq_get_affinity(phba->pcidev, index);
13076 
13077 			/* Loop through all CPUs associated with vector index */
13078 			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13079 				cpup = &phba->sli4_hba.cpu_map[cpu];
13080 
13081 				/* If this is the first CPU that's assigned to
13082 				 * this vector, set LPFC_CPU_FIRST_IRQ.
13083 				 *
13084 				 * With certain platforms it's possible that irq
13085 				 * vectors are affinitized to all the cpus.
13086 				 * This can result in each cpu_map.eq being set
13087 				 * to the last vector, overwriting all of the
13088 				 * previous cpu_map.eq entries.  Ensure that
13089 				 * each vector receives a place in cpu_map.
13090 				 * Later call to lpfc_cpu_affinity_check will
13091 				 * ensure we are nicely balanced out.
13092 				 */
13093 				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13094 					continue;
13095 				lpfc_assign_eq_map_info(phba, index,
13096 							LPFC_CPU_FIRST_IRQ,
13097 							cpu);
13098 				break;
13099 			}
13100 		}
13101 	}
13102 
13103 	if (vectors != phba->cfg_irq_chann) {
13104 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13105 				"3238 Reducing IO channels to match number of "
13106 				"MSI-X vectors, requested %d got %d\n",
13107 				phba->cfg_irq_chann, vectors);
13108 		if (phba->cfg_irq_chann > vectors)
13109 			phba->cfg_irq_chann = vectors;
13110 	}
13111 
13112 	return rc;
13113 
13114 cfg_fail_out:
13115 	/* free the irq already requested */
13116 	for (--index; index >= 0; index--) {
13117 		eqhdl = lpfc_get_eq_hdl(index);
13118 		lpfc_irq_clear_aff(eqhdl);
13119 		free_irq(eqhdl->irq, eqhdl);
13120 	}
13121 
13122 	/* Unconfigure MSI-X capability structure */
13123 	pci_free_irq_vectors(phba->pcidev);
13124 
13125 vec_fail_out:
13126 	return rc;
13127 }
13128 
13129 /**
13130  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13131  * @phba: pointer to lpfc hba data structure.
13132  *
13133  * This routine is invoked to enable the MSI interrupt mode to device with
13134  * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13135  * called to enable the MSI vector. The device driver is responsible for
13136  * calling request_irq() to register the MSI vector with an interrupt
13137  * handler, which is done in this function.
13138  *
13139  * Return codes
13140  * 	0 - successful
13141  * 	other values - error
13142  **/
13143 static int
13144 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13145 {
13146 	int rc, index;
13147 	unsigned int cpu;
13148 	struct lpfc_hba_eq_hdl *eqhdl;
13149 
13150 	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13151 				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13152 	if (rc > 0)
13153 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13154 				"0487 PCI enable MSI mode success.\n");
13155 	else {
13156 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13157 				"0488 PCI enable MSI mode failed (%d)\n", rc);
13158 		return rc ? rc : -1;
13159 	}
13160 
13161 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13162 			 0, LPFC_DRIVER_NAME, phba);
13163 	if (rc) {
13164 		pci_free_irq_vectors(phba->pcidev);
13165 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13166 				"0490 MSI request_irq failed (%d)\n", rc);
13167 		return rc;
13168 	}
13169 
13170 	eqhdl = lpfc_get_eq_hdl(0);
13171 	rc = pci_irq_vector(phba->pcidev, 0);
13172 	if (rc < 0) {
13173 		free_irq(phba->pcidev->irq, phba);
13174 		pci_free_irq_vectors(phba->pcidev);
13175 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13176 				"0496 MSI pci_irq_vec failed (%d)\n", rc);
13177 		return rc;
13178 	}
13179 	eqhdl->irq = rc;
13180 
13181 	cpu = cpumask_first(cpu_present_mask);
13182 	lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13183 
13184 	for (index = 0; index < phba->cfg_irq_chann; index++) {
13185 		eqhdl = lpfc_get_eq_hdl(index);
13186 		eqhdl->idx = index;
13187 	}
13188 
13189 	return 0;
13190 }
13191 
13192 /**
13193  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13194  * @phba: pointer to lpfc hba data structure.
13195  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13196  *
13197  * This routine is invoked to enable device interrupt and associate driver's
13198  * interrupt handler(s) to interrupt vector(s) to device with SLI-4
13199  * interface spec. Depends on the interrupt mode configured to the driver,
13200  * interface spec. Depending on the interrupt mode configured for the driver,
13201  * the driver will try to fall back from the configured interrupt mode to an
13202  * the order of:
13203  * MSI-X -> MSI -> IRQ.
13204  *
13205  * Return codes
13206  *	Interrupt mode (2, 1, 0) - successful
13207  *	LPFC_INTR_ERROR - error
13208  **/
13209 static uint32_t
13210 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13211 {
13212 	uint32_t intr_mode = LPFC_INTR_ERROR;
13213 	int retval, idx;
13214 
13215 	if (cfg_mode == 2) {
13216 		/* Preparation before conf_msi mbox cmd */
13217 		retval = 0;
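		/* (no preparation is currently required, so retval stays 0) */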
13218 		if (!retval) {
13219 			/* Now, try to enable MSI-X interrupt mode */
13220 			retval = lpfc_sli4_enable_msix(phba);
13221 			if (!retval) {
13222 				/* Indicate initialization to MSI-X mode */
13223 				phba->intr_type = MSIX;
13224 				intr_mode = 2;
13225 			}
13226 		}
13227 	}
13228 
13229 	/* Fall back to MSI if MSI-X initialization failed */
13230 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
13231 		retval = lpfc_sli4_enable_msi(phba);
13232 		if (!retval) {
13233 			/* Indicate initialization to MSI mode */
13234 			phba->intr_type = MSI;
13235 			intr_mode = 1;
13236 		}
13237 	}
13238 
13239 	/* Fall back to INTx if both MSI-X and MSI initialization failed */
13240 	if (phba->intr_type == NONE) {
13241 		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13242 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13243 		if (!retval) {
13244 			struct lpfc_hba_eq_hdl *eqhdl;
13245 			unsigned int cpu;
13246 
13247 			/* Indicate initialization to INTx mode */
13248 			phba->intr_type = INTx;
13249 			intr_mode = 0;
13250 
13251 			eqhdl = lpfc_get_eq_hdl(0);
13252 			retval = pci_irq_vector(phba->pcidev, 0);
13253 			if (retval < 0) {
13254 				free_irq(phba->pcidev->irq, phba);
13255 				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13256 					"0502 INTR pci_irq_vec failed (%d)\n",
13257 					 retval);
13258 				return LPFC_INTR_ERROR;
13259 			}
13260 			eqhdl->irq = retval;
13261 
13262 			cpu = cpumask_first(cpu_present_mask);
13263 			lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13264 						cpu);
13265 			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13266 				eqhdl = lpfc_get_eq_hdl(idx);
13267 				eqhdl->idx = idx;
13268 			}
13269 		}
13270 	}
13271 	return intr_mode;
13272 }
13273 
13274 /**
13275  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13276  * @phba: pointer to lpfc hba data structure.
13277  *
13278  * This routine is invoked to disable device interrupt and disassociate
13279  * the driver's interrupt handler(s) from interrupt vector(s) to device
13280  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13281  * will release the interrupt vector(s) for the message signaled interrupt.
13282  **/
13283 static void
13284 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13285 {
13286 	/* Disable the currently initialized interrupt mode */
13287 	if (phba->intr_type == MSIX) {
13288 		int index;
13289 		struct lpfc_hba_eq_hdl *eqhdl;
13290 
13291 		/* Free up MSI-X multi-message vectors */
13292 		for (index = 0; index < phba->cfg_irq_chann; index++) {
13293 			eqhdl = lpfc_get_eq_hdl(index);
13294 			lpfc_irq_clear_aff(eqhdl);
13295 			free_irq(eqhdl->irq, eqhdl);
13296 		}
13297 	} else {
13298 		free_irq(phba->pcidev->irq, phba);
13299 	}
13300 
13301 	pci_free_irq_vectors(phba->pcidev);
13302 
13303 	/* Reset interrupt management states */
13304 	phba->intr_type = NONE;
13305 	phba->sli.slistat.sli_intr = 0;
13306 }
13307 
13308 /**
13309  * lpfc_unset_hba - Unset SLI3 hba device initialization
13310  * @phba: pointer to lpfc hba data structure.
13311  *
13312  * This routine is invoked to undo the HBA device initialization steps for
13313  * a device with SLI-3 interface spec.
13314  **/
13315 static void
13316 lpfc_unset_hba(struct lpfc_hba *phba)
13317 {
13318 	set_bit(FC_UNLOADING, &phba->pport->load_flag);
13319 
13320 	kfree(phba->vpi_bmask);
13321 	kfree(phba->vpi_ids);
13322 
13323 	lpfc_stop_hba_timers(phba);
13324 
13325 	phba->pport->work_port_events = 0;
13326 
13327 	lpfc_sli_hba_down(phba);
13328 
13329 	lpfc_sli_brdrestart(phba);
13330 
13331 	lpfc_sli_disable_intr(phba);
13332 
13333 	return;
13334 }
13335 
13336 /**
13337  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13338  * @phba: Pointer to HBA context object.
13339  *
13340  * This function is called in the SLI4 code path to wait for completion
13341  * of the device's XRI exchange busy activity. It will check the XRI
13342  * exchange busy state of outstanding FCP and ELS I/Os every 10ms for up
13343  * to 10 seconds; after that, it will check every 30 seconds, log an
13344  * error message, and wait forever. Only when all XRI exchange busy
13345  * activity completes shall the driver unload proceed with invoking the
13346  * function reset ioctl mailbox command to the CNA and the rest of the
13347  * driver unload resource release.
13348  **/
13349 static void
13350 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13351 {
13352 	struct lpfc_sli4_hdw_queue *qp;
13353 	int idx, ccnt;
13354 	int wait_time = 0;
13355 	int io_xri_cmpl = 1;
13356 	int nvmet_xri_cmpl = 1;
13357 	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13358 
13359 	/* Driver just aborted IOs during the hba_unset process.  Pause
13360 	 * here to give the HBA time to complete the IO and get entries
13361 	 * into the abts lists.
13362 	 */
13363 	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13364 
13365 	/* Wait for NVME pending IO to flush back to transport. */
13366 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13367 		lpfc_nvme_wait_for_io_drain(phba);
13368 
13369 	ccnt = 0;
13370 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13371 		qp = &phba->sli4_hba.hdwq[idx];
13372 		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13373 		if (!io_xri_cmpl) /* if list is NOT empty */
13374 			ccnt++;
13375 	}
13376 	if (ccnt)
13377 		io_xri_cmpl = 0;
13378 
13379 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13380 		nvmet_xri_cmpl =
13381 			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13382 	}
13383 
13384 	while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13385 		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13386 			if (!nvmet_xri_cmpl)
13387 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13388 						"6424 NVMET XRI exchange busy "
13389 						"wait time: %d seconds.\n",
13390 						wait_time/1000);
13391 			if (!io_xri_cmpl)
13392 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13393 						"6100 IO XRI exchange busy "
13394 						"wait time: %d seconds.\n",
13395 						wait_time/1000);
13396 			if (!els_xri_cmpl)
13397 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13398 						"2878 ELS XRI exchange busy "
13399 						"wait time: %d seconds.\n",
13400 						wait_time/1000);
13401 			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13402 			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13403 		} else {
13404 			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13405 			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13406 		}
13407 
13408 		ccnt = 0;
13409 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13410 			qp = &phba->sli4_hba.hdwq[idx];
13411 			io_xri_cmpl = list_empty(
13412 			    &qp->lpfc_abts_io_buf_list);
13413 			if (!io_xri_cmpl) /* if list is NOT empty */
13414 				ccnt++;
13415 		}
13416 		if (ccnt)
13417 			io_xri_cmpl = 0;
13418 
13419 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13420 			nvmet_xri_cmpl = list_empty(
13421 				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13422 		}
13423 		els_xri_cmpl =
13424 			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13425 
13426 	}
13427 }
13428 
13429 /**
13430  * lpfc_sli4_hba_unset - Unset the fcoe hba
13431  * @phba: Pointer to HBA context object.
13432  *
13433  * This function is called in the SLI4 code path to reset the HBA's FCoE
13434  * function. The caller is not required to hold any lock. This routine
13435  * issues PCI function reset mailbox command to reset the FCoE function.
13436  * At the end of the function, it calls lpfc_hba_down_post function to
13437  * free any pending commands.
13438  **/
13439 static void
13440 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13441 {
13442 	int wait_cnt = 0;
13443 	LPFC_MBOXQ_t *mboxq;
13444 	struct pci_dev *pdev = phba->pcidev;
13445 
13446 	lpfc_stop_hba_timers(phba);
13447 	hrtimer_cancel(&phba->cmf_stats_timer);
13448 	hrtimer_cancel(&phba->cmf_timer);
13449 
13450 	if (phba->pport)
13451 		phba->sli4_hba.intr_enable = 0;
13452 
13453 	/*
13454 	 * Gracefully wait out the potential current outstanding asynchronous
13455 	 * mailbox command.
13456 	 */
13457 
13458 	/* First, block any pending async mailbox command from being posted */
13459 	spin_lock_irq(&phba->hbalock);
13460 	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13461 	spin_unlock_irq(&phba->hbalock);
13462 	/* Now, try to wait it out if we can */
13463 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13464 		msleep(10);
13465 		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13466 			break;
13467 	}
13468 	/* Forcefully release the outstanding mailbox command if timed out */
13469 	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13470 		spin_lock_irq(&phba->hbalock);
13471 		mboxq = phba->sli.mbox_active;
13472 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13473 		__lpfc_mbox_cmpl_put(phba, mboxq);
13474 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13475 		phba->sli.mbox_active = NULL;
13476 		spin_unlock_irq(&phba->hbalock);
13477 	}
13478 
13479 	/* Abort all iocbs associated with the hba */
13480 	lpfc_sli_hba_iocb_abort(phba);
13481 
13482 	if (!pci_channel_offline(phba->pcidev))
13483 		/* Wait for completion of device XRI exchange busy */
13484 		lpfc_sli4_xri_exchange_busy_wait(phba);
13485 
13486 	/* per-phba callback de-registration for hotplug event */
13487 	if (phba->pport)
13488 		lpfc_cpuhp_remove(phba);
13489 
13490 	/* Disable PCI subsystem interrupt */
13491 	lpfc_sli4_disable_intr(phba);
13492 
13493 	/* Disable SR-IOV if enabled */
13494 	if (phba->cfg_sriov_nr_virtfn)
13495 		pci_disable_sriov(pdev);
13496 
13497 	/* Stopping the kthread shall trigger work_done one more time */
13498 	kthread_stop(phba->worker_thread);
13499 
13500 	/* Disable FW logging to host memory */
13501 	lpfc_ras_stop_fwlog(phba);
13502 
13503 	lpfc_sli4_queue_unset(phba);
13504 
13505 	/* Reset SLI4 HBA FCoE function */
13506 	lpfc_pci_function_reset(phba);
13507 
13508 	/* release all queue allocated resources. */
13509 	lpfc_sli4_queue_destroy(phba);
13510 
13511 	/* Free RAS DMA memory */
13512 	if (phba->ras_fwlog.ras_enabled)
13513 		lpfc_sli4_ras_dma_free(phba);
13514 
13515 	/* Stop the SLI4 device port */
13516 	if (phba->pport)
13517 		phba->pport->work_port_events = 0;
13518 }
13519 
13520 /*
13521  * This routine matches the algorithm the HBA firmware
13522  * uses to validate data integrity.
13523  */
13524 uint32_t
13525 lpfc_cgn_calc_crc32(const void *data, size_t size)
13526 {
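	/* Standard CRC32C (Castagnoli): seed with ~0 and invert the result */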
13527 	return ~crc32c(~0, data, size);
13528 }
13529 
13530 void
13531 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13532 {
13533 	struct lpfc_cgn_info *cp;
13534 	uint16_t size;
13535 	uint32_t crc;
13536 
13537 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13538 			"6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13539 
13540 	if (!phba->cgn_i)
13541 		return;
13542 	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13543 
13544 	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13545 	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13546 	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13547 	atomic_set(&phba->cgn_sync_warn_cnt, 0);
13548 
13549 	atomic_set(&phba->cgn_driver_evt_cnt, 0);
13550 	atomic_set(&phba->cgn_latency_evt_cnt, 0);
13551 	atomic64_set(&phba->cgn_latency_evt, 0);
13552 	phba->cgn_evt_minute = 0;
13553 
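	/* Preset everything before the cgn_stat area to 0xff; individual
	 * fields are filled in below.
	 */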
13554 	memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13555 	cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13556 	cp->cgn_info_version = LPFC_CGN_INFO_V4;
13557 
13558 	/* cgn parameters */
13559 	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13560 	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13561 	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13562 	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13563 
13564 	lpfc_cgn_update_tstamp(phba, &cp->base_time);
13565 
13566 	/* Fill in default LUN qdepth */
13567 	if (phba->pport) {
13568 		size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13569 		cp->cgn_lunq = cpu_to_le16(size);
13570 	}
13571 
13572 	/* last used Index initialized to 0xff already */
13573 
13574 	cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13575 	cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13576 	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
13577 	cp->cgn_info_crc = cpu_to_le32(crc);
13578 
13579 	phba->cgn_evt_timestamp = jiffies +
13580 		msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13581 }
13582 
13583 void
13584 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13585 {
13586 	struct lpfc_cgn_info *cp;
13587 	uint32_t crc;
13588 
13589 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13590 			"6236 INIT Congestion Stat %p\n", phba->cgn_i);
13591 
13592 	if (!phba->cgn_i)
13593 		return;
13594 
13595 	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13596 	memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13597 
13598 	lpfc_cgn_update_tstamp(phba, &cp->stat_start);
13599 	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ);
13600 	cp->cgn_info_crc = cpu_to_le32(crc);
13601 }
13602 
13603 /**
13604  * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13605  * @phba: Pointer to hba context object.
13606  * @reg: flag to determine register or unregister.
13607  */
13608 static int
13609 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13610 {
13611 	struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13612 	union  lpfc_sli4_cfg_shdr *shdr;
13613 	uint32_t shdr_status, shdr_add_status;
13614 	LPFC_MBOXQ_t *mboxq;
13615 	int length, rc;
13616 
13617 	if (!phba->cgn_i)
13618 		return -ENXIO;
13619 
13620 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13621 	if (!mboxq) {
13622 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13623 				"2641 REG_CONGESTION_BUF mbox allocation fail: "
13624 				"HBA state x%x reg %d\n",
13625 				phba->pport->port_state, reg);
13626 		return -ENOMEM;
13627 	}
13628 
13629 	length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13630 		sizeof(struct lpfc_sli4_cfg_mhdr));
13631 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13632 			 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13633 			 LPFC_SLI4_MBX_EMBED);
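	/* A buf_cnt of 1 registers the congestion buffer with the port;
	 * 0 unregisters it.
	 */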
13634 	reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13635 	bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13636 	if (reg > 0)
13637 		bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13638 	else
13639 		bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13640 	reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13641 	reg_congestion_buf->addr_lo =
13642 		putPaddrLow(phba->cgn_i->phys);
13643 	reg_congestion_buf->addr_hi =
13644 		putPaddrHigh(phba->cgn_i->phys);
13645 
13646 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13647 	shdr = (union lpfc_sli4_cfg_shdr *)
13648 		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13649 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13650 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13651 				 &shdr->response);
13652 	mempool_free(mboxq, phba->mbox_mem_pool);
13653 	if (shdr_status || shdr_add_status || rc) {
13654 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13655 				"2642 REG_CONGESTION_BUF mailbox "
13656 				"failed with status x%x add_status x%x,"
13657 				" mbx status x%x reg %d\n",
13658 				shdr_status, shdr_add_status, rc, reg);
13659 		return -ENXIO;
13660 	}
13661 	return 0;
13662 }
13663 
13664 int
13665 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13666 {
13667 	lpfc_cmf_stop(phba);
13668 	return __lpfc_reg_congestion_buf(phba, 0);
13669 }
13670 
13671 int
13672 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13673 {
13674 	return __lpfc_reg_congestion_buf(phba, 1);
13675 }
13676 
13677 /**
13678  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13679  * @phba: Pointer to HBA context object.
13680  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13681  *
13682  * This function is called in the SLI4 code path to read the port's
13683  * sli4 capabilities.
13684  *
13685  * This function may be called from any context that can block-wait
13686  * for the completion.  The expectation is that this routine is called
13687  * typically from probe_one or from the online routine.
13688  **/
13689 int
13690 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13691 {
13692 	int rc;
13693 	struct lpfc_mqe *mqe = &mboxq->u.mqe;
13694 	struct lpfc_pc_sli4_params *sli4_params;
13695 	uint32_t mbox_tmo;
13696 	int length;
13697 	bool exp_wqcq_pages = true;
13698 	struct lpfc_sli4_parameters *mbx_sli4_parameters;
13699 
13700 	/*
13701 	 * By default, the driver assumes the SLI4 port requires RPI
13702 	 * header postings.  The SLI4_PARAM response will correct this
13703 	 * assumption.
13704 	 */
13705 	phba->sli4_hba.rpi_hdrs_in_use = 1;
13706 
13707 	/* Read the port's SLI4 Config Parameters */
13708 	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13709 		  sizeof(struct lpfc_sli4_cfg_mhdr));
13710 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13711 			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13712 			 length, LPFC_SLI4_MBX_EMBED);
13713 	if (!phba->sli4_hba.intr_enable)
13714 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13715 	else {
13716 		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13717 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13718 	}
13719 	if (unlikely(rc))
13720 		return rc;
13721 	sli4_params = &phba->sli4_hba.pc_sli4_params;
13722 	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13723 	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13724 	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13725 	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13726 	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13727 					     mbx_sli4_parameters);
13728 	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13729 					     mbx_sli4_parameters);
13730 	if (bf_get(cfg_phwq, mbx_sli4_parameters))
13731 		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13732 	else
13733 		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13734 	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13735 	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13736 					   mbx_sli4_parameters);
13737 	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13738 	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13739 	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13740 	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13741 	sli4_params->rqv =
13742 		(sli4_params->if_type < LPFC_SLI_INTF_IF_TYPE_2) ?
13743 			LPFC_Q_CREATE_VERSION_0 : LPFC_Q_CREATE_VERSION_1;
13744 	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13745 	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13746 	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13747 	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13748 	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13749 	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13750 					    mbx_sli4_parameters);
13751 	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13752 	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13753 					   mbx_sli4_parameters);
13754 	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13755 	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13756 	sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters);
13757 
13758 	/* Check for Extended Pre-Registered SGL support */
13759 	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13760 
13761 	/* Check for firmware nvme support */
13762 	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13763 		     bf_get(cfg_xib, mbx_sli4_parameters));
13764 
13765 	if (rc) {
13766 		/* Save this to indicate the Firmware supports NVME */
13767 		sli4_params->nvme = 1;
13768 
13769 		/* Firmware NVME support, check driver FC4 NVME support */
13770 		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13771 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13772 					"6133 Disabling NVME support: "
13773 					"FC4 type not supported: x%x\n",
13774 					phba->cfg_enable_fc4_type);
13775 			goto fcponly;
13776 		}
13777 	} else {
13778 		/* No firmware NVME support, check driver FC4 NVME support */
13779 		sli4_params->nvme = 0;
13780 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13781 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13782 					"6101 Disabling NVME support: Not "
13783 					"supported by firmware (%d %d) x%x\n",
13784 					bf_get(cfg_nvme, mbx_sli4_parameters),
13785 					bf_get(cfg_xib, mbx_sli4_parameters),
13786 					phba->cfg_enable_fc4_type);
13787 fcponly:
13788 			phba->nvmet_support = 0;
13789 			phba->cfg_nvmet_mrq = 0;
13790 			phba->cfg_nvme_seg_cnt = 0;
13791 
13792 			/* If no FC4 type support, move to just SCSI support */
13793 			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13794 				return -ENODEV;
13795 			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13796 		}
13797 	}
13798 
13799 	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13800 	 * accommodate 512K and 1M IOs in a single nvme buf.
13801 	 */
13802 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13803 		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13804 
13805 	/*
13806 	 * To support Suppress Response feature we must satisfy 3 conditions.
13807 	 * lpfc_suppress_rsp module parameter must be set (default).
13808 	 * In SLI4-Parameters Descriptor:
13809 	 * Extended Inline Buffers (XIB) must be supported.
13810 	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13811 	 * (double negative).
13812 	 */
13813 	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13814 	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13815 		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13816 	else
13817 		phba->cfg_suppress_rsp = 0;
13818 
13819 	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13820 		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13821 
13822 	/* Make sure that sge_supp_len can be handled by the driver */
13823 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13824 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13825 
13826 	dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
13827 
13828 	/*
13829 	 * Check whether the adapter supports an embedded copy of the
13830 	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13831 	 * to use this option, 128-byte WQEs must be used.
13832 	 */
13833 	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13834 		phba->fcp_embed_io = 1;
13835 	else
13836 		phba->fcp_embed_io = 0;
13837 
13838 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13839 			"6422 XIB %d: FCP %d NVME %d %d %d\n",
13840 			bf_get(cfg_xib, mbx_sli4_parameters),
13841 			phba->fcp_embed_io, sli4_params->nvme,
13842 			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13843 
13844 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13845 	    LPFC_SLI_INTF_IF_TYPE_2) &&
13846 	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13847 		 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13848 		exp_wqcq_pages = false;
13849 
13850 	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13851 	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13852 	    exp_wqcq_pages &&
13853 	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13854 		phba->enab_exp_wqcq_pages = 1;
13855 	else
13856 		phba->enab_exp_wqcq_pages = 0;
13857 	/*
13858 	 * Check if the SLI port supports MDS Diagnostics
13859 	 */
13860 	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13861 		phba->mds_diags_support = 1;
13862 	else
13863 		phba->mds_diags_support = 0;
13864 
13865 	/*
13866 	 * Check if the SLI port supports NSLER
13867 	 */
13868 	if (bf_get(cfg_nsler, mbx_sli4_parameters))
13869 		phba->nsler = 1;
13870 	else
13871 		phba->nsler = 0;
13872 
13873 	return 0;
13874 }
13875 
13876 /**
13877  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13878  * @pdev: pointer to PCI device
13879  * @pid: pointer to PCI device identifier
13880  *
13881  * This routine is to be called to attach a device with SLI-3 interface spec
13882  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13883  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13884  * information of the device and driver to see if the driver state that it can
13885  * information of the device and driver to see if the driver states that it can
13886  * invokes this routine. If this routine determines it can claim the HBA, it
13887  * does all the initialization that it needs to do to handle the HBA properly.
13888  *
13889  * Return code
13890  * 	0 - driver can claim the device
13891  * 	negative value - driver can not claim the device
13892  **/
13893 static int
13894 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13895 {
13896 	struct lpfc_hba   *phba;
13897 	struct lpfc_vport *vport = NULL;
13898 	struct Scsi_Host  *shost = NULL;
13899 	int error;
13900 	uint32_t cfg_mode, intr_mode;
13901 
13902 	/* Allocate memory for HBA structure */
13903 	phba = lpfc_hba_alloc(pdev);
13904 	if (!phba)
13905 		return -ENOMEM;
13906 
13907 	/* Perform generic PCI device enabling operation */
13908 	error = lpfc_enable_pci_dev(phba);
13909 	if (error)
13910 		goto out_free_phba;
13911 
13912 	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
13913 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13914 	if (error)
13915 		goto out_disable_pci_dev;
13916 
13917 	/* Set up SLI-3 specific device PCI memory space */
13918 	error = lpfc_sli_pci_mem_setup(phba);
13919 	if (error) {
13920 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13921 				"1402 Failed to set up pci memory space.\n");
13922 		goto out_disable_pci_dev;
13923 	}
13924 
13925 	/* Set up SLI-3 specific device driver resources */
13926 	error = lpfc_sli_driver_resource_setup(phba);
13927 	if (error) {
13928 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13929 				"1404 Failed to set up driver resource.\n");
13930 		goto out_unset_pci_mem_s3;
13931 	}
13932 
13933 	/* Initialize and populate the iocb list per host */
13934 
13935 	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13936 	if (error) {
13937 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13938 				"1405 Failed to initialize iocb list.\n");
13939 		goto out_unset_driver_resource_s3;
13940 	}
13941 
13942 	/* Set up common device driver resources */
13943 	error = lpfc_setup_driver_resource_phase2(phba);
13944 	if (error) {
13945 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13946 				"1406 Failed to set up driver resource.\n");
13947 		goto out_free_iocb_list;
13948 	}
13949 
13950 	/* Get the default values for Model Name and Description */
13951 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13952 
13953 	/* Create SCSI host to the physical port */
13954 	error = lpfc_create_shost(phba);
13955 	if (error) {
13956 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13957 				"1407 Failed to create scsi host.\n");
13958 		goto out_unset_driver_resource;
13959 	}
13960 
13961 	/* Configure sysfs attributes */
13962 	vport = phba->pport;
13963 	error = lpfc_alloc_sysfs_attr(vport);
13964 	if (error) {
13965 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13966 				"1476 Failed to allocate sysfs attr\n");
13967 		goto out_destroy_shost;
13968 	}
13969 
13970 	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13971 	/* Now, try to enable the interrupt and bring up the device */
13972 	cfg_mode = phba->cfg_use_msi;
13973 	while (true) {
13974 		/* Put device to a known state before enabling interrupt */
13975 		lpfc_stop_port(phba);
13976 		/* Configure and enable interrupt */
13977 		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
13978 		if (intr_mode == LPFC_INTR_ERROR) {
13979 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13980 					"0431 Failed to enable interrupt.\n");
13981 			error = -ENODEV;
13982 			goto out_free_sysfs_attr;
13983 		}
13984 		/* SLI-3 HBA setup */
13985 		if (lpfc_sli_hba_setup(phba)) {
13986 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13987 					"1477 Failed to set up hba\n");
13988 			error = -ENODEV;
13989 			goto out_remove_device;
13990 		}
13991 
13992 		/* Wait 50ms for the interrupts of previous mailbox commands */
13993 		msleep(50);
13994 		/* Check active interrupts on message signaled interrupts */
13995 		if (intr_mode == 0 ||
13996 		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
13997 			/* Log the current active interrupt mode */
13998 			phba->intr_mode = intr_mode;
13999 			lpfc_log_intr_mode(phba, intr_mode);
14000 			break;
14001 		} else {
14002 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14003 					"0447 Configure interrupt mode (%d) "
14004 					"failed active interrupt test.\n",
14005 					intr_mode);
14006 			/* Disable the current interrupt mode */
14007 			lpfc_sli_disable_intr(phba);
14008 			/* Try next level of interrupt mode */
14009 			cfg_mode = --intr_mode;
14010 		}
14011 	}
14012 
14013 	/* Perform post initialization setup */
14014 	lpfc_post_init_setup(phba);
14015 
14016 	/* Check if there are static vports to be created. */
14017 	lpfc_create_static_vport(phba);
14018 
14019 	return 0;
14020 
14021 out_remove_device:
14022 	lpfc_unset_hba(phba);
14023 out_free_sysfs_attr:
14024 	lpfc_free_sysfs_attr(vport);
14025 out_destroy_shost:
14026 	lpfc_destroy_shost(phba);
14027 out_unset_driver_resource:
14028 	lpfc_unset_driver_resource_phase2(phba);
14029 out_free_iocb_list:
14030 	lpfc_free_iocb_list(phba);
14031 out_unset_driver_resource_s3:
14032 	lpfc_sli_driver_resource_unset(phba);
14033 out_unset_pci_mem_s3:
14034 	lpfc_sli_pci_mem_unset(phba);
14035 out_disable_pci_dev:
14036 	lpfc_disable_pci_dev(phba);
14037 	if (shost)
14038 		scsi_host_put(shost);
14039 out_free_phba:
14040 	lpfc_hba_free(phba);
14041 	return error;
14042 }
14043 
14044 /**
14045  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
14046  * @pdev: pointer to PCI device
14047  *
14048  * This routine is to be called to detach a device with SLI-3 interface
14049  * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
14050  * removed from PCI bus, it performs all the necessary cleanup for the HBA
14051  * device to be removed from the PCI subsystem properly.
14052  **/
14053 static void
14054 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
14055 {
14056 	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
14057 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14058 	struct lpfc_vport **vports;
14059 	struct lpfc_hba   *phba = vport->phba;
14060 	int i;
14061 
14062 	set_bit(FC_UNLOADING, &vport->load_flag);
14063 
14064 	lpfc_free_sysfs_attr(vport);
14065 
14066 	/* Release all the vports against this physical port */
14067 	vports = lpfc_create_vport_work_array(phba);
14068 	if (vports != NULL)
14069 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14070 			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14071 				continue;
14072 			fc_vport_terminate(vports[i]->fc_vport);
14073 		}
14074 	lpfc_destroy_vport_work_array(phba, vports);
14075 
14076 	/* Remove FC host with the physical port */
14077 	fc_remove_host(shost);
14078 	scsi_remove_host(shost);
14079 
14080 	/* Clean up all nodes, mailboxes and IOs. */
14081 	lpfc_cleanup(vport);
14082 
14083 	/*
14084 	 * Bring down the SLI Layer. This step disables all interrupts,
14085 	 * clears the rings, discards all mailbox commands, and resets
14086 	 * the HBA.
14087 	 */
14088 
14089 	/* HBA interrupt will be disabled after this call */
14090 	lpfc_sli_hba_down(phba);
14091 	/* Stopping the kthread shall trigger work_done one more time */
14092 	kthread_stop(phba->worker_thread);
14093 	/* Final cleanup of txcmplq and reset the HBA */
14094 	lpfc_sli_brdrestart(phba);
14095 
14096 	kfree(phba->vpi_bmask);
14097 	kfree(phba->vpi_ids);
14098 
14099 	lpfc_stop_hba_timers(phba);
14100 	spin_lock_irq(&phba->port_list_lock);
14101 	list_del_init(&vport->listentry);
14102 	spin_unlock_irq(&phba->port_list_lock);
14103 
14104 	lpfc_debugfs_terminate(vport);
14105 
14106 	/* Disable SR-IOV if enabled */
14107 	if (phba->cfg_sriov_nr_virtfn)
14108 		pci_disable_sriov(pdev);
14109 
14110 	/* Disable interrupt */
14111 	lpfc_sli_disable_intr(phba);
14112 
14113 	scsi_host_put(shost);
14114 
14115 	/*
14116 	 * Call scsi_free before mem_free since scsi bufs are released to their
14117 	 * corresponding pools here.
14118 	 */
14119 	lpfc_scsi_free(phba);
14120 	lpfc_free_iocb_list(phba);
14121 
14122 	lpfc_mem_free_all(phba);
14123 
14124 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14125 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
14126 
14127 	/* Free resources associated with SLI2 interface */
14128 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14129 			  phba->slim2p.virt, phba->slim2p.phys);
14130 
14131 	/* unmap adapter SLIM and Control Registers */
14132 	iounmap(phba->ctrl_regs_memmap_p);
14133 	iounmap(phba->slim_memmap_p);
14134 
14135 	lpfc_hba_free(phba);
14136 
14137 	pci_release_mem_regions(pdev);
14138 	pci_disable_device(pdev);
14139 }
14140 
14141 /**
14142  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14143  * @dev_d: pointer to device
14144  *
14145  * This routine is to be called from the kernel's PCI subsystem to support
14146  * system Power Management (PM) to device with SLI-3 interface spec. When
14147  * PM invokes this method, it quiesces the device by stopping the driver's
14148  * worker thread for the device, turning off the device's interrupt and DMA,
14149  * and bringing the device offline. Note that because the driver implements
14150  * only the minimum PM requirements of a power-aware driver's PM support for
14151  * suspend/resume -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14152  * passed to the suspend() method call are treated as SUSPEND, and the driver
14153  * fully reinitializes its device during the resume() method call -- the
14154  * driver will set the device to the PCI_D3hot state in PCI config space
14155  * instead of setting it according to the @msg provided by the PM.
14156  *
14157  * Return code
14158  * 	0 - driver suspended the device
14159  * 	Error otherwise
14160  **/
14161 static int __maybe_unused
14162 lpfc_pci_suspend_one_s3(struct device *dev_d)
14163 {
14164 	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14165 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14166 
14167 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14168 			"0473 PCI device Power Management suspend.\n");
14169 
14170 	/* Bring down the device */
14171 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14172 	lpfc_offline(phba);
14173 	kthread_stop(phba->worker_thread);
14174 
14175 	/* Disable interrupt from device */
14176 	lpfc_sli_disable_intr(phba);
14177 
14178 	return 0;
14179 }
14180 
14181 /**
14182  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14183  * @dev_d: pointer to device
14184  *
14185  * This routine is to be called from the kernel's PCI subsystem to support
14186  * system Power Management (PM) to device with SLI-3 interface spec. When PM
14187  * invokes this method, it restores the device's PCI config space state and
14188  * fully reinitializes the device and brings it online. Note that because the
14189  * driver implements only the minimum PM requirements of a power-aware driver's
14190  * PM for suspend/resume -- all possible PM messages (SUSPEND, HIBERNATE,
14191  * FREEZE) passed to the suspend() method call are treated as SUSPEND, and the
14192  * driver fully reinitializes its device during the resume() method call --
14193  * the device will be set to PCI_D0 directly in PCI config space before
14194  * restoring the state.
14195  *
14196  * Return code
14197  * 	0 - driver resumed the device
14198  * 	Error otherwise
14199  **/
14200 static int __maybe_unused
14201 lpfc_pci_resume_one_s3(struct device *dev_d)
14202 {
14203 	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14204 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14205 	uint32_t intr_mode;
14206 	int error;
14207 
14208 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14209 			"0452 PCI device Power Management resume.\n");
14210 
14211 	/* Startup the kernel thread for this host adapter. */
14212 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
14213 					"lpfc_worker_%d", phba->brd_no);
14214 	if (IS_ERR(phba->worker_thread)) {
14215 		error = PTR_ERR(phba->worker_thread);
14216 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14217 				"0434 PM resume failed to start worker "
14218 				"thread: error=x%x.\n", error);
14219 		return error;
14220 	}
14221 
14222 	/* Init cpu_map array */
14223 	lpfc_cpu_map_array_init(phba);
14224 	/* Init hba_eq_hdl array */
14225 	lpfc_hba_eq_hdl_array_init(phba);
14226 	/* Configure and enable interrupt */
14227 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14228 	if (intr_mode == LPFC_INTR_ERROR) {
14229 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14230 				"0430 PM resume Failed to enable interrupt\n");
14231 		return -EIO;
14232 	} else
14233 		phba->intr_mode = intr_mode;
14234 
14235 	/* Restart HBA and bring it online */
14236 	lpfc_sli_brdrestart(phba);
14237 	lpfc_online(phba);
14238 
14239 	/* Log the current active interrupt mode */
14240 	lpfc_log_intr_mode(phba, phba->intr_mode);
14241 
14242 	return 0;
14243 }
14244 
14245 /**
14246  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14247  * @phba: pointer to lpfc hba data structure.
14248  *
14249  * This routine is called to prepare the SLI3 device for PCI slot recover. It
14250  * aborts all the outstanding SCSI I/Os to the pci device.
14251  **/
14252 static void
14253 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14254 {
14255 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14256 			"2723 PCI channel I/O abort preparing for recovery\n");
14257 
14258 	/*
14259 	 * There may be errored I/Os through the HBA; abort all I/Os on the
14260 	 * txcmplq and let the SCSI mid-layer retry them to recover.
14261 	 */
14262 	lpfc_sli_abort_fcp_rings(phba);
14263 }
14264 
14265 /**
14266  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14267  * @phba: pointer to lpfc hba data structure.
14268  *
14269  * This routine is called to prepare the SLI3 device for PCI slot reset. It
14270  * disables the device interrupt and pci device, and aborts the internal FCP
14271  * pending I/Os.
14272  **/
14273 static void
14274 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14275 {
14276 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14277 			"2710 PCI channel disable preparing for reset\n");
14278 
14279 	/* Block any management I/Os to the device */
14280 	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14281 
14282 	/* Block all SCSI devices' I/Os on the host */
14283 	lpfc_scsi_dev_block(phba);
14284 
14285 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
14286 	lpfc_sli_flush_io_rings(phba);
14287 
14288 	/* stop all timers */
14289 	lpfc_stop_hba_timers(phba);
14290 
14291 	/* Disable interrupt and pci device */
14292 	lpfc_sli_disable_intr(phba);
14293 	pci_disable_device(phba->pcidev);
14294 }
14295 
14296 /**
14297  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14298  * @phba: pointer to lpfc hba data structure.
14299  *
14300  * This routine is called to prepare the SLI3 device for PCI slot permanently
14301  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14302  * pending I/Os.
14303  **/
14304 static void
14305 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14306 {
14307 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14308 			"2711 PCI channel permanent disable for failure\n");
14309 	/* Block all SCSI devices' I/Os on the host */
14310 	lpfc_scsi_dev_block(phba);
14311 	lpfc_sli4_prep_dev_for_reset(phba);
14312 
14313 	/* stop all timers */
14314 	lpfc_stop_hba_timers(phba);
14315 
14316 	/* Clean up all driver's outstanding SCSI I/Os */
14317 	lpfc_sli_flush_io_rings(phba);
14318 }
14319 
14320 /**
14321  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14322  * @pdev: pointer to PCI device.
14323  * @state: the current PCI connection state.
14324  *
14325  * This routine is called from the PCI subsystem for I/O error handling to
14326  * device with SLI-3 interface spec. This function is called by the PCI
14327  * subsystem after a PCI bus error affecting this device has been detected.
14328  * When this function is invoked, it will need to stop all the I/Os and
14329  * interrupt(s) to the device. Once that is done, it will return
14330  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14331  * as desired.
14332  *
14333  * Return codes
14334  *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered without reset
14335  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14336  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14337  **/
14338 static pci_ers_result_t
14339 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14340 {
14341 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
14342 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14343 
14344 	switch (state) {
14345 	case pci_channel_io_normal:
14346 		/* Non-fatal error, prepare for recovery */
14347 		lpfc_sli_prep_dev_for_recover(phba);
14348 		return PCI_ERS_RESULT_CAN_RECOVER;
14349 	case pci_channel_io_frozen:
14350 		/* Fatal error, prepare for slot reset */
14351 		lpfc_sli_prep_dev_for_reset(phba);
14352 		return PCI_ERS_RESULT_NEED_RESET;
14353 	case pci_channel_io_perm_failure:
14354 		/* Permanent failure, prepare for device down */
14355 		lpfc_sli_prep_dev_for_perm_failure(phba);
14356 		return PCI_ERS_RESULT_DISCONNECT;
14357 	default:
14358 		/* Unknown state, prepare and request slot reset */
14359 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14360 				"0472 Unknown PCI error state: x%x\n", state);
14361 		lpfc_sli_prep_dev_for_reset(phba);
14362 		return PCI_ERS_RESULT_NEED_RESET;
14363 	}
14364 }
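/*
 * Editor's sketch (illustrative only, not compiled): the minimal shape of
 * the PCI AER protocol the handler above participates in. The PCI core
 * drives the sequence error_detected -> (optional) slot_reset -> resume,
 * keyed off the pci_ers_result_t each callback returns. All "example_"
 * names below are hypothetical placeholders, not lpfc symbols.
 */
#if 0
static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	/* Stop I/O and interrupts, then tell the core how to proceed. */
	return (state == pci_channel_io_perm_failure) ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	/* Re-enable the device after the bus/slot reset. */
	if (pci_enable_device_mem(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void example_resume(struct pci_dev *pdev)
{
	/* Traffic may flow again; bring the adapter back online here. */
}

static const struct pci_error_handlers example_err_handler = {
	.error_detected	= example_error_detected,
	.slot_reset	= example_slot_reset,
	.resume		= example_resume,
};
#endif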
14365 
14366 /**
14367  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14368  * @pdev: pointer to PCI device.
14369  *
14370  * This routine is called from the PCI subsystem for error handling on a
14371  * device with the SLI-3 interface spec. It is called after the PCI bus has
14372  * been reset to restart the PCI card from scratch, as if from a cold-boot.
14373  * During the PCI subsystem error recovery, after driver returns
14374  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14375  * recovery and then call this routine before calling the .resume method
14376  * to recover the device. This function will initialize the HBA device,
14377  * enable the interrupt, but it will just put the HBA to offline state
14378  * without passing any I/O traffic.
14379  *
14380  * Return codes
14381  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
14382  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14383  */
14384 static pci_ers_result_t
14385 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14386 {
14387 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
14388 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14389 	struct lpfc_sli *psli = &phba->sli;
14390 	uint32_t intr_mode;
14391 
14392 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14393 	if (pci_enable_device_mem(pdev)) {
14394 		printk(KERN_ERR "lpfc: Cannot re-enable "
14395 			"PCI device after reset.\n");
14396 		return PCI_ERS_RESULT_DISCONNECT;
14397 	}
14398 
14399 	pci_restore_state(pdev);
14400 
14401 	if (pdev->is_busmaster)
14402 		pci_set_master(pdev);
14403 
14404 	spin_lock_irq(&phba->hbalock);
14405 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14406 	spin_unlock_irq(&phba->hbalock);
14407 
14408 	/* Configure and enable interrupt */
14409 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14410 	if (intr_mode == LPFC_INTR_ERROR) {
14411 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14412 				"0427 Cannot re-enable interrupt after "
14413 				"slot reset.\n");
14414 		return PCI_ERS_RESULT_DISCONNECT;
14415 	} else
14416 		phba->intr_mode = intr_mode;
14417 
14418 	/* Take device offline, it will perform cleanup */
14419 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14420 	lpfc_offline(phba);
14421 	lpfc_sli_brdrestart(phba);
14422 
14423 	/* Log the current active interrupt mode */
14424 	lpfc_log_intr_mode(phba, phba->intr_mode);
14425 
14426 	return PCI_ERS_RESULT_RECOVERED;
14427 }
14428 
14429 /**
14430  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14431  * @pdev: pointer to PCI device
14432  *
14433  * This routine is called from the PCI subsystem for error handling on a
14434  * device with the SLI-3 interface spec, when kernel error recovery tells
14435  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14436  * error recovery. After this call, traffic can start to flow from this device
14437  * again.
14438  */
14439 static void
14440 lpfc_io_resume_s3(struct pci_dev *pdev)
14441 {
14442 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
14443 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14444 
14445 	/* Bring device online, it will be no-op for non-fatal error resume */
14446 	lpfc_online(phba);
14447 }
14448 
14449 /**
14450  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14451  * @phba: pointer to lpfc hba data structure.
14452  *
14453  * returns the number of ELS/CT IOCBs to reserve
14454  **/
14455 int
14456 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14457 {
14458 	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14459 
14460 	if (phba->sli_rev == LPFC_SLI_REV4) {
14461 		if (max_xri <= 100)
14462 			return 10;
14463 		else if (max_xri <= 256)
14464 			return 25;
14465 		else if (max_xri <= 512)
14466 			return 50;
14467 		else if (max_xri <= 1024)
14468 			return 100;
14469 		else if (max_xri <= 1536)
14470 			return 150;
14471 		else if (max_xri <= 2048)
14472 			return 200;
14473 		else
14474 			return 250;
14475 	} else
14476 		return 0;
14477 }
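/*
 * Editor's note, with a worked example of the tiering above (illustrative):
 * an adapter reporting max_xri = 1024 lands in the "<= 1024" tier and
 * reserves 100 ELS/CT IOCBs; max_xri = 4096 exceeds every tier and falls
 * through to the 250 default. Non-SLI4 (SLI-3) adapters reserve none here.
 */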
14478 
14479 /**
14480  * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14481  * @phba: pointer to lpfc hba data structure.
14482  *
14483  * returns the number of ELS/CT + NVMET IOCBs to reserve
14484  **/
14485 int
14486 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14487 {
14488 	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14489 
14490 	if (phba->nvmet_support)
14491 		max_xri += LPFC_NVMET_BUF_POST;
14492 	return max_xri;
14493 }
14494 
14495 
14496 static int
14497 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14498 	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14499 	const struct firmware *fw)
14500 {
14501 	int rc;
14502 	u8 sli_family;
14503 
14504 	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14505 
14506 	/* Refer to ASIC_ID register case */
14507 	if (sli_family == LPFC_SLI_INTF_ASIC_ID)
14508 		sli_family = bf_get(lpfc_asic_id_gen_num,
14509 				    &phba->sli4_hba.asic_id);
14510 
14511 	/* Three cases:  (1) FW was not supported on the detected adapter.
14512 	 * (2) FW update has been locked out administratively.
14513 	 * (3) Some other error during FW update.
14514 	 * In each case, an unmaskable message is written to the console
14515 	 * for admin diagnosis.
14516 	 */
14517 	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14518 	    (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14519 	     magic_number != MAGIC_NUMBER_G6) ||
14520 	    (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14521 	     magic_number != MAGIC_NUMBER_G7) ||
14522 	    (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14523 	     magic_number != MAGIC_NUMBER_G7P) ||
14524 	    (sli_family == LPFC_SLI_INTF_FAMILY_G8 &&
14525 	     magic_number != MAGIC_NUMBER_G8)) {
14526 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14527 				"3030 This firmware version is not supported on"
14528 				" this HBA model. Device:%x Magic:%x Type:%x "
14529 				"ID:%x Size %d %zd\n",
14530 				phba->pcidev->device, magic_number, ftype, fid,
14531 				fsize, fw->size);
14532 		rc = -EINVAL;
14533 	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14534 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14535 				"3021 Firmware downloads have been prohibited "
14536 				"by a system configuration setting on "
14537 				"Device:%x Magic:%x Type:%x ID:%x Size %d "
14538 				"%zd\n",
14539 				phba->pcidev->device, magic_number, ftype, fid,
14540 				fsize, fw->size);
14541 		rc = -EACCES;
14542 	} else {
14543 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14544 				"3022 FW Download failed. Add Status x%x "
14545 				"Device:%x Magic:%x Type:%x ID:%x Size %d "
14546 				"%zd\n",
14547 				offset, phba->pcidev->device, magic_number,
14548 				ftype, fid, fsize, fw->size);
14549 		rc = -EIO;
14550 	}
14551 	return rc;
14552 }
14553 
14554 /**
14555  * lpfc_write_firmware - attempt to write a firmware image to the port
14556  * @fw: pointer to firmware image returned from request_firmware.
14557  * @context: driver context passed to request_firmware; for lpfc this is
14558  *           the lpfc hba data structure pointer.
14558  *
14559  **/
14560 static void
14561 lpfc_write_firmware(const struct firmware *fw, void *context)
14562 {
14563 	struct lpfc_hba *phba = (struct lpfc_hba *)context;
14564 	char fwrev[FW_REV_STR_SIZE];
14565 	struct lpfc_grp_hdr *image;
14566 	struct list_head dma_buffer_list;
14567 	int i, rc = 0;
14568 	struct lpfc_dmabuf *dmabuf, *next;
14569 	uint32_t offset = 0, temp_offset = 0;
14570 	uint32_t magic_number, ftype, fid, fsize;
14571 
14572 	/* It can be null in no-wait mode, sanity check */
14573 	if (!fw) {
14574 		rc = -ENXIO;
14575 		goto out;
14576 	}
14577 	image = (struct lpfc_grp_hdr *)fw->data;
14578 
14579 	magic_number = be32_to_cpu(image->magic_number);
14580 	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14581 	fid = bf_get_be32(lpfc_grp_hdr_id, image);
14582 	fsize = be32_to_cpu(image->size);
14583 
14584 	INIT_LIST_HEAD(&dma_buffer_list);
14585 	lpfc_decode_firmware_rev(phba, fwrev, 1);
14586 	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14587 		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14588 			     "3023 Updating Firmware, Current Version:%s "
14589 			     "New Version:%s\n",
14590 			     fwrev, image->revision);
14591 		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14592 			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14593 			if (!dmabuf) {
14594 				rc = -ENOMEM;
14595 				goto release_out;
14596 			}
14597 			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14598 							  SLI4_PAGE_SIZE,
14599 							  &dmabuf->phys,
14600 							  GFP_KERNEL);
14601 			if (!dmabuf->virt) {
14602 				kfree(dmabuf);
14603 				rc = -ENOMEM;
14604 				goto release_out;
14605 			}
14606 			list_add_tail(&dmabuf->list, &dma_buffer_list);
14607 		}
14608 		while (offset < fw->size) {
14609 			temp_offset = offset;
14610 			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14611 				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14612 					memcpy(dmabuf->virt,
14613 					       fw->data + temp_offset,
14614 					       fw->size - temp_offset);
14615 					temp_offset = fw->size;
14616 					break;
14617 				}
14618 				memcpy(dmabuf->virt, fw->data + temp_offset,
14619 				       SLI4_PAGE_SIZE);
14620 				temp_offset += SLI4_PAGE_SIZE;
14621 			}
14622 			rc = lpfc_wr_object(phba, &dma_buffer_list,
14623 				    (fw->size - offset), &offset);
14624 			if (rc) {
14625 				rc = lpfc_log_write_firmware_error(phba, offset,
14626 								   magic_number,
14627 								   ftype,
14628 								   fid,
14629 								   fsize,
14630 								   fw);
14631 				goto release_out;
14632 			}
14633 		}
14634 		rc = offset;
14635 	} else
14636 		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14637 			     "3029 Skipped Firmware update, Current "
14638 			     "Version:%s New Version:%s\n",
14639 			     fwrev, image->revision);
14640 
14641 release_out:
14642 	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14643 		list_del(&dmabuf->list);
14644 		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14645 				  dmabuf->virt, dmabuf->phys);
14646 		kfree(dmabuf);
14647 	}
14648 	release_firmware(fw);
14649 out:
14650 	if (rc < 0)
14651 		lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI,
14652 			     "3062 Firmware update error, status %d.\n", rc);
14653 	else
14654 		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14655 			     "3024 Firmware update success: size %d.\n", rc);
14656 }
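/*
 * Editor's sketch (illustrative only, not compiled) of the chunking pattern
 * in lpfc_write_firmware() above: the image is streamed through a small,
 * fixed set of preallocated DMA pages, so at most LPFC_MBX_WR_CONFIG_MAX_BDE
 * pages are mapped at any time regardless of image size. "example_" names
 * are hypothetical.
 */
#if 0
static void example_copy_in_pages(const u8 *img, size_t img_len,
				  void *page_bufs[], int npages,
				  size_t page_sz)
{
	size_t off = 0;
	int i;

	while (off < img_len) {
		/* Fill up to npages buffers from the current offset. */
		for (i = 0; i < npages && off < img_len; i++) {
			size_t chunk = min(page_sz, img_len - off);

			memcpy(page_bufs[i], img + off, chunk);
			off += chunk;
		}
		/* Hand the filled pages to the hardware here, then loop. */
	}
}
#endif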
14657 
14658 /**
14659  * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14660  * @phba: pointer to lpfc hba data structure.
14661  * @fw_upgrade: which firmware to update.
14662  *
14663  * This routine is called to perform a Linux generic firmware upgrade on a
14664  * device that supports this feature.
14665  **/
14666 int
14667 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14668 {
14669 	char file_name[ELX_FW_NAME_SIZE] = {0};
14670 	int ret;
14671 	const struct firmware *fw;
14672 
14673 	/* Only supported on SLI4 interface type 2 for now */
14674 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14675 	    LPFC_SLI_INTF_IF_TYPE_2)
14676 		return -EPERM;
14677 
14678 	scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
14679 
14680 	if (fw_upgrade == INT_FW_UPGRADE) {
14681 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14682 					file_name, &phba->pcidev->dev,
14683 					GFP_KERNEL, (void *)phba,
14684 					lpfc_write_firmware);
14685 	} else if (fw_upgrade == RUN_FW_UPGRADE) {
14686 		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14687 		if (!ret)
14688 			lpfc_write_firmware(fw, (void *)phba);
14689 	} else {
14690 		ret = -EINVAL;
14691 	}
14692 
14693 	return ret;
14694 }
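/*
 * Editor's note: INT_FW_UPGRADE uses the asynchronous
 * request_firmware_nowait() so a boot-time probe never blocks waiting on
 * userspace, which is why the completion callback (lpfc_write_firmware)
 * must tolerate a NULL firmware pointer. A minimal sketch of the same
 * pattern, with hypothetical "example_" names:
 */
#if 0
static void example_fw_cont(const struct firmware *fw, void *context)
{
	if (!fw)	/* no-wait requests may complete without an image */
		return;
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
}

static int example_fw_request(struct device *dev, void *ctx)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
				       "example.grp", dev, GFP_KERNEL,
				       ctx, example_fw_cont);
}
#endif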
14695 
14696 /**
14697  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14698  * @pdev: pointer to PCI device
14699  * @pid: pointer to PCI device identifier
14700  *
14701  * This routine is called from the kernel's PCI subsystem for a device with
14702  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface
14703  * spec is presented on the PCI bus, the kernel PCI subsystem matches the
14704  * PCI device-specific information of the device against the driver's ID
14705  * table to see whether the driver can support this kind of device. If the
14706  * match is successful, the driver core invokes this routine. If this
14707  * routine determines it can claim the HBA, it does all the initialization
14708  * that it needs to handle the HBA properly.
14709  *
14710  * Return code
14711  * 	0 - driver can claim the device
14712  * 	negative value - driver can not claim the device
14713  **/
14714 static int
14715 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14716 {
14717 	struct lpfc_hba   *phba;
14718 	struct lpfc_vport *vport = NULL;
14719 	struct Scsi_Host  *shost = NULL;
14720 	int error;
14721 	uint32_t cfg_mode, intr_mode;
14722 
14723 	/* Allocate memory for HBA structure */
14724 	phba = lpfc_hba_alloc(pdev);
14725 	if (!phba)
14726 		return -ENOMEM;
14727 
14728 	INIT_LIST_HEAD(&phba->poll_list);
14729 
14730 	/* Perform generic PCI device enabling operation */
14731 	error = lpfc_enable_pci_dev(phba);
14732 	if (error)
14733 		goto out_free_phba;
14734 
14735 	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
14736 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14737 	if (error)
14738 		goto out_disable_pci_dev;
14739 
14740 	/* Set up SLI-4 specific device PCI memory space */
14741 	error = lpfc_sli4_pci_mem_setup(phba);
14742 	if (error) {
14743 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14744 				"1410 Failed to set up pci memory space.\n");
14745 		goto out_disable_pci_dev;
14746 	}
14747 
14748 	/* Set up SLI-4 Specific device driver resources */
14749 	error = lpfc_sli4_driver_resource_setup(phba);
14750 	if (error) {
14751 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14752 				"1412 Failed to set up driver resource.\n");
14753 		goto out_unset_pci_mem_s4;
14754 	}
14755 
14756 	spin_lock_init(&phba->rrq_list_lock);
14757 	INIT_LIST_HEAD(&phba->active_rrq_list);
14758 	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14759 
14760 	/* Set up common device driver resources */
14761 	error = lpfc_setup_driver_resource_phase2(phba);
14762 	if (error) {
14763 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14764 				"1414 Failed to set up driver resource.\n");
14765 		goto out_unset_driver_resource_s4;
14766 	}
14767 
14768 	/* Get the default values for Model Name and Description */
14769 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14770 
14771 	/* Now, trying to enable interrupt and bring up the device */
14772 	cfg_mode = phba->cfg_use_msi;
14773 
14774 	/* Put device to a known state before enabling interrupt */
14775 	phba->pport = NULL;
14776 	lpfc_stop_port(phba);
14777 
14778 	/* Init cpu_map array */
14779 	lpfc_cpu_map_array_init(phba);
14780 
14781 	/* Init hba_eq_hdl array */
14782 	lpfc_hba_eq_hdl_array_init(phba);
14783 
14784 	/* Configure and enable interrupt */
14785 	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14786 	if (intr_mode == LPFC_INTR_ERROR) {
14787 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14788 				"0426 Failed to enable interrupt.\n");
14789 		error = -ENODEV;
14790 		goto out_unset_driver_resource;
14791 	}
14792 	/* Default to single EQ for non-MSI-X */
14793 	if (phba->intr_type != MSIX) {
14794 		phba->cfg_irq_chann = 1;
14795 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14796 			if (phba->nvmet_support)
14797 				phba->cfg_nvmet_mrq = 1;
14798 		}
14799 	}
14800 	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14801 
14802 	/* Create SCSI host to the physical port */
14803 	error = lpfc_create_shost(phba);
14804 	if (error) {
14805 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14806 				"1415 Failed to create scsi host.\n");
14807 		goto out_disable_intr;
14808 	}
14809 	vport = phba->pport;
14810 	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14811 
14812 	/* Configure sysfs attributes */
14813 	error = lpfc_alloc_sysfs_attr(vport);
14814 	if (error) {
14815 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14816 				"1416 Failed to allocate sysfs attr\n");
14817 		goto out_destroy_shost;
14818 	}
14819 
14820 	/* Set up SLI-4 HBA */
14821 	if (lpfc_sli4_hba_setup(phba)) {
14822 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14823 				"1421 Failed to set up hba\n");
14824 		error = -ENODEV;
14825 		goto out_free_sysfs_attr;
14826 	}
14827 
14828 	/* Log the current active interrupt mode */
14829 	phba->intr_mode = intr_mode;
14830 	lpfc_log_intr_mode(phba, intr_mode);
14831 
14832 	/* Perform post initialization setup */
14833 	lpfc_post_init_setup(phba);
14834 
14835 	/* FW NVME support detected earlier in the driver load corrects the
14836 	 * FC4 type, making a separate check for nvme_support unnecessary.
14837 	 */
14838 	if (phba->nvmet_support == 0) {
14839 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14840 			/* Create NVME binding with nvme_fc_transport. This
14841 			 * ensures the vport is initialized.  If localport
14842 			 * creation fails, the driver is not unloaded; keeping
14843 			 * it loaded aids diagnosis of field issues.
14844 			 */
14845 			error = lpfc_nvme_create_localport(vport);
14846 			if (error) {
14847 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14848 						"6004 NVME registration "
14849 						"failed, error x%x\n",
14850 						error);
14851 			}
14852 		}
14853 	}
14854 
14855 	/* check for firmware upgrade or downgrade */
14856 	if (phba->cfg_request_firmware_upgrade)
14857 		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14858 
14859 	/* Check if there are static vports to be created. */
14860 	lpfc_create_static_vport(phba);
14861 
14862 	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14863 	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14864 
14865 	return 0;
14866 
14867 out_free_sysfs_attr:
14868 	lpfc_free_sysfs_attr(vport);
14869 out_destroy_shost:
14870 	lpfc_destroy_shost(phba);
14871 out_disable_intr:
14872 	lpfc_sli4_disable_intr(phba);
14873 out_unset_driver_resource:
14874 	lpfc_unset_driver_resource_phase2(phba);
14875 out_unset_driver_resource_s4:
14876 	lpfc_sli4_driver_resource_unset(phba);
14877 out_unset_pci_mem_s4:
14878 	lpfc_sli4_pci_mem_unset(phba);
14879 out_disable_pci_dev:
14880 	lpfc_disable_pci_dev(phba);
14881 	if (shost)
14882 		scsi_host_put(shost);
14883 out_free_phba:
14884 	lpfc_hba_free(phba);
14885 	return error;
14886 }
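/*
 * Editor's sketch (illustrative only, not compiled) of the goto-unwind
 * error handling used throughout the probe path above: each failure jumps
 * to the label that tears down exactly what has been set up so far, in
 * reverse order of setup. "example_" names are hypothetical.
 */
#if 0
static int example_probe(void)
{
	int err;

	err = example_setup_a();
	if (err)
		return err;
	err = example_setup_b();
	if (err)
		goto undo_a;
	err = example_setup_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	example_teardown_b();
undo_a:
	example_teardown_a();
	return err;
}
#endif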
14887 
14888 /**
14889  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14890  * @pdev: pointer to PCI device
14891  *
14892  * This routine is called from the kernel's PCI subsystem for a device with
14893  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface
14894  * spec is removed from the PCI bus, it performs all the necessary cleanup
14895  * for the HBA device to be removed from the PCI subsystem properly.
14896  **/
14897 static void
14898 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14899 {
14900 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
14901 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14902 	struct lpfc_vport **vports;
14903 	struct lpfc_hba *phba = vport->phba;
14904 	int i;
14905 
14906 	/* Mark the device unloading flag */
14907 	set_bit(FC_UNLOADING, &vport->load_flag);
14908 	if (phba->cgn_i)
14909 		lpfc_unreg_congestion_buf(phba);
14910 
14911 	lpfc_free_sysfs_attr(vport);
14912 
14913 	/* Release all the vports against this physical port */
14914 	vports = lpfc_create_vport_work_array(phba);
14915 	if (vports != NULL)
14916 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14917 			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14918 				continue;
14919 			fc_vport_terminate(vports[i]->fc_vport);
14920 		}
14921 	lpfc_destroy_vport_work_array(phba, vports);
14922 
14923 	/* Remove FC host with the physical port */
14924 	fc_remove_host(shost);
14925 	scsi_remove_host(shost);
14926 
14927 	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
14928 	 * localports are destroyed after to cleanup all transport memory.
14929 	 */
14930 	lpfc_cleanup(vport);
14931 	lpfc_nvmet_destroy_targetport(phba);
14932 	lpfc_nvme_destroy_localport(vport);
14933 
14934 	/* De-allocate multi-XRI pools */
14935 	if (phba->cfg_xri_rebalancing)
14936 		lpfc_destroy_multixri_pools(phba);
14937 
14938 	/*
14939 	 * Bring down the SLI Layer. This step disables all interrupts,
14940 	 * clears the rings, discards all mailbox commands, and resets
14941 	 * the HBA FCoE function.
14942 	 */
14943 	lpfc_debugfs_terminate(vport);
14944 
14945 	lpfc_stop_hba_timers(phba);
14946 	spin_lock_irq(&phba->port_list_lock);
14947 	list_del_init(&vport->listentry);
14948 	spin_unlock_irq(&phba->port_list_lock);
14949 
14950 	/* Perform scsi free before driver resource_unset since scsi
14951 	 * buffers are released to their corresponding pools here.
14952 	 */
14953 	lpfc_io_free(phba);
14954 	lpfc_free_iocb_list(phba);
14955 	lpfc_sli4_hba_unset(phba);
14956 
14957 	lpfc_unset_driver_resource_phase2(phba);
14958 	lpfc_sli4_driver_resource_unset(phba);
14959 
14960 	/* Unmap adapter Control and Doorbell registers */
14961 	lpfc_sli4_pci_mem_unset(phba);
14962 
14963 	/* Release PCI resources and disable device's PCI function */
14964 	scsi_host_put(shost);
14965 	lpfc_disable_pci_dev(phba);
14966 
14967 	/* Finally, free the driver's device data structure */
14968 	lpfc_hba_free(phba);
14969 
14970 	return;
14971 }
14972 
14973 /**
14974  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
14975  * @dev_d: pointer to device
14976  *
14977  * This routine is called from the kernel's PCI subsystem to support system
14978  * Power Management (PM) on a device with the SLI-4 interface spec. When PM
14979  * invokes this method, it quiesces the device by stopping the driver's
14980  * worker thread for the device, turning off the device's interrupt and DMA,
14981  * and bringing the device offline. Note that the driver implements only the
14982  * minimum PM requirements for a power-aware driver's suspend/resume support:
14983  * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
14984  * method call are treated as SUSPEND, and the driver fully reinitializes
14985  * its device during the resume() method call. Accordingly, the driver sets
14986  * the device to the PCI_D3hot state in PCI config space instead of setting
14987  * it according to the specific PM message delivered by the PM core.
14988  *
14989  * Return code
14990  * 	0 - driver suspended the device
14991  * 	Error otherwise
14992  **/
14993 static int __maybe_unused
14994 lpfc_pci_suspend_one_s4(struct device *dev_d)
14995 {
14996 	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14997 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14998 
14999 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15000 			"2843 PCI device Power Management suspend.\n");
15001 
15002 	/* Bring down the device */
15003 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15004 	lpfc_offline(phba);
15005 	kthread_stop(phba->worker_thread);
15006 
15007 	/* Disable interrupt from device */
15008 	lpfc_sli4_disable_intr(phba);
15009 	lpfc_sli4_queue_destroy(phba);
15010 
15011 	return 0;
15012 }
15013 
15014 /**
15015  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
15016  * @dev_d: pointer to device
15017  *
15018  * This routine is called from the kernel's PCI subsystem to support system
15019  * Power Management (PM) on a device with the SLI-4 interface spec. When PM
15020  * invokes this method, it restores the device's PCI config space state,
15021  * fully reinitializes the device, and brings it online. Note that the
15022  * driver implements only the minimum PM requirements for a power-aware
15023  * driver's suspend/resume support: all possible PM messages (SUSPEND,
15024  * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND,
15025  * and the driver fully reinitializes its device during the resume() method
15026  * call. Accordingly, the device is set to PCI_D0 directly in PCI config
15027  * space before restoring the state.
15028  *
15029  * Return code
15030  * 	0 - driver resumed the device
15031  * 	Error otherwise
15032  **/
15033 static int __maybe_unused
15034 lpfc_pci_resume_one_s4(struct device *dev_d)
15035 {
15036 	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15037 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15038 	uint32_t intr_mode;
15039 	int error;
15040 
15041 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15042 			"0292 PCI device Power Management resume.\n");
15043 
15044 	 /* Startup the kernel thread for this host adapter. */
15045 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
15046 					"lpfc_worker_%d", phba->brd_no);
15047 	if (IS_ERR(phba->worker_thread)) {
15048 		error = PTR_ERR(phba->worker_thread);
15049 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15050 				"0293 PM resume failed to start worker "
15051 				"thread: error=x%x.\n", error);
15052 		return error;
15053 	}
15054 
15055 	/* Configure and enable interrupt */
15056 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15057 	if (intr_mode == LPFC_INTR_ERROR) {
15058 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15059 				"0294 PM resume Failed to enable interrupt\n");
15060 		return -EIO;
15061 	} else
15062 		phba->intr_mode = intr_mode;
15063 
15064 	/* Restart HBA and bring it online */
15065 	lpfc_sli_brdrestart(phba);
15066 	lpfc_online(phba);
15067 
15068 	/* Log the current active interrupt mode */
15069 	lpfc_log_intr_mode(phba, phba->intr_mode);
15070 
15071 	return 0;
15072 }
15073 
15074 /**
15075  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
15076  * @phba: pointer to lpfc hba data structure.
15077  *
15078  * This routine is called to prepare the SLI4 device for PCI slot recover. It
15079  * aborts all the outstanding SCSI I/Os to the pci device.
15080  **/
15081 static void
15082 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
15083 {
15084 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15085 			"2828 PCI channel I/O abort preparing for recovery\n");
15086 	/*
15087 	 * There may be errored I/Os through the HBA; abort all I/Os on the
15088 	 * txcmplq and let the SCSI mid-layer retry them to recover.
15089 	 */
15090 	lpfc_sli_abort_fcp_rings(phba);
15091 }
15092 
15093 /**
15094  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
15095  * @phba: pointer to lpfc hba data structure.
15096  *
15097  * This routine is called to prepare the SLI4 device for PCI slot reset. It
15098  * disables the device interrupt and pci device, and aborts the internal FCP
15099  * pending I/Os.
15100  **/
15101 static void
15102 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
15103 {
15104 	int offline =  pci_channel_offline(phba->pcidev);
15105 
15106 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15107 			"2826 PCI channel disable preparing for reset offline"
15108 			" %d\n", offline);
15109 
15110 	/* Block any management I/Os to the device */
15111 	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
15112 
15113 
15114 	/* HBA_PCI_ERR was set in io_error_detect */
15115 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
15116 	/* Flush all driver's outstanding I/Os as we are to reset */
15117 	lpfc_sli_flush_io_rings(phba);
15118 	lpfc_offline(phba);
15119 
15120 	/* stop all timers */
15121 	lpfc_stop_hba_timers(phba);
15122 
15123 	lpfc_sli4_queue_destroy(phba);
15124 	/* Disable interrupt and pci device */
15125 	lpfc_sli4_disable_intr(phba);
15126 	pci_disable_device(phba->pcidev);
15127 }
15128 
15129 /**
15130  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15131  * @phba: pointer to lpfc hba data structure.
15132  *
15133  * This routine is called to prepare the SLI4 device for the PCI slot to be
15134  * permanently disabled. It blocks the SCSI transport layer traffic and
15135  * flushes the FCP pending I/Os.
15136  **/
15137 static void
15138 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15139 {
15140 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15141 			"2827 PCI channel permanent disable for failure\n");
15142 
15143 	/* Block all SCSI devices' I/Os on the host */
15144 	lpfc_scsi_dev_block(phba);
15145 
15146 	/* stop all timers */
15147 	lpfc_stop_hba_timers(phba);
15148 
15149 	/* Clean up all driver's outstanding I/Os */
15150 	lpfc_sli_flush_io_rings(phba);
15151 }
15152 
15153 /**
15154  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15155  * @pdev: pointer to PCI device.
15156  * @state: the current PCI connection state.
15157  *
15158  * This routine is called from the PCI subsystem for error handling on a
15159  * device with the SLI-4 interface spec. The PCI subsystem calls it after a
15160  * PCI bus error affecting this device has been detected. When invoked, the
15161  * function must stop all I/O and interrupt(s) to the device. Once that is
15162  * done, it returns PCI_ERS_RESULT_NEED_RESET so that the PCI subsystem can
15163  * perform proper recovery as desired.
15164  *
15165  * Return codes
15166  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15167  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15168  **/
15169 static pci_ers_result_t
15170 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15171 {
15172 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15173 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15174 	bool hba_pci_err;
15175 
15176 	switch (state) {
15177 	case pci_channel_io_normal:
15178 		/* Non-fatal error, prepare for recovery */
15179 		lpfc_sli4_prep_dev_for_recover(phba);
15180 		return PCI_ERS_RESULT_CAN_RECOVER;
15181 	case pci_channel_io_frozen:
15182 		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15183 		/* Fatal error, prepare for slot reset */
15184 		if (!hba_pci_err)
15185 			lpfc_sli4_prep_dev_for_reset(phba);
15186 		else
15187 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15188 					"2832  Already handling PCI error "
15189 					"state: x%x\n", state);
15190 		return PCI_ERS_RESULT_NEED_RESET;
15191 	case pci_channel_io_perm_failure:
15192 		set_bit(HBA_PCI_ERR, &phba->bit_flags);
15193 		/* Permanent failure, prepare for device down */
15194 		lpfc_sli4_prep_dev_for_perm_failure(phba);
15195 		return PCI_ERS_RESULT_DISCONNECT;
15196 	default:
15197 		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15198 		if (!hba_pci_err)
15199 			lpfc_sli4_prep_dev_for_reset(phba);
15200 		/* Unknown state, prepare and request slot reset; the prep
15201 		 * above is skipped only if another path already claimed
15202 		 * HBA_PCI_ERR, so it is not repeated here. */
15203 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15204 				"2825 Unknown PCI error state: x%x\n", state);
15204 		return PCI_ERS_RESULT_NEED_RESET;
15205 	}
15206 }
15207 
15208 /**
15209  * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
15210  * @pdev: pointer to PCI device.
15211  *
15212  * This routine is called from the PCI subsystem for error handling on a
15213  * device with the SLI-4 interface spec, after the PCI bus has been reset to
15214  * restart the PCI card from scratch, as if from a cold-boot. During the
15215  * PCI subsystem error recovery, after the driver returns
15216  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15217  * recovery and then call this routine before calling the .resume method to
15218  * recover the device. This function will initialize the HBA device, enable
15219  * the interrupt, but it will just put the HBA to offline state without
15220  * passing any I/O traffic.
15221  *
15222  * Return codes
15223  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
15224  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15225  */
15226 static pci_ers_result_t
15227 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15228 {
15229 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15230 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15231 	struct lpfc_sli *psli = &phba->sli;
15232 	uint32_t intr_mode;
15233 	bool hba_pci_err;
15234 
15235 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15236 	if (pci_enable_device_mem(pdev)) {
15237 		printk(KERN_ERR "lpfc: Cannot re-enable "
15238 		       "PCI device after reset.\n");
15239 		return PCI_ERS_RESULT_DISCONNECT;
15240 	}
15241 
15242 	pci_restore_state(pdev);
15243 
15244 	hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15245 	if (!hba_pci_err)
15246 		dev_info(&pdev->dev,
15247 			 "hba_pci_err was not set, recovering slot reset.\n");
15248 	/*
15249 	 * pci_restore_state() clears the device's saved_state flag, so the
15250 	 * restored state must be saved again for any subsequent restore.
15251 	 */
15252 	pci_save_state(pdev);
15253 
15254 	if (pdev->is_busmaster)
15255 		pci_set_master(pdev);
15256 
15257 	spin_lock_irq(&phba->hbalock);
15258 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15259 	spin_unlock_irq(&phba->hbalock);
15260 
15261 	/* Init cpu_map array */
15262 	lpfc_cpu_map_array_init(phba);
15263 	/* Configure and enable interrupt */
15264 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15265 	if (intr_mode == LPFC_INTR_ERROR) {
15266 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15267 				"2824 Cannot re-enable interrupt after "
15268 				"slot reset.\n");
15269 		return PCI_ERS_RESULT_DISCONNECT;
15270 	} else
15271 		phba->intr_mode = intr_mode;
15272 	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15273 
15274 	/* Log the current active interrupt mode */
15275 	lpfc_log_intr_mode(phba, phba->intr_mode);
15276 
15277 	return PCI_ERS_RESULT_RECOVERED;
15278 }
15279 
15280 /**
15281  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15282  * @pdev: pointer to PCI device
15283  *
15284  * This routine is called from the PCI subsystem for error handling on a
15285  * device with the SLI-4 interface spec, when kernel error recovery tells
15286  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
15287  * error recovery. After this call, traffic can start to flow from this device
15288  * again.
15289  **/
15290 static void
15291 lpfc_io_resume_s4(struct pci_dev *pdev)
15292 {
15293 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15294 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15295 
15296 	/*
15297 	 * In case of slot reset, as function reset is performed through
15298 	 * mailbox command which needs DMA to be enabled, this operation
15299 	 * has to be moved to the io resume phase. Taking device offline
15300 	 * will perform the necessary cleanup.
15301 	 */
15302 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15303 		/* Perform device reset */
15304 		lpfc_sli_brdrestart(phba);
15305 		/* Bring the device back online */
15306 		lpfc_online(phba);
15307 	}
15308 }
15309 
15310 /**
15311  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15312  * @pdev: pointer to PCI device
15313  * @pid: pointer to PCI device identifier
15314  *
15315  * This routine is to be registered to the kernel's PCI subsystem. When an
15316  * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
15317  * looks at the PCI device-specific information of the device and driver to
15318  * see whether the driver can support this kind of device. If the match is
15319  * successful, the driver core invokes this routine. This routine dispatches
15320  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15321  * do all the initialization that it needs to do to handle the HBA device
15322  * properly.
15323  *
15324  * Return code
15325  * 	0 - driver can claim the device
15326  * 	negative value - driver can not claim the device
15327  **/
15328 static int
15329 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15330 {
15331 	int rc;
15332 	struct lpfc_sli_intf intf;
15333 
15334 	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15335 		return -ENODEV;
15336 
15337 	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15338 	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15339 		rc = lpfc_pci_probe_one_s4(pdev, pid);
15340 	else
15341 		rc = lpfc_pci_probe_one_s3(pdev, pid);
15342 
15343 	return rc;
15344 }
15345 
15346 /**
15347  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15348  * @pdev: pointer to PCI device
15349  *
15350  * This routine is to be registered to the kernel's PCI subsystem. When an
15351  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15352  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15353  * remove routine, which will perform all the necessary cleanup for the
15354  * device to be removed from the PCI subsystem properly.
15355  **/
15356 static void
15357 lpfc_pci_remove_one(struct pci_dev *pdev)
15358 {
15359 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15360 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15361 
15362 	switch (phba->pci_dev_grp) {
15363 	case LPFC_PCI_DEV_LP:
15364 		lpfc_pci_remove_one_s3(pdev);
15365 		break;
15366 	case LPFC_PCI_DEV_OC:
15367 		lpfc_pci_remove_one_s4(pdev);
15368 		break;
15369 	default:
15370 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15371 				"1424 Invalid PCI device group: 0x%x\n",
15372 				phba->pci_dev_grp);
15373 		break;
15374 	}
15375 	return;
15376 }
15377 
15378 /**
15379  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15380  * @dev: pointer to device
15381  *
15382  * This routine is to be registered to the kernel's PCI subsystem to support
15383  * system Power Management (PM). When PM invokes this method, it dispatches
15384  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15385  * suspend the device.
15386  *
15387  * Return code
15388  * 	0 - driver suspended the device
15389  * 	Error otherwise
15390  **/
15391 static int __maybe_unused
15392 lpfc_pci_suspend_one(struct device *dev)
15393 {
15394 	struct Scsi_Host *shost = dev_get_drvdata(dev);
15395 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15396 	int rc = -ENODEV;
15397 
15398 	switch (phba->pci_dev_grp) {
15399 	case LPFC_PCI_DEV_LP:
15400 		rc = lpfc_pci_suspend_one_s3(dev);
15401 		break;
15402 	case LPFC_PCI_DEV_OC:
15403 		rc = lpfc_pci_suspend_one_s4(dev);
15404 		break;
15405 	default:
15406 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15407 				"1425 Invalid PCI device group: 0x%x\n",
15408 				phba->pci_dev_grp);
15409 		break;
15410 	}
15411 	return rc;
15412 }
15413 
15414 /**
15415  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15416  * @dev: pointer to device
15417  *
15418  * This routine is to be registered to the kernel's PCI subsystem to support
15419  * system Power Management (PM). When PM invokes this method, it dispatches
15420  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15421  * resume the device.
15422  *
15423  * Return code
15424  * 	0 - driver resumed the device
15425  * 	Error otherwise
15426  **/
15427 static int __maybe_unused
15428 lpfc_pci_resume_one(struct device *dev)
15429 {
15430 	struct Scsi_Host *shost = dev_get_drvdata(dev);
15431 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15432 	int rc = -ENODEV;
15433 
15434 	switch (phba->pci_dev_grp) {
15435 	case LPFC_PCI_DEV_LP:
15436 		rc = lpfc_pci_resume_one_s3(dev);
15437 		break;
15438 	case LPFC_PCI_DEV_OC:
15439 		rc = lpfc_pci_resume_one_s4(dev);
15440 		break;
15441 	default:
15442 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15443 				"1426 Invalid PCI device group: 0x%x\n",
15444 				phba->pci_dev_grp);
15445 		break;
15446 	}
15447 	return rc;
15448 }
15449 
15450 /**
15451  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15452  * @pdev: pointer to PCI device.
15453  * @state: the current PCI connection state.
15454  *
15455  * This routine is registered to the PCI subsystem for error handling. This
15456  * function is called by the PCI subsystem after a PCI bus error affecting
15457  * this device has been detected. When this routine is invoked, it dispatches
15458  * the action to the proper SLI-3 or SLI-4 device error detected handling
15459  * routine, which will perform the proper error detected operation.
15460  *
15461  * Return codes
15462  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15463  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15464  **/
15465 static pci_ers_result_t
15466 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15467 {
15468 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15469 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15470 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15471 
15472 	if (phba->link_state == LPFC_HBA_ERROR &&
15473 	    test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
15474 		return PCI_ERS_RESULT_NEED_RESET;
15475 
15476 	switch (phba->pci_dev_grp) {
15477 	case LPFC_PCI_DEV_LP:
15478 		rc = lpfc_io_error_detected_s3(pdev, state);
15479 		break;
15480 	case LPFC_PCI_DEV_OC:
15481 		rc = lpfc_io_error_detected_s4(pdev, state);
15482 		break;
15483 	default:
15484 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15485 				"1427 Invalid PCI device group: 0x%x\n",
15486 				phba->pci_dev_grp);
15487 		break;
15488 	}
15489 	return rc;
15490 }
15491 
15492 /**
15493  * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
15494  * @pdev: pointer to PCI device.
15495  *
15496  * This routine is registered to the PCI subsystem for error handling. This
15497  * function is called after PCI bus has been reset to restart the PCI card
15498  * from scratch, as if from a cold-boot. When this routine is invoked, it
15499  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15500  * routine, which will perform the proper device reset.
15501  *
15502  * Return codes
15503  * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
15504  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15505  **/
15506 static pci_ers_result_t
15507 lpfc_io_slot_reset(struct pci_dev *pdev)
15508 {
15509 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15510 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15511 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15512 
15513 	switch (phba->pci_dev_grp) {
15514 	case LPFC_PCI_DEV_LP:
15515 		rc = lpfc_io_slot_reset_s3(pdev);
15516 		break;
15517 	case LPFC_PCI_DEV_OC:
15518 		rc = lpfc_io_slot_reset_s4(pdev);
15519 		break;
15520 	default:
15521 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15522 				"1428 Invalid PCI device group: 0x%x\n",
15523 				phba->pci_dev_grp);
15524 		break;
15525 	}
15526 	return rc;
15527 }
15528 
15529 /**
15530  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15531  * @pdev: pointer to PCI device
15532  *
15533  * This routine is registered to the PCI subsystem for error handling. It
15534  * is called when kernel error recovery tells the lpfc driver that it is
15535  * OK to resume normal PCI operation after PCI bus error recovery. When
15536  * this routine is invoked, it dispatches the action to the proper SLI-3
15537  * or SLI-4 device io_resume routine, which will resume the device operation.
15538  **/
15539 static void
15540 lpfc_io_resume(struct pci_dev *pdev)
15541 {
15542 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
15543 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15544 
15545 	switch (phba->pci_dev_grp) {
15546 	case LPFC_PCI_DEV_LP:
15547 		lpfc_io_resume_s3(pdev);
15548 		break;
15549 	case LPFC_PCI_DEV_OC:
15550 		lpfc_io_resume_s4(pdev);
15551 		break;
15552 	default:
15553 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15554 				"1429 Invalid PCI device group: 0x%x\n",
15555 				phba->pci_dev_grp);
15556 		break;
15557 	}
15558 	return;
15559 }
15560 
15561 /**
15562  * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15563  * @phba: pointer to lpfc hba data structure.
15564  *
15565  * This routine checks to see if OAS is supported for this adapter. If
15566  * supported, the Flash Optimized Fabric (cfg_fof) flag is set.  Otherwise,
15567  * the flag is cleared and the pool created for OAS device data
15568  * is destroyed.
15569  *
15570  **/
15571 static void
15572 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15573 {
15574 
15575 	if (!phba->cfg_EnableXLane)
15576 		return;
15577 
15578 	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15579 		phba->cfg_fof = 1;
15580 	} else {
15581 		phba->cfg_fof = 0;
15582 		mempool_destroy(phba->device_data_mem_pool);
15583 		phba->device_data_mem_pool = NULL;
15584 	}
15585 
15586 	return;
15587 }
15588 
15589 /**
15590  * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15591  * @phba: pointer to lpfc hba data structure.
15592  *
15593  * This routine checks to see if RAS is supported by the adapter, and
15594  * determines the PCI function through which RAS support is to be enabled.
15595  **/
15596 void
15597 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15598 {
15599 	/* if ASIC_GEN_NUM >= 0xC */
15600 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15601 		    LPFC_SLI_INTF_IF_TYPE_6) ||
15602 	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15603 		    LPFC_SLI_INTF_FAMILY_G6)) {
15604 		phba->ras_fwlog.ras_hwsupport = true;
15605 		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15606 		    phba->cfg_ras_fwlog_buffsize)
15607 			phba->ras_fwlog.ras_enabled = true;
15608 		else
15609 			phba->ras_fwlog.ras_enabled = false;
15610 	} else {
15611 		phba->ras_fwlog.ras_hwsupport = false;
15612 	}
15613 }
15614 
15615 
15616 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15617 
15618 static const struct pci_error_handlers lpfc_err_handler = {
15619 	.error_detected = lpfc_io_error_detected,
15620 	.slot_reset = lpfc_io_slot_reset,
15621 	.resume = lpfc_io_resume,
15622 };
15623 
15624 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15625 			 lpfc_pci_suspend_one,
15626 			 lpfc_pci_resume_one);
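/*
 * Editor's note: SIMPLE_DEV_PM_OPS() maps suspend/freeze/poweroff to the
 * single suspend callback and resume/thaw/restore to the single resume
 * callback, matching the "treat every PM message as SUSPEND" policy
 * described in the lpfc_pci_suspend_one_s4() comment above. A hedged
 * sketch of the expanded equivalent:
 */
#if 0
static const struct dev_pm_ops example_pm_ops = {
	.suspend	= lpfc_pci_suspend_one,	/* also .freeze, .poweroff */
	.resume		= lpfc_pci_resume_one,	/* also .thaw, .restore */
};
#endif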
15627 
15628 static struct pci_driver lpfc_driver = {
15629 	.name		= LPFC_DRIVER_NAME,
15630 	.id_table	= lpfc_id_table,
15631 	.probe		= lpfc_pci_probe_one,
15632 	.remove		= lpfc_pci_remove_one,
15633 	.shutdown	= lpfc_pci_remove_one,
15634 	.driver.pm	= &lpfc_pci_pm_ops_one,
15635 	.err_handler    = &lpfc_err_handler,
15636 };
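/*
 * Editor's note: a driver with no module-scope setup beyond PCI
 * registration could replace the lpfc_init()/lpfc_exit() pair below with
 * the module_pci_driver() helper; lpfc cannot, because it also registers
 * the lpfcmgmt misc device, the FC transport templates, and the CPU
 * hotplug callbacks. Sketch of the shorter form:
 */
#if 0
module_pci_driver(lpfc_driver);	/* replaces module_init()/module_exit() */
#endif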
15637 
15638 static const struct file_operations lpfc_mgmt_fop = {
15639 	.owner = THIS_MODULE,
15640 };
15641 
15642 static struct miscdevice lpfc_mgmt_dev = {
15643 	.minor = MISC_DYNAMIC_MINOR,
15644 	.name = "lpfcmgmt",
15645 	.fops = &lpfc_mgmt_fop,
15646 };
15647 
15648 /**
15649  * lpfc_init - lpfc module initialization routine
15650  *
15651  * This routine is to be invoked when the lpfc module is loaded into the
15652  * kernel. The special kernel macro module_init() is used to indicate the
15653  * role of this routine to the kernel as lpfc module entry point.
15654  *
15655  * Return codes
15656  *   0 - successful
15657  *   -ENOMEM - FC attach transport failed
15658  *   all others - failed
15659  */
15660 static int __init
15661 lpfc_init(void)
15662 {
15663 	int error = 0;
15664 
15665 	pr_info(LPFC_MODULE_DESC "\n");
15666 	pr_info(LPFC_COPYRIGHT "\n");
15667 
15668 	error = misc_register(&lpfc_mgmt_dev);
15669 	if (error)
15670 		printk(KERN_ERR "Could not register lpfcmgmt device, "
15671 			"misc_register returned with status %d\n", error);
15672 
15673 	error = -ENOMEM;
15674 	lpfc_transport_functions.vport_create = lpfc_vport_create;
15675 	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15676 	lpfc_transport_template =
15677 				fc_attach_transport(&lpfc_transport_functions);
15678 	if (lpfc_transport_template == NULL)
15679 		goto unregister;
15680 	lpfc_vport_transport_template =
15681 		fc_attach_transport(&lpfc_vport_transport_functions);
15682 	if (lpfc_vport_transport_template == NULL) {
15683 		fc_release_transport(lpfc_transport_template);
15684 		goto unregister;
15685 	}
15686 	lpfc_wqe_cmd_template();
15687 	lpfc_nvmet_cmd_template();
15688 
15689 	/* Initialize in case vector mapping is needed */
15690 	lpfc_present_cpu = num_present_cpus();
15691 
15692 	lpfc_pldv_detect = false;
15693 
15694 	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15695 					"lpfc/sli4:online",
15696 					lpfc_cpu_online, lpfc_cpu_offline);
15697 	if (error < 0)
15698 		goto cpuhp_failure;
15699 	lpfc_cpuhp_state = error;
15700 
15701 	error = pci_register_driver(&lpfc_driver);
15702 	if (error)
15703 		goto unwind;
15704 
15705 	return error;
15706 
15707 unwind:
15708 	cpuhp_remove_multi_state(lpfc_cpuhp_state);
15709 cpuhp_failure:
15710 	fc_release_transport(lpfc_transport_template);
15711 	fc_release_transport(lpfc_vport_transport_template);
15712 unregister:
15713 	misc_deregister(&lpfc_mgmt_dev);
15714 
15715 	return error;
15716 }
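/*
 * Editor's sketch (illustrative only, not compiled) of the multi-instance
 * CPU hotplug pattern set up in lpfc_init() above: one dynamic state is
 * registered per module, then each HBA adds its own hlist_node instance
 * (see cpuhp_state_add_instance_nocalls() in lpfc_pci_probe_one_s4()).
 * "example_" names are hypothetical.
 */
#if 0
static enum cpuhp_state example_state;

static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	/* Per-instance bring-up work for this CPU goes here. */
	return 0;
}

static int example_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	/* Per-instance tear-down work for this CPU goes here. */
	return 0;
}

static int example_hp_setup(void)
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					  "example/driver:online",
					  example_cpu_online,
					  example_cpu_offline);
	if (ret < 0)
		return ret;
	example_state = ret;	/* dynamic state id for per-HBA instances */
	return 0;
}
#endif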
15717 
15718 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15719 {
15720 	unsigned int start_idx;
15721 	unsigned int dbg_cnt;
15722 	unsigned int temp_idx;
15723 	int i;
15724 	int j = 0;
15725 	unsigned long rem_nsec;
15726 
15727 	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15728 		return;
15729 
15730 	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15731 	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15732 	if (!dbg_cnt)
15733 		goto out;
15734 	temp_idx = start_idx;
15735 	if (dbg_cnt >= DBG_LOG_SZ) {
15736 		dbg_cnt = DBG_LOG_SZ;
15737 		temp_idx -= 1;
15738 	} else {
15739 		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15740 			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15741 		} else {
15742 			if (start_idx < dbg_cnt)
15743 				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15744 			else
15745 				start_idx -= dbg_cnt;
15746 		}
15747 	}
15748 	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15749 		 start_idx, temp_idx, dbg_cnt);
15750 
15751 	for (i = 0; i < dbg_cnt; i++) {
15752 		if ((start_idx + i) < DBG_LOG_SZ)
15753 			temp_idx = (start_idx + i) % DBG_LOG_SZ;
15754 		else
15755 			temp_idx = j++;
15756 		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15757 		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15758 			 temp_idx,
15759 			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
15760 			 rem_nsec / 1000,
15761 			 phba->dbg_log[temp_idx].log);
15762 	}
15763 out:
15764 	atomic_set(&phba->dbg_log_cnt, 0);
15765 	atomic_set(&phba->dbg_log_dmping, 0);
15766 }
15767 
15768 __printf(2, 3)
15769 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15770 {
15771 	unsigned int idx;
15772 	va_list args;
15773 	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15774 	struct va_format vaf;
15775 
15776 
15777 	va_start(args, fmt);
15778 	if (unlikely(dbg_dmping)) {
15779 		vaf.fmt = fmt;
15780 		vaf.va = &args;
15781 		dev_info(&phba->pcidev->dev, "%pV", &vaf);
15782 		va_end(args);
15783 		return;
15784 	}
15785 	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15786 		DBG_LOG_SZ;
15787 
15788 	atomic_inc(&phba->dbg_log_cnt);
15789 
15790 	vscnprintf(phba->dbg_log[idx].log,
15791 		   sizeof(phba->dbg_log[idx].log), fmt, args);
15792 	va_end(args);
15793 
15794 	phba->dbg_log[idx].t_ns = local_clock();
15795 }
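/*
 * Editor's sketch (illustrative only, not compiled) of the lockless
 * ring-log scheme implemented by lpfc_dbg_print()/lpfc_dmp_dbg() above:
 * writers claim a slot with an atomic fetch-add and format into it, while
 * a dump in progress flips an atomic flag so concurrent writers print
 * directly instead of racing the dumper. "example_" names are
 * hypothetical.
 */
#if 0
#define EXAMPLE_LOG_SZ	256

static char example_log[EXAMPLE_LOG_SZ][64];
static atomic_t example_idx;

static void example_log_msg(const char *msg)
{
	unsigned int idx;

	/* The fetch-add serializes slot ownership without a lock. */
	idx = (unsigned int)atomic_fetch_add(1, &example_idx) % EXAMPLE_LOG_SZ;
	strscpy(example_log[idx], msg, sizeof(example_log[idx]));
}
#endif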
15796 
15797 /**
15798  * lpfc_exit - lpfc module removal routine
15799  *
15800  * This routine is invoked when the lpfc module is removed from the kernel.
15801  * The special kernel macro module_exit() is used to indicate the role of
15802  * this routine to the kernel as lpfc module exit point.
15803  */
15804 static void __exit
15805 lpfc_exit(void)
15806 {
15807 	misc_deregister(&lpfc_mgmt_dev);
15808 	pci_unregister_driver(&lpfc_driver);
15809 	cpuhp_remove_multi_state(lpfc_cpuhp_state);
15810 	fc_release_transport(lpfc_transport_template);
15811 	fc_release_transport(lpfc_vport_transport_template);
15812 	idr_destroy(&lpfc_hba_index);
15813 }
15814 
15815 module_init(lpfc_init);
15816 module_exit(lpfc_exit);
15817 MODULE_LICENSE("GPL");
15818 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15819 MODULE_AUTHOR("Broadcom");
15820 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
15821