
4  * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
26 #include <linux/dma-mapping.h>
108 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
117 * 0 - success.
118 * -ERESTART - requests the SLI layer to reset the HBA and try again.
119 * Any other value - indicates an error.
124 lpfc_vpd_t *vp = &phba->vpd;
134 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
136 phba->link_state = LPFC_HBA_ERROR;
137 return -ENOMEM;
140 mb = &pmb->u.mb;
141 phba->link_state = LPFC_INIT_MBX_CMDS;
143 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
153 memset((char *)mb->un.varRDnvp.rsvd3, 0,
154 sizeof(mb->un.varRDnvp.rsvd3));
155 memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
165 mb->mbxCommand, mb->mbxStatus);
166 mempool_free(pmb, phba->mbox_mem_pool);
167 return -ERESTART;
169 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
170 sizeof(phba->wwnn));
171 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
172 sizeof(phba->wwpn));
179 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
188 mb->mbxCommand, mb->mbxStatus);
189 mempool_free(pmb, phba->mbox_mem_pool);
190 return -ERESTART;
198 if (mb->un.varRdRev.rr == 0) {
199 vp->rev.rBit = 0;
203 mempool_free(pmb, phba->mbox_mem_pool);
204 return -ERESTART;
207 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
208 mempool_free(pmb, phba->mbox_mem_pool);
209 return -EINVAL;
213 vp->rev.rBit = 1;
214 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
215 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
216 memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
217 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
218 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
219 vp->rev.biuRev = mb->un.varRdRev.biuRev;
220 vp->rev.smRev = mb->un.varRdRev.smRev;
221 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
222 vp->rev.endecRev = mb->un.varRdRev.endecRev;
223 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
224 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
225 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
226 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
227 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
228 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
234 if (vp->rev.feaLevelHigh < 9)
235 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
237 if (lpfc_is_LC_HBA(phba->pcidev->device))
238 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
239 sizeof(phba->RandomData));
253 mb->mbxCommand, mb->mbxStatus);
254 mb->un.varDmp.word_cnt = 0;
259 if (mb->un.varDmp.word_cnt == 0)
262 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
263 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
266 mb->un.varDmp.word_cnt);
267 offset += mb->un.varDmp.word_cnt;
268 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
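/* The loop above reads the VPD in chunks: each DUMP mailbox completion
 * reports how many words were copied, offset advances by that count, and
 * the read stops once the adapter returns no data or DMP_VPD_SIZE is hit.
 */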
274 mempool_free(pmb, phba->mbox_mem_pool);
279 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
291 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
292 phba->temp_sensor_support = 1;
294 phba->temp_sensor_support = 0;
295 mempool_free(pmboxq, phba->mbox_mem_pool);
300 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
318 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
319 mempool_free(pmboxq, phba->mbox_mem_pool);
326 prog_id_word = pmboxq->u.mb.un.varWords[7];
329 dist = dist_char[prg->dist];
331 if ((prg->dist == 3) && (prg->num == 0))
332 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
333 prg->ver, prg->rev, prg->lev);
335 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
336 prg->ver, prg->rev, prg->lev,
337 dist, prg->num);
338 mempool_free(pmboxq, phba->mbox_mem_pool);
343 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
353 struct lpfc_hba *phba = vport->phba;
359 if (vport->fc_nodename.u.wwn[0] == 0)
360 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
363 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
370 if (vport->fc_portname.u.wwn[0] != 0 &&
371 memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
373 vport->vport_flag |= FAWWPN_PARAM_CHG;
375 if (phba->sli_rev == LPFC_SLI_REV4 &&
376 vport->port_type == LPFC_PHYSICAL_PORT &&
377 phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
378 if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
379 phba->sli4_hba.fawwpn_flag &=
383 "2701 FA-PWWN change WWPN from %llx to "
385 wwn_to_u64(vport->fc_portname.u.wwn),
387 (vport->fc_sparam.portName.u.wwn),
388 vport->vport_flag,
389 phba->sli4_hba.fawwpn_flag);
390 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
395 if (vport->fc_portname.u.wwn[0] == 0)
396 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
399 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
404 * lpfc_config_port_post - Perform lpfc initialization after config port
413 * 0 - success.
414 * Any other value - error.
419 struct lpfc_vport *vport = phba->pport;
424 struct lpfc_sli *psli = &phba->sli;
429 spin_lock_irq(&phba->hbalock);
434 if (phba->over_temp_state == HBA_OVER_TEMP)
435 phba->over_temp_state = HBA_NORMAL_TEMP;
436 spin_unlock_irq(&phba->hbalock);
438 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
440 phba->link_state = LPFC_HBA_ERROR;
441 return -ENOMEM;
443 mb = &pmb->u.mb;
448 mempool_free(pmb, phba->mbox_mem_pool);
449 return -ENOMEM;
452 pmb->vport = vport;
457 mb->mbxCommand, mb->mbxStatus);
458 phba->link_state = LPFC_HBA_ERROR;
460 return -EIO;
463 mp = pmb->ctx_buf;
469 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
470 lpfc_mbuf_free(phba, mp->virt, mp->phys);
472 pmb->ctx_buf = NULL;
476 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
477 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
478 fc_host_max_npiv_vports(shost) = phba->max_vpi;
480 /* If no serial number in VPD data, use low 6 bytes of WWNN */
481 /* This should be consolidated into parse_vpd? - mr */
482 if (phba->SerialNumber[0] == 0) {
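/* Render the low 6 bytes of the WWNN as 12 hex characters: each nibble
 * j maps to '0'..'9' for j < 10 and, per the 0x61 arithmetic below, to
 * 'a'..'f' for j >= 10.
 */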
485 outptr = &vport->fc_nodename.u.s.IEEE[0];
490 phba->SerialNumber[i] =
493 phba->SerialNumber[i] =
494 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
498 phba->SerialNumber[i] =
501 phba->SerialNumber[i] =
502 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
507 pmb->vport = vport;
512 mb->mbxCommand, mb->mbxStatus);
513 phba->link_state = LPFC_HBA_ERROR;
514 mempool_free(pmb, phba->mbox_mem_pool);
515 return -EIO;
522 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
525 phba->cfg_hba_queue_depth,
526 mb->un.varRdConfig.max_xri);
527 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
530 phba->lmt = mb->un.varRdConfig.lmt;
533 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
535 phba->link_state = LPFC_LINK_DOWN;
538 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
539 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
540 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
541 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
544 if (phba->sli_rev != 3)
548 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
550 if (phba->intr_type == MSIX) {
553 mempool_free(pmb, phba->mbox_mem_pool);
554 return -EIO;
561 pmb->u.mb.mbxCommand,
562 pmb->u.mb.mbxStatus);
563 mempool_free(pmb, phba->mbox_mem_pool);
564 return -EIO;
568 spin_lock_irq(&phba->hbalock);
570 clear_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
573 if (lpfc_readl(phba->HCregaddr, &status)) {
574 spin_unlock_irq(&phba->hbalock);
575 return -EIO;
578 if (psli->num_rings > 0)
580 if (psli->num_rings > 1)
582 if (psli->num_rings > 2)
584 if (psli->num_rings > 3)
587 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
588 (phba->cfg_poll & DISABLE_FCP_RING_INT))
591 writel(status, phba->HCregaddr);
592 readl(phba->HCregaddr); /* flush */
593 spin_unlock_irq(&phba->hbalock);
595 /* Set up ring-0 (ELS) timer */
596 timeout = phba->fc_ratov * 2;
597 mod_timer(&vport->els_tmofunc,
600 mod_timer(&phba->hb_tmofunc,
602 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
603 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
604 phba->last_completion_time = jiffies;
606 mod_timer(&phba->eratt_poll,
607 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
609 if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
613 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
620 mempool_free(pmb, phba->mbox_mem_pool);
621 return -EIO;
623 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
624 mempool_free(pmb, phba->mbox_mem_pool);
625 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
630 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
632 phba->link_state = LPFC_HBA_ERROR;
633 return -ENOMEM;
637 pmb->mbox_cmpl = lpfc_config_async_cmpl;
638 pmb->vport = phba->pport;
646 mempool_free(pmb, phba->mbox_mem_pool);
650 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
652 phba->link_state = LPFC_HBA_ERROR;
653 return -ENOMEM;
657 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
658 pmb->vport = phba->pport;
665 mempool_free(pmb, phba->mbox_mem_pool);
672 * lpfc_sli4_refresh_params - update driver copy of params.
686 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
688 return -ENOMEM;
690 mqe = &mboxq->u.mqe;
692 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
700 mempool_free(mboxq, phba->mbox_mem_pool);
703 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
704 phba->sli4_hba.pc_sli4_params.mi_cap =
708 if (phba->cfg_enable_mi)
709 phba->sli4_hba.pc_sli4_params.mi_ver =
712 phba->sli4_hba.pc_sli4_params.mi_ver = 0;
714 phba->sli4_hba.pc_sli4_params.cmf =
716 phba->sli4_hba.pc_sli4_params.pls =
719 mempool_free(mboxq, phba->mbox_mem_pool);
724 * lpfc_hba_init_link - Initialize the FC link
726 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
734 * 0 - success
735 * Any other value - error
740 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
744 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
747 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
755 * 0 - success
756 * Any other value - error
762 struct lpfc_vport *vport = phba->pport;
767 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
769 phba->link_state = LPFC_HBA_ERROR;
770 return -ENOMEM;
772 mb = &pmb->u.mb;
773 pmb->vport = vport;
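/* Validate the user-requested link speed against the adapter's link-speed
 * mask (phba->lmt); any speed the hardware cannot do falls back to
 * auto-negotiation below.
 */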
775 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
776 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
777 !(phba->lmt & LMT_1Gb)) ||
778 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
779 !(phba->lmt & LMT_2Gb)) ||
780 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
781 !(phba->lmt & LMT_4Gb)) ||
782 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
783 !(phba->lmt & LMT_8Gb)) ||
784 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
785 !(phba->lmt & LMT_10Gb)) ||
786 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
787 !(phba->lmt & LMT_16Gb)) ||
788 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
789 !(phba->lmt & LMT_32Gb)) ||
790 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
791 !(phba->lmt & LMT_64Gb))) {
796 phba->cfg_link_speed);
797 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
799 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
800 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
801 if (phba->sli_rev < LPFC_SLI_REV4)
808 mb->mbxCommand, mb->mbxStatus);
809 if (phba->sli_rev <= LPFC_SLI_REV3) {
811 writel(0, phba->HCregaddr);
812 readl(phba->HCregaddr); /* flush */
814 writel(0xffffffff, phba->HAregaddr);
815 readl(phba->HAregaddr); /* flush */
817 phba->link_state = LPFC_HBA_ERROR;
819 mempool_free(pmb, phba->mbox_mem_pool);
820 return -EIO;
822 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
824 mempool_free(pmb, phba->mbox_mem_pool);
830 * lpfc_hba_down_link - this routine downs the FC link
832 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
839 * 0 - success
840 * Any other value - error
848 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
850 phba->link_state = LPFC_HBA_ERROR;
851 return -ENOMEM;
857 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
864 mempool_free(pmb, phba->mbox_mem_pool);
865 return -EIO;
868 mempool_free(pmb, phba->mbox_mem_pool);
874 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
881 * 0 - success.
882 * Any other value - error.
890 if (phba->sli_rev <= LPFC_SLI_REV3) {
892 writel(0, phba->HCregaddr);
893 readl(phba->HCregaddr); /* flush */
896 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
897 lpfc_cleanup_discovery_resources(phba->pport);
901 for (i = 0; i <= phba->max_vports &&
910 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
929 clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
931 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
933 spin_lock_irq(&phba->hbalock);
934 list_remove_head(&phba->sli4_hba.sp_queue_event,
936 spin_unlock_irq(&phba->hbalock);
938 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
948 lpfc_in_buf_free(phba, &dmabuf->dbuf);
954 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
967 struct lpfc_sli *psli = &phba->sli;
973 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
977 pring = &psli->sli3_ring[LPFC_ELS_RING];
978 spin_lock_irq(&phba->hbalock);
979 list_splice_init(&pring->postbufq, &buflist);
980 spin_unlock_irq(&phba->hbalock);
984 list_del(&mp->list);
986 lpfc_mbuf_free(phba, mp->virt, mp->phys);
990 spin_lock_irq(&phba->hbalock);
991 pring->postbufq_cnt -= count;
992 spin_unlock_irq(&phba->hbalock);
997 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
1009 struct lpfc_sli *psli = &phba->sli;
1016 if (phba->sli_rev != LPFC_SLI_REV4) {
1017 for (i = 0; i < psli->num_rings; i++) {
1018 pring = &psli->sli3_ring[i];
1019 spin_lock_irq(&phba->hbalock);
1024 list_splice_init(&pring->txcmplq, &completions);
1025 pring->txcmplq_cnt = 0;
1026 spin_unlock_irq(&phba->hbalock);
1035 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1036 pring = qp->pring;
1039 spin_lock_irq(&pring->ring_lock);
1041 &pring->txcmplq, list)
1042 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
1043 list_splice_init(&pring->txcmplq, &completions);
1044 pring->txcmplq_cnt = 0;
1045 spin_unlock_irq(&pring->ring_lock);
1054 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
1061 * 0 - success.
1062 * Any other value - error.
1073 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
1080 * 0 - success.
1081 * Any other value - error.
1109 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
1111 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1112 sglq_entry->state = SGL_FREED;
1114 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1115 &phba->sli4_hba.lpfc_els_sgl_list);
1118 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
1123 spin_lock_irq(&phba->hbalock);
1125 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1126 qp = &phba->sli4_hba.hdwq[idx];
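/* Recover IO buffers that were waiting on aborted exchanges: splice them
 * back onto the per-hdwq put list and zero the abort counters so the
 * buffers can be reused after the reset.
 */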
1128 spin_lock(&qp->abts_io_buf_list_lock);
1129 list_splice_init(&qp->lpfc_abts_io_buf_list,
1133 psb->pCmd = NULL;
1134 psb->status = IOSTAT_SUCCESS;
1137 spin_lock(&qp->io_buf_list_put_lock);
1138 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1139 qp->put_io_bufs += qp->abts_scsi_io_bufs;
1140 qp->put_io_bufs += qp->abts_nvme_io_bufs;
1141 qp->abts_scsi_io_bufs = 0;
1142 qp->abts_nvme_io_bufs = 0;
1143 spin_unlock(&qp->io_buf_list_put_lock);
1144 spin_unlock(&qp->abts_io_buf_list_lock);
1146 spin_unlock_irq(&phba->hbalock);
1148 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1149 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1150 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1152 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1154 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
1155 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1164 * lpfc_hba_down_post - Wrapper func for hba down post routine
1171 * 0 - success.
1172 * Any other value - error.
1177 return (*phba->lpfc_hba_down_post)(phba);
1181 * lpfc_hb_timeout - The HBA-timer timeout handler
1184 * This is the HBA-timer timeout handler registered to the lpfc driver. When
1186 * work-port-events bitmap and the worker thread is notified. This timeout
1202 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1203 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1205 phba->pport->work_port_events |= WORKER_HB_TMO;
1206 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1215 * lpfc_rrq_timeout - The RRQ-timer timeout handler
1218 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1220 * work-port-events bitmap and the worker thread is notified. This timeout
1232 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
1233 clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
1237 set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
1242 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1246 * This is the callback function to the lpfc heart-beat mailbox command.
1247 * If configured, the lpfc driver issues the heart-beat mailbox command to
1249 * heart-beat mailbox command is issued, the driver shall set up heart-beat
1251 * heart-beat outstanding state. Once the mailbox command comes back and
1252 * no error conditions detected, the heart-beat mailbox command timer is
1253 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1254 * state is cleared for the next heart-beat. If the timer expired with the
1255 * heart-beat outstanding state set, the driver will put the HBA offline.
1260 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
1261 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
1263 /* Check and reset heart-beat timer if necessary */
1264 mempool_free(pmboxq, phba->mbox_mem_pool);
1265 if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) &&
1266 !(phba->link_state == LPFC_HBA_ERROR) &&
1267 !test_bit(FC_UNLOADING, &phba->pport->load_flag))
1268 mod_timer(&phba->hb_tmofunc,
1275 * lpfc_idle_stat_delay_work - idle_stat tracking
1277 * This routine tracks per-eq idle_stat and determines polling decisions.
1294 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
1297 if (phba->link_state == LPFC_HBA_ERROR ||
1298 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) ||
1299 phba->cmf_active_mode != LPFC_CFG_OFF)
1303 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1304 eq = hdwq->hba_eq;
1307 if (eq->chann != i)
1310 idle_stat = &phba->sli4_hba.idle_stat[i];
1316 * percentage of 100 - the sum of the other consumption times.
1319 diff_idle = wall_idle - idle_stat->prev_idle;
1320 diff_wall = wall - idle_stat->prev_wall;
1325 busy_time = diff_wall - diff_idle;
1328 idle_percent = 100 - idle_percent;
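/* After the subtraction, idle_percent holds the busy share of the
 * interval; a threshold on that value selects between workqueue
 * processing and threaded-IRQ handling for the EQ.
 */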
1331 eq->poll_mode = LPFC_QUEUE_WORK;
1333 eq->poll_mode = LPFC_THREADED_IRQ;
1335 idle_stat->prev_idle = wall_idle;
1336 idle_stat->prev_wall = wall;
1340 schedule_delayed_work(&phba->idle_stat_delay_work,
1355 if (!phba->cfg_auto_imax ||
1356 test_bit(FC_UNLOADING, &phba->pport->load_flag))
1359 if (phba->link_state == LPFC_HBA_ERROR ||
1360 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
1363 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1368 for (i = 0; i < phba->cfg_irq_chann; i++) {
1370 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1373 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1374 eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1375 ena_delay[eq->last_cpu] = 1;
1380 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1382 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
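/* eqi->icnt counts interrupts seen this interval; shifting right by 10
 * divides by 1024, so the coalescing delay grows by LPFC_EQ_DELAY_STEP
 * microseconds for roughly every 1K interrupts.
 */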
1389 eqi->icnt = 0;
1391 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1392 if (unlikely(eq->last_cpu != i)) {
1393 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1394 eq->last_cpu);
1395 list_move_tail(&eq->cpu_list, &eqi_new->list);
1398 if (usdelay != eq->q_mode)
1399 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1407 queue_delayed_work(phba->wq, &phba->eq_delay_work,
1412 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1423 hwq_count = phba->cfg_hdw_queue;
1439 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1453 if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
1456 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1458 return -ENOMEM;
1461 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1462 pmboxq->vport = phba->pport;
1466 mempool_free(pmboxq, phba->mbox_mem_pool);
1467 return -ENXIO;
1469 set_bit(HBA_HBEAT_INP, &phba->hba_flag);
1475 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1487 if (phba->cfg_enable_hba_heartbeat)
1489 set_bit(HBA_HBEAT_TMO, &phba->hba_flag);
1493 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1496 * This is the actual HBA-timer timeout handler to be invoked by the worker
1497 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1500 * or by processing slow-ring or fast-ring events within the HBA-timer
1502 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1503 * is configured and there is no heart-beat mailbox command outstanding, a
1504 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1505 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1515 struct lpfc_sli *psli = &phba->sli;
1518 if (phba->cfg_xri_rebalancing) {
1519 /* Multi-XRI pools handler */
1525 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1531 if (phba->link_state == LPFC_HBA_ERROR ||
1532 test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
1533 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
1536 if (phba->elsbuf_cnt &&
1537 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1538 spin_lock_irq(&phba->hbalock);
1539 list_splice_init(&phba->elsbuf, &completions);
1540 phba->elsbuf_cnt = 0;
1541 phba->elsbuf_prev_cnt = 0;
1542 spin_unlock_irq(&phba->hbalock);
1547 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1551 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1554 if (phba->cfg_enable_hba_heartbeat) {
1556 spin_lock_irq(&phba->pport->work_port_lock);
1557 if (time_after(phba->last_completion_time +
1560 spin_unlock_irq(&phba->pport->work_port_lock);
1561 if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
1567 spin_unlock_irq(&phba->pport->work_port_lock);
1570 if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) {
1580 - phba->last_completion_time));
1583 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1584 (list_empty(&psli->mboxq))) {
1591 phba->skipped_hb = 0;
1592 } else if (time_before_eq(phba->last_completion_time,
1593 phba->skipped_hb)) {
1598 - phba->last_completion_time));
1600 phba->skipped_hb = jiffies;
1607 if (test_bit(HBA_HBEAT_TMO, &phba->hba_flag)) {
1618 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1622 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1631 struct lpfc_sli *psli = &phba->sli;
1633 spin_lock_irq(&phba->hbalock);
1634 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1635 spin_unlock_irq(&phba->hbalock);
1640 spin_lock_irq(&phba->hbalock);
1642 spin_unlock_irq(&phba->hbalock);
1646 phba->link_state = LPFC_HBA_ERROR;
1651 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1660 spin_lock_irq(&phba->hbalock);
1661 if (phba->link_state == LPFC_HBA_ERROR &&
1662 test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
1663 spin_unlock_irq(&phba->hbalock);
1666 phba->link_state = LPFC_HBA_ERROR;
1667 spin_unlock_irq(&phba->hbalock);
1677 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1688 uint32_t old_host_status = phba->work_hs;
1689 struct lpfc_sli *psli = &phba->sli;
1694 if (pci_channel_offline(phba->pcidev)) {
1695 clear_bit(DEFER_ERATT, &phba->hba_flag);
1702 phba->work_hs, phba->work_status[0],
1703 phba->work_status[1]);
1705 spin_lock_irq(&phba->hbalock);
1706 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1707 spin_unlock_irq(&phba->hbalock);
1713 * SCSI layer retry it after re-establishing link.
1725 while (phba->work_hs & HS_FFER1) {
1727 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1728 phba->work_hs = UNPLUG_ERR;
1732 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
1733 phba->work_hs = 0;
1743 if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag))
1744 phba->work_hs = old_host_status & ~HS_FFER1;
1746 clear_bit(DEFER_ERATT, &phba->hba_flag);
1747 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1748 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1759 shost = lpfc_shost_from_vport(phba->pport);
1767 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1772 * 1 - HBA error attention interrupt
1773 * 2 - DMA ring index out of range
1774 * 3 - Mailbox command came back as unknown
1779 struct lpfc_vport *vport = phba->pport;
1780 struct lpfc_sli *psli = &phba->sli;
1789 if (pci_channel_offline(phba->pcidev)) {
1790 clear_bit(DEFER_ERATT, &phba->hba_flag);
1795 if (!phba->cfg_enable_hba_reset)
1801 if (test_bit(DEFER_ERATT, &phba->hba_flag))
1804 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1805 if (phba->work_hs & HS_FFER6)
1806 /* Re-establishing Link */
1808 "1301 Re-establishing Link "
1810 phba->work_hs, phba->work_status[0],
1811 phba->work_status[1]);
1812 if (phba->work_hs & HS_FFER8)
1817 phba->work_hs, phba->work_status[0],
1818 phba->work_status[1]);
1820 spin_lock_irq(&phba->hbalock);
1821 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1822 spin_unlock_irq(&phba->hbalock);
1828 * retry it after re-establishing link.
1844 } else if (phba->work_hs & HS_CRIT_TEMP) {
1845 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1854 temperature, phba->work_hs,
1855 phba->work_status[0], phba->work_status[1]);
1857 shost = lpfc_shost_from_vport(phba->pport);
1864 spin_lock_irq(&phba->hbalock);
1865 phba->over_temp_state = HBA_OVER_TEMP;
1866 spin_unlock_irq(&phba->hbalock);
1877 phba->work_hs,
1878 phba->work_status[0], phba->work_status[1]);
1892 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1910 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1932 spin_lock_irq(&phba->hbalock);
1933 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1934 if (phba->sli.mbox_active) {
1935 mboxq = phba->sli.mbox_active;
1936 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1938 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1939 phba->sli.mbox_active = NULL;
1941 spin_unlock_irq(&phba->hbalock);
1957 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1961 return -EIO;
1963 phba->intr_mode = intr_mode;
1972 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1981 struct lpfc_vport *vport = phba->pport;
1997 if (pci_channel_offline(phba->pcidev)) {
2005 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2009 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2012 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2015 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2017 if (!test_bit(HBA_RECOVERABLE_UE, &phba->hba_flag)) {
2024 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2025 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2047 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2066 phba->link_state = LPFC_HBA_ERROR;
2072 phba->sli4_hba.u.if_type2.STATUSregaddr,
2075 if (pci_rd_rc1 == -EIO) {
2078 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2082 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2083 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2090 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2095 shost = lpfc_shost_from_vport(phba->pport);
2102 spin_lock_irq(&phba->hbalock);
2103 phba->over_temp_state = HBA_OVER_TEMP;
2104 spin_unlock_irq(&phba->hbalock);
2124 if (!phba->cfg_enable_hba_reset)
2160 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2167 * 0 - success.
2168 * Any other value - error.
2173 (*phba->lpfc_handle_eratt)(phba);
2177 * lpfc_handle_latt - The HBA link event handler
2186 struct lpfc_vport *vport = phba->pport;
2187 struct lpfc_sli *psli = &phba->sli;
2192 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2201 mempool_free(pmb, phba->mbox_mem_pool);
2207 psli->slistat.link_event++;
2208 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
2209 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2210 pmb->vport = vport;
2212 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2220 spin_lock_irq(&phba->hbalock);
2221 writel(HA_LATT, phba->HAregaddr);
2222 readl(phba->HAregaddr); /* flush */
2223 spin_unlock_irq(&phba->hbalock);
2228 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2232 spin_lock_irq(&phba->hbalock);
2233 psli->sli_flag |= LPFC_PROCESS_LA;
2234 control = readl(phba->HCregaddr);
2236 writel(control, phba->HCregaddr);
2237 readl(phba->HCregaddr); /* flush */
2240 writel(HA_LATT, phba->HAregaddr);
2241 readl(phba->HAregaddr); /* flush */
2242 spin_unlock_irq(&phba->hbalock);
2244 phba->link_state = LPFC_HBA_ERROR;
2258 /* Look for Serial Number */
2264 length -= (3 + i);
2265 while (i--) {
2266 phba->SerialNumber[j++] = vpd[(*pindex)++];
2270 phba->SerialNumber[j] = 0;
2273 phba->vpd_flag |= VPD_MODEL_DESC;
2278 length -= (3 + i);
2279 while (i--) {
2280 phba->ModelDesc[j++] = vpd[(*pindex)++];
2284 phba->ModelDesc[j] = 0;
2287 phba->vpd_flag |= VPD_MODEL_NAME;
2292 length -= (3 + i);
2293 while (i--) {
2294 phba->ModelName[j++] = vpd[(*pindex)++];
2298 phba->ModelName[j] = 0;
2301 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2306 length -= (3 + i);
2307 while (i--) {
2308 phba->ProgramType[j++] = vpd[(*pindex)++];
2312 phba->ProgramType[j] = 0;
2315 phba->vpd_flag |= VPD_PORT;
2320 length -= (3 + i);
2321 while (i--) {
2322 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2323 (phba->sli4_hba.pport_name_sta ==
2328 phba->Port[j++] = vpd[(*pindex)++];
2332 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2333 (phba->sli4_hba.pport_name_sta ==
2335 phba->Port[j] = 0;
2342 length -= (3 + i);
2348 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2358 * 0 - pointer to the VPD passed in is NULL
2359 * 1 - success
2378 while (!finished && (index < (len - 4))) {
2397 if (Length > len - index)
2398 Length = len - index;
2416 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2430 uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2494 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2496 phba->Port);
2500 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2515 uint16_t dev_id = phba->pcidev->device;
2529 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2534 if (phba->lmt & LMT_64Gb)
2536 else if (phba->lmt & LMT_32Gb)
2538 else if (phba->lmt & LMT_16Gb)
2540 else if (phba->lmt & LMT_10Gb)
2542 else if (phba->lmt & LMT_8Gb)
2544 else if (phba->lmt & LMT_4Gb)
2546 else if (phba->lmt & LMT_2Gb)
2548 else if (phba->lmt & LMT_1Gb)
2553 vp = &phba->vpd;
2561 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2572 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2583 m = (typeof(m)){"LP9802", "PCI-X",
2587 m = (typeof(m)){"LP10000", "PCI-X",
2591 m = (typeof(m)){"LPX1000", "PCI-X",
2595 m = (typeof(m)){"LP982", "PCI-X",
2599 m = (typeof(m)){"LP1050", "PCI-X",
2603 m = (typeof(m)){"LP11000", "PCI-X2",
2607 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2611 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2619 m = (typeof(m)){"LPe1000-SP", "PCIe",
2623 m = (typeof(m)){"LPe1002-SP", "PCIe",
2627 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2630 m = (typeof(m)){"LP111", "PCI-X2",
2650 m = (typeof(m)){"LP101", "PCI-X",
2654 m = (typeof(m)){"LP10000-S", "PCI",
2658 m = (typeof(m)){"LP11000-S", "PCI-X2",
2662 m = (typeof(m)){"LPe11000-S", "PCIe",
2675 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2678 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2681 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2692 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2704 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2757 phba->Port);
2771 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2789 cnt += pring->missbufcnt;
2796 pring->missbufcnt = cnt;
2799 icmd = &iocb->iocb;
2805 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2806 if (!mp1 || !mp1->virt) {
2809 pring->missbufcnt = cnt;
2813 INIT_LIST_HEAD(&mp1->list);
2818 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2819 &mp2->phys);
2820 if (!mp2 || !mp2->virt) {
2822 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2825 pring->missbufcnt = cnt;
2829 INIT_LIST_HEAD(&mp2->list);
2834 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2835 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2836 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2837 icmd->ulpBdeCount = 1;
2838 cnt--;
2840 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2841 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2842 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2843 cnt--;
2844 icmd->ulpBdeCount = 2;
2847 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2848 icmd->ulpLe = 1;
2850 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2852 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2856 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2861 pring->missbufcnt = cnt;
2868 pring->missbufcnt = 0;
2873 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to the ELS ring
2881 * 0 - success (currently always success)
2886 struct lpfc_sli *psli = &phba->sli;
2889 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2890 /* Ring 2 - FCP no buffers needed */
2895 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
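/* S(N,V) is a 32-bit rotate-left of V by N bits, the primitive used by
 * the SHA-1 style hashing routines below.
 */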
2898 * lpfc_sha_init - Set up initial array of hash table entries
2915 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2934 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
2936 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2972 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2988 * lpfc_hba_init - Perform special handling for LC HBA initialization
2990 * @hbainit: pointer to an array of unsigned 32-bit integers.
2999 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
3009 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3017 * lpfc_cleanup - Performs vport cleanups before deleting a vport
3028 struct lpfc_hba *phba = vport->phba;
3032 if (phba->link_state > LPFC_LINK_DOWN)
3039 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3040 if (vport->port_type != LPFC_PHYSICAL_PORT &&
3041 ndlp->nlp_DID == Fabric_DID) {
3047 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3048 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3056 if (ndlp->nlp_type & NLP_FABRIC &&
3057 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3061 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3078 if (test_bit(FC_UNLOADING, &vport->load_flag) &&
3079 pci_channel_offline(phba->pcidev))
3080 lpfc_sli_flush_io_rings(vport->phba);
3086 while (!list_empty(&vport->fc_nodes)) {
3092 &vport->fc_nodes, nlp_listp) {
3093 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3098 ndlp->nlp_DID, (void *)ndlp,
3099 kref_read(&ndlp->kref),
3100 ndlp->fc4_xpt_flags,
3101 ndlp->nlp_flag);
3113 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3123 del_timer_sync(&vport->els_tmofunc);
3124 del_timer_sync(&vport->delayed_disc_tmo);
3130 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3140 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3143 del_timer(&phba->fcf.redisc_wait);
3147 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3158 spin_lock_irq(&phba->hbalock);
3159 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3161 spin_unlock_irq(&phba->hbalock);
3166 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3167 spin_unlock_irq(&phba->hbalock);
3171 * lpfc_cmf_stop - Stop CMF processing
3185 if (!phba->sli4_hba.pc_sli4_params.cmf)
3192 hrtimer_cancel(&phba->cmf_stats_timer);
3193 hrtimer_cancel(&phba->cmf_timer);
3196 atomic_set(&phba->cmf_busy, 0);
3198 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3199 atomic64_set(&cgs->total_bytes, 0);
3200 atomic64_set(&cgs->rcv_bytes, 0);
3201 atomic_set(&cgs->rx_io_cnt, 0);
3202 atomic64_set(&cgs->rx_latency, 0);
3204 atomic_set(&phba->cmf_bw_wait, 0);
3206 /* Resume any blocked IO - Queue unblock on workqueue */
3207 queue_work(phba->wq, &phba->unblock_request_work);
3225 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3226 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3227 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3228 phba->cmf_interval_rate, 1000);
3229 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
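/* Derive the per-interval byte budget from the link speed: line rate
 * multiplied by the interval length in milliseconds, divided by 1000.
 * The budget starts out at the full link capacity for one interval.
 */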
3236 * lpfc_cmf_start - Start CMF processing
3249 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3250 phba->cmf_active_mode == LPFC_CFG_OFF)
3256 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3257 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3258 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3259 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3261 atomic_set(&phba->cmf_busy, 0);
3263 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3264 atomic64_set(&cgs->total_bytes, 0);
3265 atomic64_set(&cgs->rcv_bytes, 0);
3266 atomic_set(&cgs->rx_io_cnt, 0);
3267 atomic64_set(&cgs->rx_latency, 0);
3269 phba->cmf_latency.tv_sec = 0;
3270 phba->cmf_latency.tv_nsec = 0;
3277 phba->cmf_timer_cnt = 0;
3278 hrtimer_start(&phba->cmf_timer,
3281 hrtimer_start(&phba->cmf_stats_timer,
3285 ktime_get_real_ts64(&phba->cmf_latency);
3287 atomic_set(&phba->cmf_bw_wait, 0);
3288 atomic_set(&phba->cmf_stop_io, 0);
3292 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3301 if (phba->pport)
3302 lpfc_stop_vport_timers(phba->pport);
3303 cancel_delayed_work_sync(&phba->eq_delay_work);
3304 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3305 del_timer_sync(&phba->sli.mbox_tmo);
3306 del_timer_sync(&phba->fabric_block_timer);
3307 del_timer_sync(&phba->eratt_poll);
3308 del_timer_sync(&phba->hb_tmofunc);
3309 if (phba->sli_rev == LPFC_SLI_REV4) {
3310 del_timer_sync(&phba->rrq_tmr);
3311 clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
3313 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
3314 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
3316 switch (phba->pci_dev_grp) {
3319 del_timer_sync(&phba->fcp_poll_timer);
3328 phba->pci_dev_grp);
3335 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
3352 spin_lock_irqsave(&phba->hbalock, iflag);
3353 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3354 spin_unlock_irqrestore(&phba->hbalock, iflag);
3358 spin_lock_irqsave(&phba->hbalock, iflag);
3359 if (phba->sli.mbox_active) {
3360 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3365 phba->sli.mbox_active) * 1000) + jiffies;
3367 spin_unlock_irqrestore(&phba->hbalock, iflag);
3370 while (phba->sli.mbox_active) {
3376 "- mbox cmd %x still active\n",
3377 phba->sli.sli_flag, actcmd);
3384 * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes.
3398 if (phba->sli_rev != LPFC_SLI_REV4)
3405 for (i = 0; i <= phba->max_vports && vports[i]; i++) {
3406 if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
3410 &vports[i]->fc_nodes,
3414 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3419 ndlp, ndlp->nlp_DID,
3420 ndlp->nlp_flag);
3423 ndlp->nlp_rpi = rpi;
3424 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3428 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3429 ndlp->nlp_flag);
3436 * lpfc_create_expedite_pool - create expedite pool
3450 epd_pool = &phba->epd_pool;
3451 qp = &phba->sli4_hba.hdwq[0];
3453 spin_lock_init(&epd_pool->lock);
3454 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3455 spin_lock(&epd_pool->lock);
3456 INIT_LIST_HEAD(&epd_pool->list);
3458 &qp->lpfc_io_buf_list_put, list) {
3459 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3460 lpfc_ncmd->expedite = true;
3461 qp->put_io_bufs--;
3462 epd_pool->count++;
3463 if (epd_pool->count >= XRI_BATCH)
3466 spin_unlock(&epd_pool->lock);
3467 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3471 * lpfc_destroy_expedite_pool - destroy expedite pool
3485 epd_pool = &phba->epd_pool;
3486 qp = &phba->sli4_hba.hdwq[0];
3488 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3489 spin_lock(&epd_pool->lock);
3491 &epd_pool->list, list) {
3492 list_move_tail(&lpfc_ncmd->list,
3493 &qp->lpfc_io_buf_list_put);
3494 lpfc_ncmd->expedite = false;
3495 qp->put_io_bufs++;
3496 epd_pool->count--;
3498 spin_unlock(&epd_pool->lock);
3499 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3503 * lpfc_create_multixri_pools - create multi-XRI pools
3507 * lpfc_io_buf_list_put to public pool. High and low watermarks are also
3525 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3526 phba->sli4_hba.io_xri_cnt);
3528 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3531 hwq_count = phba->cfg_hdw_queue;
3532 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3542 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3547 qp = &phba->sli4_hba.hdwq[j];
3548 kfree(qp->p_multixri_pool);
3551 phba->cfg_xri_rebalancing = 0;
3555 qp = &phba->sli4_hba.hdwq[i];
3556 qp->p_multixri_pool = multixri_pool;
3558 multixri_pool->xri_limit = count_per_hwq;
3559 multixri_pool->rrb_next_hwqid = i;
3562 pbl_pool = &multixri_pool->pbl_pool;
3563 spin_lock_init(&pbl_pool->lock);
3564 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3565 spin_lock(&pbl_pool->lock);
3566 INIT_LIST_HEAD(&pbl_pool->list);
3568 &qp->lpfc_io_buf_list_put, list) {
3569 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3570 qp->put_io_bufs--;
3571 pbl_pool->count++;
3575 pbl_pool->count, i);
3576 spin_unlock(&pbl_pool->lock);
3577 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3580 pvt_pool = &multixri_pool->pvt_pool;
3581 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3582 pvt_pool->low_watermark = XRI_BATCH;
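/* Private-pool watermarks: the high watermark is half of this hdwq's XRI
 * limit and the low watermark is one XRI_BATCH; these bounds drive the
 * rebalancing of XRIs between the private and public (pbl) pools.
 */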
3583 spin_lock_init(&pvt_pool->lock);
3584 spin_lock_irqsave(&pvt_pool->lock, iflag);
3585 INIT_LIST_HEAD(&pvt_pool->list);
3586 pvt_pool->count = 0;
3587 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3592 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3609 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3612 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
3615 hwq_count = phba->cfg_hdw_queue;
3618 qp = &phba->sli4_hba.hdwq[i];
3619 multixri_pool = qp->p_multixri_pool;
3623 qp->p_multixri_pool = NULL;
3625 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3628 pbl_pool = &multixri_pool->pbl_pool;
3629 spin_lock(&pbl_pool->lock);
3633 pbl_pool->count, i);
3636 &pbl_pool->list, list) {
3637 list_move_tail(&lpfc_ncmd->list,
3638 &qp->lpfc_io_buf_list_put);
3639 qp->put_io_bufs++;
3640 pbl_pool->count--;
3643 INIT_LIST_HEAD(&pbl_pool->list);
3644 pbl_pool->count = 0;
3646 spin_unlock(&pbl_pool->lock);
3649 pvt_pool = &multixri_pool->pvt_pool;
3650 spin_lock(&pvt_pool->lock);
3654 pvt_pool->count, i);
3657 &pvt_pool->list, list) {
3658 list_move_tail(&lpfc_ncmd->list,
3659 &qp->lpfc_io_buf_list_put);
3660 qp->put_io_bufs++;
3661 pvt_pool->count--;
3664 INIT_LIST_HEAD(&pvt_pool->list);
3665 pvt_pool->count = 0;
3667 spin_unlock(&pvt_pool->lock);
3668 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3675 * lpfc_online - Initialize and bring an HBA online
3683 * 0 - successful
3684 * 1 - failed
3696 vport = phba->pport;
3698 if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
3706 if (phba->sli_rev == LPFC_SLI_REV4) {
3711 spin_lock_irq(&phba->hbalock);
3712 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3714 spin_unlock_irq(&phba->hbalock);
3719 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3720 !phba->nvmet_support) {
3721 error = lpfc_nvme_create_localport(phba->pport);
3737 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3738 clear_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
3739 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3741 &vports[i]->fc_flag);
3742 if (phba->sli_rev == LPFC_SLI_REV4) {
3744 &vports[i]->fc_flag);
3746 (vports[i]->port_type !=
3748 vports[i]->vpi = 0;
3754 if (phba->cfg_xri_rebalancing)
3764 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3779 spin_lock_irqsave(&phba->hbalock, iflag);
3780 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3781 spin_unlock_irqrestore(&phba->hbalock, iflag);
3785 * lpfc_offline_prep - Prepare an HBA to be brought offline
3796 struct lpfc_vport *vport = phba->pport;
3804 if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
3811 offline = pci_channel_offline(phba->pcidev);
3812 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3817 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3818 if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
3821 spin_lock_irq(shost->host_lock);
3822 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3823 spin_unlock_irq(shost->host_lock);
3824 set_bit(FC_VPORT_NEEDS_REG_VPI, &vports[i]->fc_flag);
3825 clear_bit(FC_VFI_REGISTERED, &vports[i]->fc_flag);
3828 &vports[i]->fc_nodes,
3831 clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
3834 &ndlp->nlp_flag);
3836 &ndlp->nlp_flag);
3839 if (ndlp->nlp_type & NLP_FABRIC) {
3851 &ndlp->save_flags) &&
3852 !(ndlp->fc4_xpt_flags &
3866 if (phba->wq)
3867 flush_workqueue(phba->wq);
3871 * lpfc_offline - Bring an HBA offline
3885 if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
3895 lpfc_nvme_destroy_localport(phba->pport);
3899 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3907 spin_lock_irq(&phba->hbalock);
3908 phba->work_ha = 0;
3909 spin_unlock_irq(&phba->hbalock);
3912 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3914 spin_lock_irq(shost->host_lock);
3915 vports[i]->work_port_events = 0;
3916 spin_unlock_irq(shost->host_lock);
3917 set_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
3923 if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
3926 if (phba->cfg_xri_rebalancing)
3931 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3943 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3946 spin_lock_irq(&phba->hbalock);
3950 spin_lock(&phba->scsi_buf_list_put_lock);
3951 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3953 list_del(&sb->list);
3954 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3955 sb->dma_handle);
3957 phba->total_scsi_bufs--;
3959 spin_unlock(&phba->scsi_buf_list_put_lock);
3961 spin_lock(&phba->scsi_buf_list_get_lock);
3962 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3964 list_del(&sb->list);
3965 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3966 sb->dma_handle);
3968 phba->total_scsi_bufs--;
3970 spin_unlock(&phba->scsi_buf_list_get_lock);
3971 spin_unlock_irq(&phba->hbalock);
3975 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3989 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3990 qp = &phba->sli4_hba.hdwq[idx];
3992 spin_lock(&qp->io_buf_list_put_lock);
3994 &qp->lpfc_io_buf_list_put,
3996 list_del(&lpfc_ncmd->list);
3997 qp->put_io_bufs--;
3998 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3999 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4000 if (phba->cfg_xpsgl && !phba->nvmet_support)
4004 qp->total_io_bufs--;
4006 spin_unlock(&qp->io_buf_list_put_lock);
4008 spin_lock(&qp->io_buf_list_get_lock);
4010 &qp->lpfc_io_buf_list_get,
4012 list_del(&lpfc_ncmd->list);
4013 qp->get_io_bufs--;
4014 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4015 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4016 if (phba->cfg_xpsgl && !phba->nvmet_support)
4020 qp->total_io_bufs--;
4022 spin_unlock(&qp->io_buf_list_get_lock);
4027 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4036 * 0 - successful (for now, it always returns 0)
4047 * update on pci function's els xri-sgl list
4051 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4052 /* els xri-sgl expanded */
4053 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4055 "3157 ELS xri-sgl count increased from "
4056 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4067 rc = -ENOMEM;
4070 sglq_entry->buff_type = GEN_BUFF_TYPE;
4071 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4072 &sglq_entry->phys);
4073 if (sglq_entry->virt == NULL) {
4079 rc = -ENOMEM;
4082 sglq_entry->sgl = sglq_entry->virt;
4083 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4084 sglq_entry->state = SGL_FREED;
4085 list_add_tail(&sglq_entry->list, &els_sgl_list);
4087 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4089 &phba->sli4_hba.lpfc_els_sgl_list);
4090 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4091 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4092 /* els xri-sgl shrunk */
4093 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4095 "3158 ELS xri-sgl count decreased from "
4096 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4098 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4099 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4106 __lpfc_mbuf_free(phba, sglq_entry->virt,
4107 sglq_entry->phys);
4112 &phba->sli4_hba.lpfc_els_sgl_list);
4113 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4116 "3163 ELS xri-sgl count unchanged: %d\n",
4118 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4124 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4131 rc = -ENOMEM;
4134 sglq_entry->sli4_lxritag = lxri;
4135 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4145 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4154 * 0 - successful (for now, it always returns 0)
4166 * update on pci function's nvmet xri-sgl list
4171 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4172 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4173 /* nvmet xri-sgl expanded */
4174 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4176 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4177 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4187 rc = -ENOMEM;
4190 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4191 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4192 &sglq_entry->phys);
4193 if (sglq_entry->virt == NULL) {
4199 rc = -ENOMEM;
4202 sglq_entry->sgl = sglq_entry->virt;
4203 memset(sglq_entry->sgl, 0,
4204 phba->cfg_sg_dma_buf_size);
4205 sglq_entry->state = SGL_FREED;
4206 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4208 spin_lock_irq(&phba->hbalock);
4209 spin_lock(&phba->sli4_hba.sgl_list_lock);
4211 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4212 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4213 spin_unlock_irq(&phba->hbalock);
4214 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4215 /* nvmet xri-sgl shrunk */
4216 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4218 "6305 NVMET xri-sgl count decreased from "
4219 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4221 spin_lock_irq(&phba->hbalock);
4222 spin_lock(&phba->sli4_hba.sgl_list_lock);
4223 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4230 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4231 sglq_entry->phys);
4236 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4237 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4238 spin_unlock_irq(&phba->hbalock);
4241 "6306 NVMET xri-sgl count unchanged: %d\n",
4243 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4249 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4256 rc = -ENOMEM;
4259 sglq_entry->sli4_lxritag = lxri;
4260 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4279 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4280 qp = &phba->sli4_hba.hdwq[idx];
4281 spin_lock_irq(&qp->io_buf_list_get_lock);
4282 spin_lock(&qp->io_buf_list_put_lock);
4285 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4286 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4287 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4288 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4289 cnt += qp->get_io_bufs + qp->put_io_bufs;
4290 qp->get_io_bufs = 0;
4291 qp->put_io_bufs = 0;
4292 qp->total_io_bufs = 0;
4293 spin_unlock(&qp->io_buf_list_put_lock);
4294 spin_unlock_irq(&qp->io_buf_list_get_lock);
4307 list_add_tail(&lpfc_cmd->list, cbuf);
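/* Otherwise scan for the first entry with a larger XRI and insert in
 * front of it, keeping the list sorted by sli4_xritag in ascending order.
 */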
4310 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4314 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4316 list_add(&lpfc_cmd->list,
4317 &prev_iobufp->list);
4319 list_add(&lpfc_cmd->list, cbuf);
4326 list_add_tail(&lpfc_cmd->list, cbuf);
4339 qp = phba->sli4_hba.hdwq;
4342 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4348 qp = &phba->sli4_hba.hdwq[idx];
4349 lpfc_cmd->hdwq_no = idx;
4350 lpfc_cmd->hdwq = qp;
4351 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4352 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
4353 list_add_tail(&lpfc_cmd->list,
4354 &qp->lpfc_io_buf_list_put);
4355 qp->put_io_bufs++;
4356 qp->total_io_bufs++;
4357 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
4365 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4374 * 0 - successful (for now, it always returns 0)
4386 * update on pci function's allocated nvme xri-sgl list
4391 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4392 phba->sli4_hba.io_xri_max = io_xri_max;
4397 phba->sli4_hba.io_xri_cnt,
4398 phba->sli4_hba.io_xri_max,
4403 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4405 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4406 phba->sli4_hba.io_xri_max;
4412 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4413 lpfc_ncmd->data,
4414 lpfc_ncmd->dma_handle);
4418 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4424 phba->sli4_hba.io_xri_cnt = cnt;
4433 rc = -ENOMEM;
4436 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4437 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4448 * lpfc_new_io_buf - IO buffer allocator for an HBA with the SLI4 IF spec
4452 * This routine allocates nvme buffers for a device with the SLI-4 interface spec,
4458 * int - number of IO buffers that were allocated and posted.
4472 phba->sli4_hba.io_xri_cnt = 0;
4482 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4484 &lpfc_ncmd->dma_handle);
4485 if (!lpfc_ncmd->data) {
4490 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4491 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4497 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4498 (((unsigned long)(lpfc_ncmd->data) &
4499 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
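/* With BlockGuard enabled the SGL buffer must start on a SLI4_PAGE_SIZE
 * boundary; any address with low-order page bits set is released and the
 * allocation loop moves on.
 */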
4504 (unsigned long)lpfc_ncmd->data);
4505 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4506 lpfc_ncmd->data,
4507 lpfc_ncmd->dma_handle);
4513 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4517 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4518 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4522 pwqeq = &lpfc_ncmd->cur_iocbq;
4524 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4527 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4528 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4536 pwqeq->sli4_lxritag = lxri;
4537 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4539 /* Initialize local short-hand pointers. */
4540 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4541 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4542 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4543 spin_lock_init(&lpfc_ncmd->buf_lock);
4546 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4547 phba->sli4_hba.io_xri_cnt++;
4573 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
4576 return (uint64_t)-1;
4585 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4586 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4587 mempool_free(mboxq, phba->mbox_mem_pool);
4588 return (uint64_t)-1;
4590 mb = &mboxq->u.mb;
4591 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4593 mempool_free(mboxq, phba->mbox_mem_pool);
4594 if (phba->sli_rev == LPFC_SLI_REV4)
4602 if (phba->sli_rev == LPFC_SLI_REV4)
4603 if (phba->cfg_xpsgl && !phba->nvmet_support)
4606 return phba->cfg_scsi_seg_cnt;
4608 return phba->cfg_sg_seg_cnt;
4612 * lpfc_vmid_res_alloc - Allocates resources for VMID
4620 * Non-0 on Failure
4626 if (phba->sli_rev == LPFC_SLI_REV3) {
4627 phba->cfg_vmid_app_header = 0;
4628 phba->cfg_vmid_priority_tagging = 0;
4632 vport->vmid =
4633 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4635 if (!vport->vmid)
4636 return -ENOMEM;
4638 rwlock_init(&vport->vmid_lock);
4641 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4642 vport->vmid_inactivity_timeout =
4643 phba->cfg_vmid_inactivity_timeout;
4644 vport->max_vmid = phba->cfg_max_vmid;
4645 vport->cur_vmid_cnt = 0;
4647 vport->vmid_priority_range = bitmap_zalloc
4650 if (!vport->vmid_priority_range) {
4651 kfree(vport->vmid);
4652 return -ENOMEM;
4655 hash_init(vport->hash_table);
4661 * lpfc_create_port - Create an FC port
4673 * @vport - pointer to the virtual N_Port data structure.
4674 * NULL - port create failed.
4690 if (phba->sli_rev < LPFC_SLI_REV4 &&
4691 dev == &phba->pcidev->dev) {
4713 if (dev == &phba->pcidev->dev) {
4714 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4720 template->eh_host_reset_handler = NULL;
4723 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4733 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4740 vport = (struct lpfc_vport *) shost->hostdata;
4741 vport->phba = phba;
4742 set_bit(FC_LOADING, &vport->load_flag);
4743 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
4744 vport->fc_rscn_flush = 0;
4745 atomic_set(&vport->fc_plogi_cnt, 0);
4746 atomic_set(&vport->fc_adisc_cnt, 0);
4747 atomic_set(&vport->fc_reglogin_cnt, 0);
4748 atomic_set(&vport->fc_prli_cnt, 0);
4749 atomic_set(&vport->fc_unmap_cnt, 0);
4750 atomic_set(&vport->fc_map_cnt, 0);
4751 atomic_set(&vport->fc_npr_cnt, 0);
4752 atomic_set(&vport->fc_unused_cnt, 0);
4756 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4758 shost->unique_id = instance;
4759 shost->max_id = LPFC_MAX_TARGET;
4760 shost->max_lun = vport->cfg_max_luns;
4761 shost->this_id = -1;
4764 if (phba->sli_rev == LPFC_SLI_REV4) {
4766 &phba->sli4_hba.sli_intf);
4771 shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
4774 shost->max_cmd_len = LPFC_FCP_CDB_LEN;
4778 shost->max_cmd_len = LPFC_FCP_CDB_LEN;
4781 if (phba->sli_rev == LPFC_SLI_REV4) {
4782 if (!phba->cfg_fcp_mq_threshold ||
4783 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4784 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4786 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4787 phba->cfg_fcp_mq_threshold);
4789 shost->dma_boundary =
4790 			phba->sli4_hba.pc_sli4_params.sge_supp_len - 1;
4792 /* SLI-3 has a limited number of hardware queues (3),
4795 shost->nr_hw_queues = 1;
4802 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4803 if (dev != &phba->pcidev->dev) {
4804 shost->transportt = lpfc_vport_transport_template;
4805 vport->port_type = LPFC_NPIV_PORT;
4807 shost->transportt = lpfc_transport_template;
4808 vport->port_type = LPFC_PHYSICAL_PORT;
4814 vport->port_type, shost->sg_tablesize,
4815 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4824 INIT_LIST_HEAD(&vport->fc_nodes);
4825 spin_lock_init(&vport->fc_nodes_list_lock);
4826 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4827 spin_lock_init(&vport->work_port_lock);
4829 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4831 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4833 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4835 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4838 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4842 spin_lock_irq(&phba->port_list_lock);
4843 list_add_tail(&vport->listentry, &phba->port_list);
4844 spin_unlock_irq(&phba->port_list_lock);
4848 kfree(vport->vmid);
4849 bitmap_free(vport->vmid_priority_range);
4857 * destroy_port - destroy an FC port
4867 struct lpfc_hba *phba = vport->phba;
4873 spin_lock_irq(&phba->port_list_lock);
4874 list_del_init(&vport->listentry);
4875 spin_unlock_irq(&phba->port_list_lock);
4882 * lpfc_get_instance - Get a unique integer ID
4888 * instance - a unique integer ID allocated as the new instance.
4889 * -1 - lpfc get instance failed.
4897 return ret < 0 ? -1 : ret;
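
/*
 * Illustrative sketch, not driver code: the instance number above comes
 * from the standard idr pattern.  A minimal stand-alone user looks like
 * this (my_idr and both function names are hypothetical):
 */
static DEFINE_IDR(my_idr);

static int my_instance_get(void *owner)
{
	/* returns the lowest free id >= 0, or a negative errno */
	return idr_alloc(&my_idr, owner, 0, 0, GFP_KERNEL);
}

static void my_instance_put(int id)
{
	idr_remove(&my_idr, id);	/* id may now be reused */
}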
4901 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4912 * 0 - SCSI host scan is not over yet.
4913 * 1 - SCSI host scan is over.
4917 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4918 struct lpfc_hba *phba = vport->phba;
4921 spin_lock_irq(shost->host_lock);
4923 if (test_bit(FC_UNLOADING, &vport->load_flag)) {
4935 phba->link_state <= LPFC_LINK_DOWN) {
4943 if (vport->port_state != LPFC_VPORT_READY)
4945 if (vport->num_disc_nodes || vport->fc_prli_sent)
4947 if (!atomic_read(&vport->fc_map_cnt) &&
4950 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4956 spin_unlock_irq(shost->host_lock);
4962 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4963 struct lpfc_hba *phba = vport->phba;
4970 if (test_bit(HBA_FCOE_MODE, &phba->hba_flag))
4973 if (phba->lmt & LMT_256Gb)
4975 if (phba->lmt & LMT_128Gb)
4977 if (phba->lmt & LMT_64Gb)
4979 if (phba->lmt & LMT_32Gb)
4981 if (phba->lmt & LMT_16Gb)
4983 if (phba->lmt & LMT_10Gb)
4985 if (phba->lmt & LMT_8Gb)
4987 if (phba->lmt & LMT_4Gb)
4989 if (phba->lmt & LMT_2Gb)
4991 if (phba->lmt & LMT_1Gb)
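
/*
 * Illustrative sketch, not driver code: the chain of LMT tests above maps
 * each link-mode bit onto a transport port-speed mask.  The same mapping
 * can be expressed as a table walk; lmt_speed_map[] is a hypothetical name
 * and the FC_PORTSPEED_* masks are assumed from scsi_transport_fc.h:
 */
static const struct {
	u32 lmt;
	u32 portspeed;
} lmt_speed_map[] = {
	{ LMT_32Gb, FC_PORTSPEED_32GBIT },
	{ LMT_16Gb, FC_PORTSPEED_16GBIT },
	{ LMT_8Gb,  FC_PORTSPEED_8GBIT },
	{ LMT_4Gb,  FC_PORTSPEED_4GBIT },
	/* ... the remaining LMT_* bits follow the same pattern ... */
};

static u32 lmt_to_portspeeds(u32 lmt)
{
	u32 speeds = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(lmt_speed_map); i++)
		if (lmt & lmt_speed_map[i].lmt)
			speeds |= lmt_speed_map[i].portspeed;
	return speeds;
}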
4996 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
5004 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5005 struct lpfc_hba *phba = vport->phba;
5010 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5011 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5025 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5026 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5028 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5036 fc_host_max_npiv_vports(shost) = phba->max_vpi;
5037 clear_bit(FC_LOADING, &vport->load_flag);
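
/*
 * Illustrative sketch, not driver code: the maxframe computation above
 * assembles a 12-bit buffer-to-buffer receive size from the common service
 * parameters -- the low nibble of bbRcvSizeMsb supplies bits 11:8 and
 * bbRcvSizeLsb supplies bits 7:0:
 */
static inline u32 bb_rcv_size(u8 msb, u8 lsb)
{
	return ((u32)(msb & 0x0F) << 8) | lsb;	/* e.g. 0x08, 0x00 -> 2048 */
}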
5041 * lpfc_stop_port_s3 - Stop SLI3 device port
5052 writel(0, phba->HCregaddr);
5053 readl(phba->HCregaddr); /* flush */
5055 writel(0xffffffff, phba->HAregaddr);
5056 readl(phba->HAregaddr); /* flush */
5060 phba->pport->work_port_events = 0;
5064 * lpfc_stop_port_s4 - Stop SLI4 device port
5076 if (phba->pport)
5077 phba->pport->work_port_events = 0;
5078 phba->sli4_hba.intr_enable = 0;
5082 * lpfc_stop_port - Wrapper function for stopping hba port
5091 phba->lpfc_stop_port(phba);
5093 if (phba->wq)
5094 flush_workqueue(phba->wq);
5098 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5109 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5110 spin_lock_irq(&phba->hbalock);
5112 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5114 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5115 spin_unlock_irq(&phba->hbalock);
5119 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5134 spin_lock_irq(&phba->hbalock);
5135 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5136 spin_unlock_irq(&phba->hbalock);
5140 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5142 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5143 spin_unlock_irq(&phba->hbalock);
5151 * lpfc_vmid_poll - VMID timeout detection
5166 if (phba->pport->vmid_priority_tagging) {
5168 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5172 if (phba->pport->vmid_inactivity_timeout ||
5173 test_bit(FC_DEREGISTER_ALL_APP_ID, &phba->pport->load_flag)) {
5175 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5182 mod_timer(&phba->inactive_vmid_poll,
5187 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5191 * This routine is to parse the SLI4 link-attention link fault code.
5221 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5242 /* Ignore physical link up events - wait for logical link up */
5259 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
5274 if (phba->sli_rev <= LPFC_SLI_REV3) {
5275 switch (phba->fc_linkspeed) {
5298 if (phba->sli4_hba.link_state.logical_speed)
5300 phba->sli4_hba.link_state.logical_speed;
5302 link_speed = phba->sli4_hba.link_state.speed;
5308 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5404 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5423 phba->fcoe_eventtag = acqe_link->event_tag;
5424 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5442 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5445 phba->sli.slistat.link_event++;
5448 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
5449 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5450 pmb->vport = phba->pport;
5453 phba->sli4_hba.link_state.speed =
5456 phba->sli4_hba.link_state.duplex =
5458 phba->sli4_hba.link_state.status =
5460 phba->sli4_hba.link_state.type =
5462 phba->sli4_hba.link_state.number =
5464 phba->sli4_hba.link_state.fault =
5466 phba->sli4_hba.link_state.logical_speed =
5470 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5473 phba->sli4_hba.link_state.speed,
5474 phba->sli4_hba.link_state.topology,
5475 phba->sli4_hba.link_state.status,
5476 phba->sli4_hba.link_state.type,
5477 phba->sli4_hba.link_state.number,
5478 phba->sli4_hba.link_state.logical_speed,
5479 phba->sli4_hba.link_state.fault);
5482 * topology info. Note: Optional for non FC-AL ports.
5484 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
5496 mb = &pmb->u.mb;
5497 mb->mbxStatus = MBX_SUCCESS;
5503 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5504 la->eventTag = acqe_link->event_tag;
5527 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5581 if (!phba->rx_monitor) {
5585 lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
5591 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5604 if (!phba->cgn_i)
5606 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5611 le32_add_cpu(&cp->link_integ_notification, 1);
5612 lpfc_cgn_update_tstamp(phba, &cp->stat_lnk);
5615 le32_add_cpu(&cp->delivery_notification, 1);
5616 lpfc_cgn_update_tstamp(phba, &cp->stat_delivery);
5619 le32_add_cpu(&cp->cgn_peer_notification, 1);
5620 lpfc_cgn_update_tstamp(phba, &cp->stat_peer);
5623 le32_add_cpu(&cp->cgn_notification, 1);
5624 lpfc_cgn_update_tstamp(phba, &cp->stat_fpin);
5626 if (phba->cgn_fpin_frequency &&
5627 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5628 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5629 cp->cgn_stat_npm = value;
5634 cp->cgn_info_crc = cpu_to_le32(value);
5638 * lpfc_cgn_update_tstamp - Update cmf timestamp
5651 ts->month = tm_val.tm_mon + 1;
5652 ts->day = tm_val.tm_mday;
5653 ts->year = tm_val.tm_year - 100;
5654 ts->hour = tm_val.tm_hour;
5655 ts->minute = tm_val.tm_min;
5656 ts->second = tm_val.tm_sec;
5661 ts->day, ts->month,
5662 ts->year, ts->hour,
5663 ts->minute, ts->second);
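
/*
 * Illustrative sketch, not driver code: the packed timestamp above is a
 * straight transcription of the kernel's broken-down time.  tm_mon counts
 * from 0 and tm_year from 1900, hence the "+ 1" and "- 100" adjustments
 * (the year is stored as two digits since 2000).  Stand-alone form, with
 * my_ts as a hypothetical stand-in for the driver's timestamp layout:
 */
struct my_ts { u8 month, day, year, hour, minute, second; };

static void my_fill_ts(struct my_ts *ts)
{
	struct tm tm_val;

	time64_to_tm(ktime_get_real_seconds(), 0, &tm_val);
	ts->month = tm_val.tm_mon + 1;		/* 1..12 */
	ts->day = tm_val.tm_mday;
	ts->year = tm_val.tm_year - 100;	/* e.g. 2024 -> 24 */
	ts->hour = tm_val.tm_hour;
	ts->minute = tm_val.tm_min;
	ts->second = tm_val.tm_sec;
}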
5667 * lpfc_cmf_stats_timer - Save data into registered congestion buffer
5693 if (!phba->cgn_i)
5695 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5697 phba->cgn_evt_timestamp = jiffies +
5699 phba->cgn_evt_minute++;
5702 lpfc_cgn_update_tstamp(phba, &cp->base_time);
5704 if (phba->cgn_fpin_frequency &&
5705 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5706 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5707 cp->cgn_stat_npm = value;
5711 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5712 latsum = atomic64_read(&phba->cgn_latency_evt);
5713 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5714 atomic64_set(&phba->cgn_latency_evt, 0);
5720 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5721 phba->rx_block_cnt = 0;
5726 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5727 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5728 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5729 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5732 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5733 cp->cgn_lunq = cpu_to_le16(value);
5735 /* Record congestion buffer info - every minute
5741 index = ++cp->cgn_index_minute;
5742 if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5743 cp->cgn_index_minute = 0;
5748 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5749 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5751 /* Get the number of warning events - FPIN and Signal for this minute */
5753 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5754 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5755 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5756 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5757 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5759 /* Get the number of alarm events - FPIN and Signal for this minute */
5761 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5762 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5763 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5764 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5769 ptr = &cp->cgn_drvr_min[index];
5773 ptr = &cp->cgn_warn_min[index];
5777 ptr = &cp->cgn_alarm_min[index];
5781 lptr = &cp->cgn_latency_min[index];
5790 mptr = &cp->cgn_bw_min[index];
5794 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5798 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5799 /* Record congestion buffer info - every hour
5802 index = ++cp->cgn_index_hour;
5803 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5804 cp->cgn_index_hour = 0;
5815 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5816 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5817 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5818 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5819 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5826 lptr = &cp->cgn_drvr_hr[index];
5828 lptr = &cp->cgn_warn_hr[index];
5830 lptr = &cp->cgn_latency_hr[index];
5832 mptr = &cp->cgn_bw_hr[index];
5834 lptr = &cp->cgn_alarm_hr[index];
5838 "2419 Congestion Info - hour "
5844 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5845 		/* Record congestion buffer info - every day
5849 index = ++cp->cgn_index_day;
5850 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5851 cp->cgn_index_day = 0;
5862 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5863 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5864 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5865 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5866 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5873 lptr = &cp->cgn_drvr_day[index];
5875 lptr = &cp->cgn_warn_day[index];
5877 lptr = &cp->cgn_latency_day[index];
5879 mptr = &cp->cgn_bw_day[index];
5881 lptr = &cp->cgn_alarm_day[index];
5885 "2420 Congestion Info - daily (%d): "
5891 value = phba->cgn_fpin_frequency;
5892 cp->cgn_warn_freq = cpu_to_le16(value);
5893 cp->cgn_alarm_freq = cpu_to_le16(value);
5897 cp->cgn_info_crc = cpu_to_le32(lvalue);
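
/*
 * Illustrative sketch, not driver code: the rollup above is a three-level
 * ring -- per-minute buckets wrap every LPFC_MIN_HOUR entries, at which
 * point the minute buckets are summed into one hour bucket, and the hour
 * ring in turn wraps into a day bucket every LPFC_HOUR_DAY entries.  The
 * core step, with hypothetical names (byte-order handling omitted):
 */
static u32 roll_up(const u16 *fine_ring, int n, u32 *coarse_slot)
{
	u32 sum = 0;
	int i;

	for (i = 0; i < n; i++)		/* e.g. n = 60 minutes per hour */
		sum += fine_ring[i];
	*coarse_slot = sum;		/* one coarse bucket per wrap */
	return sum;
}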
5905 * lpfc_calc_cmf_latency - latency from start of rxate timer interval
5923 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5924 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5927 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5928 msec = (cmpl_time.tv_sec -
5929 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5930 msec += ((cmpl_time.tv_nsec -
5931 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5933 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5935 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
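
/*
 * Illustrative sketch, not driver code: the branches above reduce a
 * timespec64 delta to milliseconds, borrowing one second when the end
 * nanoseconds are smaller than the start nanoseconds.  Condensed to two
 * cases in a stand-alone helper:
 */
static u32 ts_delta_ms(const struct timespec64 *start,
		       const struct timespec64 *end)
{
	u32 msec;

	if (end->tv_nsec >= start->tv_nsec) {
		msec = (end->tv_sec - start->tv_sec) * MSEC_PER_SEC;
		msec += (end->tv_nsec - start->tv_nsec) / NSEC_PER_MSEC;
	} else {
		/* borrow a second to keep the nanosecond term positive */
		msec = (end->tv_sec - start->tv_sec - 1) * MSEC_PER_SEC;
		msec += ((NSEC_PER_SEC - start->tv_nsec) + end->tv_nsec) /
			NSEC_PER_MSEC;
	}
	return msec;
}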
5943 * lpfc_cmf_timer - This is the timer function for one congestion
5962 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5963 !phba->cmf_latency.tv_sec) {
5966 phba->cmf_active_mode,
5967 (uint64_t)phba->cmf_latency.tv_sec);
5974 if (!phba->pport)
5980 atomic_set(&phba->cmf_stop_io, 1);
5994 ktime_get_real_ts64(&phba->cmf_latency);
5996 phba->cmf_link_byte_count =
5997 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6005 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6006 total += atomic64_xchg(&cgs->total_bytes, 0);
6007 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6008 lat += atomic64_xchg(&cgs->rx_latency, 0);
6009 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
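
	/*
	 * Illustrative note, not driver code: the loop above drains per-CPU
	 * counters with atomic64_xchg() so every byte is accounted exactly
	 * once per timer interval.  Stand-alone shape of the pattern, with
	 * hypothetical names (my_stat, my_drain):
	 *
	 *	struct my_stat { atomic64_t bytes; };
	 *
	 *	static u64 my_drain(struct my_stat __percpu *stat)
	 *	{
	 *		u64 total = 0;
	 *		int cpu;
	 *
	 *		for_each_present_cpu(cpu)
	 *			total += atomic64_xchg(
	 *				&per_cpu_ptr(stat, cpu)->bytes, 0);
	 *		return total;
	 *	}
	 */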
6017 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6018 phba->link_state != LPFC_LINK_DOWN &&
6019 test_bit(HBA_SETUP, &phba->hba_flag)) {
6020 mbpi = phba->cmf_last_sync_bw;
6021 phba->cmf_last_sync_bw = 0;
6032 extra = cnt - total;
6039 mbpi = phba->cmf_link_byte_count;
6042 phba->cmf_timer_cnt++;
6046 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6047 atomic64_add(lat, &phba->cgn_latency_evt);
6049 busy = atomic_xchg(&phba->cmf_busy, 0);
6050 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6054 if (mbpi > phba->cmf_link_byte_count ||
6055 phba->cmf_active_mode == LPFC_CFG_MONITOR)
6056 mbpi = phba->cmf_link_byte_count;
6061 if (mbpi != phba->cmf_max_bytes_per_interval)
6062 phba->cmf_max_bytes_per_interval = mbpi;
6066 if (phba->rx_monitor) {
6071 entry.cmf_info = phba->cmf_active_info;
6082 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6083 entry.timer_utilization = phba->cmf_last_ts;
6087 phba->cmf_last_ts = 0;
6089 lpfc_rx_monitor_record(phba->rx_monitor, &entry);
6092 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6097 atomic_inc(&phba->cgn_driver_evt_cnt);
6099 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
6104 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6105 queue_work(phba->wq, &phba->unblock_request_work);
6108 atomic_set(&phba->cmf_stop_io, 0);
6118 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6133 phba->sli4_hba.link_state.speed =
6137 phba->sli4_hba.link_state.logical_speed =
6140 phba->fc_linkspeed =
6146 phba->trunk_link.link0.state =
6149 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6153 phba->trunk_link.link1.state =
6156 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6160 phba->trunk_link.link2.state =
6163 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6167 phba->trunk_link.link3.state =
6170 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6175 phba->trunk_link.phy_lnk_speed =
6176 phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
6178 phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
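
	/*
	 * Illustrative note, not driver code: with trunking active the
	 * firmware reports the aggregate logical speed in Mbps, so the
	 * per-member physical speed in Gbps computed above is simply
	 *
	 *	phy_gbps = logical_mbps / (active_links * 1000);
	 *
	 * e.g. a 4 x 32G trunk: 128000 / (4 * 1000) = 32 Gbps per link.
	 */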
6181 "2910 Async FC Trunking Event - Speed:%d\n"
6184 phba->sli4_hba.link_state.speed,
6185 phba->sli4_hba.link_state.logical_speed,
6189 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6196 * SLI-4: We have only 0xA error codes
6208 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6240 phba->sli4_hba.link_state.speed =
6243 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6244 phba->sli4_hba.link_state.topology =
6246 phba->sli4_hba.link_state.status =
6248 phba->sli4_hba.link_state.type =
6250 phba->sli4_hba.link_state.number =
6252 phba->sli4_hba.link_state.fault =
6254 phba->sli4_hba.link_state.link_status =
6261 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
6262 phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6265 phba->sli4_hba.link_state.logical_speed = 0;
6266 else if (!phba->sli4_hba.conf_trunk)
6267 phba->sli4_hba.link_state.logical_speed =
6272 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6275 phba->sli4_hba.link_state.speed,
6276 phba->sli4_hba.link_state.topology,
6277 phba->sli4_hba.link_state.status,
6278 phba->sli4_hba.link_state.type,
6279 phba->sli4_hba.link_state.number,
6280 phba->sli4_hba.link_state.logical_speed,
6281 phba->sli4_hba.link_state.fault,
6282 phba->sli4_hba.link_state.link_status);
6289 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6290 switch (phba->sli4_hba.link_state.status) {
6293 phba->sli4_hba.link_state.status =
6304 phba->sli4_hba.link_state.status =
6312 "2992 Async FC event - Informational Link "
6318 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6335 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6338 phba->sli.slistat.link_event++;
6341 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
6342 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6343 pmb->vport = phba->pport;
6345 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6346 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6348 switch (phba->sli4_hba.link_state.status) {
6350 phba->link_flag |= LS_MDS_LINK_DOWN;
6353 phba->link_flag |= LS_MDS_LOOPBACK;
6360 mb = &pmb->u.mb;
6361 mb->mbxStatus = MBX_SUCCESS;
6367 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6368 la->eventTag = acqe_fc->event_tag;
6370 if (phba->sli4_hba.link_state.status ==
6394 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6418 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6420 acqe_sli->event_data1, acqe_sli->event_data2,
6421 acqe_sli->event_data3, acqe_sli->trailer);
6423 port_name = phba->Port[0];
6431 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6434 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6435 acqe_sli->event_data1, port_name);
6437 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6438 shost = lpfc_shost_from_vport(phba->pport);
6448 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6451 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6452 acqe_sli->event_data1, port_name);
6454 shost = lpfc_shost_from_vport(phba->pport);
6463 &acqe_sli->event_data1;
6466 switch (phba->sli4_hba.lnk_info.lnk_no) {
6469 &misconfigured->theEvent);
6471 &misconfigured->theEvent);
6475 &misconfigured->theEvent);
6477 &misconfigured->theEvent);
6481 &misconfigured->theEvent);
6483 &misconfigured->theEvent);
6487 &misconfigured->theEvent);
6489 &misconfigured->theEvent);
6496 phba->sli4_hba.lnk_info.lnk_no);
6501 if (phba->sli4_hba.lnk_info.optic_state == status)
6510 "installed/not installed - Reseat optics, "
6515 "Optics of two types installed - Remove one "
6519 sprintf(message, "Incompatible optics - Replace with "
6523 sprintf(message, "Unqualified optics - Replace with "
6525 "Support - Link is%s operational",
6529 sprintf(message, "Uncertified optics - Replace with "
6530 "Avago-certified optics to enable link "
6531 "operation - Link is%s operational",
6543 phba->lmt = 0;
6557 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6565 phba->sli4_hba.lnk_info.optic_state = status;
6571 "3192 Remote DPort Test Initiated - "
6573 acqe_sli->event_data1, acqe_sli->event_data2);
6581 		 * to use FA-WWN, but the attached device doesn't support it.
6582 * Event Data1 - N.A, Event Data2 - N.A
6586 "2699 Misconfigured FA-PWWN - Attached device "
6587 "does not support FA-PWWN\n");
6588 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6589 memset(phba->pport->fc_portname.u.wwn, 0,
6595 "2518 EEPROM failure - "
6597 acqe_sli->event_data1, acqe_sli->event_data2);
6600 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6603 &acqe_sli->event_data1;
6604 phba->cgn_acqe_cnt++;
6607 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6608 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6613 if (cgn_signal->alarm_cnt) {
6614 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6616 atomic_add(cgn_signal->alarm_cnt,
6617 &phba->cgn_sync_alarm_cnt);
6621 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6622 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6624 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6634 acqe_sli->event_data1, acqe_sli->event_data2,
6635 acqe_sli->event_data3);
6651 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6669 phba = vport->phba;
6679 ndlp->nlp_type |= NLP_FABRIC;
6683 if ((phba->pport->port_state < LPFC_FLOGI) &&
6684 (phba->pport->port_state != LPFC_VPORT_FAILED))
6687 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6688 && (vport->port_state != LPFC_VPORT_FAILED))
6695 set_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
6701 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6715 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6721 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6739 phba->fc_eventTag = acqe_fip->event_tag;
6740 phba->fcoe_eventtag = acqe_fip->event_tag;
6748 acqe_fip->event_tag,
6749 acqe_fip->index);
6755 acqe_fip->event_tag,
6756 acqe_fip->index);
6757 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6767 acqe_fip->index);
6768 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6772 if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
6774 spin_lock_irq(&phba->hbalock);
6776 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6777 spin_unlock_irq(&phba->hbalock);
6782 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6783 spin_unlock_irq(&phba->hbalock);
6786 spin_unlock_irq(&phba->hbalock);
6788 /* Otherwise, scan the entire FCF table and re-discover SAN */
6792 acqe_fip->event_tag, acqe_fip->index);
6805 acqe_fip->event_tag);
6809 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6812 "tag:x%x\n", acqe_fip->index,
6813 acqe_fip->event_tag);
6818 spin_lock_irq(&phba->hbalock);
6819 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6820 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6821 spin_unlock_irq(&phba->hbalock);
6823 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6826 spin_unlock_irq(&phba->hbalock);
6829 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6838 spin_lock_irq(&phba->hbalock);
6840 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6841 spin_unlock_irq(&phba->hbalock);
6846 "\n", acqe_fip->event_tag, acqe_fip->index);
6854 spin_lock_irq(&phba->hbalock);
6855 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6856 spin_unlock_irq(&phba->hbalock);
6873 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6877 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6880 acqe_fip->index);
6888 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6891 &vports[i]->fc_flag) &&
6892 vports[i]->port_state > LPFC_FDISC) {
6901 * Don't re-instantiate if vport is marked for deletion.
6905 if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
6909 * re-instantiate the Vlink using FDISC.
6911 mod_timer(&ndlp->nlp_delayfunc,
6913 set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
6914 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6915 vport->port_state = LPFC_FDISC;
6924 spin_lock_irq(&phba->hbalock);
6925 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6926 spin_unlock_irq(&phba->hbalock);
6930 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6931 spin_unlock_irq(&phba->hbalock);
6935 "evt_tag:x%x\n", acqe_fip->event_tag);
6943 spin_lock_irq(&phba->hbalock);
6944 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6945 spin_unlock_irq(&phba->hbalock);
6947 * Last resort will be re-try on the
6962 "0x%x\n", event_type, acqe_fip->event_tag);
6968 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6978 phba->fc_eventTag = acqe_dcbx->event_tag;
6985 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6999 phba->fc_eventTag = acqe_grp5->event_tag;
7000 phba->fcoe_eventtag = acqe_grp5->event_tag;
7001 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7002 phba->sli4_hba.link_state.logical_speed =
7007 phba->sli4_hba.link_state.logical_speed);
7011 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7020 if (!phba->cgn_i)
7026 * lpfc_cgn_params_val - Validate FW congestion parameters.
7036 spin_lock_irq(&phba->hbalock);
7038 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7042 p_cfg_param->cgn_param_mode);
7043 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7046 spin_unlock_irq(&phba->hbalock);
7056 * lpfc_cgn_params_parse - Process a FW cong parm change event
7063 * valid, in-range values. If the signature magic is correct and
7079 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7087 p_cgn_param->cgn_param_magic,
7088 p_cgn_param->cgn_param_version,
7089 p_cgn_param->cgn_param_mode,
7090 p_cgn_param->cgn_param_level0,
7091 p_cgn_param->cgn_param_level1,
7092 p_cgn_param->cgn_param_level2,
7093 p_cgn_param->byte13,
7094 p_cgn_param->byte14,
7095 p_cgn_param->byte15,
7096 p_cgn_param->byte11,
7097 p_cgn_param->byte12,
7098 phba->cmf_active_mode);
7100 oldmode = phba->cmf_active_mode;
7108 spin_lock_irq(&phba->hbalock);
7109 memcpy(&phba->cgn_p, p_cgn_param,
7113 if (phba->cgn_i) {
7114 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7115 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7116 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7117 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7118 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7121 cp->cgn_info_crc = cpu_to_le32(crc);
7123 spin_unlock_irq(&phba->hbalock);
7125 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7129 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7133 if (phba->link_state >= LPFC_LINK_UP) {
7134 phba->cgn_reg_fpin =
7135 phba->cgn_init_reg_fpin;
7136 phba->cgn_reg_signal =
7137 phba->cgn_init_reg_signal;
7138 lpfc_issue_els_edc(phba->pport, 0);
7143 switch (phba->cgn_p.cgn_param_mode) {
7147 if (phba->link_state >= LPFC_LINK_UP)
7148 lpfc_issue_els_edc(phba->pport, 0);
7151 phba->cmf_max_bytes_per_interval =
7152 phba->cmf_link_byte_count;
7154 /* Resume blocked IO - unblock on workqueue */
7155 queue_work(phba->wq,
7156 &phba->unblock_request_work);
7161 switch (phba->cgn_p.cgn_param_mode) {
7165 if (phba->link_state >= LPFC_LINK_UP)
7166 lpfc_issue_els_edc(phba->pport, 0);
7175 oldmode != phba->cgn_p.cgn_param_mode) {
7176 if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
7178 phba->cgn_p.cgn_param_level0);
7182 dev_info(&phba->pcidev->dev, "%d: "
7184 phba->brd_no,
7186 [phba->cgn_p.cgn_param_mode],
7192 "version %d\n", p_cgn_param->cgn_param_magic,
7193 p_cgn_param->cgn_param_version);
7198 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7207 *  a negative errno if an error was encountered
7222 return -ENOMEM;
7254 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7276 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7279 return -EACCES;
7299 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7311 clear_bit(ASYNC_EVENT, &phba->hba_flag);
7314 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7315 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7316 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7318 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7322 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7325 &cq_event->cqe.acqe_link);
7328 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7332 &cq_event->cqe.acqe_dcbx);
7336 &cq_event->cqe.acqe_grp5);
7339 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7342 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7349 &cq_event->cqe.mcqe_cmpl));
7355 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7357 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7361 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7371 spin_lock_irq(&phba->hbalock);
7373 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7375 phba->fcf.failover_rec.flag = 0;
7377 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7378 spin_unlock_irq(&phba->hbalock);
7380 /* Scan FCF table from the first entry to re-discover SAN */
7382 "2777 Start post-quiescent FCF table scan\n");
7391 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7393 * @dev_grp: The HBA PCI-Device group number.
7395 * This routine is invoked to set up the per HBA PCI-Device group function
7398 * Return: 0 if successful, otherwise -ENODEV
7405 /* Set up lpfc PCI-device group */
7406 phba->pci_dev_grp = dev_grp;
7410 phba->sli_rev = LPFC_SLI_REV4;
7415 return -ENODEV;
7419 return -ENODEV;
7423 return -ENODEV;
7427 return -ENODEV;
7433 * lpfc_log_intr_mode - Log the active interrupt mode
7453 "0480 Enabled MSI-X interrupt mode.\n");
7464 * lpfc_enable_pci_dev - Enable a generic PCI device.
7471 * 0 - successful
7472 * other values - error
7480 if (!phba->pcidev)
7483 pdev = phba->pcidev;
7497 pdev->needs_freset = 1;
7506 return -ENODEV;
7510 * lpfc_disable_pci_dev - Disable a generic PCI device.
7522 if (!phba->pcidev)
7525 pdev = phba->pcidev;
7534 * lpfc_reset_hba - Reset a hba
7548 if (!phba->cfg_enable_hba_reset) {
7549 phba->link_state = LPFC_HBA_ERROR;
7554 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7557 if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
7566 clear_bit(MBX_TMO_ERR, &phba->bit_flags);
7579 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7582 * This function enables the PCI SR-IOV virtual functions to a physical
7583 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7585 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7591 struct pci_dev *pdev = phba->pcidev;
7604 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7608 * This function enables the PCI SR-IOV virtual functions to a physical
7609 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7611 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7617 struct pci_dev *pdev = phba->pcidev;
7626 return -EINVAL;
7652 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
7659 * 0 - successful
7660 * other values - error
7665 struct lpfc_sli *psli = &phba->sli;
7670 atomic_set(&phba->fast_event_count, 0);
7671 atomic_set(&phba->dbg_log_idx, 0);
7672 atomic_set(&phba->dbg_log_cnt, 0);
7673 atomic_set(&phba->dbg_log_dmping, 0);
7674 spin_lock_init(&phba->hbalock);
7677 spin_lock_init(&phba->port_list_lock);
7678 INIT_LIST_HEAD(&phba->port_list);
7680 INIT_LIST_HEAD(&phba->work_list);
7683 init_waitqueue_head(&phba->work_waitq);
7687 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7689 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7691 (phba->nvmet_support ? "NVMET" : " "));
7694 spin_lock_init(&phba->ras_fwlog_lock);
7697 spin_lock_init(&phba->scsi_buf_list_get_lock);
7698 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7699 spin_lock_init(&phba->scsi_buf_list_put_lock);
7700 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7703 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7706 INIT_LIST_HEAD(&phba->elsbuf);
7709 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7712 spin_lock_init(&phba->devicelock);
7713 INIT_LIST_HEAD(&phba->luns);
7716 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7718 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7720 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7722 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7724 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7726 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7728 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7733 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7737 * support the SLI-3 HBA device it attached to.
7740 * 0 - successful
7741 * other values - error
7753 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7756 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7757 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7761 /* Set up phase-1 common device driver resources */
7765 return -ENODEV;
7767 if (!phba->sli.sli3_ring)
7768 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7771 if (!phba->sli.sli3_ring)
7772 return -ENOMEM;
7779 if (phba->sli_rev == LPFC_SLI_REV4)
7785 if (phba->cfg_enable_bg) {
7787 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7795 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7799 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7800 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7803 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7810 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7812 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7815 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7820 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7821 phba->cfg_total_seg_cnt);
7823 phba->max_vpi = LPFC_MAX_VPI;
7825 phba->max_vports = 0;
7835 return -ENOMEM;
7837 phba->lpfc_sg_dma_buf_pool =
7839 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7842 if (!phba->lpfc_sg_dma_buf_pool)
7845 phba->lpfc_cmd_rsp_buf_pool =
7847 &phba->pcidev->dev,
7852 if (!phba->lpfc_cmd_rsp_buf_pool)
7856 * Enable sr-iov virtual functions if supported and configured
7859 if (phba->cfg_sriov_nr_virtfn > 0) {
7861 phba->cfg_sriov_nr_virtfn);
7864 "2808 Requested number of SR-IOV "
7867 phba->cfg_sriov_nr_virtfn);
7868 phba->cfg_sriov_nr_virtfn = 0;
7875 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7876 phba->lpfc_sg_dma_buf_pool = NULL;
7879 return -ENOMEM;
7883 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7887 * specific for supporting the SLI-3 HBA device it attached to.
7899 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7903 * support the SLI-4 HBA device it attached to.
7906 * 0 - successful
7907 * other values - error
7921 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7922 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7923 phba->sli4_hba.curr_disp_cpu = 0;
7928 /* Set up phase-1 common device driver resources */
7931 return -ENODEV;
7936 return -ENODEV;
7941 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7942 if (!phba->wq)
7943 return -ENOMEM;
7949 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7952 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7955 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7956 phba->cmf_timer.function = lpfc_cmf_timer;
7958 hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7959 phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
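
	/*
	 * Illustrative note, not driver code: this is the usual hrtimer
	 * shape -- bind the callback at init, arm it later with a relative
	 * expiry, and re-arm by returning HRTIMER_RESTART.  Sketch with a
	 * hypothetical callback:
	 *
	 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
	 *	{
	 *		hrtimer_forward_now(t, ms_to_ktime(1000));
	 *		return HRTIMER_RESTART;	 (fire again in one second)
	 *	}
	 *
	 *	hrtimer_start(&timer, ms_to_ktime(1000), HRTIMER_MODE_REL);
	 */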
7962 * Control structure for handling external multi-buffer mailbox
7963 * command pass-through.
7965 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7967 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7969 phba->max_vpi = LPFC_MAX_VPI;
7972 phba->max_vports = 0;
7975 phba->valid_vlan = 0;
7976 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7977 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7978 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7987 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7988 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7989 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7993 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
7999 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8000 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8002 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8004 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8005 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8006 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8007 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8008 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8012 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8013 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8014 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8015 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8018 * Initialize driver internal slow-path work queues
8021 	/* Driver internal slow-path CQ Event pool */
8022 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8024 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8026 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8027 /* Slow-path XRI aborted CQ Event work queue list */
8028 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8030 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8033 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8034 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8035 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8036 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8041 INIT_LIST_HEAD(&phba->sli.mboxq);
8042 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8045 phba->sli4_hba.lnk_info.optic_state = 0xff;
8053 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8057 rc = -ENODEV;
8060 phba->temp_sensor_support = 1;
8078 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8079 		/* Right now the link is down; if FA-PWWN is configured, the
8086 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8094 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8101 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8104 rc = -ENOMEM;
8109 phba->nvmet_support = 0;
8120 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8121 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8122 mempool_free(mboxq, phba->mbox_mem_pool);
8123 rc = -EIO;
8126 mb = &mboxq->u.mb;
8127 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8130 phba->sli4_hba.wwnn.u.name = wwn;
8131 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8135 phba->sli4_hba.wwpn.u.name = wwn;
8144 phba->nvmet_support = 1; /* a match */
8158 phba->cfg_xri_rebalancing = 0;
8159 if (phba->irq_chann_mode == NHT_MODE) {
8160 phba->cfg_irq_chann =
8161 phba->sli4_hba.num_present_cpu;
8162 phba->cfg_hdw_queue =
8163 phba->sli4_hba.num_present_cpu;
8164 phba->irq_chann_mode = NORMAL_MODE;
8181 &phba->sli4_hba.sli_intf);
8183 &phba->sli4_hba.sli_intf);
8184 if (phba->sli4_hba.extents_in_use &&
8185 phba->sli4_hba.rpi_hdrs_in_use) {
8191 mempool_free(mboxq, phba->mbox_mem_pool);
8192 rc = -EIO;
8198 mempool_free(mboxq, phba->mbox_mem_pool);
8199 rc = -EIO;
8209 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8223 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8227 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8235 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) +
8239 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8245 if (phba->cfg_enable_bg &&
8246 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8247 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8249 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8257 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) +
8259 ((phba->cfg_sg_seg_cnt + extra) *
8263 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8264 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8267 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8272 if (phba->cfg_xpsgl && !phba->nvmet_support)
8273 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8274 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8275 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8277 phba->cfg_sg_dma_buf_size =
8278 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8280 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8284 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8285 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8290 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8292 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8298 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8299 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8300 phba->cfg_nvme_seg_cnt);
8302 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8303 i = phba->cfg_sg_dma_buf_size;
8307 phba->lpfc_sg_dma_buf_pool =
8309 &phba->pcidev->dev,
8310 phba->cfg_sg_dma_buf_size,
8312 if (!phba->lpfc_sg_dma_buf_pool) {
8313 rc = -ENOMEM;
8317 phba->lpfc_cmd_rsp_buf_pool =
8319 &phba->pcidev->dev,
8323 if (!phba->lpfc_cmd_rsp_buf_pool) {
8324 rc = -ENOMEM;
8328 mempool_free(mboxq, phba->mbox_mem_pool);
8364 	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG;
8365 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8367 if (!phba->fcf.fcf_rr_bmask) {
8371 rc = -ENOMEM;
8375 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8378 if (!phba->sli4_hba.hba_eq_hdl) {
8381 "fast-path per-EQ handle array\n");
8382 rc = -ENOMEM;
8386 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8389 if (!phba->sli4_hba.cpu_map) {
8391 "3327 Failed allocate memory for msi-x "
8393 rc = -ENOMEM;
8397 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8398 if (!phba->sli4_hba.eq_info) {
8401 rc = -ENOMEM;
8405 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8406 sizeof(*phba->sli4_hba.idle_stat),
8408 if (!phba->sli4_hba.idle_stat) {
8411 rc = -ENOMEM;
8416 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8417 if (!phba->sli4_hba.c_stat) {
8420 rc = -ENOMEM;
8425 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8426 if (!phba->cmf_stat) {
8429 rc = -ENOMEM;
8434 * Enable sr-iov virtual functions if supported and configured
8437 if (phba->cfg_sriov_nr_virtfn > 0) {
8439 phba->cfg_sriov_nr_virtfn);
8442 "3020 Requested number of SR-IOV "
8445 phba->cfg_sriov_nr_virtfn);
8446 phba->cfg_sriov_nr_virtfn = 0;
8454 free_percpu(phba->sli4_hba.c_stat);
8457 kfree(phba->sli4_hba.idle_stat);
8459 free_percpu(phba->sli4_hba.eq_info);
8461 kfree(phba->sli4_hba.cpu_map);
8463 kfree(phba->sli4_hba.hba_eq_hdl);
8465 kfree(phba->fcf.fcf_rr_bmask);
8473 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8474 phba->lpfc_cmd_rsp_buf_pool = NULL;
8476 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8477 phba->lpfc_sg_dma_buf_pool = NULL;
8483 destroy_workqueue(phba->wq);
8484 phba->wq = NULL;
8489 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8493 * specific for supporting the SLI-4 HBA device it attached to.
8500 free_percpu(phba->sli4_hba.eq_info);
8502 free_percpu(phba->sli4_hba.c_stat);
8504 free_percpu(phba->cmf_stat);
8505 kfree(phba->sli4_hba.idle_stat);
8507 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8508 kfree(phba->sli4_hba.cpu_map);
8509 phba->sli4_hba.num_possible_cpu = 0;
8510 phba->sli4_hba.num_present_cpu = 0;
8511 phba->sli4_hba.curr_disp_cpu = 0;
8512 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8514 /* Free memory allocated for fast-path work queue handles */
8515 kfree(phba->sli4_hba.hba_eq_hdl);
8522 kfree(phba->fcf.fcf_rr_bmask);
8544 &phba->fcf_conn_rec_list, list) {
8545 list_del_init(&conn_entry->list);
8553 * lpfc_init_api_table_setup - Set up init api function jump table
8555 * @dev_grp: The HBA PCI-Device group number.
8560 * Returns: 0 - success, -ENODEV - failure.
8565 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8566 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8567 phba->lpfc_selective_reset = lpfc_selective_reset;
8570 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8571 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8572 phba->lpfc_stop_port = lpfc_stop_port_s3;
8575 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8576 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8577 phba->lpfc_stop_port = lpfc_stop_port_s4;
8581 "1431 Invalid HBA PCI-device group: 0x%x\n",
8583 return -ENODEV;
8589 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8596 * 0 - successful
8597 * other values - error
8605 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8606 "lpfc_worker_%d", phba->brd_no);
8607 if (IS_ERR(phba->worker_thread)) {
8608 error = PTR_ERR(phba->worker_thread);
8616 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8626 if (phba->wq) {
8627 destroy_workqueue(phba->wq);
8628 phba->wq = NULL;
8632 if (phba->worker_thread)
8633 kthread_stop(phba->worker_thread);
8637 * lpfc_free_iocb_list - Free iocb list.
8647 spin_lock_irq(&phba->hbalock);
8649 &phba->lpfc_iocb_list, list) {
8650 list_del(&iocbq_entry->list);
8652 phba->total_iocbq_bufs--;
8654 spin_unlock_irq(&phba->hbalock);
8660 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8668 * 0 - successful
8669 * other values - error
8679 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8696 iocbq_entry->sli4_lxritag = NO_XRI;
8697 iocbq_entry->sli4_xritag = NO_XRI;
8699 spin_lock_irq(&phba->hbalock);
8700 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8701 phba->total_iocbq_bufs++;
8702 spin_unlock_irq(&phba->hbalock);
8710 return -ENOMEM;
8714 * lpfc_free_sgl_list - Free a given sgl list.
8726 list_del(&sglq_entry->list);
8727 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8733 * lpfc_free_els_sgl_list - Free els sgl list.
8744 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8745 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8746 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8753 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8765 spin_lock_irq(&phba->hbalock);
8766 spin_lock(&phba->sli4_hba.sgl_list_lock);
8767 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8768 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8769 spin_unlock_irq(&phba->hbalock);
8773 list_del(&sglq_entry->list);
8774 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8782 phba->sli4_hba.nvmet_xri_cnt = 0;
8786 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8797 size *= phba->sli4_hba.max_cfg_param.max_xri;
8799 phba->sli4_hba.lpfc_sglq_active_list =
8801 if (!phba->sli4_hba.lpfc_sglq_active_list)
8802 return -ENOMEM;
8807 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8817 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8821 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8832 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8833 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8834 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8835 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8837 /* els xri-sgl book keeping */
8838 phba->sli4_hba.els_xri_cnt = 0;
8840 /* nvme xri-buffer book keeping */
8841 phba->sli4_hba.io_xri_cnt = 0;
8845 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8855 * 0 - successful
8856 * -ERROR - otherwise.
8864 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8865 if (!phba->sli4_hba.rpi_hdrs_in_use)
8867 if (phba->sli4_hba.extents_in_use)
8868 return -EIO;
8875 rc = -ENODEV;
8882 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8906 if (!phba->sli4_hba.rpi_hdrs_in_use)
8908 if (phba->sli4_hba.extents_in_use)
8912 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8914 spin_lock_irq(&phba->hbalock);
8920 curr_rpi_range = phba->sli4_hba.next_rpi;
8921 spin_unlock_irq(&phba->hbalock);
8929 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8935 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8937 &dmabuf->phys, GFP_KERNEL);
8938 if (!dmabuf->virt) {
8943 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8953 rpi_hdr->dmabuf = dmabuf;
8954 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8955 rpi_hdr->page_count = 1;
8956 spin_lock_irq(&phba->hbalock);
8959 rpi_hdr->start_rpi = curr_rpi_range;
8960 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8961 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8963 spin_unlock_irq(&phba->hbalock);
8967 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8968 dmabuf->virt, dmabuf->phys);
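
/*
 * Illustrative sketch, not driver code: dma_alloc_coherent() guarantees at
 * least page alignment, but because the port requires the RPI header region
 * to sit on an LPFC_HDR_TEMPLATE_SIZE (4 KB) boundary, the code above still
 * verifies the bus address and fails the allocation rather than post a
 * misaligned region:
 */
static inline bool rpi_hdr_aligned(dma_addr_t phys)
{
	return IS_ALIGNED(phys, LPFC_HDR_TEMPLATE_SIZE);
}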
8975 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8988 if (!phba->sli4_hba.rpi_hdrs_in_use)
8992 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8993 list_del(&rpi_hdr->list);
8994 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8995 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8996 kfree(rpi_hdr->dmabuf);
9001 phba->sli4_hba.next_rpi = 0;
9005 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
9013 * pointer to @phba - successful
9014 * NULL - error
9024 dev_err(&pdev->dev, "failed to allocate hba struct\n");
9029 phba->pcidev = pdev;
9032 phba->brd_no = lpfc_get_instance();
9033 if (phba->brd_no < 0) {
9037 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9039 spin_lock_init(&phba->ct_ev_lock);
9040 INIT_LIST_HEAD(&phba->ct_ev_waiters);
9046 * lpfc_hba_free - Free driver hba data structure with a device.
9055 if (phba->sli_rev == LPFC_SLI_REV4)
9056 kfree(phba->sli4_hba.hdwq);
9059 idr_remove(&lpfc_hba_index, phba->brd_no);
9062 kfree(phba->sli.sli3_ring);
9063 phba->sli.sli3_ring = NULL;
9070 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9076 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
9081 struct lpfc_hba *phba = vport->phba;
9083 set_bit(FC_ALLOW_FDMI, &vport->load_flag);
9084 if (phba->cfg_enable_SmartSAN ||
9085 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9087 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9088 if (phba->cfg_enable_SmartSAN)
9089 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9091 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9096 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9100 * lpfc_create_shost - Create hba physical port with associated scsi host.
9107 * 0 - successful
9108 * other values - error
9117 phba->fc_edtov = FF_DEF_EDTOV;
9118 phba->fc_ratov = FF_DEF_RATOV;
9119 phba->fc_altov = FF_DEF_ALTOV;
9120 phba->fc_arbtov = FF_DEF_ARBTOV;
9122 atomic_set(&phba->sdev_cnt, 0);
9123 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9125 return -ENODEV;
9128 phba->pport = vport;
9130 if (phba->nvmet_support) {
9132 phba->targetport = NULL;
9133 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9140 pci_set_drvdata(phba->pcidev, shost);
9152 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9161 struct lpfc_vport *vport = phba->pport;
9170 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9183 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9188 old_mask = phba->cfg_prot_mask;
9189 old_guard = phba->cfg_prot_guard;
9192 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9195 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9199 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9200 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9202 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9203 if ((old_mask != phba->cfg_prot_mask) ||
9204 (old_guard != phba->cfg_prot_guard))
9208 phba->cfg_prot_mask,
9209 phba->cfg_prot_guard);
9211 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9212 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9222 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9235 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9241 shost = pci_get_drvdata(phba->pcidev);
9242 shost->can_queue = phba->cfg_hba_queue_depth - 10;
9246 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9247 spin_lock_irq(shost->host_lock);
9249 spin_unlock_irq(shost->host_lock);
9265 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9269 * with SLI-3 interface spec.
9272 * 0 - successful
9273 * other values - error
9278 struct pci_dev *pdev = phba->pcidev;
9285 return -ENODEV;
9288 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9290 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9293 error = -ENODEV;
9298 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9301 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9305 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9306 if (!phba->slim_memmap_p) {
9307 dev_printk(KERN_ERR, &pdev->dev,
9313 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9314 if (!phba->ctrl_regs_memmap_p) {
9315 dev_printk(KERN_ERR, &pdev->dev,
9320 /* Allocate memory for SLI-2 structures */
9321 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9322 &phba->slim2p.phys, GFP_KERNEL);
9323 if (!phba->slim2p.virt)
9326 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9327 phba->mbox_ext = (phba->slim2p.virt +
9329 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9330 phba->IOCBs = (phba->slim2p.virt +
9333 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9335 &phba->hbqslimp.phys,
9337 if (!phba->hbqslimp.virt)
9341 ptr = phba->hbqslimp.virt;
9343 phba->hbqs[i].hbq_virt = ptr;
9344 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9345 ptr += (lpfc_hbq_defs[i]->entry_count *
9348 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9349 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9351 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9353 phba->MBslimaddr = phba->slim_memmap_p;
9354 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9355 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9356 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9357 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9362 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9363 phba->slim2p.virt, phba->slim2p.phys);
9365 iounmap(phba->ctrl_regs_memmap_p);
9367 iounmap(phba->slim_memmap_p);
9373 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9377 * with SLI-3 interface spec.
9385 if (!phba->pcidev)
9388 pdev = phba->pcidev;
9391 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9392 phba->hbqslimp.virt, phba->hbqslimp.phys);
9393 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9394 phba->slim2p.virt, phba->slim2p.phys);
9397 iounmap(phba->ctrl_regs_memmap_p);
9398 iounmap(phba->slim_memmap_p);
9404 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9410 * Return 0 if successful, otherwise -ENODEV.
9422 if (!phba->sli4_hba.PSMPHRregaddr)
9423 return -ENODEV;
9427 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9431 port_error = -ENODEV;
9446 "1408 Port Failed POST - portsmphr=0x%x, "
9464 &phba->sli4_hba.sli_intf),
9466 &phba->sli4_hba.sli_intf),
9468 &phba->sli4_hba.sli_intf),
9470 &phba->sli4_hba.sli_intf),
9472 &phba->sli4_hba.sli_intf),
9474 &phba->sli4_hba.sli_intf));
9481 &phba->sli4_hba.sli_intf);
9484 phba->sli4_hba.ue_mask_lo =
9485 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9486 phba->sli4_hba.ue_mask_hi =
9487 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9489 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9491 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9492 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9493 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9504 phba->sli4_hba.ue_mask_lo,
9505 phba->sli4_hba.ue_mask_hi);
9506 port_error = -ENODEV;
9512 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9515 phba->work_status[0] =
9516 readl(phba->sli4_hba.u.if_type2.
9518 phba->work_status[1] =
9519 readl(phba->sli4_hba.u.if_type2.
9528 phba->work_status[0],
9529 phba->work_status[1]);
9530 port_error = -ENODEV;
9536 &phba->sli4_hba.sli_intf) ==
9538 pci_write_config_byte(phba->pcidev,
9550 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9562 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9563 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9564 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9565 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9566 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9567 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9568 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9569 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9570 phba->sli4_hba.SLIINTFregaddr =
9571 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9574 phba->sli4_hba.u.if_type2.EQDregaddr =
9575 phba->sli4_hba.conf_regs_memmap_p +
9577 phba->sli4_hba.u.if_type2.ERR1regaddr =
9578 phba->sli4_hba.conf_regs_memmap_p +
9580 phba->sli4_hba.u.if_type2.ERR2regaddr =
9581 phba->sli4_hba.conf_regs_memmap_p +
9583 phba->sli4_hba.u.if_type2.CTRLregaddr =
9584 phba->sli4_hba.conf_regs_memmap_p +
9586 phba->sli4_hba.u.if_type2.STATUSregaddr =
9587 phba->sli4_hba.conf_regs_memmap_p +
9589 phba->sli4_hba.SLIINTFregaddr =
9590 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9591 phba->sli4_hba.PSMPHRregaddr =
9592 phba->sli4_hba.conf_regs_memmap_p +
9594 phba->sli4_hba.RQDBregaddr =
9595 phba->sli4_hba.conf_regs_memmap_p +
9597 phba->sli4_hba.WQDBregaddr =
9598 phba->sli4_hba.conf_regs_memmap_p +
9600 phba->sli4_hba.CQDBregaddr =
9601 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9602 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9603 phba->sli4_hba.MQDBregaddr =
9604 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9605 phba->sli4_hba.BMBXregaddr =
9606 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9609 phba->sli4_hba.u.if_type2.EQDregaddr =
9610 phba->sli4_hba.conf_regs_memmap_p +
9612 phba->sli4_hba.u.if_type2.ERR1regaddr =
9613 phba->sli4_hba.conf_regs_memmap_p +
9615 phba->sli4_hba.u.if_type2.ERR2regaddr =
9616 phba->sli4_hba.conf_regs_memmap_p +
9618 phba->sli4_hba.u.if_type2.CTRLregaddr =
9619 phba->sli4_hba.conf_regs_memmap_p +
9621 phba->sli4_hba.u.if_type2.STATUSregaddr =
9622 phba->sli4_hba.conf_regs_memmap_p +
9624 phba->sli4_hba.PSMPHRregaddr =
9625 phba->sli4_hba.conf_regs_memmap_p +
9627 phba->sli4_hba.BMBXregaddr =
9628 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9632 dev_printk(KERN_ERR, &phba->pcidev->dev,
9633 "FATAL - unsupported SLI4 interface type - %d\n",
9640 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9651 phba->sli4_hba.PSMPHRregaddr =
9652 phba->sli4_hba.ctrl_regs_memmap_p +
9654 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9656 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9658 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9662 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9664 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9666 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9668 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9670 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9676 dev_err(&phba->pcidev->dev,
9677 "FATAL - unsupported SLI4 interface type - %d\n",
9684 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9691 * Return 0 if successful, otherwise -ENODEV.
9697 return -ENODEV;
9699 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9702 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9705 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9708 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9709 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9711 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9717 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9721 * region consistent with the SLI-4 interface spec. This
9728 * 0 - successful
9729 * -ENOMEM - could not allocate memory.
9742 return -ENOMEM;
9748 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9749 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9750 &dmabuf->phys, GFP_KERNEL);
9751 if (!dmabuf->virt) {
9753 return -ENOMEM;
9759 * to be 16-byte aligned. Also align the virtual memory as each
9763 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9764 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9766 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9768 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9772 * Set the high and low physical addresses now. The SLI4 alignment
9774 * as two 30-bit addresses. The other data is a bit marking whether
9775 * the 30-bit address is the high or low address.
9779 dma_address = &phba->sli4_hba.bmbx.dma_address;
9780 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9782 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9785 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9786 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
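/*
 * Illustrative sketch, not driver code: the 16-byte-aligned bootstrap
 * mailbox physical address is delivered as two 30-bit halves - bits 33:4
 * form the low half and bits 63:34 the high half - each shifted up by two
 * and tagged so the port knows which half it received. The names and tag
 * values below (sketch_*, SKETCH_HI_TAG/SKETCH_LO_TAG) are hypothetical.
 */
#define SKETCH_HI_TAG	0x1
#define SKETCH_LO_TAG	0x0
static void sketch_split_bmbx_addr(uint64_t phys, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(((phys >> 34) & 0x3fffffff) << 2) | SKETCH_HI_TAG;
	*lo = (uint32_t)(((phys >>  4) & 0x3fffffff) << 2) | SKETCH_LO_TAG;
}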
9792 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9805 dma_free_coherent(&phba->pcidev->dev,
9806 phba->sli4_hba.bmbx.bmbx_size,
9807 phba->sli4_hba.bmbx.dmabuf->virt,
9808 phba->sli4_hba.bmbx.dmabuf->phys);
9810 kfree(phba->sli4_hba.bmbx.dmabuf);
9811 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9828 * lpfc_map_topology - Map the topology read from READ_CONFIG
9853 lpfc_topo_to_str[phba->cfg_topology]);
9856 /* FW supports persistent topology - override module parameter value */
9857 set_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
9860 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9862 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9865 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9869 clear_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
9872 /* If topology failover set - pt is '0' or '1' */
9873 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9876 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9880 if (test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag))
9883 lpfc_topo_to_str[phba->cfg_topology]);
9888 lpfc_topo_to_str[phba->cfg_topology]);
9892 * lpfc_sli4_read_config - Get the config parameters.
9901 * 0 - successful
9902 * -ENOMEM - No available memory
9903 * -EIO - The mailbox failed to complete successfully.
9919 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9924 return -ENOMEM;
9934 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9935 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9936 rc = -EIO;
9938 rd_config = &pmb->u.mqe.un.rd_config;
9940 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9941 phba->sli4_hba.lnk_info.lnk_tp =
9943 phba->sli4_hba.lnk_info.lnk_no =
9947 phba->sli4_hba.lnk_info.lnk_tp,
9948 phba->sli4_hba.lnk_info.lnk_no);
9952 bf_get(lpfc_mqe_command, &pmb->u.mqe));
9954 phba->bbcredit_support = 1;
9955 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9963 "2702 READ_CONFIG: FA-PWWN is "
9965 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9968 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
9971 phba->sli4_hba.conf_trunk =
9973 phba->sli4_hba.extents_in_use =
9976 phba->sli4_hba.max_cfg_param.max_xri =
9980 phba->sli4_hba.max_cfg_param.max_xri > 512)
9981 phba->sli4_hba.max_cfg_param.max_xri = 512;
9982 phba->sli4_hba.max_cfg_param.xri_base =
9984 phba->sli4_hba.max_cfg_param.max_vpi =
9987 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9988 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9989 phba->sli4_hba.max_cfg_param.vpi_base =
9991 phba->sli4_hba.max_cfg_param.max_rpi =
9993 phba->sli4_hba.max_cfg_param.rpi_base =
9995 phba->sli4_hba.max_cfg_param.max_vfi =
9997 phba->sli4_hba.max_cfg_param.vfi_base =
9999 phba->sli4_hba.max_cfg_param.max_fcfi =
10001 phba->sli4_hba.max_cfg_param.max_eq =
10003 phba->sli4_hba.max_cfg_param.max_rq =
10005 phba->sli4_hba.max_cfg_param.max_wq =
10007 phba->sli4_hba.max_cfg_param.max_cq =
10009 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10010 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10011 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10012 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10013 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10014 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10015 phba->max_vports = phba->max_vpi;
10026 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10027 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10028 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10032 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10033 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10039 if (phba->cgn_reg_signal !=
10042 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10043 phba->cgn_reg_signal =
10046 phba->cgn_reg_signal =
10048 phba->cgn_reg_fpin =
10055 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10056 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10060 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10070 phba->sli4_hba.extents_in_use,
10071 phba->sli4_hba.max_cfg_param.xri_base,
10072 phba->sli4_hba.max_cfg_param.max_xri,
10073 phba->sli4_hba.max_cfg_param.vpi_base,
10074 phba->sli4_hba.max_cfg_param.max_vpi,
10075 phba->sli4_hba.max_cfg_param.vfi_base,
10076 phba->sli4_hba.max_cfg_param.max_vfi,
10077 phba->sli4_hba.max_cfg_param.rpi_base,
10078 phba->sli4_hba.max_cfg_param.max_rpi,
10079 phba->sli4_hba.max_cfg_param.max_fcfi,
10080 phba->sli4_hba.max_cfg_param.max_eq,
10081 phba->sli4_hba.max_cfg_param.max_cq,
10082 phba->sli4_hba.max_cfg_param.max_wq,
10083 phba->sli4_hba.max_cfg_param.max_rq,
10084 phba->lmt);
10090 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10091 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10092 qmin = phba->sli4_hba.max_cfg_param.max_cq;
10097 qmin -= 4;
10098 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10099 qmin = phba->sli4_hba.max_cfg_param.max_eq;
10102 if ((phba->cfg_irq_chann > qmin) ||
10103 (phba->cfg_hdw_queue > qmin)) {
10105 "2005 Reducing Queues - "
10109 phba->sli4_hba.max_cfg_param.max_wq,
10110 phba->sli4_hba.max_cfg_param.max_cq,
10111 phba->sli4_hba.max_cfg_param.max_eq,
10112 qmin, phba->cfg_irq_chann,
10113 phba->cfg_hdw_queue);
10115 if (phba->cfg_irq_chann > qmin)
10116 phba->cfg_irq_chann = qmin;
10117 if (phba->cfg_hdw_queue > qmin)
10118 phba->cfg_hdw_queue = qmin;
10126 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10131 set_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag);
10135 phba->cfg_link_speed =
10139 phba->cfg_link_speed =
10143 phba->cfg_link_speed =
10147 phba->cfg_link_speed =
10151 phba->cfg_link_speed =
10155 phba->cfg_link_speed =
10159 phba->cfg_link_speed =
10163 phba->cfg_link_speed =
10167 phba->cfg_link_speed =
10176 phba->cfg_link_speed =
10183 length = phba->sli4_hba.max_cfg_param.max_xri -
10185 if (phba->cfg_hba_queue_depth > length) {
10188 phba->cfg_hba_queue_depth, length);
10189 phba->cfg_hba_queue_depth = length;
10192 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10197 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10205 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10206 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10207 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10212 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10213 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10218 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10220 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10232 phba->sli4_hba.iov.pf_number =
10234 phba->sli4_hba.iov.vf_number =
10243 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10244 phba->sli4_hba.iov.vf_number);
10252 mempool_free(pmb, phba->mbox_mem_pool);
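/*
 * Illustrative sketch, not driver code, of the queue clamping done above in
 * lpfc_sli4_read_config(): the usable fast-path count is the smaller of the
 * WQ/CQ limits minus a few reserved slow-path queues, further bounded by the
 * EQ limit; requested IRQ channels and hardware queues are then capped at
 * that minimum. sketch_clamp_queues() and the reserve count are hypothetical.
 */
static uint32_t sketch_clamp_queues(uint32_t max_wq, uint32_t max_cq,
				    uint32_t max_eq, uint32_t requested)
{
	uint32_t qmin = (max_wq < max_cq) ? max_wq : max_cq;

	qmin -= 4;			/* reserve slow-path WQ/CQ pairs */
	if (max_eq < qmin)
		qmin = max_eq;
	return (requested > qmin) ? qmin : requested;
}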
10257 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10260 * This routine is invoked to set up the port-side endian order when
10265 * 0 - successful
10266 * -ENOMEM - No available memory
10267 * -EIO - The mailbox failed to complete successfully.
10277 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10280 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10287 return -ENOMEM;
10295 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10302 rc = -EIO;
10304 mempool_free(mboxq, phba->mbox_mem_pool);
10316 * lpfc_sli4_queue_verify - Verify and update EQ counts
10325 * 0 - successful
10326 * -ENOMEM - No available memory
10332 * Sanity check for configured queue parameters against the run-time
10336 if (phba->nvmet_support) {
10337 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10338 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10339 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10340 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10345 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10346 phba->cfg_nvmet_mrq);
10349 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10350 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10353 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10354 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10367 if (phba->enab_exp_wqcq_pages)
10370 phba->sli4_hba.cq_esize,
10375 phba->sli4_hba.cq_esize,
10376 phba->sli4_hba.cq_ecount, cpu);
10379 "0499 Failed allocate fast-path IO CQ (%d)\n",
10383 qdesc->qe_valid = 1;
10384 qdesc->hdwq = idx;
10385 qdesc->chann = cpu;
10386 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10389 if (phba->enab_exp_wqcq_pages) {
10391 wqesize = (phba->fcp_embed_io) ?
10392 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10398 phba->sli4_hba.wq_esize,
10399 phba->sli4_hba.wq_ecount, cpu);
10403 "0503 Failed allocate fast-path IO WQ (%d)\n",
10407 qdesc->hdwq = idx;
10408 qdesc->chann = cpu;
10409 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10410 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10415 * lpfc_sli4_queue_create - Create all the SLI4 queues
10424 * 0 - successful
10425 * -ENOMEM - No available memory
10426 * -EIO - The mailbox failed to complete successfully.
10443 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10444 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10445 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10446 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10447 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10448 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10449 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10450 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10451 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10452 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10454 if (!phba->sli4_hba.hdwq) {
10455 phba->sli4_hba.hdwq = kcalloc(
10456 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10458 if (!phba->sli4_hba.hdwq) {
10461 "fast-path Hardware Queue array\n");
10465 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10466 qp = &phba->sli4_hba.hdwq[idx];
10467 spin_lock_init(&qp->io_buf_list_get_lock);
10468 spin_lock_init(&qp->io_buf_list_put_lock);
10469 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10470 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10471 qp->get_io_bufs = 0;
10472 qp->put_io_bufs = 0;
10473 qp->total_io_bufs = 0;
10474 spin_lock_init(&qp->abts_io_buf_list_lock);
10475 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10476 qp->abts_scsi_io_bufs = 0;
10477 qp->abts_nvme_io_bufs = 0;
10478 INIT_LIST_HEAD(&qp->sgl_list);
10479 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10480 spin_lock_init(&qp->hdwq_lock);
10484 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10485 if (phba->nvmet_support) {
10486 phba->sli4_hba.nvmet_cqset = kcalloc(
10487 phba->cfg_nvmet_mrq,
10490 if (!phba->sli4_hba.nvmet_cqset) {
10493 "fast-path CQ set array\n");
10496 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10497 phba->cfg_nvmet_mrq,
10500 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10503 "fast-path RQ set hdr array\n");
10506 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10507 phba->cfg_nvmet_mrq,
10510 if (!phba->sli4_hba.nvmet_mrq_data) {
10513 "fast-path RQ set data array\n");
10519 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10527 cpup = &phba->sli4_hba.cpu_map[cpu];
10528 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10532 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10536 phba->sli4_hba.eq_esize,
10537 phba->sli4_hba.eq_ecount, cpu);
10541 cpup->hdwq);
10544 qdesc->qe_valid = 1;
10545 qdesc->hdwq = cpup->hdwq;
10546 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10547 qdesc->last_cpu = qdesc->chann;
10550 qp->hba_eq = qdesc;
10552 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10553 list_add(&qdesc->cpu_list, &eqi->list);
10560 cpup = &phba->sli4_hba.cpu_map[cpu];
10563 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10567 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10568 if (qp->hba_eq)
10572 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10573 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10574 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10578 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10583 if (phba->nvmet_support) {
10584 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10589 phba->sli4_hba.cq_esize,
10590 phba->sli4_hba.cq_ecount,
10598 qdesc->qe_valid = 1;
10599 qdesc->hdwq = idx;
10600 qdesc->chann = cpu;
10601 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10610 /* Create slow-path Mailbox Command Complete Queue */
10612 phba->sli4_hba.cq_esize,
10613 phba->sli4_hba.cq_ecount, cpu);
10616 "0500 Failed allocate slow-path mailbox CQ\n");
10619 qdesc->qe_valid = 1;
10620 phba->sli4_hba.mbx_cq = qdesc;
10622 /* Create slow-path ELS Complete Queue */
10624 phba->sli4_hba.cq_esize,
10625 phba->sli4_hba.cq_ecount, cpu);
10628 "0501 Failed allocate slow-path ELS CQ\n");
10631 qdesc->qe_valid = 1;
10632 qdesc->chann = cpu;
10633 phba->sli4_hba.els_cq = qdesc;
10643 phba->sli4_hba.mq_esize,
10644 phba->sli4_hba.mq_ecount, cpu);
10647 "0505 Failed allocate slow-path MQ\n");
10650 qdesc->chann = cpu;
10651 phba->sli4_hba.mbx_wq = qdesc;
10658 * Create slow-path ELS Work Queue.
10661 wqesize = (phba->fcp_embed_io) ?
10662 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10666 phba->sli4_hba.wq_ecount, cpu);
10669 "0504 Failed allocate slow-path ELS WQ\n");
10672 qdesc->chann = cpu;
10673 phba->sli4_hba.els_wq = qdesc;
10674 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10676 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10679 phba->sli4_hba.cq_esize,
10680 phba->sli4_hba.cq_ecount, cpu);
10686 qdesc->chann = cpu;
10687 qdesc->qe_valid = 1;
10688 phba->sli4_hba.nvmels_cq = qdesc;
10692 phba->sli4_hba.wq_esize,
10693 phba->sli4_hba.wq_ecount, cpu);
10699 qdesc->chann = cpu;
10700 phba->sli4_hba.nvmels_wq = qdesc;
10701 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10710 phba->sli4_hba.rq_esize,
10711 phba->sli4_hba.rq_ecount, cpu);
10717 phba->sli4_hba.hdr_rq = qdesc;
10721 phba->sli4_hba.rq_esize,
10722 phba->sli4_hba.rq_ecount, cpu);
10728 phba->sli4_hba.dat_rq = qdesc;
10730 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10731 phba->nvmet_support) {
10732 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10738 phba->sli4_hba.rq_esize,
10747 qdesc->hdwq = idx;
10748 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10751 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10754 if (qdesc->rqbp == NULL) {
10762 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10767 phba->sli4_hba.rq_esize,
10776 qdesc->hdwq = idx;
10777 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10782 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10783 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10784 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10785 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10790 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10791 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10792 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10793 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10801 return -ENOMEM;
10835 hdwq = phba->sli4_hba.hdwq;
10838 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10845 if (phba->cfg_xpsgl && !phba->nvmet_support)
10850 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10852 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10854 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10859 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10866 * 0 - successful
10867 * -ENOMEM - No available memory
10868 * -EIO - The mailbox failed to complete successfully.
10878 spin_lock_irq(&phba->hbalock);
10879 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10880 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10881 spin_unlock_irq(&phba->hbalock);
10883 spin_lock_irq(&phba->hbalock);
10885 spin_unlock_irq(&phba->hbalock);
10890 if (phba->sli4_hba.hdwq)
10893 if (phba->nvmet_support) {
10894 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10895 phba->cfg_nvmet_mrq);
10897 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10898 phba->cfg_nvmet_mrq);
10899 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10900 phba->cfg_nvmet_mrq);
10904 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10907 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10910 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10913 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10914 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10917 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10920 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10923 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10926 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10929 spin_lock_irq(&phba->hbalock);
10930 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10931 spin_unlock_irq(&phba->hbalock);
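/*
 * Illustrative sketch, not driver code, of the flag handshake used above in
 * lpfc_sli4_queue_destroy(): the freeing thread advertises an "init" bit,
 * then spins until no reader still holds the "wait" bit, so queue memory is
 * never torn down under an active consumer. Names are hypothetical and the
 * sleep interval is arbitrary.
 */
static void sketch_wait_for_readers(spinlock_t *lock, unsigned long *flags,
				    unsigned long init_bit,
				    unsigned long wait_bit)
{
	spin_lock_irq(lock);
	*flags |= init_bit;			/* announce destroy intent */
	while (*flags & wait_bit) {		/* a reader is still inside */
		spin_unlock_irq(lock);
		msleep(20);
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
}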
10941 rqbp = rq->rqbp;
10942 while (!list_empty(&rqbp->rqb_buffer_list)) {
10943 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10947 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10948 rqbp->buffer_count--;
10963 "6085 Fast-path %s (%d) not allocated\n",
10965 return -ENOMEM;
10981 *cq_map = cq->queue_id;
10984 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10985 qidx, cq->queue_id, qidx, eq->queue_id);
10993 /* no need to tear down cq - caller will do so */
10998 pring = wq->pring;
10999 pring->sli.sli4.wqp = (void *)wq;
11000 cq->pring = pring;
11003 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
11004 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
11009 "0539 Failed setup of slow-path MQ: "
11011 /* no need to tear down cq - caller will do so */
11016 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11017 phba->sli4_hba.mbx_wq->queue_id,
11018 phba->sli4_hba.mbx_cq->queue_id);
11025 * lpfc_setup_cq_lookup - Setup the CQ lookup table
11037 memset(phba->sli4_hba.cq_lookup, 0,
11038 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11040 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11042 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11046 list_for_each_entry(childq, &eq->child_list, list) {
11047 if (childq->queue_id > phba->sli4_hba.cq_max)
11049 if (childq->subtype == LPFC_IO)
11050 phba->sli4_hba.cq_lookup[childq->queue_id] =
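/*
 * Illustrative sketch, not driver code: the loop above builds a dense
 * queue_id -> CQ pointer table so the fast-path ISR can resolve a
 * completion's CQ in O(1) instead of walking each EQ's child list.
 * sketch_cq_lookup() is a hypothetical consumer of such a table.
 */
static struct lpfc_queue *sketch_cq_lookup(struct lpfc_queue **table,
					   uint32_t table_max, uint32_t cqid)
{
	if (cqid > table_max)
		return NULL;		/* id outside the table */
	return table[cqid];		/* NULL if never populated */
}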
11057 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11064 * 0 - successful
11065 * -ENOMEM - No available memory
11066 * -EIO - The mailbox failed to complete successfully.
11078 int rc = -ENOMEM;
11080 /* Check for dual-ULP support */
11081 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11086 return -ENOMEM;
11088 length = (sizeof(struct lpfc_mbx_query_fw_config) -
11097 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11098 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11099 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11105 mempool_free(mboxq, phba->mbox_mem_pool);
11106 rc = -ENXIO;
11110 phba->sli4_hba.fw_func_mode =
11111 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11112 phba->sli4_hba.physical_port =
11113 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11116 phba->sli4_hba.fw_func_mode);
11118 mempool_free(mboxq, phba->mbox_mem_pool);
11123 qp = phba->sli4_hba.hdwq;
11128 "3147 Fast-path EQs not allocated\n");
11129 rc = -ENOMEM;
11134 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11137 cpup = &phba->sli4_hba.cpu_map[cpu];
11142 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11144 if (qidx != cpup->eq)
11148 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11149 phba->cfg_fcp_imax);
11152 "0523 Failed setup of fast-path"
11154 cpup->eq, (uint32_t)rc);
11159 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11160 qp[cpup->hdwq].hba_eq;
11163 "2584 HBA EQ setup: queue[%d]-id=%d\n",
11164 cpup->eq,
11165 qp[cpup->hdwq].hba_eq->queue_id);
11170 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11172 cpup = &phba->sli4_hba.cpu_map[cpu];
11176 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11179 &phba->sli4_hba.hdwq[qidx].io_cq_map,
11195 /* Set up slow-path MBOX CQ/MQ */
11197 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11200 phba->sli4_hba.mbx_cq ?
11202 rc = -ENOMEM;
11207 phba->sli4_hba.mbx_cq,
11208 phba->sli4_hba.mbx_wq,
11216 if (phba->nvmet_support) {
11217 if (!phba->sli4_hba.nvmet_cqset) {
11219 "3165 Fast-path NVME CQ Set "
11221 rc = -ENOMEM;
11224 if (phba->cfg_nvmet_mrq > 1) {
11226 phba->sli4_hba.nvmet_cqset,
11238 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11247 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11250 "6090 NVMET CQ setup: cq-id=%d, "
11251 "parent eq-id=%d\n",
11252 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11253 qp[0].hba_eq->queue_id);
11257 /* Set up slow-path ELS WQ/CQ */
11258 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11261 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11262 rc = -ENOMEM;
11266 phba->sli4_hba.els_cq,
11267 phba->sli4_hba.els_wq,
11276 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11277 phba->sli4_hba.els_wq->queue_id,
11278 phba->sli4_hba.els_cq->queue_id);
11280 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11282 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11285 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11286 rc = -ENOMEM;
11290 phba->sli4_hba.nvmels_cq,
11291 phba->sli4_hba.nvmels_wq,
11301 "6096 ELS WQ setup: wq-id=%d, "
11302 "parent cq-id=%d\n",
11303 phba->sli4_hba.nvmels_wq->queue_id,
11304 phba->sli4_hba.nvmels_cq->queue_id);
11310 if (phba->nvmet_support) {
11311 if ((!phba->sli4_hba.nvmet_cqset) ||
11312 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11313 (!phba->sli4_hba.nvmet_mrq_data)) {
11317 rc = -ENOMEM;
11320 if (phba->cfg_nvmet_mrq > 1) {
11322 phba->sli4_hba.nvmet_mrq_hdr,
11323 phba->sli4_hba.nvmet_mrq_data,
11324 phba->sli4_hba.nvmet_cqset,
11336 phba->sli4_hba.nvmet_mrq_hdr[0],
11337 phba->sli4_hba.nvmet_mrq_data[0],
11338 phba->sli4_hba.nvmet_cqset[0],
11350 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11351 "dat-rq-id=%d parent cq-id=%d\n",
11352 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11353 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11354 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11359 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11362 rc = -ENOMEM;
11366 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11367 phba->sli4_hba.els_cq, LPFC_USOL);
11376 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11377 "parent cq-id=%d\n",
11378 phba->sli4_hba.hdr_rq->queue_id,
11379 phba->sli4_hba.dat_rq->queue_id,
11380 phba->sli4_hba.els_cq->queue_id);
11382 if (phba->cfg_fcp_imax)
11383 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11387 for (qidx = 0; qidx < phba->cfg_irq_chann;
11392 if (phba->sli4_hba.cq_max) {
11393 kfree(phba->sli4_hba.cq_lookup);
11394 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11396 if (!phba->sli4_hba.cq_lookup) {
11399 "size 0x%x\n", phba->sli4_hba.cq_max);
11400 rc = -ENOMEM;
11414 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11421 * 0 - successful
11422 * -ENOMEM - No available memory
11423 * -EIO - The mailbox failed to complete successfully.
11433 if (phba->sli4_hba.mbx_wq)
11434 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11437 if (phba->sli4_hba.nvmels_wq)
11438 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11441 if (phba->sli4_hba.els_wq)
11442 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11445 if (phba->sli4_hba.hdr_rq)
11446 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11447 phba->sli4_hba.dat_rq);
11450 if (phba->sli4_hba.mbx_cq)
11451 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11454 if (phba->sli4_hba.els_cq)
11455 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11458 if (phba->sli4_hba.nvmels_cq)
11459 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11461 if (phba->nvmet_support) {
11463 if (phba->sli4_hba.nvmet_mrq_hdr) {
11464 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11467 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11468 phba->sli4_hba.nvmet_mrq_data[qidx]);
11472 if (phba->sli4_hba.nvmet_cqset) {
11473 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11475 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11479 /* Unset fast-path SLI4 queues */
11480 if (phba->sli4_hba.hdwq) {
11482 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11484 qp = &phba->sli4_hba.hdwq[qidx];
11485 lpfc_wq_destroy(phba, qp->io_wq);
11486 lpfc_cq_destroy(phba, qp->io_cq);
11489 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11491 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11496 kfree(phba->sli4_hba.cq_lookup);
11497 phba->sli4_hba.cq_lookup = NULL;
11498 phba->sli4_hba.cq_max = 0;
11502 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11509 * - Mailbox asynchronous events
11510 * - Receive queue completion unsolicited events
11511 * Later, this can be used for all the slow-path events.
11514 * 0 - successful
11515 * -ENOMEM - No available memory
11523 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11527 list_add_tail(&cq_event->list,
11528 &phba->sli4_hba.sp_cqe_event_pool);
11534 return -ENOMEM;
11538 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11543 * cleanup routine to free all the outstanding completion-queue events
11553 &phba->sli4_hba.sp_cqe_event_pool, list) {
11554 list_del(&cq_event->list);
11560 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11564 * completion-queue event from the free pool.
11566 * Return: Pointer to the newly allocated completion-queue event if successful
11574 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11580 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11584 * completion-queue event from the free pool.
11586 * Return: Pointer to the newly allocated completion-queue event if successful
11595 spin_lock_irqsave(&phba->hbalock, iflags);
11597 spin_unlock_irqrestore(&phba->hbalock, iflags);
11602 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11607 * completion-queue event back into the free pool.
11613 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11617 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11622 * completion-queue event back into the free pool.
11629 spin_lock_irqsave(&phba->hbalock, iflags);
11631 spin_unlock_irqrestore(&phba->hbalock, iflags);
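/*
 * Illustrative sketch, not driver code, of the pool pattern above:
 * pre-allocated events sit on a free list, and alloc/release are just
 * locked list operations, making them safe from interrupt context.
 * struct sketch_pool and sketch_pool_get() are hypothetical.
 */
struct sketch_pool {
	spinlock_t lock;
	struct list_head free_list;
};

static struct lpfc_cq_event *sketch_pool_get(struct sketch_pool *pool)
{
	struct lpfc_cq_event *ev = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&pool->lock, iflags);
	if (!list_empty(&pool->free_list)) {
		ev = list_first_entry(&pool->free_list,
				      struct lpfc_cq_event, list);
		list_del(&ev->list);
	}
	spin_unlock_irqrestore(&pool->lock, iflags);
	return ev;			/* NULL when the pool is empty */
}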
11635 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11638 * This routine is to free all the pending completion-queue events to the
11651 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11652 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11654 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11657 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11658 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11660 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11670 * lpfc_pci_function_reset - Reset pci function.
11677 * 0 - successful
11678 * -ENOMEM - No available memory
11679 * -EIO - The mailbox failed to complete successfully.
11693 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11696 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11703 return -ENOMEM;
11706 /* Setup PCI function reset mailbox-ioctl command */
11712 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11713 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11715 &shdr->response);
11716 mempool_free(mboxq, phba->mbox_mem_pool);
11723 rc = -ENXIO;
11735 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11737 rc = -ENODEV;
11746 phba->work_status[0] = readl(
11747 phba->sli4_hba.u.if_type2.ERR1regaddr);
11748 phba->work_status[1] = readl(
11749 phba->sli4_hba.u.if_type2.ERR2regaddr);
11754 phba->work_status[0],
11755 phba->work_status[1]);
11756 rc = -ENODEV;
11772 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11775 pci_read_config_word(phba->pcidev,
11782 rc = -ENODEV;
11793 /* Catch the not-ready port failure after a port reset. */
11798 rc = -ENODEV;
11805 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11809 * with SLI-4 interface spec.
11812 * 0 - successful
11813 * other values - error
11818 struct pci_dev *pdev = phba->pcidev;
11824 return -ENODEV;
11827 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11829 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11838 &phba->sli4_hba.sli_intf.word0)) {
11839 return -ENODEV;
11843 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11848 phba->sli4_hba.sli_intf.word0);
11849 return -ENODEV;
11852 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11860 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11867 phba->sli4_hba.conf_regs_memmap_p =
11868 ioremap(phba->pci_bar0_map, bar0map_len);
11869 if (!phba->sli4_hba.conf_regs_memmap_p) {
11870 dev_printk(KERN_ERR, &pdev->dev,
11873 return -ENODEV;
11875 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11879 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11882 dev_printk(KERN_ERR, &pdev->dev,
11883 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11884 return -ENODEV;
11886 phba->sli4_hba.conf_regs_memmap_p =
11887 ioremap(phba->pci_bar0_map, bar0map_len);
11888 if (!phba->sli4_hba.conf_regs_memmap_p) {
11889 dev_printk(KERN_ERR, &pdev->dev,
11892 return -ENODEV;
11903 phba->pci_bar1_map = pci_resource_start(pdev,
11906 phba->sli4_hba.ctrl_regs_memmap_p =
11907 ioremap(phba->pci_bar1_map,
11909 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11910 dev_err(&pdev->dev,
11913 error = -ENOMEM;
11916 phba->pci_bar2_memmap_p =
11917 phba->sli4_hba.ctrl_regs_memmap_p;
11920 error = -ENOMEM;
11931 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11933 phba->sli4_hba.drbl_regs_memmap_p =
11934 ioremap(phba->pci_bar1_map, bar1map_len);
11935 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11936 dev_err(&pdev->dev,
11938 error = -ENOMEM;
11941 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11951 phba->pci_bar2_map = pci_resource_start(pdev,
11954 phba->sli4_hba.drbl_regs_memmap_p =
11955 ioremap(phba->pci_bar2_map,
11957 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11958 dev_err(&pdev->dev,
11961 error = -ENOMEM;
11964 phba->pci_bar4_memmap_p =
11965 phba->sli4_hba.drbl_regs_memmap_p;
11970 error = -ENOMEM;
11981 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11983 phba->sli4_hba.dpp_regs_memmap_p =
11984 ioremap(phba->pci_bar2_map, bar2map_len);
11985 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11986 dev_err(&pdev->dev,
11988 error = -ENOMEM;
11991 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11998 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11999 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
12000 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12003 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12004 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12005 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12014 if (phba->sli4_hba.drbl_regs_memmap_p)
12015 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12017 if (phba->sli4_hba.ctrl_regs_memmap_p)
12018 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12020 iounmap(phba->sli4_hba.conf_regs_memmap_p);
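/*
 * Illustrative sketch, not driver code, of the goto-based unwind that ends
 * lpfc_sli4_pci_mem_setup() above: each successful ioremap() gains a
 * matching cleanup label, so a failure at step N releases exactly the N-1
 * earlier mappings in reverse order. All names here are hypothetical.
 */
struct sketch_ctx {			/* hypothetical container */
	unsigned long bar0_phys, bar1_phys;
	unsigned long bar0_len, bar1_len;
	void __iomem *bar0, *bar1;
};

static int sketch_map_two_bars(struct sketch_ctx *ctx)
{
	ctx->bar0 = ioremap(ctx->bar0_phys, ctx->bar0_len);
	if (!ctx->bar0)
		return -ENOMEM;
	ctx->bar1 = ioremap(ctx->bar1_phys, ctx->bar1_len);
	if (!ctx->bar1)
		goto out_unmap_bar0;
	return 0;

out_unmap_bar0:
	iounmap(ctx->bar0);
	return -ENOMEM;
}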
12026 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
12030 * with SLI-4 interface spec.
12036 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12040 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12041 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12042 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12045 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12048 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12049 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12050 if (phba->sli4_hba.dpp_regs_memmap_p)
12051 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12056 dev_printk(KERN_ERR, &phba->pcidev->dev,
12057 "FATAL - unsupported SLI4 interface type - %d\n",
12064 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
12067 * This routine is invoked to enable the MSI-X interrupt vectors to device
12068 * with SLI-3 interface specs.
12071 * 0 - successful
12072 * other values - error
12080 /* Set up MSI-X multi-message vectors */
12081 rc = pci_alloc_irq_vectors(phba->pcidev,
12085 "0420 PCI enable MSI-X failed (%d)\n", rc);
12090 * Assign MSI-X vectors to interrupt handlers
12093 /* vector-0 is associated to slow-path handler */
12094 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12099 "0421 MSI-X slow-path request_irq failed "
12104 /* vector-1 is associated to fast-path handler */
12105 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12111 "0429 MSI-X fast-path request_irq failed "
12117 * Configure HBA MSI-X attention conditions to messages
12119 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12122 rc = -ENOMEM;
12136 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12141 mempool_free(pmb, phba->mbox_mem_pool);
12146 mempool_free(pmb, phba->mbox_mem_pool);
12150 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12154 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12157 /* Unconfigure MSI-X capability structure */
12158 pci_free_irq_vectors(phba->pcidev);
12165 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12169 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12175 * 0 - successful
12176 * other values - error
12183 rc = pci_enable_msi(phba->pcidev);
12193 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12196 pci_disable_msi(phba->pcidev);
12204 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12206 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12209 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
12214 * MSI-X -> MSI -> IRQ.
12217 * 0 - successful
12218 * other values - error
12230 clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
12233 /* Now, try to enable MSI-X interrupt mode */
12236 /* Indicate initialization to MSI-X mode */
12237 phba->intr_type = MSIX;
12242 /* Fallback to MSI if MSI-X initialization failed */
12243 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12247 phba->intr_type = MSI;
12252	/* Fallback to INTx if both MSI-X/MSI initialization failed */
12253 if (phba->intr_type == NONE) {
12254 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12258 phba->intr_type = INTx;
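/*
 * Illustrative sketch, not driver code, of the MSI-X -> MSI -> INTx fallback
 * ladder implemented above: try the richest mode the configuration allows
 * and only drop down when setup fails. The enable_* callbacks are
 * hypothetical stand-ins for the driver's real setup routines.
 */
static uint32_t sketch_enable_intr(uint32_t cfg_mode,
				   int (*enable_msix)(void),
				   int (*enable_msi)(void),
				   int (*enable_intx)(void))
{
	if (cfg_mode == 2 && !enable_msix())
		return 2;			/* MSI-X mode */
	if (cfg_mode >= 1 && !enable_msi())
		return 1;			/* MSI mode */
	if (!enable_intx())
		return 0;			/* INTx mode */
	return LPFC_INTR_ERROR;			/* nothing worked */
}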
12266 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12271 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12279 if (phba->intr_type == MSIX)
12285 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12286 pci_free_irq_vectors(phba->pcidev);
12289 phba->intr_type = NONE;
12290 phba->sli.slistat.sli_intr = 0;
12294 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12309 cpup = &phba->sli4_hba.cpu_map[cpu];
12316 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12317 (cpup->eq == id))
12321 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12329 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12343 cpup = &phba->sli4_hba.cpu_map[idx];
12345 if ((cpup->phys_id == phys_id) &&
12346 (cpup->core_id == core_id) &&
12355 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12367 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12370 cpup->eq = eqidx;
12371 cpup->flag |= flag;
12375 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12379 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12392 cpup = &phba->sli4_hba.cpu_map[cpu];
12393 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12394 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12395 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12396 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12397 cpup->flag = 0;
12398 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12399 INIT_LIST_HEAD(&eqi->list);
12400 eqi->icnt = 0;
12405 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12416 for (i = 0; i < phba->cfg_irq_chann; i++) {
12418 eqhdl->irq = LPFC_IRQ_EMPTY;
12419 eqhdl->phba = phba;
12424 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12429 * MSI-X vector allocated for the HBA.
12431 * and the phba->sli4_hba.cpu_map array will reflect this.
12452 cpup = &phba->sli4_hba.cpu_map[cpu];
12454 cpup->phys_id = topology_physical_package_id(cpu);
12455 cpup->core_id = topology_core_id(cpu);
12456 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12457 cpup->flag |= LPFC_CPU_MAP_HYPER;
12460 cpup->phys_id = 0;
12461 cpup->core_id = cpu;
12466 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12468 if (cpup->phys_id > max_phys_id)
12469 max_phys_id = cpup->phys_id;
12470 if (cpup->phys_id < min_phys_id)
12471 min_phys_id = cpup->phys_id;
12473 if (cpup->core_id > max_core_id)
12474 max_core_id = cpup->core_id;
12475 if (cpup->core_id < min_core_id)
12476 min_core_id = cpup->core_id;
12488 cpup = &phba->sli4_hba.cpu_map[cpu];
12491 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12493 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12501 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12502 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12503 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12504 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12505 (new_cpup->phys_id == cpup->phys_id))
12513 cpup->eq = new_cpup->eq;
12525 cpu, cpup->eq, new_cpu,
12526 cpup->phys_id);
12534 cpup = &phba->sli4_hba.cpu_map[cpu];
12537 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12539 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12547 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12548 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12549 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12550 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12558 cpup->hdwq, cpup->eq);
12562 cpup->eq = new_cpup->eq;
12573 cpu, cpup->eq, new_cpu,
12574 new_cpup->phys_id, new_cpup->core_id);
12583 cpup = &phba->sli4_hba.cpu_map[cpu];
12586 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12590 cpup->hdwq = idx;
12595 cpu, cpup->phys_id, cpup->core_id,
12596 cpup->hdwq, cpup->eq, cpup->flag);
12599	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12600	 * hardware queues than CPUs. In that case we will just round-robin
12603 * for irq_chann < hdwq. The idx is used for round-robin assignments
12610 cpup = &phba->sli4_hba.cpu_map[cpu];
12613 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12620 if (next_idx < phba->cfg_hdw_queue) {
12621 cpup->hdwq = next_idx;
12632 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12633 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12634 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12635 new_cpup->phys_id == cpup->phys_id &&
12636 new_cpup->core_id == cpup->core_id) {
12646 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12647 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12648 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12649 new_cpup->phys_id == cpup->phys_id)
12655 cpup->hdwq = idx % phba->cfg_hdw_queue;
12661 cpup->hdwq = new_cpup->hdwq;
12666 cpu, cpup->phys_id, cpup->core_id,
12667 cpup->hdwq, cpup->eq, cpup->flag);
12671 * Initialize the cpu_map slots for not-present cpus in case
12672 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12676 cpup = &phba->sli4_hba.cpu_map[cpu];
12678 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12679 c_stat->hdwq_no = cpup->hdwq;
12681 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12684 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12686 c_stat->hdwq_no = cpup->hdwq;
12691 cpu, cpup->hdwq);
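/*
 * Illustrative sketch, not driver code, of the round-robin fallback above:
 * when there are more CPUs than hardware queues, any CPU without a dedicated
 * hdwq simply cycles through the available ones. sketch_assign_hdwq() is
 * hypothetical.
 */
static void sketch_assign_hdwq(uint16_t *hdwq_of_cpu, int num_cpus,
			       int num_hdwq)
{
	int cpu, idx = 0;

	for (cpu = 0; cpu < num_cpus; cpu++)
		hdwq_of_cpu[cpu] = idx++ % num_hdwq;	/* wrap around */
}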
12718 return -ENOMEM;
12720 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12721 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12731		/* get the cpus that are online and are affinitized to
12733		 * this irq vector; if the count is more than 1 then cpuhp
12733		 * is not going to shut down this vector.
12746 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12747 list_add(&eq->_poll_list, eqlist);
12755 if (phba->sli_rev != LPFC_SLI_REV4)
12759 &phba->cpuhp);
12765 del_timer_sync(&phba->cpuhp_poll_timer);
12770 if (phba->pport &&
12771 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
12779 if (phba->sli_rev != LPFC_SLI_REV4)
12784 if (!list_empty(&phba->poll_list))
12785 mod_timer(&phba->cpuhp_poll_timer,
12791 &phba->cpuhp);
12796 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
12797 *retval = -EAGAIN;
12801 if (phba->sli_rev != LPFC_SLI_REV4) {
12811 * lpfc_irq_set_aff - set IRQ affinity
12819 cpumask_clear(&eqhdl->aff_mask);
12820 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12821 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12822 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12826 * lpfc_irq_clear_aff - clear IRQ affinity
12833 cpumask_clear(&eqhdl->aff_mask);
12834 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
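/*
 * Illustrative sketch, not driver code: pinning an IRQ to one CPU is a
 * two-step operation - mark the IRQ ineligible for balancing, then install
 * a single-CPU affinity mask. sketch_pin_irq() mirrors lpfc_irq_set_aff()
 * above with hypothetical naming.
 */
static void sketch_pin_irq(unsigned int irq, int cpu, struct cpumask *mask)
{
	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	irq_set_status_flags(irq, IRQ_NO_BALANCING);
	irq_set_affinity(irq, mask);
}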
12838 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12850 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12861 if (phba->irq_chann_mode == NORMAL_MODE)
12864 orig_mask = &phba->sli4_hba.irq_aff_mask;
12869 cpup = &phba->sli4_hba.cpu_map[cpu];
12871 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12884 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12894 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12899 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12926 list_del_init(&eq->_poll_list);
12950 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12951 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12960 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12963 * This routine is invoked to enable the MSI-X interrupt vectors to device
12964 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
12984 * 0 - successful
12985 * other values - error
12999 /* Set up MSI-X multi-message vectors */
13000 vectors = phba->cfg_irq_chann;
13002 if (phba->irq_chann_mode != NORMAL_MODE)
13003 aff_mask = &phba->sli4_hba.irq_aff_mask;
13007 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13018 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13021 "0484 PCI enable MSI-X failed (%d)\n", rc);
13026 /* Assign MSI-X vectors to interrupt handlers */
13029 name = eqhdl->handler_name;
13034 eqhdl->idx = index;
13035 rc = pci_irq_vector(phba->pcidev, index);
13038 "0489 MSI-X fast-path (%d) "
13042 eqhdl->irq = rc;
13044 rc = request_threaded_irq(eqhdl->irq,
13050 "0486 MSI-X fast-path (%d) "
13075 maskp = pci_irq_get_affinity(phba->pcidev, index);
13079 cpup = &phba->sli4_hba.cpu_map[cpu];
13093 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13103 if (vectors != phba->cfg_irq_chann) {
13106 "MSI-X vectors, requested %d got %d\n",
13107 phba->cfg_irq_chann, vectors);
13108 if (phba->cfg_irq_chann > vectors)
13109 phba->cfg_irq_chann = vectors;
13116 for (--index; index >= 0; index--) {
13119 free_irq(eqhdl->irq, eqhdl);
13122 /* Unconfigure MSI-X capability structure */
13123 pci_free_irq_vectors(phba->pcidev);
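/*
 * Illustrative sketch, not driver code, of the partial-failure rollback
 * above: if requesting vector N fails, walk back through vectors N-1..0
 * freeing what was already requested, then release the vector allocation
 * itself. All names here are hypothetical.
 */
static int sketch_request_vectors(struct pci_dev *pdev, int nvec,
				  irq_handler_t handler, void *arg)
{
	int i, rc;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "sketch-vec", arg);
		if (rc)
			goto undo;
	}
	return 0;
undo:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), arg);
	pci_free_irq_vectors(pdev);
	return rc;
}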
13130 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13134 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13140 * 0 - successful
13141 * other values - error
13150 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13158 return rc ? rc : -1;
13161 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13164 pci_free_irq_vectors(phba->pcidev);
13171 rc = pci_irq_vector(phba->pcidev, 0);
13173 pci_free_irq_vectors(phba->pcidev);
13178 eqhdl->irq = rc;
13183 for (index = 0; index < phba->cfg_irq_chann; index++) {
13185 eqhdl->idx = index;
13192 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13194 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13197 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
13202 * MSI-X -> MSI -> IRQ.
13205 * Interrupt mode (2, 1, 0) - successful
13206 * LPFC_INTR_ERROR - error
13218 /* Now, try to enable MSI-X interrupt mode */
13221 /* Indicate initialization to MSI-X mode */
13222 phba->intr_type = MSIX;
13228 /* Fallback to MSI if MSI-X initialization failed */
13229 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13233 phba->intr_type = MSI;
13238	/* Fallback to INTx if both MSI-X/MSI initialization failed */
13239 if (phba->intr_type == NONE) {
13240 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13247 phba->intr_type = INTx;
13251 retval = pci_irq_vector(phba->pcidev, 0);
13258 eqhdl->irq = retval;
13263 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13265 eqhdl->idx = idx;
13273 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13278 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13285 if (phba->intr_type == MSIX) {
13289 /* Free up MSI-X multi-message vectors */
13290 for (index = 0; index < phba->cfg_irq_chann; index++) {
13293 free_irq(eqhdl->irq, eqhdl);
13296 free_irq(phba->pcidev->irq, phba);
13299 pci_free_irq_vectors(phba->pcidev);
13302 phba->intr_type = NONE;
13303 phba->sli.slistat.sli_intr = 0;
13307 * lpfc_unset_hba - Unset SLI3 hba device initialization
13311 * a device with SLI-3 interface spec.
13316 set_bit(FC_UNLOADING, &phba->pport->load_flag);
13318 kfree(phba->vpi_bmask);
13319 kfree(phba->vpi_ids);
13323 phba->pport->work_port_events = 0;
13335 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13355 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13364 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13368 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13369 qp = &phba->sli4_hba.hdwq[idx];
13370 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13377 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13379 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13407 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13408 qp = &phba->sli4_hba.hdwq[idx];
13410 &qp->lpfc_abts_io_buf_list);
13417 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13419 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13422 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13428 * lpfc_sli4_hba_unset - Unset the fcoe hba
13442 struct pci_dev *pdev = phba->pcidev;
13445 hrtimer_cancel(&phba->cmf_stats_timer);
13446 hrtimer_cancel(&phba->cmf_timer);
13448 if (phba->pport)
13449 phba->sli4_hba.intr_enable = 0;
13457 spin_lock_irq(&phba->hbalock);
13458 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13459 spin_unlock_irq(&phba->hbalock);
13461 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13467 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13468 spin_lock_irq(&phba->hbalock);
13469 mboxq = phba->sli.mbox_active;
13470 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13472 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13473 phba->sli.mbox_active = NULL;
13474 spin_unlock_irq(&phba->hbalock);
13480 if (!pci_channel_offline(phba->pcidev))
13484 /* per-phba callback de-registration for hotplug event */
13485 if (phba->pport)
13491 /* Disable SR-IOV if enabled */
13492 if (phba->cfg_sriov_nr_virtfn)
13496 kthread_stop(phba->worker_thread);
13510 if (phba->ras_fwlog.ras_enabled)
13514 if (phba->pport)
13515 phba->pport->work_port_events = 0;
13576 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13578 if (!phba->cgn_i)
13580 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13582 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13583 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13584 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13585 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13587 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13588 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13589 atomic64_set(&phba->cgn_latency_evt, 0);
13590 phba->cgn_evt_minute = 0;
13593 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13594 cp->cgn_info_version = LPFC_CGN_INFO_V4;
13597 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13598 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13599 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13600 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13602 lpfc_cgn_update_tstamp(phba, &cp->base_time);
13605 if (phba->pport) {
13606 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13607 cp->cgn_lunq = cpu_to_le16(size);
13612 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13613 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13615 cp->cgn_info_crc = cpu_to_le32(crc);
13617 phba->cgn_evt_timestamp = jiffies +
13628 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13630 if (!phba->cgn_i)
13633 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13634 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13636 lpfc_cgn_update_tstamp(phba, &cp->stat_start);
13638 cp->cgn_info_crc = cpu_to_le32(crc);
13642 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13655 if (!phba->cgn_i)
13656 return -ENXIO;
13658 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13663 phba->pport->port_state, reg);
13664 return -ENOMEM;
13667 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13672 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13678 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13679 reg_congestion_buf->addr_lo =
13680 putPaddrLow(phba->cgn_i->phys);
13681 reg_congestion_buf->addr_hi =
13682 putPaddrHigh(phba->cgn_i->phys);
13686 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13687 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13689 &shdr->response);
13690 mempool_free(mboxq, phba->mbox_mem_pool);
13697 return -ENXIO;
13716 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13723 * This function may be called from any context that can block-wait
13731 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13743 phba->sli4_hba.rpi_hdrs_in_use = 1;
13746 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13751 if (!phba->sli4_hba.intr_enable)
13759 sli4_params = &phba->sli4_hba.pc_sli4_params;
13760 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13761 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13762 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13763 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13764 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13766 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13769 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13771 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13772 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13773 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13775 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13776 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13777 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13778 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13779 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13780 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13781 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13782 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13783 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13784 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13785 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13787 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13788 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13790 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13791 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13792 sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters);
13794 /* Check for Extended Pre-Registered SGL support */
13795 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13803 sli4_params->nvme = 1;
13806 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13810 phba->cfg_enable_fc4_type);
13815 sli4_params->nvme = 0;
13816 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13822 phba->cfg_enable_fc4_type);
13824 phba->nvmet_support = 0;
13825 phba->cfg_nvmet_mrq = 0;
13826 phba->cfg_nvme_seg_cnt = 0;
13829 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13830 return -ENODEV;
13831 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13838 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13839 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13843 phba->cfg_enable_pbde = 1;
13845 phba->cfg_enable_pbde = 0;
13850 * In SLI4-Parameters Descriptor:
13855 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13857 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13859 phba->cfg_suppress_rsp = 0;
13862 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13865 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13866 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13868 dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
13873 * to use this option, 128-byte WQEs must be used.
13876 phba->fcp_embed_io = 1;
13878 phba->fcp_embed_io = 0;
13883 phba->cfg_enable_pbde,
13884 phba->fcp_embed_io, sli4_params->nvme,
13885 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13887 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13889 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13896 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13897 phba->enab_exp_wqcq_pages = 1;
13899 phba->enab_exp_wqcq_pages = 0;
13904 phba->mds_diags_support = 1;
13906 phba->mds_diags_support = 0;
13912 phba->nsler = 1;
13914 phba->nsler = 0;
13920 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13924 * This routine is to be called to attach a device with SLI-3 interface spec
13925 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13926 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13933 * 0 - driver can claim the device
13934 * negative value - driver cannot claim the device
13948 return -ENOMEM;
13955 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13960 /* Set up SLI-3 specific device PCI memory space */
13968 /* Set up SLI-3 specific device driver resources */
13994 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14005 vport = phba->pport;
14015 cfg_mode = phba->cfg_use_msi;
14024 error = -ENODEV;
14027 /* SLI-3 HBA setup */
14031 error = -ENODEV;
14039 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14041 phba->intr_mode = intr_mode;
14052 cfg_mode = --intr_mode;
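/*
 * For context: the retry above walks down the interrupt hierarchy.
 * When the HBA cannot be brought up at the configured mode (or too few
 * MSI-X interrupts are observed), intr_mode is decremented and fed
 * back in as cfg_mode, so the probe retries with MSI and finally INTx
 * before giving up.
 */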
14088 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
14091 * This routine is to be called to detach a device with SLI-3 interface
14092 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
14100 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14102 struct lpfc_hba *phba = vport->phba;
14105 set_bit(FC_UNLOADING, &vport->load_flag);
14112 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14113 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14115 fc_vport_terminate(vports[i]->fc_vport);
14135 kthread_stop(phba->worker_thread);
14139 kfree(phba->vpi_bmask);
14140 kfree(phba->vpi_ids);
14143 spin_lock_irq(&phba->port_list_lock);
14144 list_del_init(&vport->listentry);
14145 spin_unlock_irq(&phba->port_list_lock);
14149 /* Disable SR-IOV if enabled */
14150 if (phba->cfg_sriov_nr_virtfn)
14167 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14168 phba->hbqslimp.virt, phba->hbqslimp.phys);
14171 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14172 phba->slim2p.virt, phba->slim2p.phys);
14175 iounmap(phba->ctrl_regs_memmap_p);
14176 iounmap(phba->slim_memmap_p);
14185 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14189 * system Power Management (PM) to device with SLI-3 interface spec. When
14193 * minimum PM requirements to a power-aware driver's PM support for the
14194 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14201 * 0 - driver suspended the device
14208 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14216 kthread_stop(phba->worker_thread);
14225 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14229 * system Power Management (PM) to device with SLI-3 interface spec. When PM
14232 * driver implements the minimum PM requirements to a power-aware driver's
14233 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
14240 * 0 - driver resumed the device
14247 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14255 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14256 "lpfc_worker_%d", phba->brd_no);
14257 if (IS_ERR(phba->worker_thread)) {
14258 error = PTR_ERR(phba->worker_thread);
14270 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14274 return -EIO;
14276 phba->intr_mode = intr_mode;
14283 lpfc_log_intr_mode(phba, phba->intr_mode);
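/*
 * For reference, a minimal sketch of how a suspend/resume pair like
 * this is typically exposed to the PCI core (the ops and driver names
 * here are hypothetical; the callbacks are the generic dispatchers
 * defined later in this file):
 *
 *	static SIMPLE_DEV_PM_OPS(lpfc_pm_ops_sketch,
 *				 lpfc_pci_suspend_one,
 *				 lpfc_pci_resume_one);
 *
 *	static struct pci_driver lpfc_driver_sketch = {
 *		.driver.pm	= &lpfc_pm_ops_sketch,
 *	};
 */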
14289 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14303 * and lets the SCSI mid-layer retry them to recover.
14309 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14336 pci_disable_device(phba->pcidev);
14340 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14364 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14369 * device with SLI-3 interface spec. This function is called by the PCI
14377 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14378 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14379 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14385 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14389 /* Non-fatal error, prepare for recovery */
14410 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14414 * device with SLI-3 interface spec. This is called after PCI bus has been
14415 * reset to restart the PCI card from scratch, as if from a cold boot.
14424 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
14425 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14431 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14432 struct lpfc_sli *psli = &phba->sli;
14435 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14437 printk(KERN_ERR "lpfc: Cannot re-enable "
14450 if (pdev->is_busmaster)
14453 spin_lock_irq(&phba->hbalock);
14454 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14455 spin_unlock_irq(&phba->hbalock);
14458 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14461 "0427 Cannot re-enable interrupt after "
14465 phba->intr_mode = intr_mode;
14473 lpfc_log_intr_mode(phba, phba->intr_mode);
14479 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14483 * with SLI-3 interface spec. It is called when kernel error recovery tells
14492 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14494 /* Bring device online; it will be a no-op for a non-fatal error resume */
14499 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14507 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14509 if (phba->sli_rev == LPFC_SLI_REV4) {
14529 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14539 if (phba->nvmet_support)
14553 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14571 phba->pcidev->device, magic_number, ftype, fid,
14572 fsize, fw->size);
14573 rc = -EINVAL;
14580 phba->pcidev->device, magic_number, ftype, fid,
14581 fsize, fw->size);
14582 rc = -EACCES;
14588 offset, phba->pcidev->device, magic_number,
14589 ftype, fid, fsize, fw->size);
14590 rc = -EIO;
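/*
 * For context: the three exits above classify a rejected firmware
 * image -- -EINVAL for an image whose magic/type/size fields do not
 * parse, -EACCES for an image this HBA model will not accept, and
 * -EIO for any other download failure -- and each log line records
 * the same (device, magic, type, id, size) tuple for triage.
 */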
14596 * lpfc_write_firmware - attempt to write a firmware image to the port
14613 /* The firmware can be NULL in no-wait mode; sanity check it */
14615 rc = -ENXIO;
14618 image = (struct lpfc_grp_hdr *)fw->data;
14620 magic_number = be32_to_cpu(image->magic_number);
14623 fsize = be32_to_cpu(image->size);
14627 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14631 fwrev, image->revision);
14636 rc = -ENOMEM;
14639 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14641 &dmabuf->phys,
14643 if (!dmabuf->virt) {
14645 rc = -ENOMEM;
14648 list_add_tail(&dmabuf->list, &dma_buffer_list);
14650 while (offset < fw->size) {
14653 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14654 memcpy(dmabuf->virt,
14655 fw->data + temp_offset,
14656 fw->size - temp_offset);
14657 temp_offset = fw->size;
14660 memcpy(dmabuf->virt, fw->data + temp_offset,
14665 (fw->size - offset), &offset);
14681 fwrev, image->revision);
14685 list_del(&dmabuf->list);
14686 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14687 dmabuf->virt, dmabuf->phys);
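/*
 * For context: the download loop streams the image through a short
 * list of SLI4_PAGE_SIZE DMA buffers. In outline:
 *
 *	while (offset < fw->size) {
 *		// copy the next chunk into each dmabuf: a full page,
 *		// or the short tail (fw->size - temp_offset) at the end
 *		lpfc_wr_object(phba, &dma_buffer_list,
 *			       fw->size - offset, &offset);
 *	}
 *
 * lpfc_wr_object() advances `offset` by the amount the port actually
 * consumed, so the loop ends once the whole image has been written.
 */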
14701 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14716 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14718 return -EPERM;
14720 scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
14724 file_name, &phba->pcidev->dev,
14728 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14732 ret = -EINVAL;
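/*
 * For context: both download styles appear above -- asynchronous,
 * where the firmware core invokes a completion callback once the
 * image file is available, and synchronous via request_firmware().
 * A sketch of the async form, assuming lpfc_write_firmware() is the
 * continuation:
 *
 *	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
 *				      file_name, &phba->pcidev->dev,
 *				      GFP_KERNEL, (void *)phba,
 *				      lpfc_write_firmware);
 */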
14739 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14744 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14745 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
14753 * 0 - driver can claim the device
14754 * negative value - driver cannot claim the device
14768 return -ENOMEM;
14770 INIT_LIST_HEAD(&phba->poll_list);
14777 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14782 /* Set up SLI-4 specific device PCI memory space */
14790 /* Set up SLI-4 Specific device driver resources */
14798 spin_lock_init(&phba->rrq_list_lock);
14799 INIT_LIST_HEAD(&phba->active_rrq_list);
14800 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14811 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14814 cfg_mode = phba->cfg_use_msi;
14817 phba->pport = NULL;
14831 error = -ENODEV;
14834 /* Default to single EQ for non-MSI-X */
14835 if (phba->intr_type != MSIX) {
14836 phba->cfg_irq_chann = 1;
14837 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14838 if (phba->nvmet_support)
14839 phba->cfg_nvmet_mrq = 1;
14842 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14851 vport = phba->pport;
14862 /* Set up SLI-4 HBA */
14866 error = -ENODEV;
14871 phba->intr_mode = intr_mode;
14880 if (phba->nvmet_support == 0) {
14881 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14898 if (phba->cfg_request_firmware_upgrade)
14904 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14905 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
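/*
 * For context: the tail of the probe arms the SLI4 poll-mode timer
 * and registers this HBA instance with the CPU hotplug state machine,
 * so queue polling can be rebalanced as CPUs go on- and offline.
 */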
14931 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14935 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14943 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14945 struct lpfc_hba *phba = vport->phba;
14949 set_bit(FC_UNLOADING, &vport->load_flag);
14950 if (phba->cgn_i)
14958 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14959 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14961 fc_vport_terminate(vports[i]->fc_vport);
14976 /* De-allocate multi-XRI pools */
14977 if (phba->cfg_xri_rebalancing)
14988 spin_lock_irq(&phba->port_list_lock);
14989 list_del_init(&vport->listentry);
14990 spin_unlock_irq(&phba->port_list_lock);
15016 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
15020 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
15024 * requirements to a power-aware driver's PM support for suspend/resume -- all
15032 * 0 - driver suspended the device
15039 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15047 kthread_stop(phba->worker_thread);
15057 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
15061 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
15064 * implements the minimum PM requirements to a power-aware driver's PM for
15065 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
15072 * 0 - driver resumed the device
15079 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15087 phba->worker_thread = kthread_run(lpfc_do_work, phba,
15088 "lpfc_worker_%d", phba->brd_no);
15089 if (IS_ERR(phba->worker_thread)) {
15090 error = PTR_ERR(phba->worker_thread);
15098 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15102 return -EIO;
15104 phba->intr_mode = intr_mode;
15111 lpfc_log_intr_mode(phba, phba->intr_mode);
15117 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
15130 * and lets the SCSI mid-layer retry them to recover.
15136 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
15146 int offline = pci_channel_offline(phba->pcidev);
15168 pci_disable_device(phba->pcidev);
15172 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15196 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15201 * with SLI-4 interface spec. This function is called by the PCI subsystem
15208 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15209 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15215 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15220 /* Non-fatal error, prepare for recovery */
15224 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15234 set_bit(HBA_PCI_ERR, &phba->bit_flags);
15239 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15251 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
15255 * with SLI-4 interface spec. It is called after PCI bus has been reset to
15256 * restart the PCI card from scratch, as if from a cold boot. During the
15265 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15266 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15272 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15273 struct lpfc_sli *psli = &phba->sli;
15277 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15279 printk(KERN_ERR "lpfc: Cannot re-enable "
15286 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15288 dev_info(&pdev->dev,
15296 if (pdev->is_busmaster)
15299 spin_lock_irq(&phba->hbalock);
15300 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15301 spin_unlock_irq(&phba->hbalock);
15306 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15309 "2824 Cannot re-enable interrupt after "
15313 phba->intr_mode = intr_mode;
15314 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15317 lpfc_log_intr_mode(phba, phba->intr_mode);
15323 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15327 * with SLI-4 interface spec. It is called when kernel error recovery tells
15336 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15344 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15353 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15359 * at PCI device-specific information of the device and driver to see if the
15362 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15367 * 0 - driver can claim the device
15368 * negative value - driver cannot claim the device
15377 return -ENODEV;
15389 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15394 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15402 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15404 switch (phba->pci_dev_grp) {
15414 phba->pci_dev_grp);
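/*
 * For context: each generic PCI entry point in this block dispatches
 * on the device group recorded at probe time. A minimal sketch,
 * assuming the LPFC_PCI_DEV_LP/LPFC_PCI_DEV_OC group ids from lpfc.h:
 *
 *	switch (phba->pci_dev_grp) {
 *	case LPFC_PCI_DEV_LP:		// SLI-3 HBA
 *		lpfc_pci_remove_one_s3(pdev);
 *		break;
 *	case LPFC_PCI_DEV_OC:		// SLI-4 HBA
 *		lpfc_pci_remove_one_s4(pdev);
 *		break;
 *	default:			// unknown group: log and return
 *		break;
 *	}
 */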
15421 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15426 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15430 * 0 - driver suspended the device
15437 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15438 int rc = -ENODEV;
15440 switch (phba->pci_dev_grp) {
15450 phba->pci_dev_grp);
15457 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15462 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15466 * 0 - driver resumed the device
15473 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15474 int rc = -ENODEV;
15476 switch (phba->pci_dev_grp) {
15486 phba->pci_dev_grp);
15493 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15500 * the action to the proper SLI-3 or SLI-4 device error detected handling
15504 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15505 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15514 if (phba->link_state == LPFC_HBA_ERROR &&
15515 test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
15518 switch (phba->pci_dev_grp) {
15528 phba->pci_dev_grp);
15535 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
15540 * from scratch, as if from a cold boot. When this routine is invoked, it
15541 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15545 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15546 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15552 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15555 switch (phba->pci_dev_grp) {
15565 phba->pci_dev_grp);
15572 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15578 * this routine is invoked, it dispatches the action to the proper SLI-3
15579 * or SLI-4 device io_resume routine, which will resume the device operation.
15585 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15587 switch (phba->pci_dev_grp) {
15597 phba->pci_dev_grp);
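/*
 * For context: error_detected/slot_reset/resume are the three stages
 * of the kernel's PCI error recovery (AER/EEH) contract, and the PCI
 * core drives them in that order. A sketch of the wiring (the table
 * name is hypothetical; lpfc registers an equivalent one):
 *
 *	static const struct pci_error_handlers lpfc_err_sketch = {
 *		.error_detected	= lpfc_io_error_detected,
 *		.slot_reset	= lpfc_io_slot_reset,
 *		.resume		= lpfc_io_resume,
 *	};
 */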
15604 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15617 if (!phba->cfg_EnableXLane)
15620 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15621 phba->cfg_fof = 1;
15623 phba->cfg_fof = 0;
15624 mempool_destroy(phba->device_data_mem_pool);
15625 phba->device_data_mem_pool = NULL;
15632 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15642 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15644 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15646 phba->ras_fwlog.ras_hwsupport = true;
15647 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15648 phba->cfg_ras_fwlog_buffsize)
15649 phba->ras_fwlog.ras_enabled = true;
15651 phba->ras_fwlog.ras_enabled = false;
15653 phba->ras_fwlog.ras_hwsupport = false;
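/*
 * For context: RAS firmware logging is gated twice -- the adapter
 * must be hardware-capable (the if_type/sli_family test above), and
 * it is then enabled only when cfg_ras_fwlog_func names this PCI
 * function and a non-zero log buffer size is configured.
 */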
15691 * lpfc_init - lpfc module initialization routine
15698 * 0 - successful
15699 * -ENOMEM - FC attach transport failed
15700 * all others - failed
15715 error = -ENOMEM;
15769 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15772 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15773 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15779 temp_idx -= 1;
15781 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15785 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15787 start_idx -= dbg_cnt;
15790 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15798 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15799 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15801 (unsigned long)phba->dbg_log[temp_idx].t_ns,
15803 phba->dbg_log[temp_idx].log);
15806 atomic_set(&phba->dbg_log_cnt, 0);
15807 atomic_set(&phba->dbg_log_dmping, 0);
15815 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15823 dev_info(&phba->pcidev->dev, "%pV", &vaf);
15827 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15830 atomic_inc(&phba->dbg_log_cnt);
15832 vscnprintf(phba->dbg_log[idx].log,
15833 sizeof(phba->dbg_log[idx].log), fmt, args);
15836 phba->dbg_log[idx].t_ns = local_clock();
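/*
 * For context: the debug log is a lock-free ring. Writers claim a
 * slot with an atomic fetch-add taken modulo DBG_LOG_SZ, format into
 * it with vscnprintf(), and timestamp it via local_clock(); the dump
 * path locates the oldest valid entry, replays forward, and splits
 * each nanosecond stamp with do_div() (quotient left in place,
 * remainder returned) to print the [seconds.fraction] prefix.
 */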
15840 * lpfc_exit - lpfc module removal routine