1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 #include <linux/cpu.h> 44 #include <linux/cpuhotplug.h> 45 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_host.h> 49 #include <scsi/scsi_transport_fc.h> 50 #include <scsi/scsi_tcq.h> 51 #include <scsi/fc/fc_fs.h> 52 53 #include <linux/nvme-fc-driver.h> 54 55 #include "lpfc_hw4.h" 56 #include "lpfc_hw.h" 57 #include "lpfc_sli.h" 58 #include "lpfc_sli4.h" 59 #include "lpfc_nl.h" 60 #include "lpfc_disc.h" 61 #include "lpfc.h" 62 #include "lpfc_scsi.h" 63 #include "lpfc_nvme.h" 64 #include "lpfc_nvmet.h" 65 #include "lpfc_logmsg.h" 66 #include "lpfc_crtn.h" 67 #include "lpfc_vport.h" 68 #include "lpfc_version.h" 69 #include "lpfc_ids.h" 70 71 static enum cpuhp_state lpfc_cpuhp_state; 72 /* Used when mapping IRQ vectors in a driver centric manner */ 73 static uint32_t lpfc_present_cpu; 74 75 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 76 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 77 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 78 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 79 static int lpfc_post_rcv_buf(struct lpfc_hba *); 80 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 81 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 82 static int lpfc_setup_endian_order(struct lpfc_hba *); 83 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 84 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 85 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 86 static void lpfc_init_sgl_list(struct lpfc_hba *); 87 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 88 static void lpfc_free_active_sgl(struct lpfc_hba *); 89 static int lpfc_hba_down_post_s3(struct 
lpfc_hba *phba); 90 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 91 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 92 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 93 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 94 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 95 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 96 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 97 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 98 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 99 100 static struct scsi_transport_template *lpfc_transport_template = NULL; 101 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 102 static DEFINE_IDR(lpfc_hba_index); 103 #define LPFC_NVMET_BUF_POST 254 104 105 /** 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 107 * @phba: pointer to lpfc hba data structure. 108 * 109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 110 * mailbox command. It retrieves the revision information from the HBA and 111 * collects the Vital Product Data (VPD) about the HBA for preparing the 112 * configuration of the HBA. 113 * 114 * Return codes: 115 * 0 - success. 116 * -ERESTART - requests the SLI layer to reset the HBA and try again. 117 * Any other value - indicates an error. 118 **/ 119 int 120 lpfc_config_port_prep(struct lpfc_hba *phba) 121 { 122 lpfc_vpd_t *vp = &phba->vpd; 123 int i = 0, rc; 124 LPFC_MBOXQ_t *pmb; 125 MAILBOX_t *mb; 126 char *lpfc_vpd_data = NULL; 127 uint16_t offset = 0; 128 static char licensed[56] = 129 "key unlock for use with gnu public licensed code only\0"; 130 static int init_key = 1; 131 132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 133 if (!pmb) { 134 phba->link_state = LPFC_HBA_ERROR; 135 return -ENOMEM; 136 } 137 138 mb = &pmb->u.mb; 139 phba->link_state = LPFC_INIT_MBX_CMDS; 140 141 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 142 if (init_key) { 143 uint32_t *ptext = (uint32_t *) licensed; 144 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 146 *ptext = cpu_to_be32(*ptext); 147 init_key = 0; 148 } 149 150 lpfc_read_nv(phba, pmb); 151 memset((char*)mb->un.varRDnvp.rsvd3, 0, 152 sizeof (mb->un.varRDnvp.rsvd3)); 153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 154 sizeof (licensed)); 155 156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 157 158 if (rc != MBX_SUCCESS) { 159 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 160 "0324 Config Port initialization " 161 "error, mbxCmd x%x READ_NVPARM, " 162 "mbxStatus x%x\n", 163 mb->mbxCommand, mb->mbxStatus); 164 mempool_free(pmb, phba->mbox_mem_pool); 165 return -ERESTART; 166 } 167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 168 sizeof(phba->wwnn)); 169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 170 sizeof(phba->wwpn)); 171 } 172 173 /* 174 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 175 * which was already set in lpfc_get_cfgparam() 176 */ 177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 178 179 /* Setup and issue mailbox READ REV command */ 180 lpfc_read_rev(phba, pmb); 181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 182 if (rc != MBX_SUCCESS) { 183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 184 "0439 Adapter failed to init, mbxCmd x%x " 185 "READ_REV, mbxStatus x%x\n", 186 mb->mbxCommand, mb->mbxStatus); 187 mempool_free( pmb, phba->mbox_mem_pool); 188 return -ERESTART; 189 } 190 191 192 /* 193 * The value of rr must be 1 since the 
driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, the internal async event support flag is
 * set to 1; otherwise, it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
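 *
 * Note (a summary of the logic below, not an additional contract): a
 * user-configured soft WWNN/WWPN overrides the WWNs in the service
 * parameters; an empty fc_nodename/fc_portname is (re)loaded from the
 * service parameters; and when a fabric-assigned WWPN key is present in
 * the vendor version field, FAWWPN_SET is kept asserted so the
 * fabric-supplied port name continues to be used.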
347 **/ 348 void 349 lpfc_update_vport_wwn(struct lpfc_vport *vport) 350 { 351 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 352 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 353 354 /* If the soft name exists then update it using the service params */ 355 if (vport->phba->cfg_soft_wwnn) 356 u64_to_wwn(vport->phba->cfg_soft_wwnn, 357 vport->fc_sparam.nodeName.u.wwn); 358 if (vport->phba->cfg_soft_wwpn) 359 u64_to_wwn(vport->phba->cfg_soft_wwpn, 360 vport->fc_sparam.portName.u.wwn); 361 362 /* 363 * If the name is empty or there exists a soft name 364 * then copy the service params name, otherwise use the fc name 365 */ 366 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 367 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 368 sizeof(struct lpfc_name)); 369 else 370 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 371 sizeof(struct lpfc_name)); 372 373 /* 374 * If the port name has changed, then set the Param changes flag 375 * to unreg the login 376 */ 377 if (vport->fc_portname.u.wwn[0] != 0 && 378 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 379 sizeof(struct lpfc_name))) 380 vport->vport_flag |= FAWWPN_PARAM_CHG; 381 382 if (vport->fc_portname.u.wwn[0] == 0 || 383 vport->phba->cfg_soft_wwpn || 384 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 385 vport->vport_flag & FAWWPN_SET) { 386 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 387 sizeof(struct lpfc_name)); 388 vport->vport_flag &= ~FAWWPN_SET; 389 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 390 vport->vport_flag |= FAWWPN_SET; 391 } 392 else 393 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 394 sizeof(struct lpfc_name)); 395 } 396 397 /** 398 * lpfc_config_port_post - Perform lpfc initialization after config port 399 * @phba: pointer to lpfc hba data structure. 400 * 401 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 402 * command call. It performs all internal resource and state setups on the 403 * port: post IOCB buffers, enable appropriate host interrupt attentions, 404 * ELS ring timers, etc. 405 * 406 * Return codes 407 * 0 - success. 408 * Any other value - error. 409 **/ 410 int 411 lpfc_config_port_post(struct lpfc_hba *phba) 412 { 413 struct lpfc_vport *vport = phba->pport; 414 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 415 LPFC_MBOXQ_t *pmb; 416 MAILBOX_t *mb; 417 struct lpfc_dmabuf *mp; 418 struct lpfc_sli *psli = &phba->sli; 419 uint32_t status, timeout; 420 int i, j; 421 int rc; 422 423 spin_lock_irq(&phba->hbalock); 424 /* 425 * If the Config port completed correctly the HBA is not 426 * over heated any more. 427 */ 428 if (phba->over_temp_state == HBA_OVER_TEMP) 429 phba->over_temp_state = HBA_NORMAL_TEMP; 430 spin_unlock_irq(&phba->hbalock); 431 432 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 433 if (!pmb) { 434 phba->link_state = LPFC_HBA_ERROR; 435 return -ENOMEM; 436 } 437 mb = &pmb->u.mb; 438 439 /* Get login parameters for NID. 
*/ 440 rc = lpfc_read_sparam(phba, pmb, 0); 441 if (rc) { 442 mempool_free(pmb, phba->mbox_mem_pool); 443 return -ENOMEM; 444 } 445 446 pmb->vport = vport; 447 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 449 "0448 Adapter failed init, mbxCmd x%x " 450 "READ_SPARM mbxStatus x%x\n", 451 mb->mbxCommand, mb->mbxStatus); 452 phba->link_state = LPFC_HBA_ERROR; 453 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 454 mempool_free(pmb, phba->mbox_mem_pool); 455 lpfc_mbuf_free(phba, mp->virt, mp->phys); 456 kfree(mp); 457 return -EIO; 458 } 459 460 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 461 462 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 463 lpfc_mbuf_free(phba, mp->virt, mp->phys); 464 kfree(mp); 465 pmb->ctx_buf = NULL; 466 lpfc_update_vport_wwn(vport); 467 468 /* Update the fc_host data structures with new wwn. */ 469 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 470 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 471 fc_host_max_npiv_vports(shost) = phba->max_vpi; 472 473 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 474 /* This should be consolidated into parse_vpd ? - mr */ 475 if (phba->SerialNumber[0] == 0) { 476 uint8_t *outptr; 477 478 outptr = &vport->fc_nodename.u.s.IEEE[0]; 479 for (i = 0; i < 12; i++) { 480 status = *outptr++; 481 j = ((status & 0xf0) >> 4); 482 if (j <= 9) 483 phba->SerialNumber[i] = 484 (char)((uint8_t) 0x30 + (uint8_t) j); 485 else 486 phba->SerialNumber[i] = 487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 488 i++; 489 j = (status & 0xf); 490 if (j <= 9) 491 phba->SerialNumber[i] = 492 (char)((uint8_t) 0x30 + (uint8_t) j); 493 else 494 phba->SerialNumber[i] = 495 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 496 } 497 } 498 499 lpfc_read_config(phba, pmb); 500 pmb->vport = vport; 501 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 503 "0453 Adapter failed to init, mbxCmd x%x " 504 "READ_CONFIG, mbxStatus x%x\n", 505 mb->mbxCommand, mb->mbxStatus); 506 phba->link_state = LPFC_HBA_ERROR; 507 mempool_free( pmb, phba->mbox_mem_pool); 508 return -EIO; 509 } 510 511 /* Check if the port is disabled */ 512 lpfc_sli_read_link_ste(phba); 513 514 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 515 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 516 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 517 "3359 HBA queue depth changed from %d to %d\n", 518 phba->cfg_hba_queue_depth, 519 mb->un.varRdConfig.max_xri); 520 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 521 } 522 523 phba->lmt = mb->un.varRdConfig.lmt; 524 525 /* Get the default values for Model Name and Description */ 526 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 527 528 phba->link_state = LPFC_LINK_DOWN; 529 530 /* Only process IOCBs on ELS ring till hba_state is READY */ 531 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 532 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 533 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 534 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 535 536 /* Post receive buffers for desired rings */ 537 if (phba->sli_rev != 3) 538 lpfc_post_rcv_buf(phba); 539 540 /* 541 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 542 */ 543 if (phba->intr_type == MSIX) { 544 rc = lpfc_config_msi(phba, pmb); 545 if (rc) { 546 mempool_free(pmb, phba->mbox_mem_pool); 547 return -EIO; 548 } 549 rc = 
lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 550 if (rc != MBX_SUCCESS) { 551 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 552 "0352 Config MSI mailbox command " 553 "failed, mbxCmd x%x, mbxStatus x%x\n", 554 pmb->u.mb.mbxCommand, 555 pmb->u.mb.mbxStatus); 556 mempool_free(pmb, phba->mbox_mem_pool); 557 return -EIO; 558 } 559 } 560 561 spin_lock_irq(&phba->hbalock); 562 /* Initialize ERATT handling flag */ 563 phba->hba_flag &= ~HBA_ERATT_HANDLED; 564 565 /* Enable appropriate host interrupts */ 566 if (lpfc_readl(phba->HCregaddr, &status)) { 567 spin_unlock_irq(&phba->hbalock); 568 return -EIO; 569 } 570 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 571 if (psli->num_rings > 0) 572 status |= HC_R0INT_ENA; 573 if (psli->num_rings > 1) 574 status |= HC_R1INT_ENA; 575 if (psli->num_rings > 2) 576 status |= HC_R2INT_ENA; 577 if (psli->num_rings > 3) 578 status |= HC_R3INT_ENA; 579 580 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 581 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 582 status &= ~(HC_R0INT_ENA); 583 584 writel(status, phba->HCregaddr); 585 readl(phba->HCregaddr); /* flush */ 586 spin_unlock_irq(&phba->hbalock); 587 588 /* Set up ring-0 (ELS) timer */ 589 timeout = phba->fc_ratov * 2; 590 mod_timer(&vport->els_tmofunc, 591 jiffies + msecs_to_jiffies(1000 * timeout)); 592 /* Set up heart beat (HB) timer */ 593 mod_timer(&phba->hb_tmofunc, 594 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 595 phba->hb_outstanding = 0; 596 phba->last_completion_time = jiffies; 597 /* Set up error attention (ERATT) polling timer */ 598 mod_timer(&phba->eratt_poll, 599 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 600 601 if (phba->hba_flag & LINK_DISABLED) { 602 lpfc_printf_log(phba, 603 KERN_ERR, LOG_INIT, 604 "2598 Adapter Link is disabled.\n"); 605 lpfc_down_link(phba, pmb); 606 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 607 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 608 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 609 lpfc_printf_log(phba, 610 KERN_ERR, LOG_INIT, 611 "2599 Adapter failed to issue DOWN_LINK" 612 " mbox command rc 0x%x\n", rc); 613 614 mempool_free(pmb, phba->mbox_mem_pool); 615 return -EIO; 616 } 617 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 618 mempool_free(pmb, phba->mbox_mem_pool); 619 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 620 if (rc) 621 return rc; 622 } 623 /* MBOX buffer will be freed in mbox compl */ 624 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 625 if (!pmb) { 626 phba->link_state = LPFC_HBA_ERROR; 627 return -ENOMEM; 628 } 629 630 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 631 pmb->mbox_cmpl = lpfc_config_async_cmpl; 632 pmb->vport = phba->pport; 633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 634 635 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 636 lpfc_printf_log(phba, 637 KERN_ERR, 638 LOG_INIT, 639 "0456 Adapter failed to issue " 640 "ASYNCEVT_ENABLE mbox status x%x\n", 641 rc); 642 mempool_free(pmb, phba->mbox_mem_pool); 643 } 644 645 /* Get Option rom version */ 646 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 647 if (!pmb) { 648 phba->link_state = LPFC_HBA_ERROR; 649 return -ENOMEM; 650 } 651 652 lpfc_dump_wakeup_param(phba, pmb); 653 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 654 pmb->vport = phba->pport; 655 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 656 657 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 659 "to get Option ROM version status x%x\n", rc); 660 mempool_free(pmb, 
phba->mbox_mem_pool); 661 } 662 663 return 0; 664 } 665 666 /** 667 * lpfc_hba_init_link - Initialize the FC link 668 * @phba: pointer to lpfc hba data structure. 669 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 670 * 671 * This routine will issue the INIT_LINK mailbox command call. 672 * It is available to other drivers through the lpfc_hba data 673 * structure for use as a delayed link up mechanism with the 674 * module parameter lpfc_suppress_link_up. 675 * 676 * Return code 677 * 0 - success 678 * Any other value - error 679 **/ 680 static int 681 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 682 { 683 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 684 } 685 686 /** 687 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 688 * @phba: pointer to lpfc hba data structure. 689 * @fc_topology: desired fc topology. 690 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 691 * 692 * This routine will issue the INIT_LINK mailbox command call. 693 * It is available to other drivers through the lpfc_hba data 694 * structure for use as a delayed link up mechanism with the 695 * module parameter lpfc_suppress_link_up. 696 * 697 * Return code 698 * 0 - success 699 * Any other value - error 700 **/ 701 int 702 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 703 uint32_t flag) 704 { 705 struct lpfc_vport *vport = phba->pport; 706 LPFC_MBOXQ_t *pmb; 707 MAILBOX_t *mb; 708 int rc; 709 710 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 711 if (!pmb) { 712 phba->link_state = LPFC_HBA_ERROR; 713 return -ENOMEM; 714 } 715 mb = &pmb->u.mb; 716 pmb->vport = vport; 717 718 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 719 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 720 !(phba->lmt & LMT_1Gb)) || 721 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 722 !(phba->lmt & LMT_2Gb)) || 723 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 724 !(phba->lmt & LMT_4Gb)) || 725 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 726 !(phba->lmt & LMT_8Gb)) || 727 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 728 !(phba->lmt & LMT_10Gb)) || 729 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 730 !(phba->lmt & LMT_16Gb)) || 731 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 732 !(phba->lmt & LMT_32Gb)) || 733 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 734 !(phba->lmt & LMT_64Gb))) { 735 /* Reset link speed to auto */ 736 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 737 "1302 Invalid speed for this board:%d " 738 "Reset link speed to auto.\n", 739 phba->cfg_link_speed); 740 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 741 } 742 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 743 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 744 if (phba->sli_rev < LPFC_SLI_REV4) 745 lpfc_set_loopback_flag(phba); 746 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 747 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 749 "0498 Adapter failed to init, mbxCmd x%x " 750 "INIT_LINK, mbxStatus x%x\n", 751 mb->mbxCommand, mb->mbxStatus); 752 if (phba->sli_rev <= LPFC_SLI_REV3) { 753 /* Clear all interrupt enable conditions */ 754 writel(0, phba->HCregaddr); 755 readl(phba->HCregaddr); /* flush */ 756 /* Clear all pending interrupts */ 757 writel(0xffffffff, phba->HAregaddr); 758 readl(phba->HAregaddr); /* flush */ 759 } 760 phba->link_state = LPFC_HBA_ERROR; 761 if (rc != MBX_BUSY || flag == 
MBX_POLL) 762 mempool_free(pmb, phba->mbox_mem_pool); 763 return -EIO; 764 } 765 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 766 if (flag == MBX_POLL) 767 mempool_free(pmb, phba->mbox_mem_pool); 768 769 return 0; 770 } 771 772 /** 773 * lpfc_hba_down_link - this routine downs the FC link 774 * @phba: pointer to lpfc hba data structure. 775 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 776 * 777 * This routine will issue the DOWN_LINK mailbox command call. 778 * It is available to other drivers through the lpfc_hba data 779 * structure for use to stop the link. 780 * 781 * Return code 782 * 0 - success 783 * Any other value - error 784 **/ 785 static int 786 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 787 { 788 LPFC_MBOXQ_t *pmb; 789 int rc; 790 791 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 792 if (!pmb) { 793 phba->link_state = LPFC_HBA_ERROR; 794 return -ENOMEM; 795 } 796 797 lpfc_printf_log(phba, 798 KERN_ERR, LOG_INIT, 799 "0491 Adapter Link is disabled.\n"); 800 lpfc_down_link(phba, pmb); 801 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 802 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 803 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 804 lpfc_printf_log(phba, 805 KERN_ERR, LOG_INIT, 806 "2522 Adapter failed to issue DOWN_LINK" 807 " mbox command rc 0x%x\n", rc); 808 809 mempool_free(pmb, phba->mbox_mem_pool); 810 return -EIO; 811 } 812 if (flag == MBX_POLL) 813 mempool_free(pmb, phba->mbox_mem_pool); 814 815 return 0; 816 } 817 818 /** 819 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 820 * @phba: pointer to lpfc HBA data structure. 821 * 822 * This routine will do LPFC uninitialization before the HBA is reset when 823 * bringing down the SLI Layer. 824 * 825 * Return codes 826 * 0 - success. 827 * Any other value - error. 828 **/ 829 int 830 lpfc_hba_down_prep(struct lpfc_hba *phba) 831 { 832 struct lpfc_vport **vports; 833 int i; 834 835 if (phba->sli_rev <= LPFC_SLI_REV3) { 836 /* Disable interrupts */ 837 writel(0, phba->HCregaddr); 838 readl(phba->HCregaddr); /* flush */ 839 } 840 841 if (phba->pport->load_flag & FC_UNLOADING) 842 lpfc_cleanup_discovery_resources(phba->pport); 843 else { 844 vports = lpfc_create_vport_work_array(phba); 845 if (vports != NULL) 846 for (i = 0; i <= phba->max_vports && 847 vports[i] != NULL; i++) 848 lpfc_cleanup_discovery_resources(vports[i]); 849 lpfc_destroy_vport_work_array(phba, vports); 850 } 851 return 0; 852 } 853 854 /** 855 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 856 * rspiocb which got deferred 857 * 858 * @phba: pointer to lpfc HBA data structure. 859 * 860 * This routine will cleanup completed slow path events after HBA is reset 861 * when bringing down the SLI Layer. 862 * 863 * 864 * Return codes 865 * void. 
866 **/ 867 static void 868 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 869 { 870 struct lpfc_iocbq *rspiocbq; 871 struct hbq_dmabuf *dmabuf; 872 struct lpfc_cq_event *cq_event; 873 874 spin_lock_irq(&phba->hbalock); 875 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 876 spin_unlock_irq(&phba->hbalock); 877 878 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 879 /* Get the response iocb from the head of work queue */ 880 spin_lock_irq(&phba->hbalock); 881 list_remove_head(&phba->sli4_hba.sp_queue_event, 882 cq_event, struct lpfc_cq_event, list); 883 spin_unlock_irq(&phba->hbalock); 884 885 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 886 case CQE_CODE_COMPL_WQE: 887 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 888 cq_event); 889 lpfc_sli_release_iocbq(phba, rspiocbq); 890 break; 891 case CQE_CODE_RECEIVE: 892 case CQE_CODE_RECEIVE_V1: 893 dmabuf = container_of(cq_event, struct hbq_dmabuf, 894 cq_event); 895 lpfc_in_buf_free(phba, &dmabuf->dbuf); 896 } 897 } 898 } 899 900 /** 901 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 902 * @phba: pointer to lpfc HBA data structure. 903 * 904 * This routine will cleanup posted ELS buffers after the HBA is reset 905 * when bringing down the SLI Layer. 906 * 907 * 908 * Return codes 909 * void. 910 **/ 911 static void 912 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 913 { 914 struct lpfc_sli *psli = &phba->sli; 915 struct lpfc_sli_ring *pring; 916 struct lpfc_dmabuf *mp, *next_mp; 917 LIST_HEAD(buflist); 918 int count; 919 920 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 921 lpfc_sli_hbqbuf_free_all(phba); 922 else { 923 /* Cleanup preposted buffers on the ELS ring */ 924 pring = &psli->sli3_ring[LPFC_ELS_RING]; 925 spin_lock_irq(&phba->hbalock); 926 list_splice_init(&pring->postbufq, &buflist); 927 spin_unlock_irq(&phba->hbalock); 928 929 count = 0; 930 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 931 list_del(&mp->list); 932 count++; 933 lpfc_mbuf_free(phba, mp->virt, mp->phys); 934 kfree(mp); 935 } 936 937 spin_lock_irq(&phba->hbalock); 938 pring->postbufq_cnt -= count; 939 spin_unlock_irq(&phba->hbalock); 940 } 941 } 942 943 /** 944 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 945 * @phba: pointer to lpfc HBA data structure. 946 * 947 * This routine will cleanup the txcmplq after the HBA is reset when bringing 948 * down the SLI Layer. 949 * 950 * Return codes 951 * void 952 **/ 953 static void 954 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 955 { 956 struct lpfc_sli *psli = &phba->sli; 957 struct lpfc_queue *qp = NULL; 958 struct lpfc_sli_ring *pring; 959 LIST_HEAD(completions); 960 int i; 961 struct lpfc_iocbq *piocb, *next_iocb; 962 963 if (phba->sli_rev != LPFC_SLI_REV4) { 964 for (i = 0; i < psli->num_rings; i++) { 965 pring = &psli->sli3_ring[i]; 966 spin_lock_irq(&phba->hbalock); 967 /* At this point in time the HBA is either reset or DOA 968 * Nothing should be on txcmplq as it will 969 * NEVER complete. 
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_els_sgl_list and */
					/* scsi_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
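	 *
	 * Descriptive note on the loop below: for each hardware queue the
	 * aborted I/O buffers are detached from lpfc_abts_io_buf_list,
	 * their pCmd pointers are cleared and status reset to
	 * IOSTAT_SUCCESS, and they are spliced back onto
	 * lpfc_io_buf_list_put so they can be reused once the port is
	 * brought back up.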
1071 */ 1072 cnt = 0; 1073 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1074 qp = &phba->sli4_hba.hdwq[idx]; 1075 1076 spin_lock(&qp->abts_io_buf_list_lock); 1077 list_splice_init(&qp->lpfc_abts_io_buf_list, 1078 &aborts); 1079 1080 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1081 psb->pCmd = NULL; 1082 psb->status = IOSTAT_SUCCESS; 1083 cnt++; 1084 } 1085 spin_lock(&qp->io_buf_list_put_lock); 1086 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1087 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1088 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1089 qp->abts_scsi_io_bufs = 0; 1090 qp->abts_nvme_io_bufs = 0; 1091 spin_unlock(&qp->io_buf_list_put_lock); 1092 spin_unlock(&qp->abts_io_buf_list_lock); 1093 } 1094 spin_unlock_irq(&phba->hbalock); 1095 1096 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1097 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1098 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1099 &nvmet_aborts); 1100 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1101 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1102 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1103 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1104 } 1105 } 1106 1107 lpfc_sli4_free_sp_events(phba); 1108 return cnt; 1109 } 1110 1111 /** 1112 * lpfc_hba_down_post - Wrapper func for hba down post routine 1113 * @phba: pointer to lpfc HBA data structure. 1114 * 1115 * This routine wraps the actual SLI3 or SLI4 routine for performing 1116 * uninitialization after the HBA is reset when bring down the SLI Layer. 1117 * 1118 * Return codes 1119 * 0 - success. 1120 * Any other value - error. 1121 **/ 1122 int 1123 lpfc_hba_down_post(struct lpfc_hba *phba) 1124 { 1125 return (*phba->lpfc_hba_down_post)(phba); 1126 } 1127 1128 /** 1129 * lpfc_hb_timeout - The HBA-timer timeout handler 1130 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1131 * 1132 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1133 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1134 * work-port-events bitmap and the worker thread is notified. This timeout 1135 * event will be used by the worker thread to invoke the actual timeout 1136 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1137 * be performed in the timeout handler and the HBA timeout event bit shall 1138 * be cleared by the worker thread after it has taken the event bitmap out. 1139 **/ 1140 static void 1141 lpfc_hb_timeout(struct timer_list *t) 1142 { 1143 struct lpfc_hba *phba; 1144 uint32_t tmo_posted; 1145 unsigned long iflag; 1146 1147 phba = from_timer(phba, t, hb_tmofunc); 1148 1149 /* Check for heart beat timeout conditions */ 1150 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1151 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1152 if (!tmo_posted) 1153 phba->pport->work_port_events |= WORKER_HB_TMO; 1154 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1155 1156 /* Tell the worker thread there is work to do */ 1157 if (!tmo_posted) 1158 lpfc_worker_wake_up(phba); 1159 return; 1160 } 1161 1162 /** 1163 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1164 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1165 * 1166 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1167 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1168 * work-port-events bitmap and the worker thread is notified. 
This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver sets the heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks the
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions are detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expires with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
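 *
 * As a rough illustration of the scheme described above (assuming the
 * default interval and timeout values quoted above):
 *
 *   t = 0s   heart-beat mailbox issued, hb_outstanding set,
 *            hb_tmofunc armed for t + LPFC_HB_MBOX_TIMEOUT (30s)
 *   t < 30s  completion arrives, hb_outstanding cleared, timer re-armed
 *            for completion time + LPFC_HB_MBOX_INTERVAL (5s)
 *   t = 30s  no completion seen; the timeout handler finds hb_outstanding
 *            still set and the HBA is taken offline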
1207 **/ 1208 static void 1209 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1210 { 1211 unsigned long drvr_flag; 1212 1213 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1214 phba->hb_outstanding = 0; 1215 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1216 1217 /* Check and reset heart-beat timer is necessary */ 1218 mempool_free(pmboxq, phba->mbox_mem_pool); 1219 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1220 !(phba->link_state == LPFC_HBA_ERROR) && 1221 !(phba->pport->load_flag & FC_UNLOADING)) 1222 mod_timer(&phba->hb_tmofunc, 1223 jiffies + 1224 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1225 return; 1226 } 1227 1228 static void 1229 lpfc_hb_eq_delay_work(struct work_struct *work) 1230 { 1231 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1232 struct lpfc_hba, eq_delay_work); 1233 struct lpfc_eq_intr_info *eqi, *eqi_new; 1234 struct lpfc_queue *eq, *eq_next; 1235 unsigned char *ena_delay = NULL; 1236 uint32_t usdelay; 1237 int i; 1238 1239 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1240 return; 1241 1242 if (phba->link_state == LPFC_HBA_ERROR || 1243 phba->pport->fc_flag & FC_OFFLINE_MODE) 1244 goto requeue; 1245 1246 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1247 GFP_KERNEL); 1248 if (!ena_delay) 1249 goto requeue; 1250 1251 for (i = 0; i < phba->cfg_irq_chann; i++) { 1252 /* Get the EQ corresponding to the IRQ vector */ 1253 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1254 if (!eq) 1255 continue; 1256 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1257 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1258 ena_delay[eq->last_cpu] = 1; 1259 } 1260 } 1261 1262 for_each_present_cpu(i) { 1263 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1264 if (ena_delay[i]) { 1265 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1266 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1267 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1268 } else { 1269 usdelay = 0; 1270 } 1271 1272 eqi->icnt = 0; 1273 1274 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1275 if (unlikely(eq->last_cpu != i)) { 1276 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1277 eq->last_cpu); 1278 list_move_tail(&eq->cpu_list, &eqi_new->list); 1279 continue; 1280 } 1281 if (usdelay != eq->q_mode) 1282 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1283 usdelay); 1284 } 1285 } 1286 1287 kfree(ena_delay); 1288 1289 requeue: 1290 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1291 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1292 } 1293 1294 /** 1295 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1296 * @phba: pointer to lpfc hba data structure. 1297 * 1298 * For each heartbeat, this routine does some heuristic methods to adjust 1299 * XRI distribution. The goal is to fully utilize free XRIs. 1300 **/ 1301 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1302 { 1303 u32 i; 1304 u32 hwq_count; 1305 1306 hwq_count = phba->cfg_hdw_queue; 1307 for (i = 0; i < hwq_count; i++) { 1308 /* Adjust XRIs in private pool */ 1309 lpfc_adjust_pvt_pool_count(phba, i); 1310 1311 /* Adjust high watermark */ 1312 lpfc_adjust_high_watermark(phba, i); 1313 1314 #ifdef LPFC_MXP_STAT 1315 /* Snapshot pbl, pvt and busy count */ 1316 lpfc_snapshot_mxp(phba, i); 1317 #endif 1318 } 1319 } 1320 1321 /** 1322 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1323 * @phba: pointer to lpfc hba data structure. 
1324 * 1325 * This is the actual HBA-timer timeout handler to be invoked by the worker 1326 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1327 * handler performs any periodic operations needed for the device. If such 1328 * periodic event has already been attended to either in the interrupt handler 1329 * or by processing slow-ring or fast-ring events within the HBA-timer 1330 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1331 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1332 * is configured and there is no heart-beat mailbox command outstanding, a 1333 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1334 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1335 * to offline. 1336 **/ 1337 void 1338 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1339 { 1340 struct lpfc_vport **vports; 1341 LPFC_MBOXQ_t *pmboxq; 1342 struct lpfc_dmabuf *buf_ptr; 1343 int retval, i; 1344 struct lpfc_sli *psli = &phba->sli; 1345 LIST_HEAD(completions); 1346 1347 if (phba->cfg_xri_rebalancing) { 1348 /* Multi-XRI pools handler */ 1349 lpfc_hb_mxp_handler(phba); 1350 } 1351 1352 vports = lpfc_create_vport_work_array(phba); 1353 if (vports != NULL) 1354 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1355 lpfc_rcv_seq_check_edtov(vports[i]); 1356 lpfc_fdmi_change_check(vports[i]); 1357 } 1358 lpfc_destroy_vport_work_array(phba, vports); 1359 1360 if ((phba->link_state == LPFC_HBA_ERROR) || 1361 (phba->pport->load_flag & FC_UNLOADING) || 1362 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1363 return; 1364 1365 spin_lock_irq(&phba->pport->work_port_lock); 1366 1367 if (time_after(phba->last_completion_time + 1368 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1369 jiffies)) { 1370 spin_unlock_irq(&phba->pport->work_port_lock); 1371 if (!phba->hb_outstanding) 1372 mod_timer(&phba->hb_tmofunc, 1373 jiffies + 1374 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1375 else 1376 mod_timer(&phba->hb_tmofunc, 1377 jiffies + 1378 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1379 return; 1380 } 1381 spin_unlock_irq(&phba->pport->work_port_lock); 1382 1383 if (phba->elsbuf_cnt && 1384 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1385 spin_lock_irq(&phba->hbalock); 1386 list_splice_init(&phba->elsbuf, &completions); 1387 phba->elsbuf_cnt = 0; 1388 phba->elsbuf_prev_cnt = 0; 1389 spin_unlock_irq(&phba->hbalock); 1390 1391 while (!list_empty(&completions)) { 1392 list_remove_head(&completions, buf_ptr, 1393 struct lpfc_dmabuf, list); 1394 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1395 kfree(buf_ptr); 1396 } 1397 } 1398 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1399 1400 /* If there is no heart beat outstanding, issue a heartbeat command */ 1401 if (phba->cfg_enable_hba_heartbeat) { 1402 if (!phba->hb_outstanding) { 1403 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1404 (list_empty(&psli->mboxq))) { 1405 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1406 GFP_KERNEL); 1407 if (!pmboxq) { 1408 mod_timer(&phba->hb_tmofunc, 1409 jiffies + 1410 msecs_to_jiffies(1000 * 1411 LPFC_HB_MBOX_INTERVAL)); 1412 return; 1413 } 1414 1415 lpfc_heart_beat(phba, pmboxq); 1416 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1417 pmboxq->vport = phba->pport; 1418 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1419 MBX_NOWAIT); 1420 1421 if (retval != MBX_BUSY && 1422 retval != MBX_SUCCESS) { 1423 mempool_free(pmboxq, 1424 phba->mbox_mem_pool); 1425 mod_timer(&phba->hb_tmofunc, 1426 jiffies + 1427 
msecs_to_jiffies(1000 * 1428 LPFC_HB_MBOX_INTERVAL)); 1429 return; 1430 } 1431 phba->skipped_hb = 0; 1432 phba->hb_outstanding = 1; 1433 } else if (time_before_eq(phba->last_completion_time, 1434 phba->skipped_hb)) { 1435 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1436 "2857 Last completion time not " 1437 " updated in %d ms\n", 1438 jiffies_to_msecs(jiffies 1439 - phba->last_completion_time)); 1440 } else 1441 phba->skipped_hb = jiffies; 1442 1443 mod_timer(&phba->hb_tmofunc, 1444 jiffies + 1445 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1446 return; 1447 } else { 1448 /* 1449 * If heart beat timeout called with hb_outstanding set 1450 * we need to give the hb mailbox cmd a chance to 1451 * complete or TMO. 1452 */ 1453 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1454 "0459 Adapter heartbeat still out" 1455 "standing:last compl time was %d ms.\n", 1456 jiffies_to_msecs(jiffies 1457 - phba->last_completion_time)); 1458 mod_timer(&phba->hb_tmofunc, 1459 jiffies + 1460 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1461 } 1462 } else { 1463 mod_timer(&phba->hb_tmofunc, 1464 jiffies + 1465 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1466 } 1467 } 1468 1469 /** 1470 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1471 * @phba: pointer to lpfc hba data structure. 1472 * 1473 * This routine is called to bring the HBA offline when HBA hardware error 1474 * other than Port Error 6 has been detected. 1475 **/ 1476 static void 1477 lpfc_offline_eratt(struct lpfc_hba *phba) 1478 { 1479 struct lpfc_sli *psli = &phba->sli; 1480 1481 spin_lock_irq(&phba->hbalock); 1482 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1483 spin_unlock_irq(&phba->hbalock); 1484 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1485 1486 lpfc_offline(phba); 1487 lpfc_reset_barrier(phba); 1488 spin_lock_irq(&phba->hbalock); 1489 lpfc_sli_brdreset(phba); 1490 spin_unlock_irq(&phba->hbalock); 1491 lpfc_hba_down_post(phba); 1492 lpfc_sli_brdready(phba, HS_MBRDY); 1493 lpfc_unblock_mgmt_io(phba); 1494 phba->link_state = LPFC_HBA_ERROR; 1495 return; 1496 } 1497 1498 /** 1499 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1500 * @phba: pointer to lpfc hba data structure. 1501 * 1502 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1503 * other than Port Error 6 has been detected. 1504 **/ 1505 void 1506 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1507 { 1508 spin_lock_irq(&phba->hbalock); 1509 phba->link_state = LPFC_HBA_ERROR; 1510 spin_unlock_irq(&phba->hbalock); 1511 1512 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1513 lpfc_sli_flush_io_rings(phba); 1514 lpfc_offline(phba); 1515 lpfc_hba_down_post(phba); 1516 lpfc_unblock_mgmt_io(phba); 1517 } 1518 1519 /** 1520 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1521 * @phba: pointer to lpfc hba data structure. 1522 * 1523 * This routine is invoked to handle the deferred HBA hardware error 1524 * conditions. This type of error is indicated by HBA by setting ER1 1525 * and another ER bit in the host status register. The driver will 1526 * wait until the ER1 bit clears before handling the error condition. 1527 **/ 1528 static void 1529 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1530 { 1531 uint32_t old_host_status = phba->work_hs; 1532 struct lpfc_sli *psli = &phba->sli; 1533 1534 /* If the pci channel is offline, ignore possible errors, 1535 * since we cannot communicate with the pci card anyway. 
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The firmware stops when it has triggered an erratt. That can cause
	 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os) on
	 * the txcmplq and let the SCSI layer retry them after the link is
	 * re-established.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * The firmware stops when it has triggered an erratt with
		 * HS_FFER6. That can cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and
		 * let the SCSI layer retry them after the link is
		 * re-established.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
1722 */ 1723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1724 "0457 Adapter Hardware Error " 1725 "Data: x%x x%x x%x\n", 1726 phba->work_hs, 1727 phba->work_status[0], phba->work_status[1]); 1728 1729 event_data = FC_REG_DUMP_EVENT; 1730 shost = lpfc_shost_from_vport(vport); 1731 fc_host_post_vendor_event(shost, fc_get_event_number(), 1732 sizeof(event_data), (char *) &event_data, 1733 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1734 1735 lpfc_offline_eratt(phba); 1736 } 1737 return; 1738 } 1739 1740 /** 1741 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1742 * @phba: pointer to lpfc hba data structure. 1743 * @mbx_action: flag for mailbox shutdown action. 1744 * 1745 * This routine is invoked to perform an SLI4 port PCI function reset in 1746 * response to port status register polling attention. It waits for port 1747 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1748 * During this process, interrupt vectors are freed and later requested 1749 * for handling possible port resource change. 1750 **/ 1751 static int 1752 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1753 bool en_rn_msg) 1754 { 1755 int rc; 1756 uint32_t intr_mode; 1757 1758 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1759 LPFC_SLI_INTF_IF_TYPE_2) { 1760 /* 1761 * On error status condition, driver need to wait for port 1762 * ready before performing reset. 1763 */ 1764 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1765 if (rc) 1766 return rc; 1767 } 1768 1769 /* need reset: attempt for port recovery */ 1770 if (en_rn_msg) 1771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1772 "2887 Reset Needed: Attempting Port " 1773 "Recovery...\n"); 1774 lpfc_offline_prep(phba, mbx_action); 1775 lpfc_sli_flush_io_rings(phba); 1776 lpfc_offline(phba); 1777 /* release interrupt for possible resource change */ 1778 lpfc_sli4_disable_intr(phba); 1779 rc = lpfc_sli_brdrestart(phba); 1780 if (rc) { 1781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1782 "6309 Failed to restart board\n"); 1783 return rc; 1784 } 1785 /* request and enable interrupt */ 1786 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1787 if (intr_mode == LPFC_INTR_ERROR) { 1788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1789 "3175 Failed to enable interrupt\n"); 1790 return -EIO; 1791 } 1792 phba->intr_mode = intr_mode; 1793 rc = lpfc_online(phba); 1794 if (rc == 0) 1795 lpfc_unblock_mgmt_io(phba); 1796 1797 return rc; 1798 } 1799 1800 /** 1801 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1802 * @phba: pointer to lpfc hba data structure. 1803 * 1804 * This routine is invoked to handle the SLI4 HBA hardware error attention 1805 * conditions. 1806 **/ 1807 static void 1808 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1809 { 1810 struct lpfc_vport *vport = phba->pport; 1811 uint32_t event_data; 1812 struct Scsi_Host *shost; 1813 uint32_t if_type; 1814 struct lpfc_register portstat_reg = {0}; 1815 uint32_t reg_err1, reg_err2; 1816 uint32_t uerrlo_reg, uemasklo_reg; 1817 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1818 bool en_rn_msg = true; 1819 struct temp_event temp_event_data; 1820 struct lpfc_register portsmphr_reg; 1821 int rc, i; 1822 1823 /* If the pci channel is offline, ignore possible errors, since 1824 * we cannot communicate with the pci card anyway. 
1825 */ 1826 if (pci_channel_offline(phba->pcidev)) { 1827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1828 "3166 pci channel is offline\n"); 1829 lpfc_sli4_offline_eratt(phba); 1830 return; 1831 } 1832 1833 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1834 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1835 switch (if_type) { 1836 case LPFC_SLI_INTF_IF_TYPE_0: 1837 pci_rd_rc1 = lpfc_readl( 1838 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1839 &uerrlo_reg); 1840 pci_rd_rc2 = lpfc_readl( 1841 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1842 &uemasklo_reg); 1843 /* consider PCI bus read error as pci_channel_offline */ 1844 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1845 return; 1846 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1847 lpfc_sli4_offline_eratt(phba); 1848 return; 1849 } 1850 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1851 "7623 Checking UE recoverable"); 1852 1853 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1854 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1855 &portsmphr_reg.word0)) 1856 continue; 1857 1858 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1859 &portsmphr_reg); 1860 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1861 LPFC_PORT_SEM_UE_RECOVERABLE) 1862 break; 1863 /*Sleep for 1Sec, before checking SEMAPHORE */ 1864 msleep(1000); 1865 } 1866 1867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1868 "4827 smphr_port_status x%x : Waited %dSec", 1869 smphr_port_status, i); 1870 1871 /* Recoverable UE, reset the HBA device */ 1872 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1873 LPFC_PORT_SEM_UE_RECOVERABLE) { 1874 for (i = 0; i < 20; i++) { 1875 msleep(1000); 1876 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1877 &portsmphr_reg.word0) && 1878 (LPFC_POST_STAGE_PORT_READY == 1879 bf_get(lpfc_port_smphr_port_status, 1880 &portsmphr_reg))) { 1881 rc = lpfc_sli4_port_sta_fn_reset(phba, 1882 LPFC_MBX_NO_WAIT, en_rn_msg); 1883 if (rc == 0) 1884 return; 1885 lpfc_printf_log(phba, 1886 KERN_ERR, LOG_INIT, 1887 "4215 Failed to recover UE"); 1888 break; 1889 } 1890 } 1891 } 1892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1893 "7624 Firmware not ready: Failing UE recovery," 1894 " waited %dSec", i); 1895 phba->link_state = LPFC_HBA_ERROR; 1896 break; 1897 1898 case LPFC_SLI_INTF_IF_TYPE_2: 1899 case LPFC_SLI_INTF_IF_TYPE_6: 1900 pci_rd_rc1 = lpfc_readl( 1901 phba->sli4_hba.u.if_type2.STATUSregaddr, 1902 &portstat_reg.word0); 1903 /* consider PCI bus read error as pci_channel_offline */ 1904 if (pci_rd_rc1 == -EIO) { 1905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1906 "3151 PCI bus read access failure: x%x\n", 1907 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1908 lpfc_sli4_offline_eratt(phba); 1909 return; 1910 } 1911 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1912 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1913 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1915 "2889 Port Overtemperature event, " 1916 "taking port offline Data: x%x x%x\n", 1917 reg_err1, reg_err2); 1918 1919 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1920 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1921 temp_event_data.event_code = LPFC_CRIT_TEMP; 1922 temp_event_data.data = 0xFFFFFFFF; 1923 1924 shost = lpfc_shost_from_vport(phba->pport); 1925 fc_host_post_vendor_event(shost, fc_get_event_number(), 1926 sizeof(temp_event_data), 1927 (char *)&temp_event_data, 1928 SCSI_NL_VID_TYPE_PCI 1929 | PCI_VENDOR_ID_EMULEX); 1930 1931 spin_lock_irq(&phba->hbalock); 1932 
phba->over_temp_state = HBA_OVER_TEMP; 1933 spin_unlock_irq(&phba->hbalock); 1934 lpfc_sli4_offline_eratt(phba); 1935 return; 1936 } 1937 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1938 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1940 "3143 Port Down: Firmware Update " 1941 "Detected\n"); 1942 en_rn_msg = false; 1943 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1944 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1946 "3144 Port Down: Debug Dump\n"); 1947 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1948 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1950 "3145 Port Down: Provisioning\n"); 1951 1952 /* If resets are disabled then leave the HBA alone and return */ 1953 if (!phba->cfg_enable_hba_reset) 1954 return; 1955 1956 /* Check port status register for function reset */ 1957 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1958 en_rn_msg); 1959 if (rc == 0) { 1960 /* don't report event on forced debug dump */ 1961 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1962 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1963 return; 1964 else 1965 break; 1966 } 1967 /* fall through for not able to recover */ 1968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1969 "3152 Unrecoverable error\n"); 1970 phba->link_state = LPFC_HBA_ERROR; 1971 break; 1972 case LPFC_SLI_INTF_IF_TYPE_1: 1973 default: 1974 break; 1975 } 1976 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1977 "3123 Report dump event to upper layer\n"); 1978 /* Send an internal error event to mgmt application */ 1979 lpfc_board_errevt_to_mgmt(phba); 1980 1981 event_data = FC_REG_DUMP_EVENT; 1982 shost = lpfc_shost_from_vport(vport); 1983 fc_host_post_vendor_event(shost, fc_get_event_number(), 1984 sizeof(event_data), (char *) &event_data, 1985 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1986 } 1987 1988 /** 1989 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1990 * @phba: pointer to lpfc HBA data structure. 1991 * 1992 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1993 * routine from the API jump table function pointer from the lpfc_hba struct. 1994 * 1995 * Return codes 1996 * 0 - success. 1997 * Any other value - error. 1998 **/ 1999 void 2000 lpfc_handle_eratt(struct lpfc_hba *phba) 2001 { 2002 (*phba->lpfc_handle_eratt)(phba); 2003 } 2004 2005 /** 2006 * lpfc_handle_latt - The HBA link event handler 2007 * @phba: pointer to lpfc hba data structure. 2008 * 2009 * This routine is invoked from the worker thread to handle a HBA host 2010 * attention link event. SLI3 only. 
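 * Note: on success this routine issues a READ_TOPOLOGY mailbox command and clears the link attention bit (HA_LATT) in the host attention register; on any failure it re-enables link attention interrupts, marks the link down, sets the link state to LPFC_HBA_ERROR, and logs message 0300 with an rc of 1-4 identifying which step failed (mailbox alloc, dmabuf alloc, mbuf alloc, or mailbox issue).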
2011 **/ 2012 void 2013 lpfc_handle_latt(struct lpfc_hba *phba) 2014 { 2015 struct lpfc_vport *vport = phba->pport; 2016 struct lpfc_sli *psli = &phba->sli; 2017 LPFC_MBOXQ_t *pmb; 2018 volatile uint32_t control; 2019 struct lpfc_dmabuf *mp; 2020 int rc = 0; 2021 2022 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2023 if (!pmb) { 2024 rc = 1; 2025 goto lpfc_handle_latt_err_exit; 2026 } 2027 2028 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2029 if (!mp) { 2030 rc = 2; 2031 goto lpfc_handle_latt_free_pmb; 2032 } 2033 2034 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2035 if (!mp->virt) { 2036 rc = 3; 2037 goto lpfc_handle_latt_free_mp; 2038 } 2039 2040 /* Cleanup any outstanding ELS commands */ 2041 lpfc_els_flush_all_cmd(phba); 2042 2043 psli->slistat.link_event++; 2044 lpfc_read_topology(phba, pmb, mp); 2045 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2046 pmb->vport = vport; 2047 /* Block ELS IOCBs until we have processed this mbox command */ 2048 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2049 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2050 if (rc == MBX_NOT_FINISHED) { 2051 rc = 4; 2052 goto lpfc_handle_latt_free_mbuf; 2053 } 2054 2055 /* Clear Link Attention in HA REG */ 2056 spin_lock_irq(&phba->hbalock); 2057 writel(HA_LATT, phba->HAregaddr); 2058 readl(phba->HAregaddr); /* flush */ 2059 spin_unlock_irq(&phba->hbalock); 2060 2061 return; 2062 2063 lpfc_handle_latt_free_mbuf: 2064 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2065 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2066 lpfc_handle_latt_free_mp: 2067 kfree(mp); 2068 lpfc_handle_latt_free_pmb: 2069 mempool_free(pmb, phba->mbox_mem_pool); 2070 lpfc_handle_latt_err_exit: 2071 /* Enable Link attention interrupts */ 2072 spin_lock_irq(&phba->hbalock); 2073 psli->sli_flag |= LPFC_PROCESS_LA; 2074 control = readl(phba->HCregaddr); 2075 control |= HC_LAINT_ENA; 2076 writel(control, phba->HCregaddr); 2077 readl(phba->HCregaddr); /* flush */ 2078 2079 /* Clear Link Attention in HA REG */ 2080 writel(HA_LATT, phba->HAregaddr); 2081 readl(phba->HAregaddr); /* flush */ 2082 spin_unlock_irq(&phba->hbalock); 2083 lpfc_linkdown(phba); 2084 phba->link_state = LPFC_HBA_ERROR; 2085 2086 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2087 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2088 2089 return; 2090 } 2091 2092 /** 2093 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2094 * @phba: pointer to lpfc hba data structure. 2095 * @vpd: pointer to the vital product data. 2096 * @len: length of the vital product data in bytes. 2097 * 2098 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2099 * an array of characters. In this routine, the ModelName, ProgramType, and 2100 * ModelDesc, etc. fields of the phba data structure will be populated. 
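 * Note: the buffer is expected to follow the PCI VPD layout - 0x82 (Identifier String) and 0x91 (VPD-W) sections are skipped, a 0x90 (VPD-R) section is scanned for the SN, V1, V2, V3 and V4 keywords (serial number, model description, model name, program type and port string respectively), and a 0x78 end tag terminates parsing.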
2101 * 2102 * Return codes 2103 * 0 - pointer to the VPD passed in is NULL 2104 * 1 - success 2105 **/ 2106 int 2107 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2108 { 2109 uint8_t lenlo, lenhi; 2110 int Length; 2111 int i, j; 2112 int finished = 0; 2113 int index = 0; 2114 2115 if (!vpd) 2116 return 0; 2117 2118 /* Vital Product */ 2119 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2120 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2121 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2122 (uint32_t) vpd[3]); 2123 while (!finished && (index < (len - 4))) { 2124 switch (vpd[index]) { 2125 case 0x82: 2126 case 0x91: 2127 index += 1; 2128 lenlo = vpd[index]; 2129 index += 1; 2130 lenhi = vpd[index]; 2131 index += 1; 2132 i = ((((unsigned short)lenhi) << 8) + lenlo); 2133 index += i; 2134 break; 2135 case 0x90: 2136 index += 1; 2137 lenlo = vpd[index]; 2138 index += 1; 2139 lenhi = vpd[index]; 2140 index += 1; 2141 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2142 if (Length > len - index) 2143 Length = len - index; 2144 while (Length > 0) { 2145 /* Look for Serial Number */ 2146 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2147 index += 2; 2148 i = vpd[index]; 2149 index += 1; 2150 j = 0; 2151 Length -= (3+i); 2152 while(i--) { 2153 phba->SerialNumber[j++] = vpd[index++]; 2154 if (j == 31) 2155 break; 2156 } 2157 phba->SerialNumber[j] = 0; 2158 continue; 2159 } 2160 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2161 phba->vpd_flag |= VPD_MODEL_DESC; 2162 index += 2; 2163 i = vpd[index]; 2164 index += 1; 2165 j = 0; 2166 Length -= (3+i); 2167 while(i--) { 2168 phba->ModelDesc[j++] = vpd[index++]; 2169 if (j == 255) 2170 break; 2171 } 2172 phba->ModelDesc[j] = 0; 2173 continue; 2174 } 2175 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2176 phba->vpd_flag |= VPD_MODEL_NAME; 2177 index += 2; 2178 i = vpd[index]; 2179 index += 1; 2180 j = 0; 2181 Length -= (3+i); 2182 while(i--) { 2183 phba->ModelName[j++] = vpd[index++]; 2184 if (j == 79) 2185 break; 2186 } 2187 phba->ModelName[j] = 0; 2188 continue; 2189 } 2190 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2191 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2192 index += 2; 2193 i = vpd[index]; 2194 index += 1; 2195 j = 0; 2196 Length -= (3+i); 2197 while(i--) { 2198 phba->ProgramType[j++] = vpd[index++]; 2199 if (j == 255) 2200 break; 2201 } 2202 phba->ProgramType[j] = 0; 2203 continue; 2204 } 2205 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2206 phba->vpd_flag |= VPD_PORT; 2207 index += 2; 2208 i = vpd[index]; 2209 index += 1; 2210 j = 0; 2211 Length -= (3+i); 2212 while(i--) { 2213 if ((phba->sli_rev == LPFC_SLI_REV4) && 2214 (phba->sli4_hba.pport_name_sta == 2215 LPFC_SLI4_PPNAME_GET)) { 2216 j++; 2217 index++; 2218 } else 2219 phba->Port[j++] = vpd[index++]; 2220 if (j == 19) 2221 break; 2222 } 2223 if ((phba->sli_rev != LPFC_SLI_REV4) || 2224 (phba->sli4_hba.pport_name_sta == 2225 LPFC_SLI4_PPNAME_NON)) 2226 phba->Port[j] = 0; 2227 continue; 2228 } 2229 else { 2230 index += 2; 2231 i = vpd[index]; 2232 index += 1; 2233 index += i; 2234 Length -= (3 + i); 2235 } 2236 } 2237 finished = 0; 2238 break; 2239 case 0x78: 2240 finished = 1; 2241 break; 2242 default: 2243 index ++; 2244 break; 2245 } 2246 } 2247 2248 return(1); 2249 } 2250 2251 /** 2252 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2253 * @phba: pointer to lpfc hba data structure. 2254 * @mdp: pointer to the data structure to hold the derived model name. 
2255 * @descp: pointer to the data structure to hold the derived description. 2256 * 2257 * This routine retrieves HBA's description based on its registered PCI device 2258 * ID. The @descp passed into this function points to an array of 256 chars. It 2259 * shall be returned with the model name, maximum speed, and the host bus type. 2260 * The @mdp passed into this function points to an array of 80 chars. When the 2261 * function returns, the @mdp will be filled with the model name. 2262 **/ 2263 static void 2264 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2265 { 2266 lpfc_vpd_t *vp; 2267 uint16_t dev_id = phba->pcidev->device; 2268 int max_speed; 2269 int GE = 0; 2270 int oneConnect = 0; /* default is not a oneConnect */ 2271 struct { 2272 char *name; 2273 char *bus; 2274 char *function; 2275 } m = {"<Unknown>", "", ""}; 2276 2277 if (mdp && mdp[0] != '\0' 2278 && descp && descp[0] != '\0') 2279 return; 2280 2281 if (phba->lmt & LMT_64Gb) 2282 max_speed = 64; 2283 else if (phba->lmt & LMT_32Gb) 2284 max_speed = 32; 2285 else if (phba->lmt & LMT_16Gb) 2286 max_speed = 16; 2287 else if (phba->lmt & LMT_10Gb) 2288 max_speed = 10; 2289 else if (phba->lmt & LMT_8Gb) 2290 max_speed = 8; 2291 else if (phba->lmt & LMT_4Gb) 2292 max_speed = 4; 2293 else if (phba->lmt & LMT_2Gb) 2294 max_speed = 2; 2295 else if (phba->lmt & LMT_1Gb) 2296 max_speed = 1; 2297 else 2298 max_speed = 0; 2299 2300 vp = &phba->vpd; 2301 2302 switch (dev_id) { 2303 case PCI_DEVICE_ID_FIREFLY: 2304 m = (typeof(m)){"LP6000", "PCI", 2305 "Obsolete, Unsupported Fibre Channel Adapter"}; 2306 break; 2307 case PCI_DEVICE_ID_SUPERFLY: 2308 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2309 m = (typeof(m)){"LP7000", "PCI", ""}; 2310 else 2311 m = (typeof(m)){"LP7000E", "PCI", ""}; 2312 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2313 break; 2314 case PCI_DEVICE_ID_DRAGONFLY: 2315 m = (typeof(m)){"LP8000", "PCI", 2316 "Obsolete, Unsupported Fibre Channel Adapter"}; 2317 break; 2318 case PCI_DEVICE_ID_CENTAUR: 2319 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2320 m = (typeof(m)){"LP9002", "PCI", ""}; 2321 else 2322 m = (typeof(m)){"LP9000", "PCI", ""}; 2323 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2324 break; 2325 case PCI_DEVICE_ID_RFLY: 2326 m = (typeof(m)){"LP952", "PCI", 2327 "Obsolete, Unsupported Fibre Channel Adapter"}; 2328 break; 2329 case PCI_DEVICE_ID_PEGASUS: 2330 m = (typeof(m)){"LP9802", "PCI-X", 2331 "Obsolete, Unsupported Fibre Channel Adapter"}; 2332 break; 2333 case PCI_DEVICE_ID_THOR: 2334 m = (typeof(m)){"LP10000", "PCI-X", 2335 "Obsolete, Unsupported Fibre Channel Adapter"}; 2336 break; 2337 case PCI_DEVICE_ID_VIPER: 2338 m = (typeof(m)){"LPX1000", "PCI-X", 2339 "Obsolete, Unsupported Fibre Channel Adapter"}; 2340 break; 2341 case PCI_DEVICE_ID_PFLY: 2342 m = (typeof(m)){"LP982", "PCI-X", 2343 "Obsolete, Unsupported Fibre Channel Adapter"}; 2344 break; 2345 case PCI_DEVICE_ID_TFLY: 2346 m = (typeof(m)){"LP1050", "PCI-X", 2347 "Obsolete, Unsupported Fibre Channel Adapter"}; 2348 break; 2349 case PCI_DEVICE_ID_HELIOS: 2350 m = (typeof(m)){"LP11000", "PCI-X2", 2351 "Obsolete, Unsupported Fibre Channel Adapter"}; 2352 break; 2353 case PCI_DEVICE_ID_HELIOS_SCSP: 2354 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2355 "Obsolete, Unsupported Fibre Channel Adapter"}; 2356 break; 2357 case PCI_DEVICE_ID_HELIOS_DCSP: 2358 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2359 "Obsolete, Unsupported Fibre Channel Adapter"}; 2360 break; 2361 case 
PCI_DEVICE_ID_NEPTUNE: 2362 m = (typeof(m)){"LPe1000", "PCIe", 2363 "Obsolete, Unsupported Fibre Channel Adapter"}; 2364 break; 2365 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2366 m = (typeof(m)){"LPe1000-SP", "PCIe", 2367 "Obsolete, Unsupported Fibre Channel Adapter"}; 2368 break; 2369 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2370 m = (typeof(m)){"LPe1002-SP", "PCIe", 2371 "Obsolete, Unsupported Fibre Channel Adapter"}; 2372 break; 2373 case PCI_DEVICE_ID_BMID: 2374 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2375 break; 2376 case PCI_DEVICE_ID_BSMB: 2377 m = (typeof(m)){"LP111", "PCI-X2", 2378 "Obsolete, Unsupported Fibre Channel Adapter"}; 2379 break; 2380 case PCI_DEVICE_ID_ZEPHYR: 2381 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2382 break; 2383 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2384 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2385 break; 2386 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2387 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2388 GE = 1; 2389 break; 2390 case PCI_DEVICE_ID_ZMID: 2391 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2392 break; 2393 case PCI_DEVICE_ID_ZSMB: 2394 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2395 break; 2396 case PCI_DEVICE_ID_LP101: 2397 m = (typeof(m)){"LP101", "PCI-X", 2398 "Obsolete, Unsupported Fibre Channel Adapter"}; 2399 break; 2400 case PCI_DEVICE_ID_LP10000S: 2401 m = (typeof(m)){"LP10000-S", "PCI", 2402 "Obsolete, Unsupported Fibre Channel Adapter"}; 2403 break; 2404 case PCI_DEVICE_ID_LP11000S: 2405 m = (typeof(m)){"LP11000-S", "PCI-X2", 2406 "Obsolete, Unsupported Fibre Channel Adapter"}; 2407 break; 2408 case PCI_DEVICE_ID_LPE11000S: 2409 m = (typeof(m)){"LPe11000-S", "PCIe", 2410 "Obsolete, Unsupported Fibre Channel Adapter"}; 2411 break; 2412 case PCI_DEVICE_ID_SAT: 2413 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2414 break; 2415 case PCI_DEVICE_ID_SAT_MID: 2416 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2417 break; 2418 case PCI_DEVICE_ID_SAT_SMB: 2419 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2420 break; 2421 case PCI_DEVICE_ID_SAT_DCSP: 2422 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2423 break; 2424 case PCI_DEVICE_ID_SAT_SCSP: 2425 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2426 break; 2427 case PCI_DEVICE_ID_SAT_S: 2428 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2429 break; 2430 case PCI_DEVICE_ID_HORNET: 2431 m = (typeof(m)){"LP21000", "PCIe", 2432 "Obsolete, Unsupported FCoE Adapter"}; 2433 GE = 1; 2434 break; 2435 case PCI_DEVICE_ID_PROTEUS_VF: 2436 m = (typeof(m)){"LPev12000", "PCIe IOV", 2437 "Obsolete, Unsupported Fibre Channel Adapter"}; 2438 break; 2439 case PCI_DEVICE_ID_PROTEUS_PF: 2440 m = (typeof(m)){"LPev12000", "PCIe IOV", 2441 "Obsolete, Unsupported Fibre Channel Adapter"}; 2442 break; 2443 case PCI_DEVICE_ID_PROTEUS_S: 2444 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2445 "Obsolete, Unsupported Fibre Channel Adapter"}; 2446 break; 2447 case PCI_DEVICE_ID_TIGERSHARK: 2448 oneConnect = 1; 2449 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2450 break; 2451 case PCI_DEVICE_ID_TOMCAT: 2452 oneConnect = 1; 2453 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2454 break; 2455 case PCI_DEVICE_ID_FALCON: 2456 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2457 "EmulexSecure Fibre"}; 2458 break; 2459 case PCI_DEVICE_ID_BALIUS: 2460 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2461 "Obsolete, Unsupported Fibre Channel Adapter"}; 2462 break; 2463 
case PCI_DEVICE_ID_LANCER_FC: 2464 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2465 break; 2466 case PCI_DEVICE_ID_LANCER_FC_VF: 2467 m = (typeof(m)){"LPe16000", "PCIe", 2468 "Obsolete, Unsupported Fibre Channel Adapter"}; 2469 break; 2470 case PCI_DEVICE_ID_LANCER_FCOE: 2471 oneConnect = 1; 2472 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2473 break; 2474 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2475 oneConnect = 1; 2476 m = (typeof(m)){"OCe15100", "PCIe", 2477 "Obsolete, Unsupported FCoE"}; 2478 break; 2479 case PCI_DEVICE_ID_LANCER_G6_FC: 2480 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2481 break; 2482 case PCI_DEVICE_ID_LANCER_G7_FC: 2483 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2484 break; 2485 case PCI_DEVICE_ID_SKYHAWK: 2486 case PCI_DEVICE_ID_SKYHAWK_VF: 2487 oneConnect = 1; 2488 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2489 break; 2490 default: 2491 m = (typeof(m)){"Unknown", "", ""}; 2492 break; 2493 } 2494 2495 if (mdp && mdp[0] == '\0') 2496 snprintf(mdp, 79,"%s", m.name); 2497 /* 2498 * oneConnect hba requires special processing, they are all initiators 2499 * and we put the port number on the end 2500 */ 2501 if (descp && descp[0] == '\0') { 2502 if (oneConnect) 2503 snprintf(descp, 255, 2504 "Emulex OneConnect %s, %s Initiator %s", 2505 m.name, m.function, 2506 phba->Port); 2507 else if (max_speed == 0) 2508 snprintf(descp, 255, 2509 "Emulex %s %s %s", 2510 m.name, m.bus, m.function); 2511 else 2512 snprintf(descp, 255, 2513 "Emulex %s %d%s %s %s", 2514 m.name, max_speed, (GE) ? "GE" : "Gb", 2515 m.bus, m.function); 2516 } 2517 } 2518 2519 /** 2520 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2521 * @phba: pointer to lpfc hba data structure. 2522 * @pring: pointer to a IOCB ring. 2523 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2524 * 2525 * This routine posts a given number of IOCBs with the associated DMA buffer 2526 * descriptors specified by the cnt argument to the given IOCB ring. 2527 * 2528 * Return codes 2529 * The number of IOCBs NOT able to be posted to the IOCB ring. 
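 * Note: each queued CMD_QUE_RING_BUF64_CN iocb carries up to two DMA buffer descriptors, so cnt is reduced by one or two per posted iocb; buffers that could not be posted are remembered in pring->missbufcnt and retried on the next call.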
2530 **/ 2531 int 2532 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2533 { 2534 IOCB_t *icmd; 2535 struct lpfc_iocbq *iocb; 2536 struct lpfc_dmabuf *mp1, *mp2; 2537 2538 cnt += pring->missbufcnt; 2539 2540 /* While there are buffers to post */ 2541 while (cnt > 0) { 2542 /* Allocate buffer for command iocb */ 2543 iocb = lpfc_sli_get_iocbq(phba); 2544 if (iocb == NULL) { 2545 pring->missbufcnt = cnt; 2546 return cnt; 2547 } 2548 icmd = &iocb->iocb; 2549 2550 /* 2 buffers can be posted per command */ 2551 /* Allocate buffer to post */ 2552 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2553 if (mp1) 2554 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2555 if (!mp1 || !mp1->virt) { 2556 kfree(mp1); 2557 lpfc_sli_release_iocbq(phba, iocb); 2558 pring->missbufcnt = cnt; 2559 return cnt; 2560 } 2561 2562 INIT_LIST_HEAD(&mp1->list); 2563 /* Allocate buffer to post */ 2564 if (cnt > 1) { 2565 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2566 if (mp2) 2567 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2568 &mp2->phys); 2569 if (!mp2 || !mp2->virt) { 2570 kfree(mp2); 2571 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2572 kfree(mp1); 2573 lpfc_sli_release_iocbq(phba, iocb); 2574 pring->missbufcnt = cnt; 2575 return cnt; 2576 } 2577 2578 INIT_LIST_HEAD(&mp2->list); 2579 } else { 2580 mp2 = NULL; 2581 } 2582 2583 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2584 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2585 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2586 icmd->ulpBdeCount = 1; 2587 cnt--; 2588 if (mp2) { 2589 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2590 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2591 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2592 cnt--; 2593 icmd->ulpBdeCount = 2; 2594 } 2595 2596 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2597 icmd->ulpLe = 1; 2598 2599 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2600 IOCB_ERROR) { 2601 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2602 kfree(mp1); 2603 cnt++; 2604 if (mp2) { 2605 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2606 kfree(mp2); 2607 cnt++; 2608 } 2609 lpfc_sli_release_iocbq(phba, iocb); 2610 pring->missbufcnt = cnt; 2611 return cnt; 2612 } 2613 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2614 if (mp2) 2615 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2616 } 2617 pring->missbufcnt = 0; 2618 return 0; 2619 } 2620 2621 /** 2622 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2623 * @phba: pointer to lpfc hba data structure. 2624 * 2625 * This routine posts initial receive IOCB buffers to the ELS ring. The 2626 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2627 * set to 64 IOCBs. SLI3 only. 2628 * 2629 * Return codes 2630 * 0 - success (currently always success) 2631 **/ 2632 static int 2633 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2634 { 2635 struct lpfc_sli *psli = &phba->sli; 2636 2637 /* Ring 0, ELS / CT buffers */ 2638 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2639 /* Ring 2 - FCP no buffers needed */ 2640 2641 return 0; 2642 } 2643 2644 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2645 2646 /** 2647 * lpfc_sha_init - Set up initial array of hash table entries 2648 * @HashResultPointer: pointer to an array as hash table. 2649 * 2650 * This routine sets up the initial values to the array of hash table entries 2651 * for the LC HBAs. 
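 * Note: the five values written below are the standard SHA-1 initial hash constants (H0-H4); the S(N, V) macro above is a 32-bit rotate-left by N bits, and lpfc_sha_iterate() runs the usual 80-round SHA-1 compression over the working array.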
2652 **/ 2653 static void 2654 lpfc_sha_init(uint32_t * HashResultPointer) 2655 { 2656 HashResultPointer[0] = 0x67452301; 2657 HashResultPointer[1] = 0xEFCDAB89; 2658 HashResultPointer[2] = 0x98BADCFE; 2659 HashResultPointer[3] = 0x10325476; 2660 HashResultPointer[4] = 0xC3D2E1F0; 2661 } 2662 2663 /** 2664 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2665 * @HashResultPointer: pointer to an initial/result hash table. 2666 * @HashWorkingPointer: pointer to a working hash table. 2667 * 2668 * This routine iterates the initial hash table pointed to by @HashResultPointer 2669 * with the values from the working hash table pointed to by @HashWorkingPointer. 2670 * The results are put back into the initial hash table and returned through 2671 * @HashResultPointer as the result hash table. 2672 **/ 2673 static void 2674 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2675 { 2676 int t; 2677 uint32_t TEMP; 2678 uint32_t A, B, C, D, E; 2679 t = 16; 2680 do { 2681 HashWorkingPointer[t] = 2682 S(1, 2683 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2684 8] ^ 2685 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2686 } while (++t <= 79); 2687 t = 0; 2688 A = HashResultPointer[0]; 2689 B = HashResultPointer[1]; 2690 C = HashResultPointer[2]; 2691 D = HashResultPointer[3]; 2692 E = HashResultPointer[4]; 2693 2694 do { 2695 if (t < 20) { 2696 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2697 } else if (t < 40) { 2698 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2699 } else if (t < 60) { 2700 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2701 } else { 2702 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2703 } 2704 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2705 E = D; 2706 D = C; 2707 C = S(30, B); 2708 B = A; 2709 A = TEMP; 2710 } while (++t <= 79); 2711 2712 HashResultPointer[0] += A; 2713 HashResultPointer[1] += B; 2714 HashResultPointer[2] += C; 2715 HashResultPointer[3] += D; 2716 HashResultPointer[4] += E; 2717 2718 } 2719 2720 /** 2721 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2722 * @RandomChallenge: pointer to the entry of host challenge random number array. 2723 * @HashWorking: pointer to the entry of the working hash array. 2724 * 2725 * This routine calculates the working hash array entry referred to by @HashWorking 2726 * from the challenge random numbers associated with the host, referred to by 2727 * @RandomChallenge. The result is put into the entry of the working hash 2728 * array and returned by reference through @HashWorking. 2729 **/ 2730 static void 2731 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2732 { 2733 *HashWorking = (*RandomChallenge ^ *HashWorking); 2734 } 2735 2736 /** 2737 * lpfc_hba_init - Perform special handling for LC HBA initialization 2738 * @phba: pointer to lpfc hba data structure. 2739 * @hbainit: pointer to an array of unsigned 32-bit integers. 2740 * 2741 * This routine performs the special handling for LC HBA initialization.
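 * Note: the routine builds an 80-word working array seeded from the adapter WWNN, mixes in the host challenge words from phba->RandomData via lpfc_challenge_key(), and then runs lpfc_sha_init()/lpfc_sha_iterate() to leave the computed response in @hbainit.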
2742 **/ 2743 void 2744 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2745 { 2746 int t; 2747 uint32_t *HashWorking; 2748 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2749 2750 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2751 if (!HashWorking) 2752 return; 2753 2754 HashWorking[0] = HashWorking[78] = *pwwnn++; 2755 HashWorking[1] = HashWorking[79] = *pwwnn; 2756 2757 for (t = 0; t < 7; t++) 2758 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2759 2760 lpfc_sha_init(hbainit); 2761 lpfc_sha_iterate(hbainit, HashWorking); 2762 kfree(HashWorking); 2763 } 2764 2765 /** 2766 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2767 * @vport: pointer to a virtual N_Port data structure. 2768 * 2769 * This routine performs the necessary cleanups before deleting the @vport. 2770 * It invokes the discovery state machine to perform necessary state 2771 * transitions and to release the ndlps associated with the @vport. Note, 2772 * the physical port is treated as @vport 0. 2773 **/ 2774 void 2775 lpfc_cleanup(struct lpfc_vport *vport) 2776 { 2777 struct lpfc_hba *phba = vport->phba; 2778 struct lpfc_nodelist *ndlp, *next_ndlp; 2779 int i = 0; 2780 2781 if (phba->link_state > LPFC_LINK_DOWN) 2782 lpfc_port_link_failure(vport); 2783 2784 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2785 if (!NLP_CHK_NODE_ACT(ndlp)) { 2786 ndlp = lpfc_enable_node(vport, ndlp, 2787 NLP_STE_UNUSED_NODE); 2788 if (!ndlp) 2789 continue; 2790 spin_lock_irq(&phba->ndlp_lock); 2791 NLP_SET_FREE_REQ(ndlp); 2792 spin_unlock_irq(&phba->ndlp_lock); 2793 /* Trigger the release of the ndlp memory */ 2794 lpfc_nlp_put(ndlp); 2795 continue; 2796 } 2797 spin_lock_irq(&phba->ndlp_lock); 2798 if (NLP_CHK_FREE_REQ(ndlp)) { 2799 /* The ndlp should not be in memory free mode already */ 2800 spin_unlock_irq(&phba->ndlp_lock); 2801 continue; 2802 } else 2803 /* Indicate request for freeing ndlp memory */ 2804 NLP_SET_FREE_REQ(ndlp); 2805 spin_unlock_irq(&phba->ndlp_lock); 2806 2807 if (vport->port_type != LPFC_PHYSICAL_PORT && 2808 ndlp->nlp_DID == Fabric_DID) { 2809 /* Just free up ndlp with Fabric_DID for vports */ 2810 lpfc_nlp_put(ndlp); 2811 continue; 2812 } 2813 2814 /* take care of nodes in unused state before the state 2815 * machine taking action. 2816 */ 2817 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2818 lpfc_nlp_put(ndlp); 2819 continue; 2820 } 2821 2822 if (ndlp->nlp_type & NLP_FABRIC) 2823 lpfc_disc_state_machine(vport, ndlp, NULL, 2824 NLP_EVT_DEVICE_RECOVERY); 2825 2826 lpfc_disc_state_machine(vport, ndlp, NULL, 2827 NLP_EVT_DEVICE_RM); 2828 } 2829 2830 /* At this point, ALL ndlp's should be gone 2831 * because of the previous NLP_EVT_DEVICE_RM. 2832 * Lets wait for this to happen, if needed. 2833 */ 2834 while (!list_empty(&vport->fc_nodes)) { 2835 if (i++ > 3000) { 2836 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2837 "0233 Nodelist not empty\n"); 2838 list_for_each_entry_safe(ndlp, next_ndlp, 2839 &vport->fc_nodes, nlp_listp) { 2840 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2841 LOG_NODE, 2842 "0282 did:x%x ndlp:x%px " 2843 "usgmap:x%x refcnt:%d\n", 2844 ndlp->nlp_DID, (void *)ndlp, 2845 ndlp->nlp_usg_map, 2846 kref_read(&ndlp->kref)); 2847 } 2848 break; 2849 } 2850 2851 /* Wait for any activity on ndlps to settle */ 2852 msleep(10); 2853 } 2854 lpfc_cleanup_vports_rrqs(vport, NULL); 2855 } 2856 2857 /** 2858 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2859 * @vport: pointer to a virtual N_Port data structure. 
2860 * 2861 * This routine stops all the timers associated with a @vport. This function 2862 * is invoked before disabling or deleting a @vport. Note that the physical 2863 * port is treated as @vport 0. 2864 **/ 2865 void 2866 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2867 { 2868 del_timer_sync(&vport->els_tmofunc); 2869 del_timer_sync(&vport->delayed_disc_tmo); 2870 lpfc_can_disctmo(vport); 2871 return; 2872 } 2873 2874 /** 2875 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2876 * @phba: pointer to lpfc hba data structure. 2877 * 2878 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2879 * caller of this routine should already hold the host lock. 2880 **/ 2881 void 2882 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2883 { 2884 /* Clear pending FCF rediscovery wait flag */ 2885 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2886 2887 /* Now, try to stop the timer */ 2888 del_timer(&phba->fcf.redisc_wait); 2889 } 2890 2891 /** 2892 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2893 * @phba: pointer to lpfc hba data structure. 2894 * 2895 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2896 * checks whether the FCF rediscovery wait timer is pending with the host 2897 * lock held before proceeding with disabling the timer and clearing the 2898 * wait timer pending flag. 2899 **/ 2900 void 2901 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2902 { 2903 spin_lock_irq(&phba->hbalock); 2904 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2905 /* FCF rediscovery timer already fired or stopped */ 2906 spin_unlock_irq(&phba->hbalock); 2907 return; 2908 } 2909 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2910 /* Clear failover in progress flags */ 2911 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2912 spin_unlock_irq(&phba->hbalock); 2913 } 2914 2915 /** 2916 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2917 * @phba: pointer to lpfc hba data structure. 2918 * 2919 * This routine stops all the timers associated with a HBA. This function is 2920 * invoked before either putting a HBA offline or unloading the driver. 2921 **/ 2922 void 2923 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2924 { 2925 if (phba->pport) 2926 lpfc_stop_vport_timers(phba->pport); 2927 cancel_delayed_work_sync(&phba->eq_delay_work); 2928 del_timer_sync(&phba->sli.mbox_tmo); 2929 del_timer_sync(&phba->fabric_block_timer); 2930 del_timer_sync(&phba->eratt_poll); 2931 del_timer_sync(&phba->hb_tmofunc); 2932 if (phba->sli_rev == LPFC_SLI_REV4) { 2933 del_timer_sync(&phba->rrq_tmr); 2934 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2935 } 2936 phba->hb_outstanding = 0; 2937 2938 switch (phba->pci_dev_grp) { 2939 case LPFC_PCI_DEV_LP: 2940 /* Stop any LightPulse device specific driver timers */ 2941 del_timer_sync(&phba->fcp_poll_timer); 2942 break; 2943 case LPFC_PCI_DEV_OC: 2944 /* Stop any OneConnect device specific driver timers */ 2945 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2946 break; 2947 default: 2948 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2949 "0297 Invalid device group (x%x)\n", 2950 phba->pci_dev_grp); 2951 break; 2952 } 2953 return; 2954 } 2955 2956 /** 2957 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2958 * @phba: pointer to lpfc hba data structure. 2959 * 2960 * This routine marks a HBA's management interface as blocked.
Once the HBA's 2961 * management interface is marked as blocked, all user space access to 2962 * the HBA, whether from the sysfs interface or the libdfc interface, will 2963 * be blocked. The HBA is set to block the management interface when the 2964 * driver prepares the HBA interface for online or offline. 2965 **/ 2966 static void 2967 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2968 { 2969 unsigned long iflag; 2970 uint8_t actcmd = MBX_HEARTBEAT; 2971 unsigned long timeout; 2972 2973 spin_lock_irqsave(&phba->hbalock, iflag); 2974 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2975 spin_unlock_irqrestore(&phba->hbalock, iflag); 2976 if (mbx_action == LPFC_MBX_NO_WAIT) 2977 return; 2978 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2979 spin_lock_irqsave(&phba->hbalock, iflag); 2980 if (phba->sli.mbox_active) { 2981 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2982 /* Determine how long we might wait for the active mailbox 2983 * command to be gracefully completed by firmware. 2984 */ 2985 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2986 phba->sli.mbox_active) * 1000) + jiffies; 2987 } 2988 spin_unlock_irqrestore(&phba->hbalock, iflag); 2989 2990 /* Wait for the outstanding mailbox command to complete */ 2991 while (phba->sli.mbox_active) { 2992 /* Check active mailbox complete status every 2ms */ 2993 msleep(2); 2994 if (time_after(jiffies, timeout)) { 2995 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2996 "2813 Mgmt IO is Blocked %x " 2997 "- mbox cmd %x still active\n", 2998 phba->sli.sli_flag, actcmd); 2999 break; 3000 } 3001 } 3002 } 3003 3004 /** 3005 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3006 * @phba: pointer to lpfc hba data structure. 3007 * 3008 * Allocate RPIs for all active remote nodes. This is needed whenever 3009 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3010 * is to fix up the temporary rpi assignments. 3011 **/ 3012 void 3013 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3014 { 3015 struct lpfc_nodelist *ndlp, *next_ndlp; 3016 struct lpfc_vport **vports; 3017 int i, rpi; 3018 unsigned long flags; 3019 3020 if (phba->sli_rev != LPFC_SLI_REV4) 3021 return; 3022 3023 vports = lpfc_create_vport_work_array(phba); 3024 if (vports == NULL) 3025 return; 3026 3027 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3028 if (vports[i]->load_flag & FC_UNLOADING) 3029 continue; 3030 3031 list_for_each_entry_safe(ndlp, next_ndlp, 3032 &vports[i]->fc_nodes, 3033 nlp_listp) { 3034 if (!NLP_CHK_NODE_ACT(ndlp)) 3035 continue; 3036 rpi = lpfc_sli4_alloc_rpi(phba); 3037 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3038 spin_lock_irqsave(&phba->ndlp_lock, flags); 3039 NLP_CLR_NODE_ACT(ndlp); 3040 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3041 continue; 3042 } 3043 ndlp->nlp_rpi = rpi; 3044 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3045 LOG_NODE | LOG_DISCOVERY, 3046 "0009 Assign RPI x%x to ndlp x%px " 3047 "DID:x%06x flg:x%x map:x%x\n", 3048 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, 3049 ndlp->nlp_flag, ndlp->nlp_usg_map); 3050 } 3051 } 3052 lpfc_destroy_vport_work_array(phba, vports); 3053 } 3054 3055 /** 3056 * lpfc_create_expedite_pool - create expedite pool 3057 * @phba: pointer to lpfc hba data structure. 3058 * 3059 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3060 * to expedite pool. Mark them as expedite.
3061 **/ 3062 static void lpfc_create_expedite_pool(struct lpfc_hba *phba) 3063 { 3064 struct lpfc_sli4_hdw_queue *qp; 3065 struct lpfc_io_buf *lpfc_ncmd; 3066 struct lpfc_io_buf *lpfc_ncmd_next; 3067 struct lpfc_epd_pool *epd_pool; 3068 unsigned long iflag; 3069 3070 epd_pool = &phba->epd_pool; 3071 qp = &phba->sli4_hba.hdwq[0]; 3072 3073 spin_lock_init(&epd_pool->lock); 3074 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3075 spin_lock(&epd_pool->lock); 3076 INIT_LIST_HEAD(&epd_pool->list); 3077 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3078 &qp->lpfc_io_buf_list_put, list) { 3079 list_move_tail(&lpfc_ncmd->list, &epd_pool->list); 3080 lpfc_ncmd->expedite = true; 3081 qp->put_io_bufs--; 3082 epd_pool->count++; 3083 if (epd_pool->count >= XRI_BATCH) 3084 break; 3085 } 3086 spin_unlock(&epd_pool->lock); 3087 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3088 } 3089 3090 /** 3091 * lpfc_destroy_expedite_pool - destroy expedite pool 3092 * @phba: pointer to lpfc hba data structure. 3093 * 3094 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put 3095 * of HWQ 0. Clear the mark. 3096 **/ 3097 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) 3098 { 3099 struct lpfc_sli4_hdw_queue *qp; 3100 struct lpfc_io_buf *lpfc_ncmd; 3101 struct lpfc_io_buf *lpfc_ncmd_next; 3102 struct lpfc_epd_pool *epd_pool; 3103 unsigned long iflag; 3104 3105 epd_pool = &phba->epd_pool; 3106 qp = &phba->sli4_hba.hdwq[0]; 3107 3108 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3109 spin_lock(&epd_pool->lock); 3110 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3111 &epd_pool->list, list) { 3112 list_move_tail(&lpfc_ncmd->list, 3113 &qp->lpfc_io_buf_list_put); 3114 lpfc_ncmd->expedite = false; 3115 qp->put_io_bufs++; 3116 epd_pool->count--; 3117 } 3118 spin_unlock(&epd_pool->lock); 3119 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3120 } 3121 3122 /** 3123 * lpfc_create_multixri_pools - create multi-XRI pools 3124 * @phba: pointer to lpfc hba data structure. 3125 * 3126 * This routine initializes the public and private XRI pools for each HWQ, then 3127 * moves XRIs from lpfc_io_buf_list_put to the public pool. The high and low 3128 * watermarks are also initialized.
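 * Note: each HWQ gets an xri_limit of io_xri_cnt / cfg_hdw_queue, and the private pool watermarks start at xri_limit / 2 (high) and XRI_BATCH (low); if any per-HWQ allocation fails, the pools created so far are freed and phba->cfg_xri_rebalancing is cleared so the driver falls back to the plain per-HWQ put/get lists.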
3129 **/ 3130 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3131 { 3132 u32 i, j; 3133 u32 hwq_count; 3134 u32 count_per_hwq; 3135 struct lpfc_io_buf *lpfc_ncmd; 3136 struct lpfc_io_buf *lpfc_ncmd_next; 3137 unsigned long iflag; 3138 struct lpfc_sli4_hdw_queue *qp; 3139 struct lpfc_multixri_pool *multixri_pool; 3140 struct lpfc_pbl_pool *pbl_pool; 3141 struct lpfc_pvt_pool *pvt_pool; 3142 3143 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3144 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3145 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3146 phba->sli4_hba.io_xri_cnt); 3147 3148 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3149 lpfc_create_expedite_pool(phba); 3150 3151 hwq_count = phba->cfg_hdw_queue; 3152 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3153 3154 for (i = 0; i < hwq_count; i++) { 3155 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3156 3157 if (!multixri_pool) { 3158 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3159 "1238 Failed to allocate memory for " 3160 "multixri_pool\n"); 3161 3162 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3163 lpfc_destroy_expedite_pool(phba); 3164 3165 j = 0; 3166 while (j < i) { 3167 qp = &phba->sli4_hba.hdwq[j]; 3168 kfree(qp->p_multixri_pool); 3169 j++; 3170 } 3171 phba->cfg_xri_rebalancing = 0; 3172 return; 3173 } 3174 3175 qp = &phba->sli4_hba.hdwq[i]; 3176 qp->p_multixri_pool = multixri_pool; 3177 3178 multixri_pool->xri_limit = count_per_hwq; 3179 multixri_pool->rrb_next_hwqid = i; 3180 3181 /* Deal with public free xri pool */ 3182 pbl_pool = &multixri_pool->pbl_pool; 3183 spin_lock_init(&pbl_pool->lock); 3184 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3185 spin_lock(&pbl_pool->lock); 3186 INIT_LIST_HEAD(&pbl_pool->list); 3187 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3188 &qp->lpfc_io_buf_list_put, list) { 3189 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3190 qp->put_io_bufs--; 3191 pbl_pool->count++; 3192 } 3193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3194 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3195 pbl_pool->count, i); 3196 spin_unlock(&pbl_pool->lock); 3197 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3198 3199 /* Deal with private free xri pool */ 3200 pvt_pool = &multixri_pool->pvt_pool; 3201 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3202 pvt_pool->low_watermark = XRI_BATCH; 3203 spin_lock_init(&pvt_pool->lock); 3204 spin_lock_irqsave(&pvt_pool->lock, iflag); 3205 INIT_LIST_HEAD(&pvt_pool->list); 3206 pvt_pool->count = 0; 3207 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3208 } 3209 } 3210 3211 /** 3212 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3213 * @phba: pointer to lpfc hba data structure. 3214 * 3215 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
3216 **/ 3217 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3218 { 3219 u32 i; 3220 u32 hwq_count; 3221 struct lpfc_io_buf *lpfc_ncmd; 3222 struct lpfc_io_buf *lpfc_ncmd_next; 3223 unsigned long iflag; 3224 struct lpfc_sli4_hdw_queue *qp; 3225 struct lpfc_multixri_pool *multixri_pool; 3226 struct lpfc_pbl_pool *pbl_pool; 3227 struct lpfc_pvt_pool *pvt_pool; 3228 3229 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3230 lpfc_destroy_expedite_pool(phba); 3231 3232 if (!(phba->pport->load_flag & FC_UNLOADING)) 3233 lpfc_sli_flush_io_rings(phba); 3234 3235 hwq_count = phba->cfg_hdw_queue; 3236 3237 for (i = 0; i < hwq_count; i++) { 3238 qp = &phba->sli4_hba.hdwq[i]; 3239 multixri_pool = qp->p_multixri_pool; 3240 if (!multixri_pool) 3241 continue; 3242 3243 qp->p_multixri_pool = NULL; 3244 3245 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3246 3247 /* Deal with public free xri pool */ 3248 pbl_pool = &multixri_pool->pbl_pool; 3249 spin_lock(&pbl_pool->lock); 3250 3251 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3252 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3253 pbl_pool->count, i); 3254 3255 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3256 &pbl_pool->list, list) { 3257 list_move_tail(&lpfc_ncmd->list, 3258 &qp->lpfc_io_buf_list_put); 3259 qp->put_io_bufs++; 3260 pbl_pool->count--; 3261 } 3262 3263 INIT_LIST_HEAD(&pbl_pool->list); 3264 pbl_pool->count = 0; 3265 3266 spin_unlock(&pbl_pool->lock); 3267 3268 /* Deal with private free xri pool */ 3269 pvt_pool = &multixri_pool->pvt_pool; 3270 spin_lock(&pvt_pool->lock); 3271 3272 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3273 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3274 pvt_pool->count, i); 3275 3276 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3277 &pvt_pool->list, list) { 3278 list_move_tail(&lpfc_ncmd->list, 3279 &qp->lpfc_io_buf_list_put); 3280 qp->put_io_bufs++; 3281 pvt_pool->count--; 3282 } 3283 3284 INIT_LIST_HEAD(&pvt_pool->list); 3285 pvt_pool->count = 0; 3286 3287 spin_unlock(&pvt_pool->lock); 3288 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3289 3290 kfree(multixri_pool); 3291 } 3292 } 3293 3294 /** 3295 * lpfc_online - Initialize and bring a HBA online 3296 * @phba: pointer to lpfc hba data structure. 3297 * 3298 * This routine initializes the HBA and brings a HBA online. During this 3299 * process, the management interface is blocked to prevent user space access 3300 * to the HBA interfering with the driver initialization. 3301 * 3302 * Return codes 3303 * 0 - successful 3304 * 1 - failed 3305 **/ 3306 int 3307 lpfc_online(struct lpfc_hba *phba) 3308 { 3309 struct lpfc_vport *vport; 3310 struct lpfc_vport **vports; 3311 int i, error = 0; 3312 bool vpis_cleared = false; 3313 3314 if (!phba) 3315 return 0; 3316 vport = phba->pport; 3317 3318 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3319 return 0; 3320 3321 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3322 "0458 Bring Adapter online\n"); 3323 3324 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3325 3326 if (phba->sli_rev == LPFC_SLI_REV4) { 3327 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3328 lpfc_unblock_mgmt_io(phba); 3329 return 1; 3330 } 3331 spin_lock_irq(&phba->hbalock); 3332 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3333 vpis_cleared = true; 3334 spin_unlock_irq(&phba->hbalock); 3335 3336 /* Reestablish the local initiator port. 3337 * The offline process destroyed the previous lport. 
3338 */ 3339 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3340 !phba->nvmet_support) { 3341 error = lpfc_nvme_create_localport(phba->pport); 3342 if (error) 3343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3344 "6132 NVME restore reg failed " 3345 "on nvmei error x%x\n", error); 3346 } 3347 } else { 3348 lpfc_sli_queue_init(phba); 3349 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3350 lpfc_unblock_mgmt_io(phba); 3351 return 1; 3352 } 3353 } 3354 3355 vports = lpfc_create_vport_work_array(phba); 3356 if (vports != NULL) { 3357 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3358 struct Scsi_Host *shost; 3359 shost = lpfc_shost_from_vport(vports[i]); 3360 spin_lock_irq(shost->host_lock); 3361 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3362 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3363 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3364 if (phba->sli_rev == LPFC_SLI_REV4) { 3365 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3366 if ((vpis_cleared) && 3367 (vports[i]->port_type != 3368 LPFC_PHYSICAL_PORT)) 3369 vports[i]->vpi = 0; 3370 } 3371 spin_unlock_irq(shost->host_lock); 3372 } 3373 } 3374 lpfc_destroy_vport_work_array(phba, vports); 3375 3376 if (phba->cfg_xri_rebalancing) 3377 lpfc_create_multixri_pools(phba); 3378 3379 lpfc_cpuhp_add(phba); 3380 3381 lpfc_unblock_mgmt_io(phba); 3382 return 0; 3383 } 3384 3385 /** 3386 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3387 * @phba: pointer to lpfc hba data structure. 3388 * 3389 * This routine marks a HBA's management interface as not blocked. Once the 3390 * HBA's management interface is marked as not blocked, all the user space 3391 * access to the HBA, whether they are from sysfs interface or libdfc 3392 * interface will be allowed. The HBA is set to block the management interface 3393 * when the driver prepares the HBA interface for online or offline and then 3394 * set to unblock the management interface afterwards. 3395 **/ 3396 void 3397 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3398 { 3399 unsigned long iflag; 3400 3401 spin_lock_irqsave(&phba->hbalock, iflag); 3402 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3403 spin_unlock_irqrestore(&phba->hbalock, iflag); 3404 } 3405 3406 /** 3407 * lpfc_offline_prep - Prepare a HBA to be brought offline 3408 * @phba: pointer to lpfc hba data structure. 3409 * 3410 * This routine is invoked to prepare a HBA to be brought offline. It performs 3411 * unregistration login to all the nodes on all vports and flushes the mailbox 3412 * queue to make it ready to be brought offline. 
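 * Note: on SLI4 each active node's RPI is released back to the port and nlp_rpi is reset to LPFC_RPI_ALLOC_ERROR; a fresh RPI is assigned again (see lpfc_sli4_node_prep()) when the adapter port comes back online.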
3413 **/ 3414 void 3415 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3416 { 3417 struct lpfc_vport *vport = phba->pport; 3418 struct lpfc_nodelist *ndlp, *next_ndlp; 3419 struct lpfc_vport **vports; 3420 struct Scsi_Host *shost; 3421 int i; 3422 3423 if (vport->fc_flag & FC_OFFLINE_MODE) 3424 return; 3425 3426 lpfc_block_mgmt_io(phba, mbx_action); 3427 3428 lpfc_linkdown(phba); 3429 3430 /* Issue an unreg_login to all nodes on all vports */ 3431 vports = lpfc_create_vport_work_array(phba); 3432 if (vports != NULL) { 3433 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3434 if (vports[i]->load_flag & FC_UNLOADING) 3435 continue; 3436 shost = lpfc_shost_from_vport(vports[i]); 3437 spin_lock_irq(shost->host_lock); 3438 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3439 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3440 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3441 spin_unlock_irq(shost->host_lock); 3442 3443 shost = lpfc_shost_from_vport(vports[i]); 3444 list_for_each_entry_safe(ndlp, next_ndlp, 3445 &vports[i]->fc_nodes, 3446 nlp_listp) { 3447 if ((!NLP_CHK_NODE_ACT(ndlp)) || 3448 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 3449 /* Driver must assume RPI is invalid for 3450 * any unused or inactive node. 3451 */ 3452 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3453 continue; 3454 } 3455 3456 if (ndlp->nlp_type & NLP_FABRIC) { 3457 lpfc_disc_state_machine(vports[i], ndlp, 3458 NULL, NLP_EVT_DEVICE_RECOVERY); 3459 lpfc_disc_state_machine(vports[i], ndlp, 3460 NULL, NLP_EVT_DEVICE_RM); 3461 } 3462 spin_lock_irq(shost->host_lock); 3463 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3464 spin_unlock_irq(shost->host_lock); 3465 /* 3466 * Whenever an SLI4 port goes offline, free the 3467 * RPI. Get a new RPI when the adapter port 3468 * comes back online. 3469 */ 3470 if (phba->sli_rev == LPFC_SLI_REV4) { 3471 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3472 LOG_NODE | LOG_DISCOVERY, 3473 "0011 Free RPI x%x on " 3474 "ndlp:x%px did x%x " 3475 "usgmap:x%x\n", 3476 ndlp->nlp_rpi, ndlp, 3477 ndlp->nlp_DID, 3478 ndlp->nlp_usg_map); 3479 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3480 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3481 } 3482 lpfc_unreg_rpi(vports[i], ndlp); 3483 } 3484 } 3485 } 3486 lpfc_destroy_vport_work_array(phba, vports); 3487 3488 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3489 3490 if (phba->wq) 3491 flush_workqueue(phba->wq); 3492 } 3493 3494 /** 3495 * lpfc_offline - Bring a HBA offline 3496 * @phba: pointer to lpfc hba data structure. 3497 * 3498 * This routine actually brings a HBA offline. It stops all the timers 3499 * associated with the HBA, brings down the SLI layer, and eventually 3500 * marks the HBA as in offline state for the upper layer protocol. 3501 **/ 3502 void 3503 lpfc_offline(struct lpfc_hba *phba) 3504 { 3505 struct Scsi_Host *shost; 3506 struct lpfc_vport **vports; 3507 int i; 3508 3509 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3510 return; 3511 3512 /* stop port and all timers associated with this hba */ 3513 lpfc_stop_port(phba); 3514 3515 /* Tear down the local and target port registrations. The 3516 * nvme transports need to cleanup. 
3517 */ 3518 lpfc_nvmet_destroy_targetport(phba); 3519 lpfc_nvme_destroy_localport(phba->pport); 3520 3521 vports = lpfc_create_vport_work_array(phba); 3522 if (vports != NULL) 3523 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3524 lpfc_stop_vport_timers(vports[i]); 3525 lpfc_destroy_vport_work_array(phba, vports); 3526 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3527 "0460 Bring Adapter offline\n"); 3528 /* Bring down the SLI Layer and cleanup. The HBA is offline 3529 now. */ 3530 lpfc_sli_hba_down(phba); 3531 spin_lock_irq(&phba->hbalock); 3532 phba->work_ha = 0; 3533 spin_unlock_irq(&phba->hbalock); 3534 vports = lpfc_create_vport_work_array(phba); 3535 if (vports != NULL) 3536 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3537 shost = lpfc_shost_from_vport(vports[i]); 3538 spin_lock_irq(shost->host_lock); 3539 vports[i]->work_port_events = 0; 3540 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3541 spin_unlock_irq(shost->host_lock); 3542 } 3543 lpfc_destroy_vport_work_array(phba, vports); 3544 __lpfc_cpuhp_remove(phba); 3545 3546 if (phba->cfg_xri_rebalancing) 3547 lpfc_destroy_multixri_pools(phba); 3548 } 3549 3550 /** 3551 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3552 * @phba: pointer to lpfc hba data structure. 3553 * 3554 * This routine is to free all the SCSI buffers and IOCBs from the driver 3555 * list back to kernel. It is called from lpfc_pci_remove_one to free 3556 * the internal resources before the device is removed from the system. 3557 **/ 3558 static void 3559 lpfc_scsi_free(struct lpfc_hba *phba) 3560 { 3561 struct lpfc_io_buf *sb, *sb_next; 3562 3563 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3564 return; 3565 3566 spin_lock_irq(&phba->hbalock); 3567 3568 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3569 3570 spin_lock(&phba->scsi_buf_list_put_lock); 3571 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3572 list) { 3573 list_del(&sb->list); 3574 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3575 sb->dma_handle); 3576 kfree(sb); 3577 phba->total_scsi_bufs--; 3578 } 3579 spin_unlock(&phba->scsi_buf_list_put_lock); 3580 3581 spin_lock(&phba->scsi_buf_list_get_lock); 3582 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3583 list) { 3584 list_del(&sb->list); 3585 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3586 sb->dma_handle); 3587 kfree(sb); 3588 phba->total_scsi_bufs--; 3589 } 3590 spin_unlock(&phba->scsi_buf_list_get_lock); 3591 spin_unlock_irq(&phba->hbalock); 3592 } 3593 3594 /** 3595 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3596 * @phba: pointer to lpfc hba data structure. 3597 * 3598 * This routine is to free all the IO buffers and IOCBs from the driver 3599 * list back to kernel. It is called from lpfc_pci_remove_one to free 3600 * the internal resources before the device is removed from the system. 3601 **/ 3602 void 3603 lpfc_io_free(struct lpfc_hba *phba) 3604 { 3605 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3606 struct lpfc_sli4_hdw_queue *qp; 3607 int idx; 3608 3609 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3610 qp = &phba->sli4_hba.hdwq[idx]; 3611 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3612 spin_lock(&qp->io_buf_list_put_lock); 3613 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3614 &qp->lpfc_io_buf_list_put, 3615 list) { 3616 list_del(&lpfc_ncmd->list); 3617 qp->put_io_bufs--; 3618 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3619 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3620 if (phba->cfg_xpsgl && !phba->nvmet_support) 3621 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3622 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3623 kfree(lpfc_ncmd); 3624 qp->total_io_bufs--; 3625 } 3626 spin_unlock(&qp->io_buf_list_put_lock); 3627 3628 spin_lock(&qp->io_buf_list_get_lock); 3629 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3630 &qp->lpfc_io_buf_list_get, 3631 list) { 3632 list_del(&lpfc_ncmd->list); 3633 qp->get_io_bufs--; 3634 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3635 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3636 if (phba->cfg_xpsgl && !phba->nvmet_support) 3637 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3638 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3639 kfree(lpfc_ncmd); 3640 qp->total_io_bufs--; 3641 } 3642 spin_unlock(&qp->io_buf_list_get_lock); 3643 } 3644 } 3645 3646 /** 3647 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3648 * @phba: pointer to lpfc hba data structure. 3649 * 3650 * This routine first calculates the sizes of the current els and allocated 3651 * scsi sgl lists, and then goes through all sgls to update the physical 3652 * XRIs assigned due to port function reset. During port initialization, the 3653 * current els and allocated scsi sgl lists are 0s. 3654 * 3655 * Return codes 3656 * 0 - successful (for now, it always returns 0) 3657 **/ 3658 int 3659 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3660 { 3661 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3662 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3663 LIST_HEAD(els_sgl_list); 3664 int rc; 3665 3666 /* 3667 * update on pci function's els xri-sgl list 3668 */ 3669 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3670 3671 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3672 /* els xri-sgl expanded */ 3673 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3674 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3675 "3157 ELS xri-sgl count increased from " 3676 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3677 els_xri_cnt); 3678 /* allocate the additional els sgls */ 3679 for (i = 0; i < xri_cnt; i++) { 3680 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3681 GFP_KERNEL); 3682 if (sglq_entry == NULL) { 3683 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3684 "2562 Failure to allocate an " 3685 "ELS sgl entry:%d\n", i); 3686 rc = -ENOMEM; 3687 goto out_free_mem; 3688 } 3689 sglq_entry->buff_type = GEN_BUFF_TYPE; 3690 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3691 &sglq_entry->phys); 3692 if (sglq_entry->virt == NULL) { 3693 kfree(sglq_entry); 3694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3695 "2563 Failure to allocate an " 3696 "ELS mbuf:%d\n", i); 3697 rc = -ENOMEM; 3698 goto out_free_mem; 3699 } 3700 sglq_entry->sgl = sglq_entry->virt; 3701 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3702 sglq_entry->state = SGL_FREED; 3703 list_add_tail(&sglq_entry->list, &els_sgl_list); 3704 } 3705 spin_lock_irq(&phba->hbalock); 3706 spin_lock(&phba->sli4_hba.sgl_list_lock); 3707 list_splice_init(&els_sgl_list, 3708 &phba->sli4_hba.lpfc_els_sgl_list); 3709 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3710 spin_unlock_irq(&phba->hbalock); 3711 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3712 /* els xri-sgl shrunk */ 3713 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3714 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3715 "3158 ELS xri-sgl count decreased from " 3716 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3717 els_xri_cnt); 3718 spin_lock_irq(&phba->hbalock); 3719 spin_lock(&phba->sli4_hba.sgl_list_lock); 3720 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 3721 &els_sgl_list); 3722 /* release extra els sgls from list */ 3723 for (i = 0; i < xri_cnt; i++) { 3724 list_remove_head(&els_sgl_list, 3725 sglq_entry, struct lpfc_sglq, list); 3726 if (sglq_entry) { 3727 __lpfc_mbuf_free(phba, sglq_entry->virt, 3728 sglq_entry->phys); 3729 kfree(sglq_entry); 3730 } 3731 } 3732 list_splice_init(&els_sgl_list, 3733 &phba->sli4_hba.lpfc_els_sgl_list); 3734 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3735 spin_unlock_irq(&phba->hbalock); 3736 } else 3737 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3738 "3163 ELS xri-sgl count unchanged: %d\n", 3739 els_xri_cnt); 3740 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3741 3742 /* update xris to els sgls on the list */ 3743 sglq_entry = NULL; 3744 sglq_entry_next = NULL; 3745 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3746 &phba->sli4_hba.lpfc_els_sgl_list, list) { 3747 lxri = lpfc_sli4_next_xritag(phba); 3748 if (lxri == NO_XRI) { 3749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3750 "2400 Failed to allocate xri for " 3751 "ELS sgl\n"); 3752 rc = -ENOMEM; 3753 goto out_free_mem; 3754 } 3755 sglq_entry->sli4_lxritag = lxri; 3756 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3757 } 3758 return 0; 3759 3760 out_free_mem: 3761 lpfc_free_els_sgl_list(phba); 3762 return rc; 3763 } 3764 3765 /** 3766 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping 3767 * @phba: pointer to lpfc hba data structure. 3768 * 3769 * This routine first calculates the sizes of the current els and allocated 3770 * nvmet sgl lists, and then goes through all sgls to update the physical 3771 * XRIs assigned due to port function reset. During port initialization, the 3772 * current els and allocated nvmet sgl lists are 0s.
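 * For NVMET, every XRI that is not reserved for ELS is dedicated to I/O,
 * so the target count is max_cfg_param.max_xri - els_xri_cnt and the
 * lpfc_nvmet_sgl_list below is grown or trimmed to match it.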
3773 * 3774 * Return codes 3775 * 0 - successful (for now, it always returns 0) 3776 **/ 3777 int 3778 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3779 { 3780 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3781 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3782 uint16_t nvmet_xri_cnt; 3783 LIST_HEAD(nvmet_sgl_list); 3784 int rc; 3785 3786 /* 3787 * update on pci function's nvmet xri-sgl list 3788 */ 3789 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3790 3791 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3792 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3793 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3794 /* els xri-sgl expanded */ 3795 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3796 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3797 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3798 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3799 /* allocate the additional nvmet sgls */ 3800 for (i = 0; i < xri_cnt; i++) { 3801 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3802 GFP_KERNEL); 3803 if (sglq_entry == NULL) { 3804 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3805 "6303 Failure to allocate an " 3806 "NVMET sgl entry:%d\n", i); 3807 rc = -ENOMEM; 3808 goto out_free_mem; 3809 } 3810 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3811 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3812 &sglq_entry->phys); 3813 if (sglq_entry->virt == NULL) { 3814 kfree(sglq_entry); 3815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3816 "6304 Failure to allocate an " 3817 "NVMET buf:%d\n", i); 3818 rc = -ENOMEM; 3819 goto out_free_mem; 3820 } 3821 sglq_entry->sgl = sglq_entry->virt; 3822 memset(sglq_entry->sgl, 0, 3823 phba->cfg_sg_dma_buf_size); 3824 sglq_entry->state = SGL_FREED; 3825 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3826 } 3827 spin_lock_irq(&phba->hbalock); 3828 spin_lock(&phba->sli4_hba.sgl_list_lock); 3829 list_splice_init(&nvmet_sgl_list, 3830 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3831 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3832 spin_unlock_irq(&phba->hbalock); 3833 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3834 /* nvmet xri-sgl shrunk */ 3835 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3836 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3837 "6305 NVMET xri-sgl count decreased from " 3838 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3839 nvmet_xri_cnt); 3840 spin_lock_irq(&phba->hbalock); 3841 spin_lock(&phba->sli4_hba.sgl_list_lock); 3842 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3843 &nvmet_sgl_list); 3844 /* release extra nvmet sgls from list */ 3845 for (i = 0; i < xri_cnt; i++) { 3846 list_remove_head(&nvmet_sgl_list, 3847 sglq_entry, struct lpfc_sglq, list); 3848 if (sglq_entry) { 3849 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3850 sglq_entry->phys); 3851 kfree(sglq_entry); 3852 } 3853 } 3854 list_splice_init(&nvmet_sgl_list, 3855 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3856 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3857 spin_unlock_irq(&phba->hbalock); 3858 } else 3859 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3860 "6306 NVMET xri-sgl count unchanged: %d\n", 3861 nvmet_xri_cnt); 3862 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3863 3864 /* update xris to nvmet sgls on the list */ 3865 sglq_entry = NULL; 3866 sglq_entry_next = NULL; 3867 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3868 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3869 lxri = lpfc_sli4_next_xritag(phba); 3870 if (lxri == NO_XRI) { 3871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3872 "6307 Failed 
to allocate xri for " 3873 "NVMET sgl\n"); 3874 rc = -ENOMEM; 3875 goto out_free_mem; 3876 } 3877 sglq_entry->sli4_lxritag = lxri; 3878 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3879 } 3880 return 0; 3881 3882 out_free_mem: 3883 lpfc_free_nvmet_sgl_list(phba); 3884 return rc; 3885 } 3886 3887 int 3888 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 3889 { 3890 LIST_HEAD(blist); 3891 struct lpfc_sli4_hdw_queue *qp; 3892 struct lpfc_io_buf *lpfc_cmd; 3893 struct lpfc_io_buf *iobufp, *prev_iobufp; 3894 int idx, cnt, xri, inserted; 3895 3896 cnt = 0; 3897 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3898 qp = &phba->sli4_hba.hdwq[idx]; 3899 spin_lock_irq(&qp->io_buf_list_get_lock); 3900 spin_lock(&qp->io_buf_list_put_lock); 3901 3902 /* Take everything off the get and put lists */ 3903 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 3904 list_splice(&qp->lpfc_io_buf_list_put, &blist); 3905 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 3906 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 3907 cnt += qp->get_io_bufs + qp->put_io_bufs; 3908 qp->get_io_bufs = 0; 3909 qp->put_io_bufs = 0; 3910 qp->total_io_bufs = 0; 3911 spin_unlock(&qp->io_buf_list_put_lock); 3912 spin_unlock_irq(&qp->io_buf_list_get_lock); 3913 } 3914 3915 /* 3916 * Take IO buffers off blist and put on cbuf sorted by XRI. 3917 * This is because POST_SGL takes a sequential range of XRIs 3918 * to post to the firmware. 3919 */ 3920 for (idx = 0; idx < cnt; idx++) { 3921 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 3922 if (!lpfc_cmd) 3923 return cnt; 3924 if (idx == 0) { 3925 list_add_tail(&lpfc_cmd->list, cbuf); 3926 continue; 3927 } 3928 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 3929 inserted = 0; 3930 prev_iobufp = NULL; 3931 list_for_each_entry(iobufp, cbuf, list) { 3932 if (xri < iobufp->cur_iocbq.sli4_xritag) { 3933 if (prev_iobufp) 3934 list_add(&lpfc_cmd->list, 3935 &prev_iobufp->list); 3936 else 3937 list_add(&lpfc_cmd->list, cbuf); 3938 inserted = 1; 3939 break; 3940 } 3941 prev_iobufp = iobufp; 3942 } 3943 if (!inserted) 3944 list_add_tail(&lpfc_cmd->list, cbuf); 3945 } 3946 return cnt; 3947 } 3948 3949 int 3950 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 3951 { 3952 struct lpfc_sli4_hdw_queue *qp; 3953 struct lpfc_io_buf *lpfc_cmd; 3954 int idx, cnt; 3955 3956 qp = phba->sli4_hba.hdwq; 3957 cnt = 0; 3958 while (!list_empty(cbuf)) { 3959 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3960 list_remove_head(cbuf, lpfc_cmd, 3961 struct lpfc_io_buf, list); 3962 if (!lpfc_cmd) 3963 return cnt; 3964 cnt++; 3965 qp = &phba->sli4_hba.hdwq[idx]; 3966 lpfc_cmd->hdwq_no = idx; 3967 lpfc_cmd->hdwq = qp; 3968 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 3969 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 3970 spin_lock(&qp->io_buf_list_put_lock); 3971 list_add_tail(&lpfc_cmd->list, 3972 &qp->lpfc_io_buf_list_put); 3973 qp->put_io_bufs++; 3974 qp->total_io_bufs++; 3975 spin_unlock(&qp->io_buf_list_put_lock); 3976 } 3977 } 3978 return cnt; 3979 } 3980 3981 /** 3982 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 3983 * @phba: pointer to lpfc hba data structure. 3984 * 3985 * This routine first calculates the sizes of the current els and allocated 3986 * scsi sgl lists, and then goes through all sgls to updates the physical 3987 * XRIs assigned due to port function reset. During port initialization, the 3988 * current els and allocated scsi sgl lists are 0s. 
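 * The existing IO buffers are first pulled off the hardware-queue get/put
 * lists by lpfc_io_buf_flush() (kept sorted by XRI so they can later be
 * block-posted), surplus buffers are freed if io_xri_max shrank, fresh
 * XRIs are assigned, and lpfc_io_buf_replenish() then hands the buffers
 * back out round-robin across the hardware queues.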
3989 * 3990 * Return codes 3991 * 0 - successful (for now, it always returns 0) 3992 **/ 3993 int 3994 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) 3995 { 3996 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 3997 uint16_t i, lxri, els_xri_cnt; 3998 uint16_t io_xri_cnt, io_xri_max; 3999 LIST_HEAD(io_sgl_list); 4000 int rc, cnt; 4001 4002 /* 4003 * update on pci function's allocated nvme xri-sgl list 4004 */ 4005 4006 /* maximum number of xris available for nvme buffers */ 4007 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4008 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4009 phba->sli4_hba.io_xri_max = io_xri_max; 4010 4011 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4012 "6074 Current allocated XRI sgl count:%d, " 4013 "maximum XRI count:%d\n", 4014 phba->sli4_hba.io_xri_cnt, 4015 phba->sli4_hba.io_xri_max); 4016 4017 cnt = lpfc_io_buf_flush(phba, &io_sgl_list); 4018 4019 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { 4020 /* max nvme xri shrunk below the allocated nvme buffers */ 4021 io_xri_cnt = phba->sli4_hba.io_xri_cnt - 4022 phba->sli4_hba.io_xri_max; 4023 /* release the extra allocated nvme buffers */ 4024 for (i = 0; i < io_xri_cnt; i++) { 4025 list_remove_head(&io_sgl_list, lpfc_ncmd, 4026 struct lpfc_io_buf, list); 4027 if (lpfc_ncmd) { 4028 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4029 lpfc_ncmd->data, 4030 lpfc_ncmd->dma_handle); 4031 kfree(lpfc_ncmd); 4032 } 4033 } 4034 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; 4035 } 4036 4037 /* update xris associated to remaining allocated nvme buffers */ 4038 lpfc_ncmd = NULL; 4039 lpfc_ncmd_next = NULL; 4040 phba->sli4_hba.io_xri_cnt = cnt; 4041 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 4042 &io_sgl_list, list) { 4043 lxri = lpfc_sli4_next_xritag(phba); 4044 if (lxri == NO_XRI) { 4045 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4046 "6075 Failed to allocate xri for " 4047 "nvme buffer\n"); 4048 rc = -ENOMEM; 4049 goto out_free_mem; 4050 } 4051 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 4052 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4053 } 4054 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); 4055 return 0; 4056 4057 out_free_mem: 4058 lpfc_io_free(phba); 4059 return rc; 4060 } 4061 4062 /** 4063 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec 4064 * @phba: pointer to lpfc hba data structure. 4065 * @num_to_alloc: The requested number of buffers to allocate. 4066 * 4067 * This routine allocates nvme buffers for a device with SLI-4 interface spec, 4068 * each nvme buffer contains all the necessary information needed to initiate 4069 * an I/O. After allocating up to @num_to_alloc IO buffers and putting 4070 * them on a list, it posts them to the port by using SGL block post. 4071 * 4072 * Return codes: 4073 * int - number of IO buffers that were allocated and posted. 4074 * 0 = failure, less than num_to_alloc is a partial failure. 4075 **/ 4076 int 4077 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4078 { 4079 struct lpfc_io_buf *lpfc_ncmd; 4080 struct lpfc_iocbq *pwqeq; 4081 uint16_t iotag, lxri = 0; 4082 int bcnt, num_posted; 4083 LIST_HEAD(prep_nblist); 4084 LIST_HEAD(post_nblist); 4085 LIST_HEAD(nvme_nblist); 4086 4087 phba->sli4_hba.io_xri_cnt = 0; 4088 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4089 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); 4090 if (!lpfc_ncmd) 4091 break; 4092 /* 4093 * Get memory from the pci pool to map the virt space to 4094 * pci bus space for an I/O.
The DMA buffer includes the 4095 * number of SGE's necessary to support the sg_tablesize. 4096 */ 4097 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4098 GFP_KERNEL, 4099 &lpfc_ncmd->dma_handle); 4100 if (!lpfc_ncmd->data) { 4101 kfree(lpfc_ncmd); 4102 break; 4103 } 4104 4105 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4106 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4107 } else { 4108 /* 4109 * 4K Page alignment is CRITICAL to BlockGuard, double 4110 * check to be sure. 4111 */ 4112 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4113 (((unsigned long)(lpfc_ncmd->data) & 4114 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4115 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4116 "3369 Memory alignment err: " 4117 "addr=%lx\n", 4118 (unsigned long)lpfc_ncmd->data); 4119 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4120 lpfc_ncmd->data, 4121 lpfc_ncmd->dma_handle); 4122 kfree(lpfc_ncmd); 4123 break; 4124 } 4125 } 4126 4127 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4128 4129 lxri = lpfc_sli4_next_xritag(phba); 4130 if (lxri == NO_XRI) { 4131 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4132 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4133 kfree(lpfc_ncmd); 4134 break; 4135 } 4136 pwqeq = &lpfc_ncmd->cur_iocbq; 4137 4138 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4139 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4140 if (iotag == 0) { 4141 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4142 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4143 kfree(lpfc_ncmd); 4144 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 4145 "6121 Failed to allocate IOTAG for" 4146 " XRI:0x%x\n", lxri); 4147 lpfc_sli4_free_xri(phba, lxri); 4148 break; 4149 } 4150 pwqeq->sli4_lxritag = lxri; 4151 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4152 pwqeq->context1 = lpfc_ncmd; 4153 4154 /* Initialize local short-hand pointers. 
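 * dma_sgl and dma_phys_sgl alias the start of the DMA buffer allocated
 * above, and cur_iocbq.context1 points back at the owning lpfc_io_buf.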
*/ 4155 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4156 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4157 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4158 spin_lock_init(&lpfc_ncmd->buf_lock); 4159 4160 /* add the nvme buffer to a post list */ 4161 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4162 phba->sli4_hba.io_xri_cnt++; 4163 } 4164 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4165 "6114 Allocate %d out of %d requested new NVME " 4166 "buffers\n", bcnt, num_to_alloc); 4167 4168 /* post the list of nvme buffer sgls to port if available */ 4169 if (!list_empty(&post_nblist)) 4170 num_posted = lpfc_sli4_post_io_sgl_list( 4171 phba, &post_nblist, bcnt); 4172 else 4173 num_posted = 0; 4174 4175 return num_posted; 4176 } 4177 4178 static uint64_t 4179 lpfc_get_wwpn(struct lpfc_hba *phba) 4180 { 4181 uint64_t wwn; 4182 int rc; 4183 LPFC_MBOXQ_t *mboxq; 4184 MAILBOX_t *mb; 4185 4186 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4187 GFP_KERNEL); 4188 if (!mboxq) 4189 return (uint64_t)-1; 4190 4191 /* First get WWN of HBA instance */ 4192 lpfc_read_nv(phba, mboxq); 4193 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4194 if (rc != MBX_SUCCESS) { 4195 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4196 "6019 Mailbox failed , mbxCmd x%x " 4197 "READ_NV, mbxStatus x%x\n", 4198 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4199 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4200 mempool_free(mboxq, phba->mbox_mem_pool); 4201 return (uint64_t) -1; 4202 } 4203 mb = &mboxq->u.mb; 4204 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4205 /* wwn is WWPN of HBA instance */ 4206 mempool_free(mboxq, phba->mbox_mem_pool); 4207 if (phba->sli_rev == LPFC_SLI_REV4) 4208 return be64_to_cpu(wwn); 4209 else 4210 return rol64(wwn, 32); 4211 } 4212 4213 /** 4214 * lpfc_create_port - Create an FC port 4215 * @phba: pointer to lpfc hba data structure. 4216 * @instance: a unique integer ID to this FC port. 4217 * @dev: pointer to the device data structure. 4218 * 4219 * This routine creates a FC port for the upper layer protocol. The FC port 4220 * can be created on top of either a physical port or a virtual port provided 4221 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4222 * and associates the FC port created before adding the shost into the SCSI 4223 * layer. 4224 * 4225 * Return codes 4226 * @vport - pointer to the virtual N_Port data structure. 4227 * NULL - port create failed. 
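 *
 * A minimal usage sketch (hypothetical, not a verbatim caller): the
 * physical port is created against the PCI device; any other @dev is
 * treated as an NPIV vport and gets the vport template, e.g.
 *
 *	vport = lpfc_create_port(phba, instance, &phba->pcidev->dev);
 *	if (!vport)
 *		return -ENOMEM;
 *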
4228 **/ 4229 struct lpfc_vport * 4230 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4231 { 4232 struct lpfc_vport *vport; 4233 struct Scsi_Host *shost = NULL; 4234 struct scsi_host_template *template; 4235 int error = 0; 4236 int i; 4237 uint64_t wwn; 4238 bool use_no_reset_hba = false; 4239 int rc; 4240 4241 if (lpfc_no_hba_reset_cnt) { 4242 if (phba->sli_rev < LPFC_SLI_REV4 && 4243 dev == &phba->pcidev->dev) { 4244 /* Reset the port first */ 4245 lpfc_sli_brdrestart(phba); 4246 rc = lpfc_sli_chipset_init(phba); 4247 if (rc) 4248 return NULL; 4249 } 4250 wwn = lpfc_get_wwpn(phba); 4251 } 4252 4253 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4254 if (wwn == lpfc_no_hba_reset[i]) { 4255 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4256 "6020 Setting use_no_reset port=%llx\n", 4257 wwn); 4258 use_no_reset_hba = true; 4259 break; 4260 } 4261 } 4262 4263 /* Seed template for SCSI host registration */ 4264 if (dev == &phba->pcidev->dev) { 4265 template = &phba->port_template; 4266 4267 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4268 /* Seed physical port template */ 4269 memcpy(template, &lpfc_template, sizeof(*template)); 4270 4271 if (use_no_reset_hba) { 4272 /* template is for a no reset SCSI Host */ 4273 template->max_sectors = 0xffff; 4274 template->eh_host_reset_handler = NULL; 4275 } 4276 4277 /* Template for all vports this physical port creates */ 4278 memcpy(&phba->vport_template, &lpfc_template, 4279 sizeof(*template)); 4280 phba->vport_template.max_sectors = 0xffff; 4281 phba->vport_template.shost_attrs = lpfc_vport_attrs; 4282 phba->vport_template.eh_bus_reset_handler = NULL; 4283 phba->vport_template.eh_host_reset_handler = NULL; 4284 phba->vport_template.vendor_id = 0; 4285 4286 /* Initialize the host templates with updated value */ 4287 if (phba->sli_rev == LPFC_SLI_REV4) { 4288 template->sg_tablesize = phba->cfg_scsi_seg_cnt; 4289 phba->vport_template.sg_tablesize = 4290 phba->cfg_scsi_seg_cnt; 4291 } else { 4292 template->sg_tablesize = phba->cfg_sg_seg_cnt; 4293 phba->vport_template.sg_tablesize = 4294 phba->cfg_sg_seg_cnt; 4295 } 4296 4297 } else { 4298 /* NVMET is for physical port only */ 4299 memcpy(template, &lpfc_template_nvme, 4300 sizeof(*template)); 4301 } 4302 } else { 4303 template = &phba->vport_template; 4304 } 4305 4306 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4307 if (!shost) 4308 goto out; 4309 4310 vport = (struct lpfc_vport *) shost->hostdata; 4311 vport->phba = phba; 4312 vport->load_flag |= FC_LOADING; 4313 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4314 vport->fc_rscn_flush = 0; 4315 lpfc_get_vport_cfgparam(vport); 4316 4317 /* Adjust value in vport */ 4318 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4319 4320 shost->unique_id = instance; 4321 shost->max_id = LPFC_MAX_TARGET; 4322 shost->max_lun = vport->cfg_max_luns; 4323 shost->this_id = -1; 4324 shost->max_cmd_len = 16; 4325 4326 if (phba->sli_rev == LPFC_SLI_REV4) { 4327 if (!phba->cfg_fcp_mq_threshold || 4328 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4329 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4330 4331 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4332 phba->cfg_fcp_mq_threshold); 4333 4334 shost->dma_boundary = 4335 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4336 4337 if (phba->cfg_xpsgl && !phba->nvmet_support) 4338 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4339 else 4340 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4341 } else 4342 /* SLI-3 has a limited number of hardware queues (3), 4343 * 
thus there is only one for FCP processing. 4344 */ 4345 shost->nr_hw_queues = 1; 4346 4347 /* 4348 * Set initial can_queue value since 0 is no longer supported and 4349 * scsi_add_host will fail. This will be adjusted later based on the 4350 * max xri value determined in hba setup. 4351 */ 4352 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4353 if (dev != &phba->pcidev->dev) { 4354 shost->transportt = lpfc_vport_transport_template; 4355 vport->port_type = LPFC_NPIV_PORT; 4356 } else { 4357 shost->transportt = lpfc_transport_template; 4358 vport->port_type = LPFC_PHYSICAL_PORT; 4359 } 4360 4361 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4362 "9081 CreatePort TMPLATE type %x TBLsize %d " 4363 "SEGcnt %d/%d\n", 4364 vport->port_type, shost->sg_tablesize, 4365 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4366 4367 /* Initialize all internally managed lists. */ 4368 INIT_LIST_HEAD(&vport->fc_nodes); 4369 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4370 spin_lock_init(&vport->work_port_lock); 4371 4372 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4373 4374 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4375 4376 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4377 4378 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4379 lpfc_setup_bg(phba, shost); 4380 4381 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4382 if (error) 4383 goto out_put_shost; 4384 4385 spin_lock_irq(&phba->port_list_lock); 4386 list_add_tail(&vport->listentry, &phba->port_list); 4387 spin_unlock_irq(&phba->port_list_lock); 4388 return vport; 4389 4390 out_put_shost: 4391 scsi_host_put(shost); 4392 out: 4393 return NULL; 4394 } 4395 4396 /** 4397 * destroy_port - destroy an FC port 4398 * @vport: pointer to an lpfc virtual N_Port data structure. 4399 * 4400 * This routine destroys a FC port from the upper layer protocol. All the 4401 * resources associated with the port are released. 4402 **/ 4403 void 4404 destroy_port(struct lpfc_vport *vport) 4405 { 4406 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4407 struct lpfc_hba *phba = vport->phba; 4408 4409 lpfc_debugfs_terminate(vport); 4410 fc_remove_host(shost); 4411 scsi_remove_host(shost); 4412 4413 spin_lock_irq(&phba->port_list_lock); 4414 list_del_init(&vport->listentry); 4415 spin_unlock_irq(&phba->port_list_lock); 4416 4417 lpfc_cleanup(vport); 4418 return; 4419 } 4420 4421 /** 4422 * lpfc_get_instance - Get a unique integer ID 4423 * 4424 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4425 * uses the kernel idr facility to perform the task. 4426 * 4427 * Return codes: 4428 * instance - a unique integer ID allocated as the new instance. 4429 * -1 - lpfc get instance failed. 4430 **/ 4431 int 4432 lpfc_get_instance(void) 4433 { 4434 int ret; 4435 4436 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4437 return ret < 0 ? -1 : ret; 4438 } 4439 4440 /** 4441 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4442 * @shost: pointer to SCSI host data structure. 4443 * @time: elapsed time of the scan in jiffies. 4444 * 4445 * This routine is called by the SCSI layer with a SCSI host to determine 4446 * whether the scan host is finished. 4447 * 4448 * Note: there is no scan_start function as adapter initialization will have 4449 * asynchronously kicked off the link initialization. 4450 * 4451 * Return codes 4452 * 0 - SCSI host scan is not over yet. 4453 * 1 - SCSI host scan is over. 
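 *
 * The scan is forced complete after 30 seconds, or after 15 seconds if the
 * link is still down; otherwise it completes once the vport is READY, no
 * discovery or PRLI activity is outstanding, no mailbox command is active,
 * and (when no targets were mapped) a 2 second grace period has elapsed.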
4454 **/ 4455 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4456 { 4457 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4458 struct lpfc_hba *phba = vport->phba; 4459 int stat = 0; 4460 4461 spin_lock_irq(shost->host_lock); 4462 4463 if (vport->load_flag & FC_UNLOADING) { 4464 stat = 1; 4465 goto finished; 4466 } 4467 if (time >= msecs_to_jiffies(30 * 1000)) { 4468 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4469 "0461 Scanning longer than 30 " 4470 "seconds. Continuing initialization\n"); 4471 stat = 1; 4472 goto finished; 4473 } 4474 if (time >= msecs_to_jiffies(15 * 1000) && 4475 phba->link_state <= LPFC_LINK_DOWN) { 4476 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4477 "0465 Link down longer than 15 " 4478 "seconds. Continuing initialization\n"); 4479 stat = 1; 4480 goto finished; 4481 } 4482 4483 if (vport->port_state != LPFC_VPORT_READY) 4484 goto finished; 4485 if (vport->num_disc_nodes || vport->fc_prli_sent) 4486 goto finished; 4487 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4488 goto finished; 4489 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4490 goto finished; 4491 4492 stat = 1; 4493 4494 finished: 4495 spin_unlock_irq(shost->host_lock); 4496 return stat; 4497 } 4498 4499 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4500 { 4501 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4502 struct lpfc_hba *phba = vport->phba; 4503 4504 fc_host_supported_speeds(shost) = 0; 4505 if (phba->lmt & LMT_128Gb) 4506 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4507 if (phba->lmt & LMT_64Gb) 4508 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4509 if (phba->lmt & LMT_32Gb) 4510 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4511 if (phba->lmt & LMT_16Gb) 4512 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4513 if (phba->lmt & LMT_10Gb) 4514 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4515 if (phba->lmt & LMT_8Gb) 4516 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4517 if (phba->lmt & LMT_4Gb) 4518 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4519 if (phba->lmt & LMT_2Gb) 4520 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4521 if (phba->lmt & LMT_1Gb) 4522 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4523 } 4524 4525 /** 4526 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4527 * @shost: pointer to SCSI host data structure. 4528 * 4529 * This routine initializes a given SCSI host attributes on a FC port. The 4530 * SCSI host can be either on top of a physical port or a virtual port. 4531 **/ 4532 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4533 { 4534 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4535 struct lpfc_hba *phba = vport->phba; 4536 /* 4537 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
4538 */ 4539 4540 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4541 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4542 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4543 4544 memset(fc_host_supported_fc4s(shost), 0, 4545 sizeof(fc_host_supported_fc4s(shost))); 4546 fc_host_supported_fc4s(shost)[2] = 1; 4547 fc_host_supported_fc4s(shost)[7] = 1; 4548 4549 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4550 sizeof fc_host_symbolic_name(shost)); 4551 4552 lpfc_host_supported_speeds_set(shost); 4553 4554 fc_host_maxframe_size(shost) = 4555 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4556 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4557 4558 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4559 4560 /* This value is also unchanging */ 4561 memset(fc_host_active_fc4s(shost), 0, 4562 sizeof(fc_host_active_fc4s(shost))); 4563 fc_host_active_fc4s(shost)[2] = 1; 4564 fc_host_active_fc4s(shost)[7] = 1; 4565 4566 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4567 spin_lock_irq(shost->host_lock); 4568 vport->load_flag &= ~FC_LOADING; 4569 spin_unlock_irq(shost->host_lock); 4570 } 4571 4572 /** 4573 * lpfc_stop_port_s3 - Stop SLI3 device port 4574 * @phba: pointer to lpfc hba data structure. 4575 * 4576 * This routine is invoked to stop an SLI3 device port, it stops the device 4577 * from generating interrupts and stops the device driver's timers for the 4578 * device. 4579 **/ 4580 static void 4581 lpfc_stop_port_s3(struct lpfc_hba *phba) 4582 { 4583 /* Clear all interrupt enable conditions */ 4584 writel(0, phba->HCregaddr); 4585 readl(phba->HCregaddr); /* flush */ 4586 /* Clear all pending interrupts */ 4587 writel(0xffffffff, phba->HAregaddr); 4588 readl(phba->HAregaddr); /* flush */ 4589 4590 /* Reset some HBA SLI setup states */ 4591 lpfc_stop_hba_timers(phba); 4592 phba->pport->work_port_events = 0; 4593 } 4594 4595 /** 4596 * lpfc_stop_port_s4 - Stop SLI4 device port 4597 * @phba: pointer to lpfc hba data structure. 4598 * 4599 * This routine is invoked to stop an SLI4 device port, it stops the device 4600 * from generating interrupts and stops the device driver's timers for the 4601 * device. 4602 **/ 4603 static void 4604 lpfc_stop_port_s4(struct lpfc_hba *phba) 4605 { 4606 /* Reset some HBA SLI4 setup states */ 4607 lpfc_stop_hba_timers(phba); 4608 if (phba->pport) 4609 phba->pport->work_port_events = 0; 4610 phba->sli4_hba.intr_enable = 0; 4611 } 4612 4613 /** 4614 * lpfc_stop_port - Wrapper function for stopping hba port 4615 * @phba: Pointer to HBA context object. 4616 * 4617 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4618 * the API jump table function pointer from the lpfc_hba struct. 4619 **/ 4620 void 4621 lpfc_stop_port(struct lpfc_hba *phba) 4622 { 4623 phba->lpfc_stop_port(phba); 4624 4625 if (phba->wq) 4626 flush_workqueue(phba->wq); 4627 } 4628 4629 /** 4630 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4631 * @phba: Pointer to hba for which this call is being executed. 4632 * 4633 * This routine starts the timer waiting for the FCF rediscovery to complete. 
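 * The timer is armed for LPFC_FCF_REDISCOVER_WAIT_TMO msecs; while it is
 * pending, FCF_AVAILABLE and FCF_SCAN_DONE are cleared and FCF_REDISC_PEND
 * is set so the timeout handler can tell the wait was not cancelled.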
4634 **/ 4635 void 4636 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4637 { 4638 unsigned long fcf_redisc_wait_tmo = 4639 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4640 /* Start fcf rediscovery wait period timer */ 4641 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4642 spin_lock_irq(&phba->hbalock); 4643 /* Allow action to new fcf asynchronous event */ 4644 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4645 /* Mark the FCF rediscovery pending state */ 4646 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4647 spin_unlock_irq(&phba->hbalock); 4648 } 4649 4650 /** 4651 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 4652 * @ptr: Map to lpfc_hba data structure pointer. 4653 * 4654 * This routine is invoked when waiting for FCF table rediscover has been 4655 * timed out. If new FCF record(s) has (have) been discovered during the 4656 * wait period, a new FCF event shall be added to the FCOE async event 4657 * list, and then worker thread shall be waked up for processing from the 4658 * worker thread context. 4659 **/ 4660 static void 4661 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4662 { 4663 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4664 4665 /* Don't send FCF rediscovery event if timer cancelled */ 4666 spin_lock_irq(&phba->hbalock); 4667 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4668 spin_unlock_irq(&phba->hbalock); 4669 return; 4670 } 4671 /* Clear FCF rediscovery timer pending flag */ 4672 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4673 /* FCF rediscovery event to worker thread */ 4674 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4675 spin_unlock_irq(&phba->hbalock); 4676 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4677 "2776 FCF rediscover quiescent timer expired\n"); 4678 /* wake up worker thread */ 4679 lpfc_worker_wake_up(phba); 4680 } 4681 4682 /** 4683 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4684 * @phba: pointer to lpfc hba data structure. 4685 * @acqe_link: pointer to the async link completion queue entry. 4686 * 4687 * This routine is to parse the SLI4 link-attention link fault code. 4688 **/ 4689 static void 4690 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4691 struct lpfc_acqe_link *acqe_link) 4692 { 4693 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4694 case LPFC_ASYNC_LINK_FAULT_NONE: 4695 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4696 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4697 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4698 break; 4699 default: 4700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4701 "0398 Unknown link fault code: x%x\n", 4702 bf_get(lpfc_acqe_link_fault, acqe_link)); 4703 break; 4704 } 4705 } 4706 4707 /** 4708 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4709 * @phba: pointer to lpfc hba data structure. 4710 * @acqe_link: pointer to the async link completion queue entry. 4711 * 4712 * This routine is to parse the SLI4 link attention type and translate it 4713 * into the base driver's link attention type coding. 4714 * 4715 * Return: Link attention type in terms of base driver's coding. 
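 * (DOWN and LOGICAL_DOWN map to LPFC_ATT_LINK_DOWN, LOGICAL_UP maps to
 * LPFC_ATT_LINK_UP, and a bare physical UP, like any unknown status, is
 * reported as LPFC_ATT_RESERVED and ignored by the caller.)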
4716 **/ 4717 static uint8_t 4718 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4719 struct lpfc_acqe_link *acqe_link) 4720 { 4721 uint8_t att_type; 4722 4723 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4724 case LPFC_ASYNC_LINK_STATUS_DOWN: 4725 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4726 att_type = LPFC_ATT_LINK_DOWN; 4727 break; 4728 case LPFC_ASYNC_LINK_STATUS_UP: 4729 /* Ignore physical link up events - wait for logical link up */ 4730 att_type = LPFC_ATT_RESERVED; 4731 break; 4732 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4733 att_type = LPFC_ATT_LINK_UP; 4734 break; 4735 default: 4736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4737 "0399 Invalid link attention type: x%x\n", 4738 bf_get(lpfc_acqe_link_status, acqe_link)); 4739 att_type = LPFC_ATT_RESERVED; 4740 break; 4741 } 4742 return att_type; 4743 } 4744 4745 /** 4746 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4747 * @phba: pointer to lpfc hba data structure. 4748 * 4749 * This routine is to get an SLI3 FC port's link speed in Mbps. 4750 * 4751 * Return: link speed in terms of Mbps. 4752 **/ 4753 uint32_t 4754 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4755 { 4756 uint32_t link_speed; 4757 4758 if (!lpfc_is_link_up(phba)) 4759 return 0; 4760 4761 if (phba->sli_rev <= LPFC_SLI_REV3) { 4762 switch (phba->fc_linkspeed) { 4763 case LPFC_LINK_SPEED_1GHZ: 4764 link_speed = 1000; 4765 break; 4766 case LPFC_LINK_SPEED_2GHZ: 4767 link_speed = 2000; 4768 break; 4769 case LPFC_LINK_SPEED_4GHZ: 4770 link_speed = 4000; 4771 break; 4772 case LPFC_LINK_SPEED_8GHZ: 4773 link_speed = 8000; 4774 break; 4775 case LPFC_LINK_SPEED_10GHZ: 4776 link_speed = 10000; 4777 break; 4778 case LPFC_LINK_SPEED_16GHZ: 4779 link_speed = 16000; 4780 break; 4781 default: 4782 link_speed = 0; 4783 } 4784 } else { 4785 if (phba->sli4_hba.link_state.logical_speed) 4786 link_speed = 4787 phba->sli4_hba.link_state.logical_speed; 4788 else 4789 link_speed = phba->sli4_hba.link_state.speed; 4790 } 4791 return link_speed; 4792 } 4793 4794 /** 4795 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4796 * @phba: pointer to lpfc hba data structure. 4797 * @evt_code: asynchronous event code. 4798 * @speed_code: asynchronous event link speed code. 4799 * 4800 * This routine is to parse the giving SLI4 async event link speed code into 4801 * value of Mbps for the link speed. 4802 * 4803 * Return: link speed in terms of Mbps. 
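 * Unrecognized event codes or speed codes are reported as 0 Mbps.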
4804 **/ 4805 static uint32_t 4806 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4807 uint8_t speed_code) 4808 { 4809 uint32_t port_speed; 4810 4811 switch (evt_code) { 4812 case LPFC_TRAILER_CODE_LINK: 4813 switch (speed_code) { 4814 case LPFC_ASYNC_LINK_SPEED_ZERO: 4815 port_speed = 0; 4816 break; 4817 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4818 port_speed = 10; 4819 break; 4820 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4821 port_speed = 100; 4822 break; 4823 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4824 port_speed = 1000; 4825 break; 4826 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4827 port_speed = 10000; 4828 break; 4829 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4830 port_speed = 20000; 4831 break; 4832 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4833 port_speed = 25000; 4834 break; 4835 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4836 port_speed = 40000; 4837 break; 4838 default: 4839 port_speed = 0; 4840 } 4841 break; 4842 case LPFC_TRAILER_CODE_FC: 4843 switch (speed_code) { 4844 case LPFC_FC_LA_SPEED_UNKNOWN: 4845 port_speed = 0; 4846 break; 4847 case LPFC_FC_LA_SPEED_1G: 4848 port_speed = 1000; 4849 break; 4850 case LPFC_FC_LA_SPEED_2G: 4851 port_speed = 2000; 4852 break; 4853 case LPFC_FC_LA_SPEED_4G: 4854 port_speed = 4000; 4855 break; 4856 case LPFC_FC_LA_SPEED_8G: 4857 port_speed = 8000; 4858 break; 4859 case LPFC_FC_LA_SPEED_10G: 4860 port_speed = 10000; 4861 break; 4862 case LPFC_FC_LA_SPEED_16G: 4863 port_speed = 16000; 4864 break; 4865 case LPFC_FC_LA_SPEED_32G: 4866 port_speed = 32000; 4867 break; 4868 case LPFC_FC_LA_SPEED_64G: 4869 port_speed = 64000; 4870 break; 4871 case LPFC_FC_LA_SPEED_128G: 4872 port_speed = 128000; 4873 break; 4874 default: 4875 port_speed = 0; 4876 } 4877 break; 4878 default: 4879 port_speed = 0; 4880 } 4881 return port_speed; 4882 } 4883 4884 /** 4885 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4886 * @phba: pointer to lpfc hba data structure. 4887 * @acqe_link: pointer to the async link completion queue entry. 4888 * 4889 * This routine is to handle the SLI4 asynchronous FCoE link event. 
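 * Only link-up and link-down attention types are processed. The handler
 * flushes outstanding ELS commands, blocks the ELS ring, records the link
 * state from the ACQE, and builds a READ_TOPOLOGY mailbox command: on FC
 * ports the command is actually issued, while on FCoE ports the mailbox
 * completion handler is invoked directly with faked topology fields.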
4890 **/ 4891 static void 4892 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4893 struct lpfc_acqe_link *acqe_link) 4894 { 4895 struct lpfc_dmabuf *mp; 4896 LPFC_MBOXQ_t *pmb; 4897 MAILBOX_t *mb; 4898 struct lpfc_mbx_read_top *la; 4899 uint8_t att_type; 4900 int rc; 4901 4902 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4903 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4904 return; 4905 phba->fcoe_eventtag = acqe_link->event_tag; 4906 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4907 if (!pmb) { 4908 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4909 "0395 The mboxq allocation failed\n"); 4910 return; 4911 } 4912 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4913 if (!mp) { 4914 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4915 "0396 The lpfc_dmabuf allocation failed\n"); 4916 goto out_free_pmb; 4917 } 4918 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4919 if (!mp->virt) { 4920 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4921 "0397 The mbuf allocation failed\n"); 4922 goto out_free_dmabuf; 4923 } 4924 4925 /* Cleanup any outstanding ELS commands */ 4926 lpfc_els_flush_all_cmd(phba); 4927 4928 /* Block ELS IOCBs until we have done process link event */ 4929 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4930 4931 /* Update link event statistics */ 4932 phba->sli.slistat.link_event++; 4933 4934 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4935 lpfc_read_topology(phba, pmb, mp); 4936 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4937 pmb->vport = phba->pport; 4938 4939 /* Keep the link status for extra SLI4 state machine reference */ 4940 phba->sli4_hba.link_state.speed = 4941 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4942 bf_get(lpfc_acqe_link_speed, acqe_link)); 4943 phba->sli4_hba.link_state.duplex = 4944 bf_get(lpfc_acqe_link_duplex, acqe_link); 4945 phba->sli4_hba.link_state.status = 4946 bf_get(lpfc_acqe_link_status, acqe_link); 4947 phba->sli4_hba.link_state.type = 4948 bf_get(lpfc_acqe_link_type, acqe_link); 4949 phba->sli4_hba.link_state.number = 4950 bf_get(lpfc_acqe_link_number, acqe_link); 4951 phba->sli4_hba.link_state.fault = 4952 bf_get(lpfc_acqe_link_fault, acqe_link); 4953 phba->sli4_hba.link_state.logical_speed = 4954 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4955 4956 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4957 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4958 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4959 "Logical speed:%dMbps Fault:%d\n", 4960 phba->sli4_hba.link_state.speed, 4961 phba->sli4_hba.link_state.topology, 4962 phba->sli4_hba.link_state.status, 4963 phba->sli4_hba.link_state.type, 4964 phba->sli4_hba.link_state.number, 4965 phba->sli4_hba.link_state.logical_speed, 4966 phba->sli4_hba.link_state.fault); 4967 /* 4968 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4969 * topology info. Note: Optional for non FC-AL ports. 4970 */ 4971 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4972 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4973 if (rc == MBX_NOT_FINISHED) 4974 goto out_free_dmabuf; 4975 return; 4976 } 4977 /* 4978 * For FCoE Mode: fill in all the topology information we need and call 4979 * the READ_TOPOLOGY completion routine to continue without actually 4980 * sending the READ_TOPOLOGY mailbox command to the port. 
4981 */ 4982 /* Initialize completion status */ 4983 mb = &pmb->u.mb; 4984 mb->mbxStatus = MBX_SUCCESS; 4985 4986 /* Parse port fault information field */ 4987 lpfc_sli4_parse_latt_fault(phba, acqe_link); 4988 4989 /* Parse and translate link attention fields */ 4990 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 4991 la->eventTag = acqe_link->event_tag; 4992 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 4993 bf_set(lpfc_mbx_read_top_link_spd, la, 4994 (bf_get(lpfc_acqe_link_speed, acqe_link))); 4995 4996 /* Fake the following irrelevant fields */ 4997 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 4998 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 4999 bf_set(lpfc_mbx_read_top_il, la, 0); 5000 bf_set(lpfc_mbx_read_top_pb, la, 0); 5001 bf_set(lpfc_mbx_read_top_fa, la, 0); 5002 bf_set(lpfc_mbx_read_top_mm, la, 0); 5003 5004 /* Invoke the lpfc_handle_latt mailbox command callback function */ 5005 lpfc_mbx_cmpl_read_topology(phba, pmb); 5006 5007 return; 5008 5009 out_free_dmabuf: 5010 kfree(mp); 5011 out_free_pmb: 5012 mempool_free(pmb, phba->mbox_mem_pool); 5013 } 5014 5015 /** 5016 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 5017 * topology. 5018 * @phba: pointer to lpfc hba data structure. 5019 * @evt_code: asynchronous event code. 5020 * @speed_code: asynchronous event link speed code. 5021 * 5022 * This routine is to parse the given SLI4 async event link speed code into 5023 * value of Read topology link speed. 5024 * 5025 * Return: link speed in terms of Read topology. 5026 **/ 5027 static uint8_t 5028 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5029 { 5030 uint8_t port_speed; 5031 5032 switch (speed_code) { 5033 case LPFC_FC_LA_SPEED_1G: 5034 port_speed = LPFC_LINK_SPEED_1GHZ; 5035 break; 5036 case LPFC_FC_LA_SPEED_2G: 5037 port_speed = LPFC_LINK_SPEED_2GHZ; 5038 break; 5039 case LPFC_FC_LA_SPEED_4G: 5040 port_speed = LPFC_LINK_SPEED_4GHZ; 5041 break; 5042 case LPFC_FC_LA_SPEED_8G: 5043 port_speed = LPFC_LINK_SPEED_8GHZ; 5044 break; 5045 case LPFC_FC_LA_SPEED_16G: 5046 port_speed = LPFC_LINK_SPEED_16GHZ; 5047 break; 5048 case LPFC_FC_LA_SPEED_32G: 5049 port_speed = LPFC_LINK_SPEED_32GHZ; 5050 break; 5051 case LPFC_FC_LA_SPEED_64G: 5052 port_speed = LPFC_LINK_SPEED_64GHZ; 5053 break; 5054 case LPFC_FC_LA_SPEED_128G: 5055 port_speed = LPFC_LINK_SPEED_128GHZ; 5056 break; 5057 case LPFC_FC_LA_SPEED_256G: 5058 port_speed = LPFC_LINK_SPEED_256GHZ; 5059 break; 5060 default: 5061 port_speed = 0; 5062 break; 5063 } 5064 5065 return port_speed; 5066 } 5067 5068 #define trunk_link_status(__idx)\ 5069 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 5070 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 5071 "Link up" : "Link down") : "NA" 5072 /* Did port __idx report an error */ 5073 #define trunk_port_fault(__idx)\ 5074 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 5075 (port_fault & (1 << __idx) ?
"YES" : "NO") : "NA" 5076 5077 static void 5078 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5079 struct lpfc_acqe_fc_la *acqe_fc) 5080 { 5081 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5082 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5083 5084 phba->sli4_hba.link_state.speed = 5085 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5086 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5087 5088 phba->sli4_hba.link_state.logical_speed = 5089 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5090 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5091 phba->fc_linkspeed = 5092 lpfc_async_link_speed_to_read_top( 5093 phba, 5094 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5095 5096 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5097 phba->trunk_link.link0.state = 5098 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5099 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5100 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5101 } 5102 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5103 phba->trunk_link.link1.state = 5104 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5105 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5106 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5107 } 5108 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5109 phba->trunk_link.link2.state = 5110 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5111 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5112 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5113 } 5114 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5115 phba->trunk_link.link3.state = 5116 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5117 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5118 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5119 } 5120 5121 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5122 "2910 Async FC Trunking Event - Speed:%d\n" 5123 "\tLogical speed:%d " 5124 "port0: %s port1: %s port2: %s port3: %s\n", 5125 phba->sli4_hba.link_state.speed, 5126 phba->sli4_hba.link_state.logical_speed, 5127 trunk_link_status(0), trunk_link_status(1), 5128 trunk_link_status(2), trunk_link_status(3)); 5129 5130 if (port_fault) 5131 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5132 "3202 trunk error:0x%x (%s) seen on port0:%s " 5133 /* 5134 * SLI-4: We have only 0xA error codes 5135 * defined as of now. print an appropriate 5136 * message in case driver needs to be updated. 5137 */ 5138 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5139 "UNDEFINED. update driver." : trunk_errmsg[err], 5140 trunk_port_fault(0), trunk_port_fault(1), 5141 trunk_port_fault(2), trunk_port_fault(3)); 5142 } 5143 5144 5145 /** 5146 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5147 * @phba: pointer to lpfc hba data structure. 5148 * @acqe_fc: pointer to the async fc completion queue entry. 5149 * 5150 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5151 * that the event was received and then issue a read_topology mailbox command so 5152 * that the rest of the driver will treat it the same as SLI3. 
5153 **/ 5154 static void 5155 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 5156 { 5157 struct lpfc_dmabuf *mp; 5158 LPFC_MBOXQ_t *pmb; 5159 MAILBOX_t *mb; 5160 struct lpfc_mbx_read_top *la; 5161 int rc; 5162 5163 if (bf_get(lpfc_trailer_type, acqe_fc) != 5164 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5165 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5166 "2895 Non FC link Event detected.(%d)\n", 5167 bf_get(lpfc_trailer_type, acqe_fc)); 5168 return; 5169 } 5170 5171 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5172 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 5173 lpfc_update_trunk_link_status(phba, acqe_fc); 5174 return; 5175 } 5176 5177 /* Keep the link status for extra SLI4 state machine reference */ 5178 phba->sli4_hba.link_state.speed = 5179 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5180 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5181 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 5182 phba->sli4_hba.link_state.topology = 5183 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 5184 phba->sli4_hba.link_state.status = 5185 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 5186 phba->sli4_hba.link_state.type = 5187 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 5188 phba->sli4_hba.link_state.number = 5189 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 5190 phba->sli4_hba.link_state.fault = 5191 bf_get(lpfc_acqe_link_fault, acqe_fc); 5192 5193 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5194 LPFC_FC_LA_TYPE_LINK_DOWN) 5195 phba->sli4_hba.link_state.logical_speed = 0; 5196 else if (!phba->sli4_hba.conf_trunk) 5197 phba->sli4_hba.link_state.logical_speed = 5198 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5199 5200 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5201 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 5202 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 5203 "%dMbps Fault:%d\n", 5204 phba->sli4_hba.link_state.speed, 5205 phba->sli4_hba.link_state.topology, 5206 phba->sli4_hba.link_state.status, 5207 phba->sli4_hba.link_state.type, 5208 phba->sli4_hba.link_state.number, 5209 phba->sli4_hba.link_state.logical_speed, 5210 phba->sli4_hba.link_state.fault); 5211 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5212 if (!pmb) { 5213 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5214 "2897 The mboxq allocation failed\n"); 5215 return; 5216 } 5217 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5218 if (!mp) { 5219 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5220 "2898 The lpfc_dmabuf allocation failed\n"); 5221 goto out_free_pmb; 5222 } 5223 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5224 if (!mp->virt) { 5225 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5226 "2899 The mbuf allocation failed\n"); 5227 goto out_free_dmabuf; 5228 } 5229 5230 /* Cleanup any outstanding ELS commands */ 5231 lpfc_els_flush_all_cmd(phba); 5232 5233 /* Block ELS IOCBs until we have done process link event */ 5234 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5235 5236 /* Update link event statistics */ 5237 phba->sli.slistat.link_event++; 5238 5239 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5240 lpfc_read_topology(phba, pmb, mp); 5241 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5242 pmb->vport = phba->pport; 5243 5244 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 5245 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 5246 5247 switch (phba->sli4_hba.link_state.status) { 5248 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 5249 phba->link_flag |= LS_MDS_LINK_DOWN; 5250 break; 5251 case 
LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5252 phba->link_flag |= LS_MDS_LOOPBACK; 5253 break; 5254 default: 5255 break; 5256 } 5257 5258 /* Initialize completion status */ 5259 mb = &pmb->u.mb; 5260 mb->mbxStatus = MBX_SUCCESS; 5261 5262 /* Parse port fault information field */ 5263 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5264 5265 /* Parse and translate link attention fields */ 5266 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5267 la->eventTag = acqe_fc->event_tag; 5268 5269 if (phba->sli4_hba.link_state.status == 5270 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5271 bf_set(lpfc_mbx_read_top_att_type, la, 5272 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5273 } else { 5274 bf_set(lpfc_mbx_read_top_att_type, la, 5275 LPFC_FC_LA_TYPE_LINK_DOWN); 5276 } 5277 /* Invoke the mailbox command callback function */ 5278 lpfc_mbx_cmpl_read_topology(phba, pmb); 5279 5280 return; 5281 } 5282 5283 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5284 if (rc == MBX_NOT_FINISHED) 5285 goto out_free_dmabuf; 5286 return; 5287 5288 out_free_dmabuf: 5289 kfree(mp); 5290 out_free_pmb: 5291 mempool_free(pmb, phba->mbox_mem_pool); 5292 } 5293 5294 /** 5295 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5296 * @phba: pointer to lpfc hba data structure. 5297 * @acqe_fc: pointer to the async SLI completion queue entry. 5298 * 5299 * This routine is to handle the SLI4 asynchronous SLI events. 5300 **/ 5301 static void 5302 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5303 { 5304 char port_name; 5305 char message[128]; 5306 uint8_t status; 5307 uint8_t evt_type; 5308 uint8_t operational = 0; 5309 struct temp_event temp_event_data; 5310 struct lpfc_acqe_misconfigured_event *misconfigured; 5311 struct Scsi_Host *shost; 5312 struct lpfc_vport **vports; 5313 int rc, i; 5314 5315 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5316 5317 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5318 "2901 Async SLI event - Type:%d, Event Data: x%08x " 5319 "x%08x x%08x x%08x\n", evt_type, 5320 acqe_sli->event_data1, acqe_sli->event_data2, 5321 acqe_sli->reserved, acqe_sli->trailer); 5322 5323 port_name = phba->Port[0]; 5324 if (port_name == 0x00) 5325 port_name = '?'; /* get port name is empty */ 5326 5327 switch (evt_type) { 5328 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5329 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5330 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5331 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5332 5333 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5334 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5335 acqe_sli->event_data1, port_name); 5336 5337 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5338 shost = lpfc_shost_from_vport(phba->pport); 5339 fc_host_post_vendor_event(shost, fc_get_event_number(), 5340 sizeof(temp_event_data), 5341 (char *)&temp_event_data, 5342 SCSI_NL_VID_TYPE_PCI 5343 | PCI_VENDOR_ID_EMULEX); 5344 break; 5345 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5346 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5347 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5348 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5349 5350 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5351 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5352 acqe_sli->event_data1, port_name); 5353 5354 shost = lpfc_shost_from_vport(phba->pport); 5355 fc_host_post_vendor_event(shost, fc_get_event_number(), 5356 sizeof(temp_event_data), 5357 (char *)&temp_event_data, 5358 SCSI_NL_VID_TYPE_PCI 5359 | PCI_VENDOR_ID_EMULEX); 5360 break; 5361 case 
LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5362 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5363 &acqe_sli->event_data1; 5364 5365 /* fetch the status for this port */ 5366 switch (phba->sli4_hba.lnk_info.lnk_no) { 5367 case LPFC_LINK_NUMBER_0: 5368 status = bf_get(lpfc_sli_misconfigured_port0_state, 5369 &misconfigured->theEvent); 5370 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5371 &misconfigured->theEvent); 5372 break; 5373 case LPFC_LINK_NUMBER_1: 5374 status = bf_get(lpfc_sli_misconfigured_port1_state, 5375 &misconfigured->theEvent); 5376 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5377 &misconfigured->theEvent); 5378 break; 5379 case LPFC_LINK_NUMBER_2: 5380 status = bf_get(lpfc_sli_misconfigured_port2_state, 5381 &misconfigured->theEvent); 5382 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5383 &misconfigured->theEvent); 5384 break; 5385 case LPFC_LINK_NUMBER_3: 5386 status = bf_get(lpfc_sli_misconfigured_port3_state, 5387 &misconfigured->theEvent); 5388 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5389 &misconfigured->theEvent); 5390 break; 5391 default: 5392 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5393 "3296 " 5394 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5395 "event: Invalid link %d", 5396 phba->sli4_hba.lnk_info.lnk_no); 5397 return; 5398 } 5399 5400 /* Skip if optic state unchanged */ 5401 if (phba->sli4_hba.lnk_info.optic_state == status) 5402 return; 5403 5404 switch (status) { 5405 case LPFC_SLI_EVENT_STATUS_VALID: 5406 sprintf(message, "Physical Link is functional"); 5407 break; 5408 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5409 sprintf(message, "Optics faulted/incorrectly " 5410 "installed/not installed - Reseat optics, " 5411 "if issue not resolved, replace."); 5412 break; 5413 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5414 sprintf(message, 5415 "Optics of two types installed - Remove one " 5416 "optic or install matching pair of optics."); 5417 break; 5418 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5419 sprintf(message, "Incompatible optics - Replace with " 5420 "compatible optics for card to function."); 5421 break; 5422 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5423 sprintf(message, "Unqualified optics - Replace with " 5424 "Avago optics for Warranty and Technical " 5425 "Support - Link is%s operational", 5426 (operational) ? " not" : ""); 5427 break; 5428 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5429 sprintf(message, "Uncertified optics - Replace with " 5430 "Avago-certified optics to enable link " 5431 "operation - Link is%s operational", 5432 (operational) ? 
" not" : ""); 5433 break; 5434 default: 5435 /* firmware is reporting a status we don't know about */ 5436 sprintf(message, "Unknown event status x%02x", status); 5437 break; 5438 } 5439 5440 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5441 rc = lpfc_sli4_read_config(phba); 5442 if (rc) { 5443 phba->lmt = 0; 5444 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5445 "3194 Unable to retrieve supported " 5446 "speeds, rc = 0x%x\n", rc); 5447 } 5448 vports = lpfc_create_vport_work_array(phba); 5449 if (vports != NULL) { 5450 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5451 i++) { 5452 shost = lpfc_shost_from_vport(vports[i]); 5453 lpfc_host_supported_speeds_set(shost); 5454 } 5455 } 5456 lpfc_destroy_vport_work_array(phba, vports); 5457 5458 phba->sli4_hba.lnk_info.optic_state = status; 5459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5460 "3176 Port Name %c %s\n", port_name, message); 5461 break; 5462 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5463 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5464 "3192 Remote DPort Test Initiated - " 5465 "Event Data1:x%08x Event Data2: x%08x\n", 5466 acqe_sli->event_data1, acqe_sli->event_data2); 5467 break; 5468 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 5469 /* Misconfigured WWN. Reports that the SLI Port is configured 5470 * to use FA-WWN, but the attached device doesn’t support it. 5471 * No driver action is required. 5472 * Event Data1 - N.A, Event Data2 - N.A 5473 */ 5474 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 5475 "2699 Misconfigured FA-WWN - Attached device does " 5476 "not support FA-WWN\n"); 5477 break; 5478 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 5479 /* EEPROM failure. No driver action is required */ 5480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5481 "2518 EEPROM failure - " 5482 "Event Data1: x%08x Event Data2: x%08x\n", 5483 acqe_sli->event_data1, acqe_sli->event_data2); 5484 break; 5485 default: 5486 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5487 "3193 Unrecognized SLI event, type: 0x%x", 5488 evt_type); 5489 break; 5490 } 5491 } 5492 5493 /** 5494 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5495 * @vport: pointer to vport data structure. 5496 * 5497 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5498 * response to a CVL event. 5499 * 5500 * Return the pointer to the ndlp with the vport if successful, otherwise 5501 * return NULL. 
5502 **/ 5503 static struct lpfc_nodelist * 5504 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 5505 { 5506 struct lpfc_nodelist *ndlp; 5507 struct Scsi_Host *shost; 5508 struct lpfc_hba *phba; 5509 5510 if (!vport) 5511 return NULL; 5512 phba = vport->phba; 5513 if (!phba) 5514 return NULL; 5515 ndlp = lpfc_findnode_did(vport, Fabric_DID); 5516 if (!ndlp) { 5517 /* Cannot find existing Fabric ndlp, so allocate a new one */ 5518 ndlp = lpfc_nlp_init(vport, Fabric_DID); 5519 if (!ndlp) 5520 return NULL; 5521 /* Set the node type */ 5522 ndlp->nlp_type |= NLP_FABRIC; 5523 /* Put ndlp onto node list */ 5524 lpfc_enqueue_node(vport, ndlp); 5525 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 5526 /* re-setup ndlp without removing from node list */ 5527 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 5528 if (!ndlp) 5529 return NULL; 5530 } 5531 if ((phba->pport->port_state < LPFC_FLOGI) && 5532 (phba->pport->port_state != LPFC_VPORT_FAILED)) 5533 return NULL; 5534 /* If virtual link is not yet instantiated ignore CVL */ 5535 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 5536 && (vport->port_state != LPFC_VPORT_FAILED)) 5537 return NULL; 5538 shost = lpfc_shost_from_vport(vport); 5539 if (!shost) 5540 return NULL; 5541 lpfc_linkdown_port(vport); 5542 lpfc_cleanup_pending_mbox(vport); 5543 spin_lock_irq(shost->host_lock); 5544 vport->fc_flag |= FC_VPORT_CVL_RCVD; 5545 spin_unlock_irq(shost->host_lock); 5546 5547 return ndlp; 5548 } 5549 5550 /** 5551 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 5552 * @phba: pointer to lpfc hba data structure. 5553 * 5554 * This routine is to perform Clear Virtual Link (CVL) on all vports in 5555 * response to an FCF dead event. 5556 **/ 5557 static void 5558 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 5559 { 5560 struct lpfc_vport **vports; 5561 int i; 5562 5563 vports = lpfc_create_vport_work_array(phba); 5564 if (vports) 5565 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 5566 lpfc_sli4_perform_vport_cvl(vports[i]); 5567 lpfc_destroy_vport_work_array(phba, vports); 5568 } 5569 5570 /** 5571 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 5572 * @phba: pointer to lpfc hba data structure. 5573 * @acqe_fip: pointer to the async FCoE FIP completion queue entry. 5574 * 5575 * This routine is to handle the SLI4 asynchronous FCoE FIP event. 
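 * The FIP event types handled here are: new FCF / FCF parameters modified, FCF table full, FCF dead, and Clear Virtual Link (CVL).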
5576 **/ 5577 static void 5578 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5579 struct lpfc_acqe_fip *acqe_fip) 5580 { 5581 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5582 int rc; 5583 struct lpfc_vport *vport; 5584 struct lpfc_nodelist *ndlp; 5585 struct Scsi_Host *shost; 5586 int active_vlink_present; 5587 struct lpfc_vport **vports; 5588 int i; 5589 5590 phba->fc_eventTag = acqe_fip->event_tag; 5591 phba->fcoe_eventtag = acqe_fip->event_tag; 5592 switch (event_type) { 5593 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5594 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5595 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5596 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5597 LOG_DISCOVERY, 5598 "2546 New FCF event, evt_tag:x%x, " 5599 "index:x%x\n", 5600 acqe_fip->event_tag, 5601 acqe_fip->index); 5602 else 5603 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5604 LOG_DISCOVERY, 5605 "2788 FCF param modified event, " 5606 "evt_tag:x%x, index:x%x\n", 5607 acqe_fip->event_tag, 5608 acqe_fip->index); 5609 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5610 /* 5611 * During period of FCF discovery, read the FCF 5612 * table record indexed by the event to update 5613 * FCF roundrobin failover eligible FCF bmask. 5614 */ 5615 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5616 LOG_DISCOVERY, 5617 "2779 Read FCF (x%x) for updating " 5618 "roundrobin FCF failover bmask\n", 5619 acqe_fip->index); 5620 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5621 } 5622 5623 /* If the FCF discovery is in progress, do nothing. */ 5624 spin_lock_irq(&phba->hbalock); 5625 if (phba->hba_flag & FCF_TS_INPROG) { 5626 spin_unlock_irq(&phba->hbalock); 5627 break; 5628 } 5629 /* If fast FCF failover rescan event is pending, do nothing */ 5630 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5631 spin_unlock_irq(&phba->hbalock); 5632 break; 5633 } 5634 5635 /* If the FCF has been in discovered state, do nothing. */ 5636 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5637 spin_unlock_irq(&phba->hbalock); 5638 break; 5639 } 5640 spin_unlock_irq(&phba->hbalock); 5641 5642 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5643 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5644 "2770 Start FCF table scan per async FCF " 5645 "event, evt_tag:x%x, index:x%x\n", 5646 acqe_fip->event_tag, acqe_fip->index); 5647 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5648 LPFC_FCOE_FCF_GET_FIRST); 5649 if (rc) 5650 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5651 "2547 Issue FCF scan read FCF mailbox " 5652 "command failed (x%x)\n", rc); 5653 break; 5654 5655 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5657 "2548 FCF Table full count 0x%x tag 0x%x\n", 5658 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5659 acqe_fip->event_tag); 5660 break; 5661 5662 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5663 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5664 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5665 "2549 FCF (x%x) disconnected from network, " 5666 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5667 /* 5668 * If we are in the middle of FCF failover process, clear 5669 * the corresponding FCF bit in the roundrobin bitmap. 
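 * This keeps the dead FCF from being selected again by the roundrobin failover logic.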
5670 */ 5671 spin_lock_irq(&phba->hbalock); 5672 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5673 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5674 spin_unlock_irq(&phba->hbalock); 5675 /* Update FLOGI FCF failover eligible FCF bmask */ 5676 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5677 break; 5678 } 5679 spin_unlock_irq(&phba->hbalock); 5680 5681 /* If the event is not for currently used fcf do nothing */ 5682 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5683 break; 5684 5685 /* 5686 * Otherwise, request the port to rediscover the entire FCF 5687 * table for a fast recovery from case that the current FCF 5688 * is no longer valid as we are not in the middle of FCF 5689 * failover process already. 5690 */ 5691 spin_lock_irq(&phba->hbalock); 5692 /* Mark the fast failover process in progress */ 5693 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5694 spin_unlock_irq(&phba->hbalock); 5695 5696 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5697 "2771 Start FCF fast failover process due to " 5698 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5699 "\n", acqe_fip->event_tag, acqe_fip->index); 5700 rc = lpfc_sli4_redisc_fcf_table(phba); 5701 if (rc) { 5702 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5703 LOG_DISCOVERY, 5704 "2772 Issue FCF rediscover mailbox " 5705 "command failed, fail through to FCF " 5706 "dead event\n"); 5707 spin_lock_irq(&phba->hbalock); 5708 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5709 spin_unlock_irq(&phba->hbalock); 5710 /* 5711 * Last resort will fail over by treating this 5712 * as a link down to FCF registration. 5713 */ 5714 lpfc_sli4_fcf_dead_failthrough(phba); 5715 } else { 5716 /* Reset FCF roundrobin bmask for new discovery */ 5717 lpfc_sli4_clear_fcf_rr_bmask(phba); 5718 /* 5719 * Handling fast FCF failover to a DEAD FCF event is 5720 * considered equalivant to receiving CVL to all vports. 5721 */ 5722 lpfc_sli4_perform_all_vport_cvl(phba); 5723 } 5724 break; 5725 case LPFC_FIP_EVENT_TYPE_CVL: 5726 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5727 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5728 "2718 Clear Virtual Link Received for VPI 0x%x" 5729 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5730 5731 vport = lpfc_find_vport_by_vpid(phba, 5732 acqe_fip->index); 5733 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5734 if (!ndlp) 5735 break; 5736 active_vlink_present = 0; 5737 5738 vports = lpfc_create_vport_work_array(phba); 5739 if (vports) { 5740 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5741 i++) { 5742 if ((!(vports[i]->fc_flag & 5743 FC_VPORT_CVL_RCVD)) && 5744 (vports[i]->port_state > LPFC_FDISC)) { 5745 active_vlink_present = 1; 5746 break; 5747 } 5748 } 5749 lpfc_destroy_vport_work_array(phba, vports); 5750 } 5751 5752 /* 5753 * Don't re-instantiate if vport is marked for deletion. 5754 * If we are here first then vport_delete is going to wait 5755 * for discovery to complete. 5756 */ 5757 if (!(vport->load_flag & FC_UNLOADING) && 5758 active_vlink_present) { 5759 /* 5760 * If there are other active VLinks present, 5761 * re-instantiate the Vlink using FDISC. 
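 * The FDISC is deferred roughly one second via the ndlp delay timer (NLP_DELAY_TMO, with ELS_CMD_FDISC as the last ELS command).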
5762 */ 5763 mod_timer(&ndlp->nlp_delayfunc, 5764 jiffies + msecs_to_jiffies(1000)); 5765 shost = lpfc_shost_from_vport(vport); 5766 spin_lock_irq(shost->host_lock); 5767 ndlp->nlp_flag |= NLP_DELAY_TMO; 5768 spin_unlock_irq(shost->host_lock); 5769 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5770 vport->port_state = LPFC_FDISC; 5771 } else { 5772 /* 5773 * Otherwise, we request port to rediscover 5774 * the entire FCF table for a fast recovery 5775 * from possible case that the current FCF 5776 * is no longer valid if we are not already 5777 * in the FCF failover process. 5778 */ 5779 spin_lock_irq(&phba->hbalock); 5780 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5781 spin_unlock_irq(&phba->hbalock); 5782 break; 5783 } 5784 /* Mark the fast failover process in progress */ 5785 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 5786 spin_unlock_irq(&phba->hbalock); 5787 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5788 LOG_DISCOVERY, 5789 "2773 Start FCF failover per CVL, " 5790 "evt_tag:x%x\n", acqe_fip->event_tag); 5791 rc = lpfc_sli4_redisc_fcf_table(phba); 5792 if (rc) { 5793 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5794 LOG_DISCOVERY, 5795 "2774 Issue FCF rediscover " 5796 "mailbox command failed, " 5797 "through to CVL event\n"); 5798 spin_lock_irq(&phba->hbalock); 5799 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5800 spin_unlock_irq(&phba->hbalock); 5801 /* 5802 * Last resort will be re-try on the 5803 * current registered FCF entry. 5804 */ 5805 lpfc_retry_pport_discovery(phba); 5806 } else 5807 /* 5808 * Reset FCF roundrobin bmask for new 5809 * discovery. 5810 */ 5811 lpfc_sli4_clear_fcf_rr_bmask(phba); 5812 } 5813 break; 5814 default: 5815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5816 "0288 Unknown FCoE event type 0x%x event tag " 5817 "0x%x\n", event_type, acqe_fip->event_tag); 5818 break; 5819 } 5820 } 5821 5822 /** 5823 * lpfc_sli4_async_dcbx_evt - Process the asynchronous DCBX event 5824 * @phba: pointer to lpfc hba data structure. 5825 * @acqe_dcbx: pointer to the async DCBX completion queue entry. 5826 * 5827 * This routine is to handle the SLI4 asynchronous DCBX event. 5828 **/ 5829 static void 5830 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 5831 struct lpfc_acqe_dcbx *acqe_dcbx) 5832 { 5833 phba->fc_eventTag = acqe_dcbx->event_tag; 5834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5835 "0290 The SLI4 DCBX asynchronous event is not " 5836 "handled yet\n"); 5837 } 5838 5839 /** 5840 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 5841 * @phba: pointer to lpfc hba data structure. 5842 * @acqe_grp5: pointer to the async grp5 completion queue entry. 5843 * 5844 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 5845 * is an asynchronous notification of a logical link speed change. The Port 5846 * reports the logical link speed in units of 10Mbps. 
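 * For example, a reported value of 100 corresponds to a logical link speed of 1000 Mbps.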
5847 **/ 5848 static void 5849 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 5850 struct lpfc_acqe_grp5 *acqe_grp5) 5851 { 5852 uint16_t prev_ll_spd; 5853 5854 phba->fc_eventTag = acqe_grp5->event_tag; 5855 phba->fcoe_eventtag = acqe_grp5->event_tag; 5856 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 5857 phba->sli4_hba.link_state.logical_speed = 5858 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 5859 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5860 "2789 GRP5 Async Event: Updating logical link speed " 5861 "from %dMbps to %dMbps\n", prev_ll_spd, 5862 phba->sli4_hba.link_state.logical_speed); 5863 } 5864 5865 /** 5866 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 5867 * @phba: pointer to lpfc hba data structure. 5868 * 5869 * This routine is invoked by the worker thread to process all the pending 5870 * SLI4 asynchronous events. 5871 **/ 5872 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 5873 { 5874 struct lpfc_cq_event *cq_event; 5875 5876 /* First, declare the async event has been handled */ 5877 spin_lock_irq(&phba->hbalock); 5878 phba->hba_flag &= ~ASYNC_EVENT; 5879 spin_unlock_irq(&phba->hbalock); 5880 /* Now, handle all the async events */ 5881 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 5882 /* Get the first event from the head of the event queue */ 5883 spin_lock_irq(&phba->hbalock); 5884 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 5885 cq_event, struct lpfc_cq_event, list); 5886 spin_unlock_irq(&phba->hbalock); 5887 /* Process the asynchronous event */ 5888 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 5889 case LPFC_TRAILER_CODE_LINK: 5890 lpfc_sli4_async_link_evt(phba, 5891 &cq_event->cqe.acqe_link); 5892 break; 5893 case LPFC_TRAILER_CODE_FCOE: 5894 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 5895 break; 5896 case LPFC_TRAILER_CODE_DCBX: 5897 lpfc_sli4_async_dcbx_evt(phba, 5898 &cq_event->cqe.acqe_dcbx); 5899 break; 5900 case LPFC_TRAILER_CODE_GRP5: 5901 lpfc_sli4_async_grp5_evt(phba, 5902 &cq_event->cqe.acqe_grp5); 5903 break; 5904 case LPFC_TRAILER_CODE_FC: 5905 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 5906 break; 5907 case LPFC_TRAILER_CODE_SLI: 5908 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5909 break; 5910 default: 5911 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5912 "1804 Invalid asynchronous event code: " 5913 "x%x\n", bf_get(lpfc_trailer_code, 5914 &cq_event->cqe.mcqe_cmpl)); 5915 break; 5916 } 5917 /* Free the completion event processed to the free pool */ 5918 lpfc_sli4_cq_event_release(phba, cq_event); 5919 } 5920 } 5921 5922 /** 5923 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 5924 * @phba: pointer to lpfc hba data structure. 5925 * 5926 * This routine is invoked by the worker thread to process FCF table 5927 * rediscovery pending completion event. 
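 * It clears FCF_REDISC_EVT, resets the fast failover record, sets FCF_REDISC_FOV, and then starts a full FCF table scan from the first entry.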
5928 **/ 5929 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5930 { 5931 int rc; 5932 5933 spin_lock_irq(&phba->hbalock); 5934 /* Clear FCF rediscovery timeout event */ 5935 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5936 /* Clear driver fast failover FCF record flag */ 5937 phba->fcf.failover_rec.flag = 0; 5938 /* Set state for FCF fast failover */ 5939 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5940 spin_unlock_irq(&phba->hbalock); 5941 5942 /* Scan FCF table from the first entry to re-discover SAN */ 5943 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5944 "2777 Start post-quiescent FCF table scan\n"); 5945 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5946 if (rc) 5947 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5948 "2747 Issue FCF scan read FCF mailbox " 5949 "command failed 0x%x\n", rc); 5950 } 5951 5952 /** 5953 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5954 * @phba: pointer to lpfc hba data structure. 5955 * @dev_grp: The HBA PCI-Device group number. 5956 * 5957 * This routine is invoked to set up the per HBA PCI-Device group function 5958 * API jump table entries. 5959 * 5960 * Return: 0 if success, otherwise -ENODEV 5961 **/ 5962 int 5963 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5964 { 5965 int rc; 5966 5967 /* Set up lpfc PCI-device group */ 5968 phba->pci_dev_grp = dev_grp; 5969 5970 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5971 if (dev_grp == LPFC_PCI_DEV_OC) 5972 phba->sli_rev = LPFC_SLI_REV4; 5973 5974 /* Set up device INIT API function jump table */ 5975 rc = lpfc_init_api_table_setup(phba, dev_grp); 5976 if (rc) 5977 return -ENODEV; 5978 /* Set up SCSI API function jump table */ 5979 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5980 if (rc) 5981 return -ENODEV; 5982 /* Set up SLI API function jump table */ 5983 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5984 if (rc) 5985 return -ENODEV; 5986 /* Set up MBOX API function jump table */ 5987 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5988 if (rc) 5989 return -ENODEV; 5990 5991 return 0; 5992 } 5993 5994 /** 5995 * lpfc_log_intr_mode - Log the active interrupt mode 5996 * @phba: pointer to lpfc hba data structure. 5997 * @intr_mode: active interrupt mode adopted. 5998 * 5999 * This routine is invoked to log the currently used active interrupt mode 6000 * to the device. 6001 **/ 6002 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 6003 { 6004 switch (intr_mode) { 6005 case 0: 6006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6007 "0470 Enable INTx interrupt mode.\n"); 6008 break; 6009 case 1: 6010 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6011 "0481 Enabled MSI interrupt mode.\n"); 6012 break; 6013 case 2: 6014 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6015 "0480 Enabled MSI-X interrupt mode.\n"); 6016 break; 6017 default: 6018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6019 "0482 Illegal interrupt mode.\n"); 6020 break; 6021 } 6022 return; 6023 } 6024 6025 /** 6026 * lpfc_cpumask_of_node_init - initializes cpumask of phba's NUMA node 6027 * @phba: Pointer to HBA context object. 
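 * Builds sli4_hba.numa_mask from all possible CPUs on the adapter's NUMA node; the mask is left empty when dev_to_node() reports NUMA_NO_NODE.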
6028 * 6029 **/ 6030 static void 6031 lpfc_cpumask_of_node_init(struct lpfc_hba *phba) 6032 { 6033 unsigned int cpu, numa_node; 6034 struct cpumask *numa_mask = &phba->sli4_hba.numa_mask; 6035 6036 cpumask_clear(numa_mask); 6037 6038 /* Check if we're a NUMA architecture */ 6039 numa_node = dev_to_node(&phba->pcidev->dev); 6040 if (numa_node == NUMA_NO_NODE) 6041 return; 6042 6043 for_each_possible_cpu(cpu) 6044 if (cpu_to_node(cpu) == numa_node) 6045 cpumask_set_cpu(cpu, numa_mask); 6046 } 6047 6048 /** 6049 * lpfc_enable_pci_dev - Enable a generic PCI device. 6050 * @phba: pointer to lpfc hba data structure. 6051 * 6052 * This routine is invoked to enable the PCI device that is common to all 6053 * PCI devices. 6054 * 6055 * Return codes 6056 * 0 - successful 6057 * other values - error 6058 **/ 6059 static int 6060 lpfc_enable_pci_dev(struct lpfc_hba *phba) 6061 { 6062 struct pci_dev *pdev; 6063 6064 /* Obtain PCI device reference */ 6065 if (!phba->pcidev) 6066 goto out_error; 6067 else 6068 pdev = phba->pcidev; 6069 /* Enable PCI device */ 6070 if (pci_enable_device_mem(pdev)) 6071 goto out_error; 6072 /* Request PCI resource for the device */ 6073 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 6074 goto out_disable_device; 6075 /* Set up device as PCI master and save state for EEH */ 6076 pci_set_master(pdev); 6077 pci_try_set_mwi(pdev); 6078 pci_save_state(pdev); 6079 6080 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 6081 if (pci_is_pcie(pdev)) 6082 pdev->needs_freset = 1; 6083 6084 return 0; 6085 6086 out_disable_device: 6087 pci_disable_device(pdev); 6088 out_error: 6089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6090 "1401 Failed to enable pci device\n"); 6091 return -ENODEV; 6092 } 6093 6094 /** 6095 * lpfc_disable_pci_dev - Disable a generic PCI device. 6096 * @phba: pointer to lpfc hba data structure. 6097 * 6098 * This routine is invoked to disable the PCI device that is common to all 6099 * PCI devices. 6100 **/ 6101 static void 6102 lpfc_disable_pci_dev(struct lpfc_hba *phba) 6103 { 6104 struct pci_dev *pdev; 6105 6106 /* Obtain PCI device reference */ 6107 if (!phba->pcidev) 6108 return; 6109 else 6110 pdev = phba->pcidev; 6111 /* Release PCI resource and disable PCI device */ 6112 pci_release_mem_regions(pdev); 6113 pci_disable_device(pdev); 6114 6115 return; 6116 } 6117 6118 /** 6119 * lpfc_reset_hba - Reset a hba 6120 * @phba: pointer to lpfc hba data structure. 6121 * 6122 * This routine is invoked to reset a hba device. It brings the HBA 6123 * offline, performs a board restart, and then brings the board back 6124 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 6125 * outstanding mailbox commands. 6126 **/ 6127 void 6128 lpfc_reset_hba(struct lpfc_hba *phba) 6129 { 6130 /* If resets are disabled then set error state and return. */ 6131 if (!phba->cfg_enable_hba_reset) { 6132 phba->link_state = LPFC_HBA_ERROR; 6133 return; 6134 } 6135 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 6136 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6137 else 6138 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 6139 lpfc_offline(phba); 6140 lpfc_sli_brdrestart(phba); 6141 lpfc_online(phba); 6142 lpfc_unblock_mgmt_io(phba); 6143 } 6144 6145 /** 6146 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 6147 * @phba: pointer to lpfc hba data structure. 6148 * 6149 * This function reads the SR-IOV extended capability of the physical 6150 * function 
and returns the maximum number of virtual functions that the 6151 * function supports, as reported in the capability's TotalVFs field. A 6152 * return value of 0 indicates that the device has no SR-IOV capability 6153 * and therefore supports no virtual functions. 6154 **/ 6155 uint16_t 6156 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 6157 { 6158 struct pci_dev *pdev = phba->pcidev; 6159 uint16_t nr_virtfn; 6160 int pos; 6161 6162 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 6163 if (pos == 0) 6164 return 0; 6165 6166 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 6167 return nr_virtfn; 6168 } 6169 6170 /** 6171 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 6172 * @phba: pointer to lpfc hba data structure. 6173 * @nr_vfn: number of virtual functions to be enabled. 6174 * 6175 * This function enables the PCI SR-IOV virtual functions to a physical 6176 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 6177 * enable the number of virtual functions to the physical function. As 6178 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 6179 * API call is not considered an error condition for most devices. 6180 **/ 6181 int 6182 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 6183 { 6184 struct pci_dev *pdev = phba->pcidev; 6185 uint16_t max_nr_vfn; 6186 int rc; 6187 6188 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 6189 if (nr_vfn > max_nr_vfn) { 6190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6191 "3057 Requested vfs (%d) greater than " 6192 "supported vfs (%d)", nr_vfn, max_nr_vfn); 6193 return -EINVAL; 6194 } 6195 6196 rc = pci_enable_sriov(pdev, nr_vfn); 6197 if (rc) { 6198 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6199 "2806 Failed to enable sriov on this device " 6200 "with vfn number nr_vf:%d, rc:%d\n", 6201 nr_vfn, rc); 6202 } else 6203 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6204 "2807 Successful enable sriov on this device " 6205 "with vfn number nr_vf:%d\n", nr_vfn); 6206 return rc; 6207 } 6208 6209 /** 6210 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 6211 * @phba: pointer to lpfc hba data structure. 6212 * 6213 * This routine is invoked to set up the driver internal resources before the 6214 * device specific resource setup to support the HBA device it attached to. 6215 * 6216 * Return codes 6217 * 0 - successful 6218 * other values - error 6219 **/ 6220 static int 6221 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 6222 { 6223 struct lpfc_sli *psli = &phba->sli; 6224 6225 /* 6226 * Driver resources common to all SLI revisions 6227 */ 6228 atomic_set(&phba->fast_event_count, 0); 6229 spin_lock_init(&phba->hbalock); 6230 6231 /* Initialize ndlp management spinlock */ 6232 spin_lock_init(&phba->ndlp_lock); 6233 6234 /* Initialize port_list spinlock */ 6235 spin_lock_init(&phba->port_list_lock); 6236 INIT_LIST_HEAD(&phba->port_list); 6237 6238 INIT_LIST_HEAD(&phba->work_list); 6239 init_waitqueue_head(&phba->wait_4_mlo_m_q); 6240 6241 /* Initialize the wait queue head for the kernel thread */ 6242 init_waitqueue_head(&phba->work_waitq); 6243 6244 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6245 "1403 Protocols supported %s %s %s\n", 6246 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 6247 "SCSI" : " "), 6248 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 6249 "NVME" : " "), 6250 (phba->nvmet_support ? 
"NVMET" : " ")); 6251 6252 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6253 spin_lock_init(&phba->scsi_buf_list_get_lock); 6254 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6255 spin_lock_init(&phba->scsi_buf_list_put_lock); 6256 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6257 6258 /* Initialize the fabric iocb list */ 6259 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6260 6261 /* Initialize list to save ELS buffers */ 6262 INIT_LIST_HEAD(&phba->elsbuf); 6263 6264 /* Initialize FCF connection rec list */ 6265 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6266 6267 /* Initialize OAS configuration list */ 6268 spin_lock_init(&phba->devicelock); 6269 INIT_LIST_HEAD(&phba->luns); 6270 6271 /* MBOX heartbeat timer */ 6272 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6273 /* Fabric block timer */ 6274 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6275 /* EA polling mode timer */ 6276 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6277 /* Heartbeat timer */ 6278 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6279 6280 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6281 6282 return 0; 6283 } 6284 6285 /** 6286 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6287 * @phba: pointer to lpfc hba data structure. 6288 * 6289 * This routine is invoked to set up the driver internal resources specific to 6290 * support the SLI-3 HBA device it attached to. 6291 * 6292 * Return codes 6293 * 0 - successful 6294 * other values - error 6295 **/ 6296 static int 6297 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6298 { 6299 int rc, entry_sz; 6300 6301 /* 6302 * Initialize timers used by driver 6303 */ 6304 6305 /* FCP polling mode timer */ 6306 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6307 6308 /* Host attention work mask setup */ 6309 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6310 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6311 6312 /* Get all the module params for configuring this host */ 6313 lpfc_get_cfgparam(phba); 6314 /* Set up phase-1 common device driver resources */ 6315 6316 rc = lpfc_setup_driver_resource_phase1(phba); 6317 if (rc) 6318 return -ENODEV; 6319 6320 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6321 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6322 /* check for menlo minimum sg count */ 6323 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6324 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6325 } 6326 6327 if (!phba->sli.sli3_ring) 6328 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6329 sizeof(struct lpfc_sli_ring), 6330 GFP_KERNEL); 6331 if (!phba->sli.sli3_ring) 6332 return -ENOMEM; 6333 6334 /* 6335 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6336 * used to create the sg_dma_buf_pool must be dynamically calculated. 6337 */ 6338 6339 if (phba->sli_rev == LPFC_SLI_REV4) 6340 entry_sz = sizeof(struct sli4_sge); 6341 else 6342 entry_sz = sizeof(struct ulp_bde64); 6343 6344 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6345 if (phba->cfg_enable_bg) { 6346 /* 6347 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6348 * the FCP rsp, and a BDE for each. Sice we have no control 6349 * over how many protection data segments the SCSI Layer 6350 * will hand us (ie: there could be one for every block 6351 * in the IO), we just allocate enough BDEs to accomidate 6352 * our max amount and we need to limit lpfc_sg_seg_cnt to 6353 * minimize the risk of running out. 
6354 */ 6355 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6356 sizeof(struct fcp_rsp) + 6357 (LPFC_MAX_SG_SEG_CNT * entry_sz); 6358 6359 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 6360 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 6361 6362 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 6363 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 6364 } else { 6365 /* 6366 * The scsi_buf for a regular I/O will hold the FCP cmnd, 6367 * the FCP rsp, a BDE for each, and a BDE for up to 6368 * cfg_sg_seg_cnt data segments. 6369 */ 6370 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6371 sizeof(struct fcp_rsp) + 6372 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 6373 6374 /* Total BDEs in BPL for scsi_sg_list */ 6375 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 6376 } 6377 6378 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6379 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6380 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6381 phba->cfg_total_seg_cnt); 6382 6383 phba->max_vpi = LPFC_MAX_VPI; 6384 /* This will be set to correct value after config_port mbox */ 6385 phba->max_vports = 0; 6386 6387 /* 6388 * Initialize the SLI Layer to run with lpfc HBAs. 6389 */ 6390 lpfc_sli_setup(phba); 6391 lpfc_sli_queue_init(phba); 6392 6393 /* Allocate device driver memory */ 6394 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 6395 return -ENOMEM; 6396 6397 phba->lpfc_sg_dma_buf_pool = 6398 dma_pool_create("lpfc_sg_dma_buf_pool", 6399 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 6400 BPL_ALIGN_SZ, 0); 6401 6402 if (!phba->lpfc_sg_dma_buf_pool) 6403 goto fail_free_mem; 6404 6405 phba->lpfc_cmd_rsp_buf_pool = 6406 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6407 &phba->pcidev->dev, 6408 sizeof(struct fcp_cmnd) + 6409 sizeof(struct fcp_rsp), 6410 BPL_ALIGN_SZ, 0); 6411 6412 if (!phba->lpfc_cmd_rsp_buf_pool) 6413 goto fail_free_dma_buf_pool; 6414 6415 /* 6416 * Enable sr-iov virtual functions if supported and configured 6417 * through the module parameter. 6418 */ 6419 if (phba->cfg_sriov_nr_virtfn > 0) { 6420 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6421 phba->cfg_sriov_nr_virtfn); 6422 if (rc) { 6423 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6424 "2808 Requested number of SR-IOV " 6425 "virtual functions (%d) is not " 6426 "supported\n", 6427 phba->cfg_sriov_nr_virtfn); 6428 phba->cfg_sriov_nr_virtfn = 0; 6429 } 6430 } 6431 6432 return 0; 6433 6434 fail_free_dma_buf_pool: 6435 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 6436 phba->lpfc_sg_dma_buf_pool = NULL; 6437 fail_free_mem: 6438 lpfc_mem_free(phba); 6439 return -ENOMEM; 6440 } 6441 6442 /** 6443 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 6444 * @phba: pointer to lpfc hba data structure. 6445 * 6446 * This routine is invoked to unset the driver internal resources set up 6447 * specific for supporting the SLI-3 HBA device it attached to. 6448 **/ 6449 static void 6450 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 6451 { 6452 /* Free device driver memory allocated */ 6453 lpfc_mem_free_all(phba); 6454 6455 return; 6456 } 6457 6458 /** 6459 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 6460 * @phba: pointer to lpfc hba data structure. 6461 * 6462 * This routine is invoked to set up the driver internal resources specific to 6463 * support the SLI-4 HBA device it attached to. 
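 * This includes timers, the lpfc_wq workqueue, the bootstrap mailbox, SGL and DMA buffer pools, the FCF roundrobin bitmask, per-EQ handles, and the CPU affinity map.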
6464 * 6465 * Return codes 6466 * 0 - successful 6467 * other values - error 6468 **/ 6469 static int 6470 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6471 { 6472 LPFC_MBOXQ_t *mboxq; 6473 MAILBOX_t *mb; 6474 int rc, i, max_buf_size; 6475 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 6476 struct lpfc_mqe *mqe; 6477 int longs; 6478 int extra; 6479 uint64_t wwn; 6480 u32 if_type; 6481 u32 if_fam; 6482 6483 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6484 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 6485 phba->sli4_hba.curr_disp_cpu = 0; 6486 lpfc_cpumask_of_node_init(phba); 6487 6488 /* Get all the module params for configuring this host */ 6489 lpfc_get_cfgparam(phba); 6490 6491 /* Set up phase-1 common device driver resources */ 6492 rc = lpfc_setup_driver_resource_phase1(phba); 6493 if (rc) 6494 return -ENODEV; 6495 6496 /* Before proceed, wait for POST done and device ready */ 6497 rc = lpfc_sli4_post_status_check(phba); 6498 if (rc) 6499 return -ENODEV; 6500 6501 /* Allocate all driver workqueues here */ 6502 6503 /* The lpfc_wq workqueue for deferred irq use */ 6504 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6505 6506 /* 6507 * Initialize timers used by driver 6508 */ 6509 6510 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6511 6512 /* FCF rediscover timer */ 6513 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6514 6515 /* 6516 * Control structure for handling external multi-buffer mailbox 6517 * command pass-through. 6518 */ 6519 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6520 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6521 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6522 6523 phba->max_vpi = LPFC_MAX_VPI; 6524 6525 /* This will be set to correct value after the read_config mbox */ 6526 phba->max_vports = 0; 6527 6528 /* Program the default value of vlan_id and fc_map */ 6529 phba->valid_vlan = 0; 6530 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6531 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6532 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6533 6534 /* 6535 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6536 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6537 * The WQ create will allocate the ring. 6538 */ 6539 6540 /* Initialize buffer queue management fields */ 6541 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6542 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6543 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6544 6545 /* 6546 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
6547 */ 6548 /* Initialize the Abort buffer list used by driver */ 6549 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 6550 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 6551 6552 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6553 /* Initialize the Abort nvme buffer list used by driver */ 6554 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6555 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6556 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6557 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 6558 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 6559 } 6560 6561 /* This abort list used by worker thread */ 6562 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6563 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6564 6565 /* 6566 * Initialize driver internal slow-path work queues 6567 */ 6568 6569 /* Driver internel slow-path CQ Event pool */ 6570 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6571 /* Response IOCB work queue list */ 6572 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6573 /* Asynchronous event CQ Event work queue list */ 6574 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6575 /* Fast-path XRI aborted CQ Event work queue list */ 6576 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 6577 /* Slow-path XRI aborted CQ Event work queue list */ 6578 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6579 /* Receive queue CQ Event work queue list */ 6580 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6581 6582 /* Initialize extent block lists. */ 6583 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6584 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6585 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6586 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6587 6588 /* Initialize mboxq lists. If the early init routines fail 6589 * these lists need to be correctly initialized. 6590 */ 6591 INIT_LIST_HEAD(&phba->sli.mboxq); 6592 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6593 6594 /* initialize optic_state to 0xFF */ 6595 phba->sli4_hba.lnk_info.optic_state = 0xff; 6596 6597 /* Allocate device driver memory */ 6598 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6599 if (rc) 6600 return -ENOMEM; 6601 6602 /* IF Type 2 ports get initialized now. */ 6603 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6604 LPFC_SLI_INTF_IF_TYPE_2) { 6605 rc = lpfc_pci_function_reset(phba); 6606 if (unlikely(rc)) { 6607 rc = -ENODEV; 6608 goto out_free_mem; 6609 } 6610 phba->temp_sensor_support = 1; 6611 } 6612 6613 /* Create the bootstrap mailbox command */ 6614 rc = lpfc_create_bootstrap_mbox(phba); 6615 if (unlikely(rc)) 6616 goto out_free_mem; 6617 6618 /* Set up the host's endian order with the device. */ 6619 rc = lpfc_setup_endian_order(phba); 6620 if (unlikely(rc)) 6621 goto out_free_bsmbx; 6622 6623 /* Set up the hba's configuration parameters. */ 6624 rc = lpfc_sli4_read_config(phba); 6625 if (unlikely(rc)) 6626 goto out_free_bsmbx; 6627 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6628 if (unlikely(rc)) 6629 goto out_free_bsmbx; 6630 6631 /* IF Type 0 ports get initialized now. 
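 * Unlike IF Type 2 ports, which were reset before the bootstrap mailbox was created, IF Type 0 ports are reset only after READ_CONFIG completes.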
*/ 6632 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6633 LPFC_SLI_INTF_IF_TYPE_0) { 6634 rc = lpfc_pci_function_reset(phba); 6635 if (unlikely(rc)) 6636 goto out_free_bsmbx; 6637 } 6638 6639 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6640 GFP_KERNEL); 6641 if (!mboxq) { 6642 rc = -ENOMEM; 6643 goto out_free_bsmbx; 6644 } 6645 6646 /* Check for NVMET being configured */ 6647 phba->nvmet_support = 0; 6648 if (lpfc_enable_nvmet_cnt) { 6649 6650 /* First get WWN of HBA instance */ 6651 lpfc_read_nv(phba, mboxq); 6652 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6653 if (rc != MBX_SUCCESS) { 6654 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6655 "6016 Mailbox failed , mbxCmd x%x " 6656 "READ_NV, mbxStatus x%x\n", 6657 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6658 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6659 mempool_free(mboxq, phba->mbox_mem_pool); 6660 rc = -EIO; 6661 goto out_free_bsmbx; 6662 } 6663 mb = &mboxq->u.mb; 6664 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6665 sizeof(uint64_t)); 6666 wwn = cpu_to_be64(wwn); 6667 phba->sli4_hba.wwnn.u.name = wwn; 6668 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6669 sizeof(uint64_t)); 6670 /* wwn is WWPN of HBA instance */ 6671 wwn = cpu_to_be64(wwn); 6672 phba->sli4_hba.wwpn.u.name = wwn; 6673 6674 /* Check to see if it matches any module parameter */ 6675 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6676 if (wwn == lpfc_enable_nvmet[i]) { 6677 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6678 if (lpfc_nvmet_mem_alloc(phba)) 6679 break; 6680 6681 phba->nvmet_support = 1; /* a match */ 6682 6683 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6684 "6017 NVME Target %016llx\n", 6685 wwn); 6686 #else 6687 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6688 "6021 Can't enable NVME Target." 6689 " NVME_TARGET_FC infrastructure" 6690 " is not in kernel\n"); 6691 #endif 6692 /* Not supported for NVMET */ 6693 phba->cfg_xri_rebalancing = 0; 6694 break; 6695 } 6696 } 6697 } 6698 6699 lpfc_nvme_mod_param_dep(phba); 6700 6701 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6702 lpfc_supported_pages(mboxq); 6703 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6704 if (!rc) { 6705 mqe = &mboxq->u.mqe; 6706 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6707 LPFC_MAX_SUPPORTED_PAGES); 6708 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6709 switch (pn_page[i]) { 6710 case LPFC_SLI4_PARAMETERS: 6711 phba->sli4_hba.pc_sli4_params.supported = 1; 6712 break; 6713 default: 6714 break; 6715 } 6716 } 6717 /* Read the port's SLI4 Parameters capabilities if supported. */ 6718 if (phba->sli4_hba.pc_sli4_params.supported) 6719 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6720 if (rc) { 6721 mempool_free(mboxq, phba->mbox_mem_pool); 6722 rc = -EIO; 6723 goto out_free_bsmbx; 6724 } 6725 } 6726 6727 /* 6728 * Get sli4 parameters that override parameters from Port capabilities. 6729 * If this call fails, it isn't critical unless the SLI4 parameters come 6730 * back in conflict. 
6731 */ 6732 rc = lpfc_get_sli4_parameters(phba, mboxq); 6733 if (rc) { 6734 if_type = bf_get(lpfc_sli_intf_if_type, 6735 &phba->sli4_hba.sli_intf); 6736 if_fam = bf_get(lpfc_sli_intf_sli_family, 6737 &phba->sli4_hba.sli_intf); 6738 if (phba->sli4_hba.extents_in_use && 6739 phba->sli4_hba.rpi_hdrs_in_use) { 6740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6741 "2999 Unsupported SLI4 Parameters " 6742 "Extents and RPI headers enabled.\n"); 6743 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6744 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6745 mempool_free(mboxq, phba->mbox_mem_pool); 6746 rc = -EIO; 6747 goto out_free_bsmbx; 6748 } 6749 } 6750 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6751 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6752 mempool_free(mboxq, phba->mbox_mem_pool); 6753 rc = -EIO; 6754 goto out_free_bsmbx; 6755 } 6756 } 6757 6758 /* 6759 * 1 for cmd, 1 for rsp, NVME adds an extra one 6760 * for boundary conditions in its max_sgl_segment template. 6761 */ 6762 extra = 2; 6763 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6764 extra++; 6765 6766 /* 6767 * It doesn't matter what family our adapter is in, we are 6768 * limited to 2 Pages, 512 SGEs, for our SGL. 6769 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6770 */ 6771 max_buf_size = (2 * SLI4_PAGE_SIZE); 6772 6773 /* 6774 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6775 * used to create the sg_dma_buf_pool must be calculated. 6776 */ 6777 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6778 /* Both cfg_enable_bg and cfg_external_dif code paths */ 6779 6780 /* 6781 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6782 * the FCP rsp, and a SGE. Sice we have no control 6783 * over how many protection segments the SCSI Layer 6784 * will hand us (ie: there could be one for every block 6785 * in the IO), just allocate enough SGEs to accomidate 6786 * our max amount and we need to limit lpfc_sg_seg_cnt 6787 * to minimize the risk of running out. 6788 */ 6789 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6790 sizeof(struct fcp_rsp) + max_buf_size; 6791 6792 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6793 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6794 6795 /* 6796 * If supporting DIF, reduce the seg count for scsi to 6797 * allow room for the DIF sges. 6798 */ 6799 if (phba->cfg_enable_bg && 6800 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6801 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6802 else 6803 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6804 6805 } else { 6806 /* 6807 * The scsi_buf for a regular I/O holds the FCP cmnd, 6808 * the FCP rsp, a SGE for each, and a SGE for up to 6809 * cfg_sg_seg_cnt data segments. 6810 */ 6811 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6812 sizeof(struct fcp_rsp) + 6813 ((phba->cfg_sg_seg_cnt + extra) * 6814 sizeof(struct sli4_sge)); 6815 6816 /* Total SGEs for scsi_sg_list */ 6817 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 6818 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6819 6820 /* 6821 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 6822 * need to post 1 page for the SGL. 
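 * (Assuming a 16-byte struct sli4_sge, 256 SGEs fill exactly one 4KB SLI4 page: 256 * 16 = 4096 bytes.)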
6823 */ 6824 } 6825 6826 if (phba->cfg_xpsgl && !phba->nvmet_support) 6827 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 6828 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 6829 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 6830 else 6831 phba->cfg_sg_dma_buf_size = 6832 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 6833 6834 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 6835 sizeof(struct sli4_sge); 6836 6837 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 6838 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6839 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 6840 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 6841 "6300 Reducing NVME sg segment " 6842 "cnt to %d\n", 6843 LPFC_MAX_NVME_SEG_CNT); 6844 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 6845 } else 6846 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6847 } 6848 6849 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6850 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6851 "total:%d scsi:%d nvme:%d\n", 6852 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6853 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 6854 phba->cfg_nvme_seg_cnt); 6855 6856 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 6857 i = phba->cfg_sg_dma_buf_size; 6858 else 6859 i = SLI4_PAGE_SIZE; 6860 6861 phba->lpfc_sg_dma_buf_pool = 6862 dma_pool_create("lpfc_sg_dma_buf_pool", 6863 &phba->pcidev->dev, 6864 phba->cfg_sg_dma_buf_size, 6865 i, 0); 6866 if (!phba->lpfc_sg_dma_buf_pool) 6867 goto out_free_bsmbx; 6868 6869 phba->lpfc_cmd_rsp_buf_pool = 6870 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6871 &phba->pcidev->dev, 6872 sizeof(struct fcp_cmnd) + 6873 sizeof(struct fcp_rsp), 6874 i, 0); 6875 if (!phba->lpfc_cmd_rsp_buf_pool) 6876 goto out_free_sg_dma_buf; 6877 6878 mempool_free(mboxq, phba->mbox_mem_pool); 6879 6880 /* Verify OAS is supported */ 6881 lpfc_sli4_oas_verify(phba); 6882 6883 /* Verify RAS support on adapter */ 6884 lpfc_sli4_ras_init(phba); 6885 6886 /* Verify all the SLI4 queues */ 6887 rc = lpfc_sli4_queue_verify(phba); 6888 if (rc) 6889 goto out_free_cmd_rsp_buf; 6890 6891 /* Create driver internal CQE event pool */ 6892 rc = lpfc_sli4_cq_event_pool_create(phba); 6893 if (rc) 6894 goto out_free_cmd_rsp_buf; 6895 6896 /* Initialize sgl lists per host */ 6897 lpfc_init_sgl_list(phba); 6898 6899 /* Allocate and initialize active sgl array */ 6900 rc = lpfc_init_active_sgl_array(phba); 6901 if (rc) { 6902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6903 "1430 Failed to initialize sgl list.\n"); 6904 goto out_destroy_cq_event_pool; 6905 } 6906 rc = lpfc_sli4_init_rpi_hdrs(phba); 6907 if (rc) { 6908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6909 "1432 Failed to initialize rpi headers.\n"); 6910 goto out_free_active_sgl; 6911 } 6912 6913 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6914 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6915 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6916 GFP_KERNEL); 6917 if (!phba->fcf.fcf_rr_bmask) { 6918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6919 "2759 Failed allocate memory for FCF round " 6920 "robin failover bmask\n"); 6921 rc = -ENOMEM; 6922 goto out_remove_rpi_hdrs; 6923 } 6924 6925 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 6926 sizeof(struct lpfc_hba_eq_hdl), 6927 GFP_KERNEL); 6928 if (!phba->sli4_hba.hba_eq_hdl) { 6929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6930 "2572 Failed allocate memory for " 6931 "fast-path per-EQ handle array\n"); 6932 rc = -ENOMEM; 6933 goto 
out_free_fcf_rr_bmask; 6934 } 6935 6936 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 6937 sizeof(struct lpfc_vector_map_info), 6938 GFP_KERNEL); 6939 if (!phba->sli4_hba.cpu_map) { 6940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6941 "3327 Failed allocate memory for msi-x " 6942 "interrupt vector mapping\n"); 6943 rc = -ENOMEM; 6944 goto out_free_hba_eq_hdl; 6945 } 6946 6947 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 6948 if (!phba->sli4_hba.eq_info) { 6949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6950 "3321 Failed allocation for per_cpu stats\n"); 6951 rc = -ENOMEM; 6952 goto out_free_hba_cpu_map; 6953 } 6954 6955 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 6956 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 6957 if (!phba->sli4_hba.c_stat) { 6958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6959 "3332 Failed allocating per cpu hdwq stats\n"); 6960 rc = -ENOMEM; 6961 goto out_free_hba_eq_info; 6962 } 6963 #endif 6964 6965 /* 6966 * Enable sr-iov virtual functions if supported and configured 6967 * through the module parameter. 6968 */ 6969 if (phba->cfg_sriov_nr_virtfn > 0) { 6970 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6971 phba->cfg_sriov_nr_virtfn); 6972 if (rc) { 6973 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6974 "3020 Requested number of SR-IOV " 6975 "virtual functions (%d) is not " 6976 "supported\n", 6977 phba->cfg_sriov_nr_virtfn); 6978 phba->cfg_sriov_nr_virtfn = 0; 6979 } 6980 } 6981 6982 return 0; 6983 6984 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 6985 out_free_hba_eq_info: 6986 free_percpu(phba->sli4_hba.eq_info); 6987 #endif 6988 out_free_hba_cpu_map: 6989 kfree(phba->sli4_hba.cpu_map); 6990 out_free_hba_eq_hdl: 6991 kfree(phba->sli4_hba.hba_eq_hdl); 6992 out_free_fcf_rr_bmask: 6993 kfree(phba->fcf.fcf_rr_bmask); 6994 out_remove_rpi_hdrs: 6995 lpfc_sli4_remove_rpi_hdrs(phba); 6996 out_free_active_sgl: 6997 lpfc_free_active_sgl(phba); 6998 out_destroy_cq_event_pool: 6999 lpfc_sli4_cq_event_pool_destroy(phba); 7000 out_free_cmd_rsp_buf: 7001 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 7002 phba->lpfc_cmd_rsp_buf_pool = NULL; 7003 out_free_sg_dma_buf: 7004 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 7005 phba->lpfc_sg_dma_buf_pool = NULL; 7006 out_free_bsmbx: 7007 lpfc_destroy_bootstrap_mbox(phba); 7008 out_free_mem: 7009 lpfc_mem_free(phba); 7010 return rc; 7011 } 7012 7013 /** 7014 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 7015 * @phba: pointer to lpfc hba data structure. 7016 * 7017 * This routine is invoked to unset the driver internal resources set up 7018 * specific for supporting the SLI-4 HBA device it attached to. 7019 **/ 7020 static void 7021 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 7022 { 7023 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 7024 7025 free_percpu(phba->sli4_hba.eq_info); 7026 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 7027 free_percpu(phba->sli4_hba.c_stat); 7028 #endif 7029 7030 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 7031 kfree(phba->sli4_hba.cpu_map); 7032 phba->sli4_hba.num_possible_cpu = 0; 7033 phba->sli4_hba.num_present_cpu = 0; 7034 phba->sli4_hba.curr_disp_cpu = 0; 7035 cpumask_clear(&phba->sli4_hba.numa_mask); 7036 7037 /* Free memory allocated for fast-path work queue handles */ 7038 kfree(phba->sli4_hba.hba_eq_hdl); 7039 7040 /* Free the allocated rpi headers. 
*/ 7041 lpfc_sli4_remove_rpi_hdrs(phba); 7042 lpfc_sli4_remove_rpis(phba); 7043 7044 /* Free eligible FCF index bmask */ 7045 kfree(phba->fcf.fcf_rr_bmask); 7046 7047 /* Free the ELS sgl list */ 7048 lpfc_free_active_sgl(phba); 7049 lpfc_free_els_sgl_list(phba); 7050 lpfc_free_nvmet_sgl_list(phba); 7051 7052 /* Free the completion queue EQ event pool */ 7053 lpfc_sli4_cq_event_release_all(phba); 7054 lpfc_sli4_cq_event_pool_destroy(phba); 7055 7056 /* Release resource identifiers. */ 7057 lpfc_sli4_dealloc_resource_identifiers(phba); 7058 7059 /* Free the bsmbx region. */ 7060 lpfc_destroy_bootstrap_mbox(phba); 7061 7062 /* Free the SLI Layer memory with SLI4 HBAs */ 7063 lpfc_mem_free_all(phba); 7064 7065 /* Free the current connect table */ 7066 list_for_each_entry_safe(conn_entry, next_conn_entry, 7067 &phba->fcf_conn_rec_list, list) { 7068 list_del_init(&conn_entry->list); 7069 kfree(conn_entry); 7070 } 7071 7072 return; 7073 } 7074 7075 /** 7076 * lpfc_init_api_table_setup - Set up init api function jump table 7077 * @phba: The hba struct for which this call is being executed. 7078 * @dev_grp: The HBA PCI-Device group number. 7079 * 7080 * This routine sets up the device INIT interface API function jump table 7081 * in @phba struct. 7082 * 7083 * Returns: 0 - success, -ENODEV - failure. 7084 **/ 7085 int 7086 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7087 { 7088 phba->lpfc_hba_init_link = lpfc_hba_init_link; 7089 phba->lpfc_hba_down_link = lpfc_hba_down_link; 7090 phba->lpfc_selective_reset = lpfc_selective_reset; 7091 switch (dev_grp) { 7092 case LPFC_PCI_DEV_LP: 7093 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 7094 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 7095 phba->lpfc_stop_port = lpfc_stop_port_s3; 7096 break; 7097 case LPFC_PCI_DEV_OC: 7098 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 7099 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 7100 phba->lpfc_stop_port = lpfc_stop_port_s4; 7101 break; 7102 default: 7103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7104 "1431 Invalid HBA PCI-device group: 0x%x\n", 7105 dev_grp); 7106 return -ENODEV; 7107 break; 7108 } 7109 return 0; 7110 } 7111 7112 /** 7113 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 7114 * @phba: pointer to lpfc hba data structure. 7115 * 7116 * This routine is invoked to set up the driver internal resources after the 7117 * device specific resource setup to support the HBA device it attached to. 7118 * 7119 * Return codes 7120 * 0 - successful 7121 * other values - error 7122 **/ 7123 static int 7124 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 7125 { 7126 int error; 7127 7128 /* Startup the kernel thread for this host adapter. */ 7129 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7130 "lpfc_worker_%d", phba->brd_no); 7131 if (IS_ERR(phba->worker_thread)) { 7132 error = PTR_ERR(phba->worker_thread); 7133 return error; 7134 } 7135 7136 return 0; 7137 } 7138 7139 /** 7140 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 7141 * @phba: pointer to lpfc hba data structure. 7142 * 7143 * This routine is invoked to unset the driver internal resources set up after 7144 * the device specific resource setup for supporting the HBA device it 7145 * attached to. 
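 * It flushes and destroys the lpfc_wq workqueue and stops the worker thread created in phase 2.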
7146 **/ 7147 static void 7148 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 7149 { 7150 if (phba->wq) { 7151 flush_workqueue(phba->wq); 7152 destroy_workqueue(phba->wq); 7153 phba->wq = NULL; 7154 } 7155 7156 /* Stop kernel worker thread */ 7157 if (phba->worker_thread) 7158 kthread_stop(phba->worker_thread); 7159 } 7160 7161 /** 7162 * lpfc_free_iocb_list - Free iocb list. 7163 * @phba: pointer to lpfc hba data structure. 7164 * 7165 * This routine is invoked to free the driver's IOCB list and memory. 7166 **/ 7167 void 7168 lpfc_free_iocb_list(struct lpfc_hba *phba) 7169 { 7170 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 7171 7172 spin_lock_irq(&phba->hbalock); 7173 list_for_each_entry_safe(iocbq_entry, iocbq_next, 7174 &phba->lpfc_iocb_list, list) { 7175 list_del(&iocbq_entry->list); 7176 kfree(iocbq_entry); 7177 phba->total_iocbq_bufs--; 7178 } 7179 spin_unlock_irq(&phba->hbalock); 7180 7181 return; 7182 } 7183 7184 /** 7185 * lpfc_init_iocb_list - Allocate and initialize iocb list. 7186 * @phba: pointer to lpfc hba data structure. 7187 * 7188 * This routine is invoked to allocate and initizlize the driver's IOCB 7189 * list and set up the IOCB tag array accordingly. 7190 * 7191 * Return codes 7192 * 0 - successful 7193 * other values - error 7194 **/ 7195 int 7196 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 7197 { 7198 struct lpfc_iocbq *iocbq_entry = NULL; 7199 uint16_t iotag; 7200 int i; 7201 7202 /* Initialize and populate the iocb list per host. */ 7203 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 7204 for (i = 0; i < iocb_count; i++) { 7205 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 7206 if (iocbq_entry == NULL) { 7207 printk(KERN_ERR "%s: only allocated %d iocbs of " 7208 "expected %d count. Unloading driver.\n", 7209 __func__, i, iocb_count); 7210 goto out_free_iocbq; 7211 } 7212 7213 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 7214 if (iotag == 0) { 7215 kfree(iocbq_entry); 7216 printk(KERN_ERR "%s: failed to allocate IOTAG. " 7217 "Unloading driver.\n", __func__); 7218 goto out_free_iocbq; 7219 } 7220 iocbq_entry->sli4_lxritag = NO_XRI; 7221 iocbq_entry->sli4_xritag = NO_XRI; 7222 7223 spin_lock_irq(&phba->hbalock); 7224 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 7225 phba->total_iocbq_bufs++; 7226 spin_unlock_irq(&phba->hbalock); 7227 } 7228 7229 return 0; 7230 7231 out_free_iocbq: 7232 lpfc_free_iocb_list(phba); 7233 7234 return -ENOMEM; 7235 } 7236 7237 /** 7238 * lpfc_free_sgl_list - Free a given sgl list. 7239 * @phba: pointer to lpfc hba data structure. 7240 * @sglq_list: pointer to the head of sgl list. 7241 * 7242 * This routine is invoked to free a give sgl list and memory. 7243 **/ 7244 void 7245 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 7246 { 7247 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7248 7249 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 7250 list_del(&sglq_entry->list); 7251 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 7252 kfree(sglq_entry); 7253 } 7254 } 7255 7256 /** 7257 * lpfc_free_els_sgl_list - Free els sgl list. 7258 * @phba: pointer to lpfc hba data structure. 7259 * 7260 * This routine is invoked to free the driver's els sgl list and memory. 
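* The entries are first spliced onto a local list under hbalock and the
* nested sgl_list_lock, so the actual lpfc_mbuf_free() calls made by
* lpfc_free_sgl_list() below run with no locks held.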
7261 **/ 7262 static void 7263 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7264 { 7265 LIST_HEAD(sglq_list); 7266 7267 /* Retrieve all els sgls from driver list */ 7268 spin_lock_irq(&phba->hbalock); 7269 spin_lock(&phba->sli4_hba.sgl_list_lock); 7270 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7271 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7272 spin_unlock_irq(&phba->hbalock); 7273 7274 /* Now free the sgl list */ 7275 lpfc_free_sgl_list(phba, &sglq_list); 7276 } 7277 7278 /** 7279 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7280 * @phba: pointer to lpfc hba data structure. 7281 * 7282 * This routine is invoked to free the driver's nvmet sgl list and memory. 7283 **/ 7284 static void 7285 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7286 { 7287 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7288 LIST_HEAD(sglq_list); 7289 7290 /* Retrieve all nvmet sgls from driver list */ 7291 spin_lock_irq(&phba->hbalock); 7292 spin_lock(&phba->sli4_hba.sgl_list_lock); 7293 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7294 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7295 spin_unlock_irq(&phba->hbalock); 7296 7297 /* Now free the sgl list */ 7298 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7299 list_del(&sglq_entry->list); 7300 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7301 kfree(sglq_entry); 7302 } 7303 7304 /* Update the nvmet_xri_cnt to reflect no current sgls. 7305 * The next initialization cycle sets the count and allocates 7306 * the sgls over again. 7307 */ 7308 phba->sli4_hba.nvmet_xri_cnt = 0; 7309 } 7310 7311 /** 7312 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7313 * @phba: pointer to lpfc hba data structure. 7314 * 7315 * This routine is invoked to allocate the driver's active sgl memory. 7316 * This array will hold the sglq_entry's for active IOs. 7317 **/ 7318 static int 7319 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7320 { 7321 int size; 7322 size = sizeof(struct lpfc_sglq *); 7323 size *= phba->sli4_hba.max_cfg_param.max_xri; 7324 7325 phba->sli4_hba.lpfc_sglq_active_list = 7326 kzalloc(size, GFP_KERNEL); 7327 if (!phba->sli4_hba.lpfc_sglq_active_list) 7328 return -ENOMEM; 7329 return 0; 7330 } 7331 7332 /** 7333 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7334 * @phba: pointer to lpfc hba data structure. 7335 * 7336 * This routine is invoked to walk through the array of active sglq entries 7337 * and free all of the resources. 7338 * This is just a place holder for now. 7339 **/ 7340 static void 7341 lpfc_free_active_sgl(struct lpfc_hba *phba) 7342 { 7343 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7344 } 7345 7346 /** 7347 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7348 * @phba: pointer to lpfc hba data structure. 7349 * 7350 * This routine is invoked to allocate and initizlize the driver's sgl 7351 * list and set up the sgl xritag tag array accordingly. 7352 * 7353 **/ 7354 static void 7355 lpfc_init_sgl_list(struct lpfc_hba *phba) 7356 { 7357 /* Initialize and populate the sglq list per host/VF. 
*/ 7358 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7359 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7360 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7361 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7362 7363 /* els xri-sgl book keeping */ 7364 phba->sli4_hba.els_xri_cnt = 0; 7365 7366 /* nvme xri-buffer book keeping */ 7367 phba->sli4_hba.io_xri_cnt = 0; 7368 } 7369 7370 /** 7371 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7372 * @phba: pointer to lpfc hba data structure. 7373 * 7374 * This routine is invoked to post rpi header templates to the 7375 * port for those SLI4 ports that do not support extents. This routine 7376 * posts a PAGE_SIZE memory region to the port to hold up to 7377 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 7378 * and should be called only when interrupts are disabled. 7379 * 7380 * Return codes 7381 * 0 - successful 7382 * -ERROR - otherwise. 7383 **/ 7384 int 7385 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7386 { 7387 int rc = 0; 7388 struct lpfc_rpi_hdr *rpi_hdr; 7389 7390 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7391 if (!phba->sli4_hba.rpi_hdrs_in_use) 7392 return rc; 7393 if (phba->sli4_hba.extents_in_use) 7394 return -EIO; 7395 7396 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7397 if (!rpi_hdr) { 7398 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7399 "0391 Error during rpi post operation\n"); 7400 lpfc_sli4_remove_rpis(phba); 7401 rc = -ENODEV; 7402 } 7403 7404 return rc; 7405 } 7406 7407 /** 7408 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7409 * @phba: pointer to lpfc hba data structure. 7410 * 7411 * This routine is invoked to allocate a single 4KB memory region to 7412 * support rpis and stores them in the phba. This single region 7413 * provides support for up to 64 rpis. The region is used globally 7414 * by the device. 7415 * 7416 * Returns: 7417 * A valid rpi hdr on success. 7418 * A NULL pointer on any failure. 7419 **/ 7420 struct lpfc_rpi_hdr * 7421 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7422 { 7423 uint16_t rpi_limit, curr_rpi_range; 7424 struct lpfc_dmabuf *dmabuf; 7425 struct lpfc_rpi_hdr *rpi_hdr; 7426 7427 /* 7428 * If the SLI4 port supports extents, posting the rpi header isn't 7429 * required. Set the expected maximum count and let the actual value 7430 * get set when extents are fully allocated. 7431 */ 7432 if (!phba->sli4_hba.rpi_hdrs_in_use) 7433 return NULL; 7434 if (phba->sli4_hba.extents_in_use) 7435 return NULL; 7436 7437 /* The limit on the logical index is just the max_rpi count. */ 7438 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7439 7440 spin_lock_irq(&phba->hbalock); 7441 /* 7442 * Establish the starting RPI in this header block. The starting 7443 * rpi is normalized to a zero base because the physical rpi is 7444 * port based. 7445 */ 7446 curr_rpi_range = phba->sli4_hba.next_rpi; 7447 spin_unlock_irq(&phba->hbalock); 7448 7449 /* Reached full RPI range */ 7450 if (curr_rpi_range == rpi_limit) 7451 return NULL; 7452 7453 /* 7454 * First allocate the protocol header region for the port. The 7455 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
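* (LPFC_HDR_TEMPLATE_SIZE is that 4KB region.) dma_alloc_coherent() is
* generally documented to return memory aligned to at least the smallest
* page order covering the request, so the IS_ALIGNED() test below is a
* defensive sanity check rather than the primary alignment mechanism.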
7456 */ 7457 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7458 if (!dmabuf) 7459 return NULL; 7460 7461 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7462 LPFC_HDR_TEMPLATE_SIZE, 7463 &dmabuf->phys, GFP_KERNEL); 7464 if (!dmabuf->virt) { 7465 rpi_hdr = NULL; 7466 goto err_free_dmabuf; 7467 } 7468 7469 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7470 rpi_hdr = NULL; 7471 goto err_free_coherent; 7472 } 7473 7474 /* Save the rpi header data for cleanup later. */ 7475 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7476 if (!rpi_hdr) 7477 goto err_free_coherent; 7478 7479 rpi_hdr->dmabuf = dmabuf; 7480 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7481 rpi_hdr->page_count = 1; 7482 spin_lock_irq(&phba->hbalock); 7483 7484 /* The rpi_hdr stores the logical index only. */ 7485 rpi_hdr->start_rpi = curr_rpi_range; 7486 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7487 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7488 7489 spin_unlock_irq(&phba->hbalock); 7490 return rpi_hdr; 7491 7492 err_free_coherent: 7493 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7494 dmabuf->virt, dmabuf->phys); 7495 err_free_dmabuf: 7496 kfree(dmabuf); 7497 return NULL; 7498 } 7499 7500 /** 7501 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7502 * @phba: pointer to lpfc hba data structure. 7503 * 7504 * This routine is invoked to remove all memory resources allocated 7505 * to support rpis for SLI4 ports not supporting extents. This routine 7506 * presumes the caller has released all rpis consumed by fabric or port 7507 * logins and is prepared to have the header pages removed. 7508 **/ 7509 void 7510 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7511 { 7512 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7513 7514 if (!phba->sli4_hba.rpi_hdrs_in_use) 7515 goto exit; 7516 7517 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7518 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7519 list_del(&rpi_hdr->list); 7520 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7521 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7522 kfree(rpi_hdr->dmabuf); 7523 kfree(rpi_hdr); 7524 } 7525 exit: 7526 /* There are no rpis available to the port now. */ 7527 phba->sli4_hba.next_rpi = 0; 7528 } 7529 7530 /** 7531 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7532 * @pdev: pointer to pci device data structure. 7533 * 7534 * This routine is invoked to allocate the driver hba data structure for an 7535 * HBA device. If the allocation is successful, the phba reference to the 7536 * PCI device data structure is set. 
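* The board number picked up here through lpfc_get_instance() is the one
* that lpfc_hba_free() later releases with idr_remove(&lpfc_hba_index,
* phba->brd_no).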
7537 * 7538 * Return codes 7539 * pointer to @phba - successful 7540 * NULL - error 7541 **/ 7542 static struct lpfc_hba * 7543 lpfc_hba_alloc(struct pci_dev *pdev) 7544 { 7545 struct lpfc_hba *phba; 7546 7547 /* Allocate memory for HBA structure */ 7548 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7549 if (!phba) { 7550 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7551 return NULL; 7552 } 7553 7554 /* Set reference to PCI device in HBA structure */ 7555 phba->pcidev = pdev; 7556 7557 /* Assign an unused board number */ 7558 phba->brd_no = lpfc_get_instance(); 7559 if (phba->brd_no < 0) { 7560 kfree(phba); 7561 return NULL; 7562 } 7563 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7564 7565 spin_lock_init(&phba->ct_ev_lock); 7566 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7567 7568 return phba; 7569 } 7570 7571 /** 7572 * lpfc_hba_free - Free driver hba data structure with a device. 7573 * @phba: pointer to lpfc hba data structure. 7574 * 7575 * This routine is invoked to free the driver hba data structure with an 7576 * HBA device. 7577 **/ 7578 static void 7579 lpfc_hba_free(struct lpfc_hba *phba) 7580 { 7581 if (phba->sli_rev == LPFC_SLI_REV4) 7582 kfree(phba->sli4_hba.hdwq); 7583 7584 /* Release the driver assigned board number */ 7585 idr_remove(&lpfc_hba_index, phba->brd_no); 7586 7587 /* Free memory allocated with sli3 rings */ 7588 kfree(phba->sli.sli3_ring); 7589 phba->sli.sli3_ring = NULL; 7590 7591 kfree(phba); 7592 return; 7593 } 7594 7595 /** 7596 * lpfc_create_shost - Create hba physical port with associated scsi host. 7597 * @phba: pointer to lpfc hba data structure. 7598 * 7599 * This routine is invoked to create HBA physical port and associate a SCSI 7600 * host with it. 7601 * 7602 * Return codes 7603 * 0 - successful 7604 * other values - error 7605 **/ 7606 static int 7607 lpfc_create_shost(struct lpfc_hba *phba) 7608 { 7609 struct lpfc_vport *vport; 7610 struct Scsi_Host *shost; 7611 7612 /* Initialize HBA FC structure */ 7613 phba->fc_edtov = FF_DEF_EDTOV; 7614 phba->fc_ratov = FF_DEF_RATOV; 7615 phba->fc_altov = FF_DEF_ALTOV; 7616 phba->fc_arbtov = FF_DEF_ARBTOV; 7617 7618 atomic_set(&phba->sdev_cnt, 0); 7619 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7620 if (!vport) 7621 return -ENODEV; 7622 7623 shost = lpfc_shost_from_vport(vport); 7624 phba->pport = vport; 7625 7626 if (phba->nvmet_support) { 7627 /* Only 1 vport (pport) will support NVME target */ 7628 phba->targetport = NULL; 7629 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7630 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 7631 "6076 NVME Target Found\n"); 7632 } 7633 7634 lpfc_debugfs_initialize(vport); 7635 /* Put reference to SCSI host to driver's device private data */ 7636 pci_set_drvdata(phba->pcidev, shost); 7637 7638 /* 7639 * At this point we are fully registered with PSA. In addition, 7640 * any initial discovery should be completed. 7641 */ 7642 vport->load_flag |= FC_ALLOW_FDMI; 7643 if (phba->cfg_enable_SmartSAN || 7644 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7645 7646 /* Setup appropriate attribute masks */ 7647 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7648 if (phba->cfg_enable_SmartSAN) 7649 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7650 else 7651 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7652 } 7653 return 0; 7654 } 7655 7656 /** 7657 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7658 * @phba: pointer to lpfc hba data structure. 
7659 * 7660 * This routine is invoked to destroy HBA physical port and the associated 7661 * SCSI host. 7662 **/ 7663 static void 7664 lpfc_destroy_shost(struct lpfc_hba *phba) 7665 { 7666 struct lpfc_vport *vport = phba->pport; 7667 7668 /* Destroy physical port that associated with the SCSI host */ 7669 destroy_port(vport); 7670 7671 return; 7672 } 7673 7674 /** 7675 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7676 * @phba: pointer to lpfc hba data structure. 7677 * @shost: the shost to be used to detect Block guard settings. 7678 * 7679 * This routine sets up the local Block guard protocol settings for @shost. 7680 * This routine also allocates memory for debugging bg buffers. 7681 **/ 7682 static void 7683 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7684 { 7685 uint32_t old_mask; 7686 uint32_t old_guard; 7687 7688 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7690 "1478 Registering BlockGuard with the " 7691 "SCSI layer\n"); 7692 7693 old_mask = phba->cfg_prot_mask; 7694 old_guard = phba->cfg_prot_guard; 7695 7696 /* Only allow supported values */ 7697 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7698 SHOST_DIX_TYPE0_PROTECTION | 7699 SHOST_DIX_TYPE1_PROTECTION); 7700 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7701 SHOST_DIX_GUARD_CRC); 7702 7703 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7704 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7705 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7706 7707 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7708 if ((old_mask != phba->cfg_prot_mask) || 7709 (old_guard != phba->cfg_prot_guard)) 7710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7711 "1475 Registering BlockGuard with the " 7712 "SCSI layer: mask %d guard %d\n", 7713 phba->cfg_prot_mask, 7714 phba->cfg_prot_guard); 7715 7716 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7717 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7718 } else 7719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7720 "1479 Not Registering BlockGuard with the SCSI " 7721 "layer, Bad protection parameters: %d %d\n", 7722 old_mask, old_guard); 7723 } 7724 } 7725 7726 /** 7727 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7728 * @phba: pointer to lpfc hba data structure. 7729 * 7730 * This routine is invoked to perform all the necessary post initialization 7731 * setup for the device. 7732 **/ 7733 static void 7734 lpfc_post_init_setup(struct lpfc_hba *phba) 7735 { 7736 struct Scsi_Host *shost; 7737 struct lpfc_adapter_event_header adapter_event; 7738 7739 /* Get the default values for Model Name and Description */ 7740 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7741 7742 /* 7743 * hba setup may have changed the hba_queue_depth so we need to 7744 * adjust the value of can_queue. 
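* The margin of 10 taken off cfg_hba_queue_depth presumably keeps a few
* command slots back for driver-internal use; e.g. a configured depth of
* 512 leaves the midlayer a can_queue of 502.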
7745 */ 7746 shost = pci_get_drvdata(phba->pcidev); 7747 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7748 7749 lpfc_host_attrib_init(shost); 7750 7751 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7752 spin_lock_irq(shost->host_lock); 7753 lpfc_poll_start_timer(phba); 7754 spin_unlock_irq(shost->host_lock); 7755 } 7756 7757 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7758 "0428 Perform SCSI scan\n"); 7759 /* Send board arrival event to upper layer */ 7760 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7761 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7762 fc_host_post_vendor_event(shost, fc_get_event_number(), 7763 sizeof(adapter_event), 7764 (char *) &adapter_event, 7765 LPFC_NL_VENDOR_ID); 7766 return; 7767 } 7768 7769 /** 7770 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7771 * @phba: pointer to lpfc hba data structure. 7772 * 7773 * This routine is invoked to set up the PCI device memory space for device 7774 * with SLI-3 interface spec. 7775 * 7776 * Return codes 7777 * 0 - successful 7778 * other values - error 7779 **/ 7780 static int 7781 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7782 { 7783 struct pci_dev *pdev = phba->pcidev; 7784 unsigned long bar0map_len, bar2map_len; 7785 int i, hbq_count; 7786 void *ptr; 7787 int error; 7788 7789 if (!pdev) 7790 return -ENODEV; 7791 7792 /* Set the device DMA mask size */ 7793 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7794 if (error) 7795 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7796 if (error) 7797 return error; 7798 error = -ENODEV; 7799 7800 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7801 * required by each mapping. 7802 */ 7803 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7804 bar0map_len = pci_resource_len(pdev, 0); 7805 7806 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7807 bar2map_len = pci_resource_len(pdev, 2); 7808 7809 /* Map HBA SLIM to a kernel virtual address. */ 7810 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7811 if (!phba->slim_memmap_p) { 7812 dev_printk(KERN_ERR, &pdev->dev, 7813 "ioremap failed for SLIM memory.\n"); 7814 goto out; 7815 } 7816 7817 /* Map HBA Control Registers to a kernel virtual address. 
*/ 7818 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7819 if (!phba->ctrl_regs_memmap_p) { 7820 dev_printk(KERN_ERR, &pdev->dev, 7821 "ioremap failed for HBA control registers.\n"); 7822 goto out_iounmap_slim; 7823 } 7824 7825 /* Allocate memory for SLI-2 structures */ 7826 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7827 &phba->slim2p.phys, GFP_KERNEL); 7828 if (!phba->slim2p.virt) 7829 goto out_iounmap; 7830 7831 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7832 phba->mbox_ext = (phba->slim2p.virt + 7833 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7834 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7835 phba->IOCBs = (phba->slim2p.virt + 7836 offsetof(struct lpfc_sli2_slim, IOCBs)); 7837 7838 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7839 lpfc_sli_hbq_size(), 7840 &phba->hbqslimp.phys, 7841 GFP_KERNEL); 7842 if (!phba->hbqslimp.virt) 7843 goto out_free_slim; 7844 7845 hbq_count = lpfc_sli_hbq_count(); 7846 ptr = phba->hbqslimp.virt; 7847 for (i = 0; i < hbq_count; ++i) { 7848 phba->hbqs[i].hbq_virt = ptr; 7849 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7850 ptr += (lpfc_hbq_defs[i]->entry_count * 7851 sizeof(struct lpfc_hbq_entry)); 7852 } 7853 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7854 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7855 7856 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7857 7858 phba->MBslimaddr = phba->slim_memmap_p; 7859 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7860 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7861 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7862 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7863 7864 return 0; 7865 7866 out_free_slim: 7867 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7868 phba->slim2p.virt, phba->slim2p.phys); 7869 out_iounmap: 7870 iounmap(phba->ctrl_regs_memmap_p); 7871 out_iounmap_slim: 7872 iounmap(phba->slim_memmap_p); 7873 out: 7874 return error; 7875 } 7876 7877 /** 7878 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7879 * @phba: pointer to lpfc hba data structure. 7880 * 7881 * This routine is invoked to unset the PCI device memory space for device 7882 * with SLI-3 interface spec. 7883 **/ 7884 static void 7885 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7886 { 7887 struct pci_dev *pdev; 7888 7889 /* Obtain PCI device reference */ 7890 if (!phba->pcidev) 7891 return; 7892 else 7893 pdev = phba->pcidev; 7894 7895 /* Free coherent DMA memory allocated */ 7896 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7897 phba->hbqslimp.virt, phba->hbqslimp.phys); 7898 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7899 phba->slim2p.virt, phba->slim2p.phys); 7900 7901 /* I/O memory unmap */ 7902 iounmap(phba->ctrl_regs_memmap_p); 7903 iounmap(phba->slim_memmap_p); 7904 7905 return; 7906 } 7907 7908 /** 7909 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7910 * @phba: pointer to lpfc hba data structure. 7911 * 7912 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7913 * done and check status. 7914 * 7915 * Return 0 if successful, otherwise -ENODEV. 
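* The wait is a polling loop: up to 3000 reads of the port semaphore
* register with msleep(10) between them (roughly a 30 second budget),
* exiting early on a fatal POST error or once the port reports
* LPFC_POST_STAGE_PORT_READY.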
7916 **/ 7917 int 7918 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 7919 { 7920 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 7921 struct lpfc_register reg_data; 7922 int i, port_error = 0; 7923 uint32_t if_type; 7924 7925 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 7926 memset(&reg_data, 0, sizeof(reg_data)); 7927 if (!phba->sli4_hba.PSMPHRregaddr) 7928 return -ENODEV; 7929 7930 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 7931 for (i = 0; i < 3000; i++) { 7932 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 7933 &portsmphr_reg.word0) || 7934 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 7935 /* Port has a fatal POST error, break out */ 7936 port_error = -ENODEV; 7937 break; 7938 } 7939 if (LPFC_POST_STAGE_PORT_READY == 7940 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 7941 break; 7942 msleep(10); 7943 } 7944 7945 /* 7946 * If there was a port error during POST, then don't proceed with 7947 * other register reads as the data may not be valid. Just exit. 7948 */ 7949 if (port_error) { 7950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7951 "1408 Port Failed POST - portsmphr=0x%x, " 7952 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 7953 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 7954 portsmphr_reg.word0, 7955 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 7956 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 7957 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 7958 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 7959 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 7960 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 7961 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 7962 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 7963 } else { 7964 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7965 "2534 Device Info: SLIFamily=0x%x, " 7966 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 7967 "SLIHint_2=0x%x, FT=0x%x\n", 7968 bf_get(lpfc_sli_intf_sli_family, 7969 &phba->sli4_hba.sli_intf), 7970 bf_get(lpfc_sli_intf_slirev, 7971 &phba->sli4_hba.sli_intf), 7972 bf_get(lpfc_sli_intf_if_type, 7973 &phba->sli4_hba.sli_intf), 7974 bf_get(lpfc_sli_intf_sli_hint1, 7975 &phba->sli4_hba.sli_intf), 7976 bf_get(lpfc_sli_intf_sli_hint2, 7977 &phba->sli4_hba.sli_intf), 7978 bf_get(lpfc_sli_intf_func_type, 7979 &phba->sli4_hba.sli_intf)); 7980 /* 7981 * Check for other Port errors during the initialization 7982 * process. Fail the load if the port did not come up 7983 * correctly. 7984 */ 7985 if_type = bf_get(lpfc_sli_intf_if_type, 7986 &phba->sli4_hba.sli_intf); 7987 switch (if_type) { 7988 case LPFC_SLI_INTF_IF_TYPE_0: 7989 phba->sli4_hba.ue_mask_lo = 7990 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 7991 phba->sli4_hba.ue_mask_hi = 7992 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 7993 uerrlo_reg.word0 = 7994 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 7995 uerrhi_reg.word0 = 7996 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 7997 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 7998 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 7999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8000 "1422 Unrecoverable Error " 8001 "Detected during POST " 8002 "uerr_lo_reg=0x%x, " 8003 "uerr_hi_reg=0x%x, " 8004 "ue_mask_lo_reg=0x%x, " 8005 "ue_mask_hi_reg=0x%x\n", 8006 uerrlo_reg.word0, 8007 uerrhi_reg.word0, 8008 phba->sli4_hba.ue_mask_lo, 8009 phba->sli4_hba.ue_mask_hi); 8010 port_error = -ENODEV; 8011 } 8012 break; 8013 case LPFC_SLI_INTF_IF_TYPE_2: 8014 case LPFC_SLI_INTF_IF_TYPE_6: 8015 /* Final checks. The port status should be clean.
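* An unrecoverable error is declared when the STATUS register cannot be
* read, or when its err bit is set without the rn bit; in that case ERR1
* and ERR2 are captured into work_status[] for the log message below.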
*/ 8016 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 8017 &reg_data.word0) || 8018 (bf_get(lpfc_sliport_status_err, &reg_data) && 8019 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 8020 phba->work_status[0] = 8021 readl(phba->sli4_hba.u.if_type2. 8022 ERR1regaddr); 8023 phba->work_status[1] = 8024 readl(phba->sli4_hba.u.if_type2. 8025 ERR2regaddr); 8026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8027 "2888 Unrecoverable port error " 8028 "following POST: port status reg " 8029 "0x%x, port_smphr reg 0x%x, " 8030 "error 1=0x%x, error 2=0x%x\n", 8031 reg_data.word0, 8032 portsmphr_reg.word0, 8033 phba->work_status[0], 8034 phba->work_status[1]); 8035 port_error = -ENODEV; 8036 } 8037 break; 8038 case LPFC_SLI_INTF_IF_TYPE_1: 8039 default: 8040 break; 8041 } 8042 } 8043 return port_error; 8044 } 8045 8046 /** 8047 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 8048 * @phba: pointer to lpfc hba data structure. 8049 * @if_type: The SLI4 interface type getting configured. 8050 * 8051 * This routine is invoked to set up SLI4 BAR0 PCI config space register 8052 * memory map. 8053 **/ 8054 static void 8055 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8056 { 8057 switch (if_type) { 8058 case LPFC_SLI_INTF_IF_TYPE_0: 8059 phba->sli4_hba.u.if_type0.UERRLOregaddr = 8060 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 8061 phba->sli4_hba.u.if_type0.UERRHIregaddr = 8062 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 8063 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 8064 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 8065 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 8066 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 8067 phba->sli4_hba.SLIINTFregaddr = 8068 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8069 break; 8070 case LPFC_SLI_INTF_IF_TYPE_2: 8071 phba->sli4_hba.u.if_type2.EQDregaddr = 8072 phba->sli4_hba.conf_regs_memmap_p + 8073 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8074 phba->sli4_hba.u.if_type2.ERR1regaddr = 8075 phba->sli4_hba.conf_regs_memmap_p + 8076 LPFC_CTL_PORT_ER1_OFFSET; 8077 phba->sli4_hba.u.if_type2.ERR2regaddr = 8078 phba->sli4_hba.conf_regs_memmap_p + 8079 LPFC_CTL_PORT_ER2_OFFSET; 8080 phba->sli4_hba.u.if_type2.CTRLregaddr = 8081 phba->sli4_hba.conf_regs_memmap_p + 8082 LPFC_CTL_PORT_CTL_OFFSET; 8083 phba->sli4_hba.u.if_type2.STATUSregaddr = 8084 phba->sli4_hba.conf_regs_memmap_p + 8085 LPFC_CTL_PORT_STA_OFFSET; 8086 phba->sli4_hba.SLIINTFregaddr = 8087 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8088 phba->sli4_hba.PSMPHRregaddr = 8089 phba->sli4_hba.conf_regs_memmap_p + 8090 LPFC_CTL_PORT_SEM_OFFSET; 8091 phba->sli4_hba.RQDBregaddr = 8092 phba->sli4_hba.conf_regs_memmap_p + 8093 LPFC_ULP0_RQ_DOORBELL; 8094 phba->sli4_hba.WQDBregaddr = 8095 phba->sli4_hba.conf_regs_memmap_p + 8096 LPFC_ULP0_WQ_DOORBELL; 8097 phba->sli4_hba.CQDBregaddr = 8098 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 8099 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8100 phba->sli4_hba.MQDBregaddr = 8101 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 8102 phba->sli4_hba.BMBXregaddr = 8103 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8104 break; 8105 case LPFC_SLI_INTF_IF_TYPE_6: 8106 phba->sli4_hba.u.if_type2.EQDregaddr = 8107 phba->sli4_hba.conf_regs_memmap_p + 8108 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8109 phba->sli4_hba.u.if_type2.ERR1regaddr = 8110 phba->sli4_hba.conf_regs_memmap_p + 8111 LPFC_CTL_PORT_ER1_OFFSET; 8112 phba->sli4_hba.u.if_type2.ERR2regaddr = 8113 phba->sli4_hba.conf_regs_memmap_p
+ 8114 LPFC_CTL_PORT_ER2_OFFSET; 8115 phba->sli4_hba.u.if_type2.CTRLregaddr = 8116 phba->sli4_hba.conf_regs_memmap_p + 8117 LPFC_CTL_PORT_CTL_OFFSET; 8118 phba->sli4_hba.u.if_type2.STATUSregaddr = 8119 phba->sli4_hba.conf_regs_memmap_p + 8120 LPFC_CTL_PORT_STA_OFFSET; 8121 phba->sli4_hba.PSMPHRregaddr = 8122 phba->sli4_hba.conf_regs_memmap_p + 8123 LPFC_CTL_PORT_SEM_OFFSET; 8124 phba->sli4_hba.BMBXregaddr = 8125 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8126 break; 8127 case LPFC_SLI_INTF_IF_TYPE_1: 8128 default: 8129 dev_printk(KERN_ERR, &phba->pcidev->dev, 8130 "FATAL - unsupported SLI4 interface type - %d\n", 8131 if_type); 8132 break; 8133 } 8134 } 8135 8136 /** 8137 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 8138 * @phba: pointer to lpfc hba data structure. 8139 * 8140 * This routine is invoked to set up SLI4 BAR1 register memory map. 8141 **/ 8142 static void 8143 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8144 { 8145 switch (if_type) { 8146 case LPFC_SLI_INTF_IF_TYPE_0: 8147 phba->sli4_hba.PSMPHRregaddr = 8148 phba->sli4_hba.ctrl_regs_memmap_p + 8149 LPFC_SLIPORT_IF0_SMPHR; 8150 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8151 LPFC_HST_ISR0; 8152 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8153 LPFC_HST_IMR0; 8154 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8155 LPFC_HST_ISCR0; 8156 break; 8157 case LPFC_SLI_INTF_IF_TYPE_6: 8158 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8159 LPFC_IF6_RQ_DOORBELL; 8160 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8161 LPFC_IF6_WQ_DOORBELL; 8162 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8163 LPFC_IF6_CQ_DOORBELL; 8164 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8165 LPFC_IF6_EQ_DOORBELL; 8166 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8167 LPFC_IF6_MQ_DOORBELL; 8168 break; 8169 case LPFC_SLI_INTF_IF_TYPE_2: 8170 case LPFC_SLI_INTF_IF_TYPE_1: 8171 default: 8172 dev_err(&phba->pcidev->dev, 8173 "FATAL - unsupported SLI4 interface type - %d\n", 8174 if_type); 8175 break; 8176 } 8177 } 8178 8179 /** 8180 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 8181 * @phba: pointer to lpfc hba data structure. 8182 * @vf: virtual function number 8183 * 8184 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 8185 * based on the given viftual function number, @vf. 8186 * 8187 * Return 0 if successful, otherwise -ENODEV. 
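* Each virtual function gets its own LPFC_VFR_PAGE_SIZE window in the
* doorbell BAR, so every address below is drbl_regs_memmap_p +
* vf * LPFC_VFR_PAGE_SIZE + a per-queue offset (for example, the RQ
* doorbell of vf 2 would sit at base + 2 * LPFC_VFR_PAGE_SIZE +
* LPFC_ULP0_RQ_DOORBELL).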
8188 **/ 8189 static int 8190 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 8191 { 8192 if (vf > LPFC_VIR_FUNC_MAX) 8193 return -ENODEV; 8194 8195 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8196 vf * LPFC_VFR_PAGE_SIZE + 8197 LPFC_ULP0_RQ_DOORBELL); 8198 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8199 vf * LPFC_VFR_PAGE_SIZE + 8200 LPFC_ULP0_WQ_DOORBELL); 8201 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8202 vf * LPFC_VFR_PAGE_SIZE + 8203 LPFC_EQCQ_DOORBELL); 8204 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8205 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8206 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 8207 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8208 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 8209 return 0; 8210 } 8211 8212 /** 8213 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 8214 * @phba: pointer to lpfc hba data structure. 8215 * 8216 * This routine is invoked to create the bootstrap mailbox 8217 * region consistent with the SLI-4 interface spec. This 8218 * routine allocates all memory necessary to communicate 8219 * mailbox commands to the port and sets up all alignment 8220 * needs. No locks are expected to be held when calling 8221 * this routine. 8222 * 8223 * Return codes 8224 * 0 - successful 8225 * -ENOMEM - could not allocate memory. 8226 **/ 8227 static int 8228 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 8229 { 8230 uint32_t bmbx_size; 8231 struct lpfc_dmabuf *dmabuf; 8232 struct dma_address *dma_address; 8233 uint32_t pa_addr; 8234 uint64_t phys_addr; 8235 8236 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8237 if (!dmabuf) 8238 return -ENOMEM; 8239 8240 /* 8241 * The bootstrap mailbox region is comprised of 2 parts 8242 * plus an alignment restriction of 16 bytes. 8243 */ 8244 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 8245 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 8246 &dmabuf->phys, GFP_KERNEL); 8247 if (!dmabuf->virt) { 8248 kfree(dmabuf); 8249 return -ENOMEM; 8250 } 8251 8252 /* 8253 * Initialize the bootstrap mailbox pointers now so that the register 8254 * operations are simple later. The mailbox dma address is required 8255 * to be 16-byte aligned. Also align the virtual memory as each 8256 * mailbox is copied into the bmbx mailbox region before issuing the 8257 * command to the port. 8258 */ 8259 phba->sli4_hba.bmbx.dmabuf = dmabuf; 8260 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 8261 8262 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 8263 LPFC_ALIGN_16_BYTE); 8264 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 8265 LPFC_ALIGN_16_BYTE); 8266 8267 /* 8268 * Set the high and low physical addresses now. The SLI4 alignment 8269 * requirement is 16 bytes and the mailbox is posted to the port 8270 * as two 30-bit addresses. The other data is a bit marking whether 8271 * the 30-bit address is the high or low address. 8272 * Upcast bmbx aphys to 64bits so shift instruction compiles 8273 * clean on 32 bit machines.
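* In other words, addr_lo carries physical address bits 33:4 and addr_hi
* carries bits 63:34; each 30-bit payload is shifted left by two and OR'd
* with the LPFC_BMBX_BIT1_ADDR_LO / _HI marker.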
8274 */ 8275 dma_address = &phba->sli4_hba.bmbx.dma_address; 8276 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 8277 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 8278 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 8279 LPFC_BMBX_BIT1_ADDR_HI); 8280 8281 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 8282 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 8283 LPFC_BMBX_BIT1_ADDR_LO); 8284 return 0; 8285 } 8286 8287 /** 8288 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 8289 * @phba: pointer to lpfc hba data structure. 8290 * 8291 * This routine is invoked to teardown the bootstrap mailbox 8292 * region and release all host resources. This routine requires 8293 * the caller to ensure all mailbox commands recovered, no 8294 * additional mailbox comands are sent, and interrupts are disabled 8295 * before calling this routine. 8296 * 8297 **/ 8298 static void 8299 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 8300 { 8301 dma_free_coherent(&phba->pcidev->dev, 8302 phba->sli4_hba.bmbx.bmbx_size, 8303 phba->sli4_hba.bmbx.dmabuf->virt, 8304 phba->sli4_hba.bmbx.dmabuf->phys); 8305 8306 kfree(phba->sli4_hba.bmbx.dmabuf); 8307 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 8308 } 8309 8310 static const char * const lpfc_topo_to_str[] = { 8311 "Loop then P2P", 8312 "Loopback", 8313 "P2P Only", 8314 "Unsupported", 8315 "Loop Only", 8316 "Unsupported", 8317 "P2P then Loop", 8318 }; 8319 8320 /** 8321 * lpfc_map_topology - Map the topology read from READ_CONFIG 8322 * @phba: pointer to lpfc hba data structure. 8323 * @rdconf: pointer to read config data 8324 * 8325 * This routine is invoked to map the topology values as read 8326 * from the read config mailbox command. If the persistent 8327 * topology feature is supported, the firmware will provide the 8328 * saved topology information to be used in INIT_LINK 8329 * 8330 **/ 8331 #define LINK_FLAGS_DEF 0x0 8332 #define LINK_FLAGS_P2P 0x1 8333 #define LINK_FLAGS_LOOP 0x2 8334 static void 8335 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 8336 { 8337 u8 ptv, tf, pt; 8338 8339 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 8340 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 8341 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 8342 8343 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8344 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 8345 ptv, tf, pt); 8346 if (!ptv) { 8347 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8348 "2019 FW does not support persistent topology " 8349 "Using driver parameter defined value [%s]", 8350 lpfc_topo_to_str[phba->cfg_topology]); 8351 return; 8352 } 8353 /* FW supports persistent topology - override module parameter value */ 8354 phba->hba_flag |= HBA_PERSISTENT_TOPO; 8355 switch (phba->pcidev->device) { 8356 case PCI_DEVICE_ID_LANCER_G7_FC: 8357 case PCI_DEVICE_ID_LANCER_G6_FC: 8358 if (!tf) { 8359 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 8360 ? FLAGS_TOPOLOGY_MODE_LOOP 8361 : FLAGS_TOPOLOGY_MODE_PT_PT); 8362 } else { 8363 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 8364 } 8365 break; 8366 default: /* G5 */ 8367 if (tf) { 8368 /* If topology failover set - pt is '0' or '1' */ 8369 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 8370 FLAGS_TOPOLOGY_MODE_LOOP_PT); 8371 } else { 8372 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 8373 ? 
FLAGS_TOPOLOGY_MODE_PT_PT 8374 : FLAGS_TOPOLOGY_MODE_LOOP); 8375 } 8376 break; 8377 } 8378 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 8379 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8380 "2020 Using persistent topology value [%s]", 8381 lpfc_topo_to_str[phba->cfg_topology]); 8382 } else { 8383 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8384 "2021 Invalid topology values from FW " 8385 "Using driver parameter defined value [%s]", 8386 lpfc_topo_to_str[phba->cfg_topology]); 8387 } 8388 } 8389 8390 /** 8391 * lpfc_sli4_read_config - Get the config parameters. 8392 * @phba: pointer to lpfc hba data structure. 8393 * 8394 * This routine is invoked to read the configuration parameters from the HBA. 8395 * The configuration parameters are used to set the base and maximum values 8396 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 8397 * allocation for the port. 8398 * 8399 * Return codes 8400 * 0 - successful 8401 * -ENOMEM - No available memory 8402 * -EIO - The mailbox failed to complete successfully. 8403 **/ 8404 int 8405 lpfc_sli4_read_config(struct lpfc_hba *phba) 8406 { 8407 LPFC_MBOXQ_t *pmb; 8408 struct lpfc_mbx_read_config *rd_config; 8409 union lpfc_sli4_cfg_shdr *shdr; 8410 uint32_t shdr_status, shdr_add_status; 8411 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8412 struct lpfc_rsrc_desc_fcfcoe *desc; 8413 char *pdesc_0; 8414 uint16_t forced_link_speed; 8415 uint32_t if_type, qmin; 8416 int length, i, rc = 0, rc2; 8417 8418 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8419 if (!pmb) { 8420 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8421 "2011 Unable to allocate memory for issuing " 8422 "SLI_CONFIG_SPECIAL mailbox command\n"); 8423 return -ENOMEM; 8424 } 8425 8426 lpfc_read_config(phba, pmb); 8427 8428 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8429 if (rc != MBX_SUCCESS) { 8430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8431 "2012 Mailbox failed , mbxCmd x%x " 8432 "READ_CONFIG, mbxStatus x%x\n", 8433 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8434 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8435 rc = -EIO; 8436 } else { 8437 rd_config = &pmb->u.mqe.un.rd_config; 8438 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8439 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8440 phba->sli4_hba.lnk_info.lnk_tp = 8441 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8442 phba->sli4_hba.lnk_info.lnk_no = 8443 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8444 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8445 "3081 lnk_type:%d, lnk_numb:%d\n", 8446 phba->sli4_hba.lnk_info.lnk_tp, 8447 phba->sli4_hba.lnk_info.lnk_no); 8448 } else 8449 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8450 "3082 Mailbox (x%x) returned ldv:x0\n", 8451 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8452 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8453 phba->bbcredit_support = 1; 8454 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8455 } 8456 8457 phba->sli4_hba.conf_trunk = 8458 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8459 phba->sli4_hba.extents_in_use = 8460 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8461 phba->sli4_hba.max_cfg_param.max_xri = 8462 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8463 /* Reduce resource usage in kdump environment */ 8464 if (is_kdump_kernel() && 8465 phba->sli4_hba.max_cfg_param.max_xri > 512) 8466 phba->sli4_hba.max_cfg_param.max_xri = 512; 8467 phba->sli4_hba.max_cfg_param.xri_base = 8468 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8469 phba->sli4_hba.max_cfg_param.max_vpi = 8470 bf_get(lpfc_mbx_rd_conf_vpi_count, 
rd_config); 8471 /* Limit the max we support */ 8472 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 8473 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 8474 phba->sli4_hba.max_cfg_param.vpi_base = 8475 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 8476 phba->sli4_hba.max_cfg_param.max_rpi = 8477 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 8478 phba->sli4_hba.max_cfg_param.rpi_base = 8479 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 8480 phba->sli4_hba.max_cfg_param.max_vfi = 8481 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 8482 phba->sli4_hba.max_cfg_param.vfi_base = 8483 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 8484 phba->sli4_hba.max_cfg_param.max_fcfi = 8485 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 8486 phba->sli4_hba.max_cfg_param.max_eq = 8487 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 8488 phba->sli4_hba.max_cfg_param.max_rq = 8489 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 8490 phba->sli4_hba.max_cfg_param.max_wq = 8491 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 8492 phba->sli4_hba.max_cfg_param.max_cq = 8493 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 8494 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 8495 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 8496 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 8497 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 8498 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 8499 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 8500 phba->max_vports = phba->max_vpi; 8501 lpfc_map_topology(phba, rd_config); 8502 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8503 "2003 cfg params Extents? %d " 8504 "XRI(B:%d M:%d), " 8505 "VPI(B:%d M:%d) " 8506 "VFI(B:%d M:%d) " 8507 "RPI(B:%d M:%d) " 8508 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 8509 phba->sli4_hba.extents_in_use, 8510 phba->sli4_hba.max_cfg_param.xri_base, 8511 phba->sli4_hba.max_cfg_param.max_xri, 8512 phba->sli4_hba.max_cfg_param.vpi_base, 8513 phba->sli4_hba.max_cfg_param.max_vpi, 8514 phba->sli4_hba.max_cfg_param.vfi_base, 8515 phba->sli4_hba.max_cfg_param.max_vfi, 8516 phba->sli4_hba.max_cfg_param.rpi_base, 8517 phba->sli4_hba.max_cfg_param.max_rpi, 8518 phba->sli4_hba.max_cfg_param.max_fcfi, 8519 phba->sli4_hba.max_cfg_param.max_eq, 8520 phba->sli4_hba.max_cfg_param.max_cq, 8521 phba->sli4_hba.max_cfg_param.max_wq, 8522 phba->sli4_hba.max_cfg_param.max_rq); 8523 8524 /* 8525 * Calculate queue resources based on how 8526 * many WQ/CQ/EQs are available. 8527 */ 8528 qmin = phba->sli4_hba.max_cfg_param.max_wq; 8529 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 8530 qmin = phba->sli4_hba.max_cfg_param.max_cq; 8531 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 8532 qmin = phba->sli4_hba.max_cfg_param.max_eq; 8533 /* 8534 * Whats left after this can go toward NVME / FCP. 8535 * The minus 4 accounts for ELS, NVME LS, MBOX 8536 * plus one extra. When configured for 8537 * NVMET, FCP io channel WQs are not created. 
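* As an illustration (made-up READ_CONFIG values): with max_wq = 128,
* max_cq = 120 and max_eq = 64, qmin starts at 64 and becomes 60 after
* the subtraction below, so cfg_irq_chann and cfg_hdw_queue would each be
* clamped to 60 if configured higher.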
8538 */ 8539 qmin -= 4; 8540 8541 /* Check to see if there is enough for NVME */ 8542 if ((phba->cfg_irq_chann > qmin) || 8543 (phba->cfg_hdw_queue > qmin)) { 8544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8545 "2005 Reducing Queues: " 8546 "WQ %d CQ %d EQ %d: min %d: " 8547 "IRQ %d HDWQ %d\n", 8548 phba->sli4_hba.max_cfg_param.max_wq, 8549 phba->sli4_hba.max_cfg_param.max_cq, 8550 phba->sli4_hba.max_cfg_param.max_eq, 8551 qmin, phba->cfg_irq_chann, 8552 phba->cfg_hdw_queue); 8553 8554 if (phba->cfg_irq_chann > qmin) 8555 phba->cfg_irq_chann = qmin; 8556 if (phba->cfg_hdw_queue > qmin) 8557 phba->cfg_hdw_queue = qmin; 8558 } 8559 } 8560 8561 if (rc) 8562 goto read_cfg_out; 8563 8564 /* Update link speed if forced link speed is supported */ 8565 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8566 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8567 forced_link_speed = 8568 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8569 if (forced_link_speed) { 8570 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8571 8572 switch (forced_link_speed) { 8573 case LINK_SPEED_1G: 8574 phba->cfg_link_speed = 8575 LPFC_USER_LINK_SPEED_1G; 8576 break; 8577 case LINK_SPEED_2G: 8578 phba->cfg_link_speed = 8579 LPFC_USER_LINK_SPEED_2G; 8580 break; 8581 case LINK_SPEED_4G: 8582 phba->cfg_link_speed = 8583 LPFC_USER_LINK_SPEED_4G; 8584 break; 8585 case LINK_SPEED_8G: 8586 phba->cfg_link_speed = 8587 LPFC_USER_LINK_SPEED_8G; 8588 break; 8589 case LINK_SPEED_10G: 8590 phba->cfg_link_speed = 8591 LPFC_USER_LINK_SPEED_10G; 8592 break; 8593 case LINK_SPEED_16G: 8594 phba->cfg_link_speed = 8595 LPFC_USER_LINK_SPEED_16G; 8596 break; 8597 case LINK_SPEED_32G: 8598 phba->cfg_link_speed = 8599 LPFC_USER_LINK_SPEED_32G; 8600 break; 8601 case LINK_SPEED_64G: 8602 phba->cfg_link_speed = 8603 LPFC_USER_LINK_SPEED_64G; 8604 break; 8605 case 0xffff: 8606 phba->cfg_link_speed = 8607 LPFC_USER_LINK_SPEED_AUTO; 8608 break; 8609 default: 8610 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8611 "0047 Unrecognized link " 8612 "speed : %d\n", 8613 forced_link_speed); 8614 phba->cfg_link_speed = 8615 LPFC_USER_LINK_SPEED_AUTO; 8616 } 8617 } 8618 } 8619 8620 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8621 length = phba->sli4_hba.max_cfg_param.max_xri - 8622 lpfc_sli4_get_els_iocb_cnt(phba); 8623 if (phba->cfg_hba_queue_depth > length) { 8624 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8625 "3361 HBA queue depth changed from %d to %d\n", 8626 phba->cfg_hba_queue_depth, length); 8627 phba->cfg_hba_queue_depth = length; 8628 } 8629 8630 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8631 LPFC_SLI_INTF_IF_TYPE_2) 8632 goto read_cfg_out; 8633 8634 /* get the pf# and vf# for SLI4 if_type 2 port */ 8635 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8636 sizeof(struct lpfc_sli4_cfg_mhdr)); 8637 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8638 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8639 length, LPFC_SLI4_MBX_EMBED); 8640 8641 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8642 shdr = (union lpfc_sli4_cfg_shdr *) 8643 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8644 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8645 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8646 if (rc2 || shdr_status || shdr_add_status) { 8647 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8648 "3026 Mailbox failed , mbxCmd x%x " 8649 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8650 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8651 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8652 goto read_cfg_out; 8653 } 8654 8655 /* search 
for fc_fcoe resrouce descriptor */ 8656 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8657 8658 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8659 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8660 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8661 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8662 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8663 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8664 goto read_cfg_out; 8665 8666 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8667 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8668 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8669 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8670 phba->sli4_hba.iov.pf_number = 8671 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8672 phba->sli4_hba.iov.vf_number = 8673 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8674 break; 8675 } 8676 } 8677 8678 if (i < LPFC_RSRC_DESC_MAX_NUM) 8679 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8680 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8681 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8682 phba->sli4_hba.iov.vf_number); 8683 else 8684 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8685 "3028 GET_FUNCTION_CONFIG: failed to find " 8686 "Resource Descriptor:x%x\n", 8687 LPFC_RSRC_DESC_TYPE_FCFCOE); 8688 8689 read_cfg_out: 8690 mempool_free(pmb, phba->mbox_mem_pool); 8691 return rc; 8692 } 8693 8694 /** 8695 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8696 * @phba: pointer to lpfc hba data structure. 8697 * 8698 * This routine is invoked to setup the port-side endian order when 8699 * the port if_type is 0. This routine has no function for other 8700 * if_types. 8701 * 8702 * Return codes 8703 * 0 - successful 8704 * -ENOMEM - No available memory 8705 * -EIO - The mailbox failed to complete successfully. 8706 **/ 8707 static int 8708 lpfc_setup_endian_order(struct lpfc_hba *phba) 8709 { 8710 LPFC_MBOXQ_t *mboxq; 8711 uint32_t if_type, rc = 0; 8712 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8713 HOST_ENDIAN_HIGH_WORD1}; 8714 8715 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8716 switch (if_type) { 8717 case LPFC_SLI_INTF_IF_TYPE_0: 8718 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8719 GFP_KERNEL); 8720 if (!mboxq) { 8721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8722 "0492 Unable to allocate memory for " 8723 "issuing SLI_CONFIG_SPECIAL mailbox " 8724 "command\n"); 8725 return -ENOMEM; 8726 } 8727 8728 /* 8729 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8730 * two words to contain special data values and no other data. 8731 */ 8732 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8733 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8734 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8735 if (rc != MBX_SUCCESS) { 8736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8737 "0493 SLI_CONFIG_SPECIAL mailbox " 8738 "failed with status x%x\n", 8739 rc); 8740 rc = -EIO; 8741 } 8742 mempool_free(mboxq, phba->mbox_mem_pool); 8743 break; 8744 case LPFC_SLI_INTF_IF_TYPE_6: 8745 case LPFC_SLI_INTF_IF_TYPE_2: 8746 case LPFC_SLI_INTF_IF_TYPE_1: 8747 default: 8748 break; 8749 } 8750 return rc; 8751 } 8752 8753 /** 8754 * lpfc_sli4_queue_verify - Verify and update EQ counts 8755 * @phba: pointer to lpfc hba data structure. 8756 * 8757 * This routine is invoked to check the user settable queue counts for EQs. 
8758 * After this routine is called the counts will be set to valid values that 8759 * adhere to the constraints of the system's interrupt vectors and the port's 8760 * queue resources. 8761 * 8762 * Return codes 8763 * 0 - successful 8764 * -ENOMEM - No available memory 8765 **/ 8766 static int 8767 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8768 { 8769 /* 8770 * Sanity check for configured queue parameters against the run-time 8771 * device parameters 8772 */ 8773 8774 if (phba->nvmet_support) { 8775 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 8776 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 8777 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8778 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8779 } 8780 8781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8782 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8783 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8784 phba->cfg_nvmet_mrq); 8785 8786 /* Get EQ depth from module parameter, fake the default for now */ 8787 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8788 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8789 8790 /* Get CQ depth from module parameter, fake the default for now */ 8791 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8792 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8793 return 0; 8794 } 8795 8796 static int 8797 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 8798 { 8799 struct lpfc_queue *qdesc; 8800 u32 wqesize; 8801 int cpu; 8802 8803 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 8804 /* Create Fast Path IO CQs */ 8805 if (phba->enab_exp_wqcq_pages) 8806 /* Increase the CQ size when WQEs contain an embedded cdb */ 8807 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8808 phba->sli4_hba.cq_esize, 8809 LPFC_CQE_EXP_COUNT, cpu); 8810 8811 else 8812 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8813 phba->sli4_hba.cq_esize, 8814 phba->sli4_hba.cq_ecount, cpu); 8815 if (!qdesc) { 8816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8817 "0499 Failed allocate fast-path IO CQ (%d)\n", idx); 8818 return 1; 8819 } 8820 qdesc->qe_valid = 1; 8821 qdesc->hdwq = idx; 8822 qdesc->chann = cpu; 8823 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 8824 8825 /* Create Fast Path IO WQs */ 8826 if (phba->enab_exp_wqcq_pages) { 8827 /* Increase the WQ size when WQEs contain an embedded cdb */ 8828 wqesize = (phba->fcp_embed_io) ? 8829 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8830 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8831 wqesize, 8832 LPFC_WQE_EXP_COUNT, cpu); 8833 } else 8834 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8835 phba->sli4_hba.wq_esize, 8836 phba->sli4_hba.wq_ecount, cpu); 8837 8838 if (!qdesc) { 8839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8840 "0503 Failed allocate fast-path IO WQ (%d)\n", 8841 idx); 8842 return 1; 8843 } 8844 qdesc->hdwq = idx; 8845 qdesc->chann = cpu; 8846 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 8847 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8848 return 0; 8849 } 8850 8851 /** 8852 * lpfc_sli4_queue_create - Create all the SLI4 queues 8853 * @phba: pointer to lpfc hba data structure. 8854 * 8855 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8856 * operation. For each SLI4 queue type, the parameters such as queue entry 8857 * count (queue depth) shall be taken from the module parameter. For now, 8858 * we just use some constant number as place holder. 8859 * 8860 * Return codes 8861 * 0 - successful 8862 * -ENOMEM - No availble memory 8863 * -EIO - The mailbox failed to complete successfully. 
8864 **/ 8865 int 8866 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8867 { 8868 struct lpfc_queue *qdesc; 8869 int idx, cpu, eqcpu; 8870 struct lpfc_sli4_hdw_queue *qp; 8871 struct lpfc_vector_map_info *cpup; 8872 struct lpfc_vector_map_info *eqcpup; 8873 struct lpfc_eq_intr_info *eqi; 8874 8875 /* 8876 * Create HBA Record arrays. 8877 * Both NVME and FCP will share that same vectors / EQs 8878 */ 8879 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8880 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8881 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8882 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8883 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8884 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8885 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8886 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8887 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8888 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8889 8890 if (!phba->sli4_hba.hdwq) { 8891 phba->sli4_hba.hdwq = kcalloc( 8892 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8893 GFP_KERNEL); 8894 if (!phba->sli4_hba.hdwq) { 8895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8896 "6427 Failed allocate memory for " 8897 "fast-path Hardware Queue array\n"); 8898 goto out_error; 8899 } 8900 /* Prepare hardware queues to take IO buffers */ 8901 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8902 qp = &phba->sli4_hba.hdwq[idx]; 8903 spin_lock_init(&qp->io_buf_list_get_lock); 8904 spin_lock_init(&qp->io_buf_list_put_lock); 8905 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 8906 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 8907 qp->get_io_bufs = 0; 8908 qp->put_io_bufs = 0; 8909 qp->total_io_bufs = 0; 8910 spin_lock_init(&qp->abts_io_buf_list_lock); 8911 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 8912 qp->abts_scsi_io_bufs = 0; 8913 qp->abts_nvme_io_bufs = 0; 8914 INIT_LIST_HEAD(&qp->sgl_list); 8915 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 8916 spin_lock_init(&qp->hdwq_lock); 8917 } 8918 } 8919 8920 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8921 if (phba->nvmet_support) { 8922 phba->sli4_hba.nvmet_cqset = kcalloc( 8923 phba->cfg_nvmet_mrq, 8924 sizeof(struct lpfc_queue *), 8925 GFP_KERNEL); 8926 if (!phba->sli4_hba.nvmet_cqset) { 8927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8928 "3121 Fail allocate memory for " 8929 "fast-path CQ set array\n"); 8930 goto out_error; 8931 } 8932 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8933 phba->cfg_nvmet_mrq, 8934 sizeof(struct lpfc_queue *), 8935 GFP_KERNEL); 8936 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8938 "3122 Fail allocate memory for " 8939 "fast-path RQ set hdr array\n"); 8940 goto out_error; 8941 } 8942 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8943 phba->cfg_nvmet_mrq, 8944 sizeof(struct lpfc_queue *), 8945 GFP_KERNEL); 8946 if (!phba->sli4_hba.nvmet_mrq_data) { 8947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8948 "3124 Fail allocate memory for " 8949 "fast-path RQ set data array\n"); 8950 goto out_error; 8951 } 8952 } 8953 } 8954 8955 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8956 8957 /* Create HBA Event Queues (EQs) */ 8958 for_each_present_cpu(cpu) { 8959 /* We only want to create 1 EQ per vector, even though 8960 * multiple CPUs might be using that vector. so only 8961 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
8962 */ 8963 cpup = &phba->sli4_hba.cpu_map[cpu]; 8964 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 8965 continue; 8966 8967 /* Get a ptr to the Hardware Queue associated with this CPU */ 8968 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8969 8970 /* Allocate an EQ */ 8971 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8972 phba->sli4_hba.eq_esize, 8973 phba->sli4_hba.eq_ecount, cpu); 8974 if (!qdesc) { 8975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8976 "0497 Failed allocate EQ (%d)\n", 8977 cpup->hdwq); 8978 goto out_error; 8979 } 8980 qdesc->qe_valid = 1; 8981 qdesc->hdwq = cpup->hdwq; 8982 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 8983 qdesc->last_cpu = qdesc->chann; 8984 8985 /* Save the allocated EQ in the Hardware Queue */ 8986 qp->hba_eq = qdesc; 8987 8988 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 8989 list_add(&qdesc->cpu_list, &eqi->list); 8990 } 8991 8992 /* Now we need to populate the other Hardware Queues, that share 8993 * an IRQ vector, with the associated EQ ptr. 8994 */ 8995 for_each_present_cpu(cpu) { 8996 cpup = &phba->sli4_hba.cpu_map[cpu]; 8997 8998 /* Check for EQ already allocated in previous loop */ 8999 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 9000 continue; 9001 9002 /* Check for multiple CPUs per hdwq */ 9003 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 9004 if (qp->hba_eq) 9005 continue; 9006 9007 /* We need to share an EQ for this hdwq */ 9008 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 9009 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 9010 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 9011 } 9012 9013 /* Allocate IO Path SLI4 CQ/WQs */ 9014 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9015 if (lpfc_alloc_io_wq_cq(phba, idx)) 9016 goto out_error; 9017 } 9018 9019 if (phba->nvmet_support) { 9020 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9021 cpu = lpfc_find_cpu_handle(phba, idx, 9022 LPFC_FIND_BY_HDWQ); 9023 qdesc = lpfc_sli4_queue_alloc(phba, 9024 LPFC_DEFAULT_PAGE_SIZE, 9025 phba->sli4_hba.cq_esize, 9026 phba->sli4_hba.cq_ecount, 9027 cpu); 9028 if (!qdesc) { 9029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9030 "3142 Failed allocate NVME " 9031 "CQ Set (%d)\n", idx); 9032 goto out_error; 9033 } 9034 qdesc->qe_valid = 1; 9035 qdesc->hdwq = idx; 9036 qdesc->chann = cpu; 9037 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 9038 } 9039 } 9040 9041 /* 9042 * Create Slow Path Completion Queues (CQs) 9043 */ 9044 9045 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 9046 /* Create slow-path Mailbox Command Complete Queue */ 9047 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9048 phba->sli4_hba.cq_esize, 9049 phba->sli4_hba.cq_ecount, cpu); 9050 if (!qdesc) { 9051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9052 "0500 Failed allocate slow-path mailbox CQ\n"); 9053 goto out_error; 9054 } 9055 qdesc->qe_valid = 1; 9056 phba->sli4_hba.mbx_cq = qdesc; 9057 9058 /* Create slow-path ELS Complete Queue */ 9059 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9060 phba->sli4_hba.cq_esize, 9061 phba->sli4_hba.cq_ecount, cpu); 9062 if (!qdesc) { 9063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9064 "0501 Failed allocate slow-path ELS CQ\n"); 9065 goto out_error; 9066 } 9067 qdesc->qe_valid = 1; 9068 qdesc->chann = cpu; 9069 phba->sli4_hba.els_cq = qdesc; 9070 9071 9072 /* 9073 * Create Slow Path Work Queues (WQs) 9074 */ 9075 9076 /* Create Mailbox Command Queue */ 9077 9078 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9079 phba->sli4_hba.mq_esize, 9080 phba->sli4_hba.mq_ecount, 
cpu); 9081 if (!qdesc) { 9082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9083 "0505 Failed allocate slow-path MQ\n"); 9084 goto out_error; 9085 } 9086 qdesc->chann = cpu; 9087 phba->sli4_hba.mbx_wq = qdesc; 9088 9089 /* 9090 * Create ELS Work Queues 9091 */ 9092 9093 /* Create slow-path ELS Work Queue */ 9094 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9095 phba->sli4_hba.wq_esize, 9096 phba->sli4_hba.wq_ecount, cpu); 9097 if (!qdesc) { 9098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9099 "0504 Failed allocate slow-path ELS WQ\n"); 9100 goto out_error; 9101 } 9102 qdesc->chann = cpu; 9103 phba->sli4_hba.els_wq = qdesc; 9104 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9105 9106 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9107 /* Create NVME LS Complete Queue */ 9108 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9109 phba->sli4_hba.cq_esize, 9110 phba->sli4_hba.cq_ecount, cpu); 9111 if (!qdesc) { 9112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9113 "6079 Failed allocate NVME LS CQ\n"); 9114 goto out_error; 9115 } 9116 qdesc->chann = cpu; 9117 qdesc->qe_valid = 1; 9118 phba->sli4_hba.nvmels_cq = qdesc; 9119 9120 /* Create NVME LS Work Queue */ 9121 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9122 phba->sli4_hba.wq_esize, 9123 phba->sli4_hba.wq_ecount, cpu); 9124 if (!qdesc) { 9125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9126 "6080 Failed allocate NVME LS WQ\n"); 9127 goto out_error; 9128 } 9129 qdesc->chann = cpu; 9130 phba->sli4_hba.nvmels_wq = qdesc; 9131 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9132 } 9133 9134 /* 9135 * Create Receive Queue (RQ) 9136 */ 9137 9138 /* Create Receive Queue for header */ 9139 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9140 phba->sli4_hba.rq_esize, 9141 phba->sli4_hba.rq_ecount, cpu); 9142 if (!qdesc) { 9143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9144 "0506 Failed allocate receive HRQ\n"); 9145 goto out_error; 9146 } 9147 phba->sli4_hba.hdr_rq = qdesc; 9148 9149 /* Create Receive Queue for data */ 9150 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9151 phba->sli4_hba.rq_esize, 9152 phba->sli4_hba.rq_ecount, cpu); 9153 if (!qdesc) { 9154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9155 "0507 Failed allocate receive DRQ\n"); 9156 goto out_error; 9157 } 9158 phba->sli4_hba.dat_rq = qdesc; 9159 9160 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9161 phba->nvmet_support) { 9162 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9163 cpu = lpfc_find_cpu_handle(phba, idx, 9164 LPFC_FIND_BY_HDWQ); 9165 /* Create NVMET Receive Queue for header */ 9166 qdesc = lpfc_sli4_queue_alloc(phba, 9167 LPFC_DEFAULT_PAGE_SIZE, 9168 phba->sli4_hba.rq_esize, 9169 LPFC_NVMET_RQE_DEF_COUNT, 9170 cpu); 9171 if (!qdesc) { 9172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9173 "3146 Failed allocate " 9174 "receive HRQ\n"); 9175 goto out_error; 9176 } 9177 qdesc->hdwq = idx; 9178 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9179 9180 /* Only needed for header of RQ pair */ 9181 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 9182 GFP_KERNEL, 9183 cpu_to_node(cpu)); 9184 if (qdesc->rqbp == NULL) { 9185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9186 "6131 Failed allocate " 9187 "Header RQBP\n"); 9188 goto out_error; 9189 } 9190 9191 /* Put list in known state in case driver load fails. 
*/ 9192 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9193 9194 /* Create NVMET Receive Queue for data */ 9195 qdesc = lpfc_sli4_queue_alloc(phba, 9196 LPFC_DEFAULT_PAGE_SIZE, 9197 phba->sli4_hba.rq_esize, 9198 LPFC_NVMET_RQE_DEF_COUNT, 9199 cpu); 9200 if (!qdesc) { 9201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9202 "3156 Failed allocate " 9203 "receive DRQ\n"); 9204 goto out_error; 9205 } 9206 qdesc->hdwq = idx; 9207 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9208 } 9209 } 9210 9211 /* Clear NVME stats */ 9212 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9213 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9214 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9215 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9216 } 9217 } 9218 9219 /* Clear SCSI stats */ 9220 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9221 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9222 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9223 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9224 } 9225 } 9226 9227 return 0; 9228 9229 out_error: 9230 lpfc_sli4_queue_destroy(phba); 9231 return -ENOMEM; 9232 } 9233 9234 static inline void 9235 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9236 { 9237 if (*qp != NULL) { 9238 lpfc_sli4_queue_free(*qp); 9239 *qp = NULL; 9240 } 9241 } 9242 9243 static inline void 9244 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9245 { 9246 int idx; 9247 9248 if (*qs == NULL) 9249 return; 9250 9251 for (idx = 0; idx < max; idx++) 9252 __lpfc_sli4_release_queue(&(*qs)[idx]); 9253 9254 kfree(*qs); 9255 *qs = NULL; 9256 } 9257 9258 static inline void 9259 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9260 { 9261 struct lpfc_sli4_hdw_queue *hdwq; 9262 struct lpfc_queue *eq; 9263 uint32_t idx; 9264 9265 hdwq = phba->sli4_hba.hdwq; 9266 9267 /* Loop thru all Hardware Queues */ 9268 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9269 /* Free the CQ/WQ corresponding to the Hardware Queue */ 9270 lpfc_sli4_queue_free(hdwq[idx].io_cq); 9271 lpfc_sli4_queue_free(hdwq[idx].io_wq); 9272 hdwq[idx].hba_eq = NULL; 9273 hdwq[idx].io_cq = NULL; 9274 hdwq[idx].io_wq = NULL; 9275 if (phba->cfg_xpsgl && !phba->nvmet_support) 9276 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 9277 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 9278 } 9279 /* Loop thru all IRQ vectors */ 9280 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 9281 /* Free the EQ corresponding to the IRQ vector */ 9282 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 9283 lpfc_sli4_queue_free(eq); 9284 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 9285 } 9286 } 9287 9288 /** 9289 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9290 * @phba: pointer to lpfc hba data structure. 9291 * 9292 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9293 * operation. 9294 * 9295 * Return codes 9296 * 0 - successful 9297 * -ENOMEM - No available memory 9298 * -EIO - The mailbox failed to complete successfully. 9299 **/ 9300 void 9301 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9302 { 9303 /* 9304 * Set FREE_INIT before beginning to free the queues. 9305 * Wait until the users of queues to acknowledge to 9306 * release queues by clearing FREE_WAIT. 
9307 */ 9308 spin_lock_irq(&phba->hbalock); 9309 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 9310 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 9311 spin_unlock_irq(&phba->hbalock); 9312 msleep(20); 9313 spin_lock_irq(&phba->hbalock); 9314 } 9315 spin_unlock_irq(&phba->hbalock); 9316 9317 lpfc_sli4_cleanup_poll_list(phba); 9318 9319 /* Release HBA eqs */ 9320 if (phba->sli4_hba.hdwq) 9321 lpfc_sli4_release_hdwq(phba); 9322 9323 if (phba->nvmet_support) { 9324 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 9325 phba->cfg_nvmet_mrq); 9326 9327 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 9328 phba->cfg_nvmet_mrq); 9329 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 9330 phba->cfg_nvmet_mrq); 9331 } 9332 9333 /* Release mailbox command work queue */ 9334 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 9335 9336 /* Release ELS work queue */ 9337 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 9338 9339 /* Release ELS work queue */ 9340 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 9341 9342 /* Release unsolicited receive queue */ 9343 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 9344 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 9345 9346 /* Release ELS complete queue */ 9347 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 9348 9349 /* Release NVME LS complete queue */ 9350 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 9351 9352 /* Release mailbox command complete queue */ 9353 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 9354 9355 /* Everything on this list has been freed */ 9356 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9357 9358 /* Done with freeing the queues */ 9359 spin_lock_irq(&phba->hbalock); 9360 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 9361 spin_unlock_irq(&phba->hbalock); 9362 } 9363 9364 int 9365 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 9366 { 9367 struct lpfc_rqb *rqbp; 9368 struct lpfc_dmabuf *h_buf; 9369 struct rqb_dmabuf *rqb_buffer; 9370 9371 rqbp = rq->rqbp; 9372 while (!list_empty(&rqbp->rqb_buffer_list)) { 9373 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 9374 struct lpfc_dmabuf, list); 9375 9376 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 9377 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 9378 rqbp->buffer_count--; 9379 } 9380 return 1; 9381 } 9382 9383 static int 9384 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 9385 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 9386 int qidx, uint32_t qtype) 9387 { 9388 struct lpfc_sli_ring *pring; 9389 int rc; 9390 9391 if (!eq || !cq || !wq) { 9392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9393 "6085 Fast-path %s (%d) not allocated\n", 9394 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9395 return -ENOMEM; 9396 } 9397 9398 /* create the Cq first */ 9399 rc = lpfc_cq_create(phba, cq, eq, 9400 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 9401 if (rc) { 9402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9403 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9404 qidx, (uint32_t)rc); 9405 return rc; 9406 } 9407 9408 if (qtype != LPFC_MBOX) { 9409 /* Setup cq_map for fast lookup */ 9410 if (cq_map) 9411 *cq_map = cq->queue_id; 9412 9413 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9414 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9415 qidx, cq->queue_id, qidx, eq->queue_id); 9416 9417 /* create the wq */ 9418 rc = lpfc_wq_create(phba, wq, cq, qtype); 9419 if (rc) { 9420 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9421 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9422 qidx, (uint32_t)rc); 9423 /* no need to tear down cq - caller will do so */ 9424 return rc; 9425 } 9426 9427 /* Bind this CQ/WQ to the NVME ring */ 9428 pring = wq->pring; 9429 pring->sli.sli4.wqp = (void *)wq; 9430 cq->pring = pring; 9431 9432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9433 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9434 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9435 } else { 9436 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9437 if (rc) { 9438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9439 "0539 Failed setup of slow-path MQ: " 9440 "rc = 0x%x\n", rc); 9441 /* no need to tear down cq - caller will do so */ 9442 return rc; 9443 } 9444 9445 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9446 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9447 phba->sli4_hba.mbx_wq->queue_id, 9448 phba->sli4_hba.mbx_cq->queue_id); 9449 } 9450 9451 return 0; 9452 } 9453 9454 /** 9455 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9456 * @phba: pointer to lpfc hba data structure. 9457 * 9458 * This routine will populate the cq_lookup table by all 9459 * available CQ queue_id's. 9460 **/ 9461 static void 9462 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9463 { 9464 struct lpfc_queue *eq, *childq; 9465 int qidx; 9466 9467 memset(phba->sli4_hba.cq_lookup, 0, 9468 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9469 /* Loop thru all IRQ vectors */ 9470 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9471 /* Get the EQ corresponding to the IRQ vector */ 9472 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9473 if (!eq) 9474 continue; 9475 /* Loop through all CQs associated with that EQ */ 9476 list_for_each_entry(childq, &eq->child_list, list) { 9477 if (childq->queue_id > phba->sli4_hba.cq_max) 9478 continue; 9479 if (childq->subtype == LPFC_IO) 9480 phba->sli4_hba.cq_lookup[childq->queue_id] = 9481 childq; 9482 } 9483 } 9484 } 9485 9486 /** 9487 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9488 * @phba: pointer to lpfc hba data structure. 9489 * 9490 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9491 * operation. 9492 * 9493 * Return codes 9494 * 0 - successful 9495 * -ENOMEM - No available memory 9496 * -EIO - The mailbox failed to complete successfully. 
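 *
 * Informally, the parent/child relationships programmed on the port by
 * this routine are:
 *
 *	per hardware queue:   hba_eq -> io_cq -> io_wq
 *	slow path (first EQ): mbx_cq -> mbx_wq (MQ),
 *	                      els_cq -> els_wq and the hdr_rq/dat_rq pair,
 *	                      nvmels_cq -> nvmels_wq (when NVME is enabled)
 *	NVMET, when supported, adds the nvmet_cqset and MRQ hdr/data pairs.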
9497 **/ 9498 int 9499 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9500 { 9501 uint32_t shdr_status, shdr_add_status; 9502 union lpfc_sli4_cfg_shdr *shdr; 9503 struct lpfc_vector_map_info *cpup; 9504 struct lpfc_sli4_hdw_queue *qp; 9505 LPFC_MBOXQ_t *mboxq; 9506 int qidx, cpu; 9507 uint32_t length, usdelay; 9508 int rc = -ENOMEM; 9509 9510 /* Check for dual-ULP support */ 9511 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9512 if (!mboxq) { 9513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9514 "3249 Unable to allocate memory for " 9515 "QUERY_FW_CFG mailbox command\n"); 9516 return -ENOMEM; 9517 } 9518 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9519 sizeof(struct lpfc_sli4_cfg_mhdr)); 9520 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9521 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9522 length, LPFC_SLI4_MBX_EMBED); 9523 9524 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9525 9526 shdr = (union lpfc_sli4_cfg_shdr *) 9527 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9528 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9529 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9530 if (shdr_status || shdr_add_status || rc) { 9531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9532 "3250 QUERY_FW_CFG mailbox failed with status " 9533 "x%x add_status x%x, mbx status x%x\n", 9534 shdr_status, shdr_add_status, rc); 9535 if (rc != MBX_TIMEOUT) 9536 mempool_free(mboxq, phba->mbox_mem_pool); 9537 rc = -ENXIO; 9538 goto out_error; 9539 } 9540 9541 phba->sli4_hba.fw_func_mode = 9542 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9543 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9544 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9545 phba->sli4_hba.physical_port = 9546 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9547 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9548 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9549 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9550 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9551 9552 if (rc != MBX_TIMEOUT) 9553 mempool_free(mboxq, phba->mbox_mem_pool); 9554 9555 /* 9556 * Set up HBA Event Queues (EQs) 9557 */ 9558 qp = phba->sli4_hba.hdwq; 9559 9560 /* Set up HBA event queue */ 9561 if (!qp) { 9562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9563 "3147 Fast-path EQs not allocated\n"); 9564 rc = -ENOMEM; 9565 goto out_error; 9566 } 9567 9568 /* Loop thru all IRQ vectors */ 9569 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9570 /* Create HBA Event Queues (EQs) in order */ 9571 for_each_present_cpu(cpu) { 9572 cpup = &phba->sli4_hba.cpu_map[cpu]; 9573 9574 /* Look for the CPU thats using that vector with 9575 * LPFC_CPU_FIRST_IRQ set. 
9576 */ 9577 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9578 continue; 9579 if (qidx != cpup->eq) 9580 continue; 9581 9582 /* Create an EQ for that vector */ 9583 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 9584 phba->cfg_fcp_imax); 9585 if (rc) { 9586 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9587 "0523 Failed setup of fast-path" 9588 " EQ (%d), rc = 0x%x\n", 9589 cpup->eq, (uint32_t)rc); 9590 goto out_destroy; 9591 } 9592 9593 /* Save the EQ for that vector in the hba_eq_hdl */ 9594 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 9595 qp[cpup->hdwq].hba_eq; 9596 9597 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9598 "2584 HBA EQ setup: queue[%d]-id=%d\n", 9599 cpup->eq, 9600 qp[cpup->hdwq].hba_eq->queue_id); 9601 } 9602 } 9603 9604 /* Loop thru all Hardware Queues */ 9605 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9606 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 9607 cpup = &phba->sli4_hba.cpu_map[cpu]; 9608 9609 /* Create the CQ/WQ corresponding to the Hardware Queue */ 9610 rc = lpfc_create_wq_cq(phba, 9611 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 9612 qp[qidx].io_cq, 9613 qp[qidx].io_wq, 9614 &phba->sli4_hba.hdwq[qidx].io_cq_map, 9615 qidx, 9616 LPFC_IO); 9617 if (rc) { 9618 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9619 "0535 Failed to setup fastpath " 9620 "IO WQ/CQ (%d), rc = 0x%x\n", 9621 qidx, (uint32_t)rc); 9622 goto out_destroy; 9623 } 9624 } 9625 9626 /* 9627 * Set up Slow Path Complete Queues (CQs) 9628 */ 9629 9630 /* Set up slow-path MBOX CQ/MQ */ 9631 9632 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9634 "0528 %s not allocated\n", 9635 phba->sli4_hba.mbx_cq ? 9636 "Mailbox WQ" : "Mailbox CQ"); 9637 rc = -ENOMEM; 9638 goto out_destroy; 9639 } 9640 9641 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9642 phba->sli4_hba.mbx_cq, 9643 phba->sli4_hba.mbx_wq, 9644 NULL, 0, LPFC_MBOX); 9645 if (rc) { 9646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9647 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9648 (uint32_t)rc); 9649 goto out_destroy; 9650 } 9651 if (phba->nvmet_support) { 9652 if (!phba->sli4_hba.nvmet_cqset) { 9653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9654 "3165 Fast-path NVME CQ Set " 9655 "array not allocated\n"); 9656 rc = -ENOMEM; 9657 goto out_destroy; 9658 } 9659 if (phba->cfg_nvmet_mrq > 1) { 9660 rc = lpfc_cq_create_set(phba, 9661 phba->sli4_hba.nvmet_cqset, 9662 qp, 9663 LPFC_WCQ, LPFC_NVMET); 9664 if (rc) { 9665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9666 "3164 Failed setup of NVME CQ " 9667 "Set, rc = 0x%x\n", 9668 (uint32_t)rc); 9669 goto out_destroy; 9670 } 9671 } else { 9672 /* Set up NVMET Receive Complete Queue */ 9673 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9674 qp[0].hba_eq, 9675 LPFC_WCQ, LPFC_NVMET); 9676 if (rc) { 9677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9678 "6089 Failed setup NVMET CQ: " 9679 "rc = 0x%x\n", (uint32_t)rc); 9680 goto out_destroy; 9681 } 9682 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9683 9684 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9685 "6090 NVMET CQ setup: cq-id=%d, " 9686 "parent eq-id=%d\n", 9687 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9688 qp[0].hba_eq->queue_id); 9689 } 9690 } 9691 9692 /* Set up slow-path ELS WQ/CQ */ 9693 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9695 "0530 ELS %s not allocated\n", 9696 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 9697 rc = -ENOMEM; 9698 goto out_destroy; 9699 } 9700 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9701 phba->sli4_hba.els_cq, 9702 phba->sli4_hba.els_wq, 9703 NULL, 0, LPFC_ELS); 9704 if (rc) { 9705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9706 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9707 (uint32_t)rc); 9708 goto out_destroy; 9709 } 9710 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9711 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9712 phba->sli4_hba.els_wq->queue_id, 9713 phba->sli4_hba.els_cq->queue_id); 9714 9715 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9716 /* Set up NVME LS Complete Queue */ 9717 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9719 "6091 LS %s not allocated\n", 9720 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 9721 rc = -ENOMEM; 9722 goto out_destroy; 9723 } 9724 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9725 phba->sli4_hba.nvmels_cq, 9726 phba->sli4_hba.nvmels_wq, 9727 NULL, 0, LPFC_NVME_LS); 9728 if (rc) { 9729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9730 "0526 Failed setup of NVVME LS WQ/CQ: " 9731 "rc = 0x%x\n", (uint32_t)rc); 9732 goto out_destroy; 9733 } 9734 9735 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9736 "6096 ELS WQ setup: wq-id=%d, " 9737 "parent cq-id=%d\n", 9738 phba->sli4_hba.nvmels_wq->queue_id, 9739 phba->sli4_hba.nvmels_cq->queue_id); 9740 } 9741 9742 /* 9743 * Create NVMET Receive Queue (RQ) 9744 */ 9745 if (phba->nvmet_support) { 9746 if ((!phba->sli4_hba.nvmet_cqset) || 9747 (!phba->sli4_hba.nvmet_mrq_hdr) || 9748 (!phba->sli4_hba.nvmet_mrq_data)) { 9749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9750 "6130 MRQ CQ Queues not " 9751 "allocated\n"); 9752 rc = -ENOMEM; 9753 goto out_destroy; 9754 } 9755 if (phba->cfg_nvmet_mrq > 1) { 9756 rc = lpfc_mrq_create(phba, 9757 phba->sli4_hba.nvmet_mrq_hdr, 9758 phba->sli4_hba.nvmet_mrq_data, 9759 phba->sli4_hba.nvmet_cqset, 9760 LPFC_NVMET); 9761 if (rc) { 9762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9763 "6098 Failed setup of NVMET " 9764 "MRQ: rc = 0x%x\n", 9765 (uint32_t)rc); 9766 goto out_destroy; 9767 } 9768 9769 } else { 9770 rc = lpfc_rq_create(phba, 9771 phba->sli4_hba.nvmet_mrq_hdr[0], 9772 phba->sli4_hba.nvmet_mrq_data[0], 9773 phba->sli4_hba.nvmet_cqset[0], 9774 LPFC_NVMET); 9775 if (rc) { 9776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9777 "6057 Failed setup of NVMET " 9778 "Receive Queue: rc = 0x%x\n", 9779 (uint32_t)rc); 9780 goto out_destroy; 9781 } 9782 9783 lpfc_printf_log( 9784 phba, KERN_INFO, LOG_INIT, 9785 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9786 "dat-rq-id=%d parent cq-id=%d\n", 9787 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9788 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9789 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9790 9791 } 9792 } 9793 9794 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9796 "0540 Receive Queue not allocated\n"); 9797 rc = -ENOMEM; 9798 goto out_destroy; 9799 } 9800 9801 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9802 phba->sli4_hba.els_cq, LPFC_USOL); 9803 if (rc) { 9804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9805 "0541 Failed setup of Receive Queue: " 9806 "rc = 0x%x\n", (uint32_t)rc); 9807 goto out_destroy; 9808 } 9809 9810 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9811 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9812 "parent cq-id=%d\n", 9813 phba->sli4_hba.hdr_rq->queue_id, 9814 phba->sli4_hba.dat_rq->queue_id, 9815 phba->sli4_hba.els_cq->queue_id); 9816 9817 if 
(phba->cfg_fcp_imax) 9818 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 9819 else 9820 usdelay = 0; 9821 9822 for (qidx = 0; qidx < phba->cfg_irq_chann; 9823 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9824 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9825 usdelay); 9826 9827 if (phba->sli4_hba.cq_max) { 9828 kfree(phba->sli4_hba.cq_lookup); 9829 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9830 sizeof(struct lpfc_queue *), GFP_KERNEL); 9831 if (!phba->sli4_hba.cq_lookup) { 9832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9833 "0549 Failed setup of CQ Lookup table: " 9834 "size 0x%x\n", phba->sli4_hba.cq_max); 9835 rc = -ENOMEM; 9836 goto out_destroy; 9837 } 9838 lpfc_setup_cq_lookup(phba); 9839 } 9840 return 0; 9841 9842 out_destroy: 9843 lpfc_sli4_queue_unset(phba); 9844 out_error: 9845 return rc; 9846 } 9847 9848 /** 9849 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9850 * @phba: pointer to lpfc hba data structure. 9851 * 9852 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 9853 * operation. 9854 * 9855 * Return codes 9856 * 0 - successful 9857 * -ENOMEM - No available memory 9858 * -EIO - The mailbox failed to complete successfully. 9859 **/ 9860 void 9861 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9862 { 9863 struct lpfc_sli4_hdw_queue *qp; 9864 struct lpfc_queue *eq; 9865 int qidx; 9866 9867 /* Unset mailbox command work queue */ 9868 if (phba->sli4_hba.mbx_wq) 9869 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9870 9871 /* Unset NVME LS work queue */ 9872 if (phba->sli4_hba.nvmels_wq) 9873 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9874 9875 /* Unset ELS work queue */ 9876 if (phba->sli4_hba.els_wq) 9877 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9878 9879 /* Unset unsolicited receive queue */ 9880 if (phba->sli4_hba.hdr_rq) 9881 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9882 phba->sli4_hba.dat_rq); 9883 9884 /* Unset mailbox command complete queue */ 9885 if (phba->sli4_hba.mbx_cq) 9886 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9887 9888 /* Unset ELS complete queue */ 9889 if (phba->sli4_hba.els_cq) 9890 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9891 9892 /* Unset NVME LS complete queue */ 9893 if (phba->sli4_hba.nvmels_cq) 9894 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9895 9896 if (phba->nvmet_support) { 9897 /* Unset NVMET MRQ queue */ 9898 if (phba->sli4_hba.nvmet_mrq_hdr) { 9899 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9900 lpfc_rq_destroy( 9901 phba, 9902 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9903 phba->sli4_hba.nvmet_mrq_data[qidx]); 9904 } 9905 9906 /* Unset NVMET CQ Set complete queue */ 9907 if (phba->sli4_hba.nvmet_cqset) { 9908 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9909 lpfc_cq_destroy( 9910 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9911 } 9912 } 9913 9914 /* Unset fast-path SLI4 queues */ 9915 if (phba->sli4_hba.hdwq) { 9916 /* Loop thru all Hardware Queues */ 9917 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9918 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 9919 qp = &phba->sli4_hba.hdwq[qidx]; 9920 lpfc_wq_destroy(phba, qp->io_wq); 9921 lpfc_cq_destroy(phba, qp->io_cq); 9922 } 9923 /* Loop thru all IRQ vectors */ 9924 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9925 /* Destroy the EQ corresponding to the IRQ vector */ 9926 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9927 lpfc_eq_destroy(phba, eq); 9928 } 9929 } 9930 9931 kfree(phba->sli4_hba.cq_lookup); 9932 phba->sli4_hba.cq_lookup = NULL; 9933 phba->sli4_hba.cq_max = 0; 9934 } 
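/*
 * The teardown order used by lpfc_sli4_queue_unset() above is the reverse
 * of the creation order in lpfc_sli4_queue_setup(): work, message and
 * receive queues are destroyed before their parent completion queues, and
 * the event queues are destroyed last.  A minimal sketch of the fast-path
 * portion, assuming qp points at one struct lpfc_sli4_hdw_queue:
 *
 *	lpfc_wq_destroy(phba, qp->io_wq);     child WQ first
 *	lpfc_cq_destroy(phba, qp->io_cq);     then its parent CQ
 *	lpfc_eq_destroy(phba, eq);            EQs last, once per IRQ vector
 */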
9935 9936 /** 9937 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9938 * @phba: pointer to lpfc hba data structure. 9939 * 9940 * This routine is invoked to allocate and set up a pool of completion queue 9941 * events. The body of the completion queue event is a completion queue entry 9942 * CQE. For now, this pool is used for the interrupt service routine to queue 9943 * the following HBA completion queue events for the worker thread to process: 9944 * - Mailbox asynchronous events 9945 * - Receive queue completion unsolicited events 9946 * Later, this can be used for all the slow-path events. 9947 * 9948 * Return codes 9949 * 0 - successful 9950 * -ENOMEM - No available memory 9951 **/ 9952 static int 9953 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9954 { 9955 struct lpfc_cq_event *cq_event; 9956 int i; 9957 9958 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9959 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9960 if (!cq_event) 9961 goto out_pool_create_fail; 9962 list_add_tail(&cq_event->list, 9963 &phba->sli4_hba.sp_cqe_event_pool); 9964 } 9965 return 0; 9966 9967 out_pool_create_fail: 9968 lpfc_sli4_cq_event_pool_destroy(phba); 9969 return -ENOMEM; 9970 } 9971 9972 /** 9973 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9974 * @phba: pointer to lpfc hba data structure. 9975 * 9976 * This routine is invoked to free the pool of completion queue events at 9977 * driver unload time. Note that, it is the responsibility of the driver 9978 * cleanup routine to free all the outstanding completion-queue events 9979 * allocated from this pool back into the pool before invoking this routine 9980 * to destroy the pool. 9981 **/ 9982 static void 9983 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9984 { 9985 struct lpfc_cq_event *cq_event, *next_cq_event; 9986 9987 list_for_each_entry_safe(cq_event, next_cq_event, 9988 &phba->sli4_hba.sp_cqe_event_pool, list) { 9989 list_del(&cq_event->list); 9990 kfree(cq_event); 9991 } 9992 } 9993 9994 /** 9995 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9996 * @phba: pointer to lpfc hba data structure. 9997 * 9998 * This routine is the lock free version of the API invoked to allocate a 9999 * completion-queue event from the free pool. 10000 * 10001 * Return: Pointer to the newly allocated completion-queue event if successful 10002 * NULL otherwise. 10003 **/ 10004 struct lpfc_cq_event * 10005 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 10006 { 10007 struct lpfc_cq_event *cq_event = NULL; 10008 10009 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 10010 struct lpfc_cq_event, list); 10011 return cq_event; 10012 } 10013 10014 /** 10015 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 10016 * @phba: pointer to lpfc hba data structure. 10017 * 10018 * This routine is the lock version of the API invoked to allocate a 10019 * completion-queue event from the free pool. 10020 * 10021 * Return: Pointer to the newly allocated completion-queue event if successful 10022 * NULL otherwise. 
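 *
 * A minimal usage sketch (illustrative only, not lifted from the driver):
 * the interrupt path allocates an event, copies the completion entry into
 * it and queues it for the worker thread, which later returns it:
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return;                       pool exhausted
 *	... copy the CQE and queue cq_event on a slow-path work list ...
 *	lpfc_sli4_cq_event_release(phba, cq_event);     consumer side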
10023 **/ 10024 struct lpfc_cq_event * 10025 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 10026 { 10027 struct lpfc_cq_event *cq_event; 10028 unsigned long iflags; 10029 10030 spin_lock_irqsave(&phba->hbalock, iflags); 10031 cq_event = __lpfc_sli4_cq_event_alloc(phba); 10032 spin_unlock_irqrestore(&phba->hbalock, iflags); 10033 return cq_event; 10034 } 10035 10036 /** 10037 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 10038 * @phba: pointer to lpfc hba data structure. 10039 * @cq_event: pointer to the completion queue event to be freed. 10040 * 10041 * This routine is the lock free version of the API invoked to release a 10042 * completion-queue event back into the free pool. 10043 **/ 10044 void 10045 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 10046 struct lpfc_cq_event *cq_event) 10047 { 10048 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 10049 } 10050 10051 /** 10052 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 10053 * @phba: pointer to lpfc hba data structure. 10054 * @cq_event: pointer to the completion queue event to be freed. 10055 * 10056 * This routine is the lock version of the API invoked to release a 10057 * completion-queue event back into the free pool. 10058 **/ 10059 void 10060 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 10061 struct lpfc_cq_event *cq_event) 10062 { 10063 unsigned long iflags; 10064 spin_lock_irqsave(&phba->hbalock, iflags); 10065 __lpfc_sli4_cq_event_release(phba, cq_event); 10066 spin_unlock_irqrestore(&phba->hbalock, iflags); 10067 } 10068 10069 /** 10070 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 10071 * @phba: pointer to lpfc hba data structure. 10072 * 10073 * This routine is invoked to release all the pending completion-queue events 10074 * back into the free pool for device reset. 10075 **/ 10076 static void 10077 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 10078 { 10079 LIST_HEAD(cqelist); 10080 struct lpfc_cq_event *cqe; 10081 unsigned long iflags; 10082 10083 /* Retrieve all the pending WCQEs from pending WCQE lists */ 10084 spin_lock_irqsave(&phba->hbalock, iflags); 10085 /* Pending FCP XRI abort events */ 10086 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 10087 &cqelist); 10088 /* Pending ELS XRI abort events */ 10089 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 10090 &cqelist); 10091 /* Pending async events */ 10092 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 10093 &cqelist); 10094 spin_unlock_irqrestore(&phba->hbalock, iflags); 10095 10096 while (!list_empty(&cqelist)) { 10097 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 10098 lpfc_sli4_cq_event_release(phba, cqe); 10099 } 10100 } 10101 10102 /** 10103 * lpfc_pci_function_reset - Reset pci function. 10104 * @phba: pointer to lpfc hba data structure. 10105 * 10106 * This routine is invoked to request a PCI function reset. It destroys 10107 * all resources assigned to the PCI function that originates this request. 10108 * 10109 * Return codes 10110 * 0 - successful 10111 * -ENOMEM - No available memory 10112 * -EIO - The mailbox failed to complete successfully.
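 *
 * For if_type 0 the reset is requested through the FUNCTION_RESET mailbox
 * command; for if_type 2 and 6 it is done by writing INIT_PORT to the
 * SLIPORT control register and polling the port status register for RDY.
 * The poll loop below runs up to 1500 iterations with an msleep(20)
 * between reads, which gives the roughly 30 second bound noted in the
 * code (1500 * 20 ms = 30 s).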
10113 **/ 10114 int 10115 lpfc_pci_function_reset(struct lpfc_hba *phba) 10116 { 10117 LPFC_MBOXQ_t *mboxq; 10118 uint32_t rc = 0, if_type; 10119 uint32_t shdr_status, shdr_add_status; 10120 uint32_t rdy_chk; 10121 uint32_t port_reset = 0; 10122 union lpfc_sli4_cfg_shdr *shdr; 10123 struct lpfc_register reg_data; 10124 uint16_t devid; 10125 10126 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10127 switch (if_type) { 10128 case LPFC_SLI_INTF_IF_TYPE_0: 10129 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10130 GFP_KERNEL); 10131 if (!mboxq) { 10132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10133 "0494 Unable to allocate memory for " 10134 "issuing SLI_FUNCTION_RESET mailbox " 10135 "command\n"); 10136 return -ENOMEM; 10137 } 10138 10139 /* Setup PCI function reset mailbox-ioctl command */ 10140 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10141 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 10142 LPFC_SLI4_MBX_EMBED); 10143 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10144 shdr = (union lpfc_sli4_cfg_shdr *) 10145 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10146 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10147 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 10148 &shdr->response); 10149 if (rc != MBX_TIMEOUT) 10150 mempool_free(mboxq, phba->mbox_mem_pool); 10151 if (shdr_status || shdr_add_status || rc) { 10152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10153 "0495 SLI_FUNCTION_RESET mailbox " 10154 "failed with status x%x add_status x%x," 10155 " mbx status x%x\n", 10156 shdr_status, shdr_add_status, rc); 10157 rc = -ENXIO; 10158 } 10159 break; 10160 case LPFC_SLI_INTF_IF_TYPE_2: 10161 case LPFC_SLI_INTF_IF_TYPE_6: 10162 wait: 10163 /* 10164 * Poll the Port Status Register and wait for RDY for 10165 * up to 30 seconds. If the port doesn't respond, treat 10166 * it as an error. 10167 */ 10168 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 10169 if (lpfc_readl(phba->sli4_hba.u.if_type2. 10170 STATUSregaddr, ®_data.word0)) { 10171 rc = -ENODEV; 10172 goto out; 10173 } 10174 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 10175 break; 10176 msleep(20); 10177 } 10178 10179 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 10180 phba->work_status[0] = readl( 10181 phba->sli4_hba.u.if_type2.ERR1regaddr); 10182 phba->work_status[1] = readl( 10183 phba->sli4_hba.u.if_type2.ERR2regaddr); 10184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10185 "2890 Port not ready, port status reg " 10186 "0x%x error 1=0x%x, error 2=0x%x\n", 10187 reg_data.word0, 10188 phba->work_status[0], 10189 phba->work_status[1]); 10190 rc = -ENODEV; 10191 goto out; 10192 } 10193 10194 if (!port_reset) { 10195 /* 10196 * Reset the port now 10197 */ 10198 reg_data.word0 = 0; 10199 bf_set(lpfc_sliport_ctrl_end, ®_data, 10200 LPFC_SLIPORT_LITTLE_ENDIAN); 10201 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10202 LPFC_SLIPORT_INIT_PORT); 10203 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10204 CTRLregaddr); 10205 /* flush */ 10206 pci_read_config_word(phba->pcidev, 10207 PCI_DEVICE_ID, &devid); 10208 10209 port_reset = 1; 10210 msleep(20); 10211 goto wait; 10212 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10213 rc = -ENODEV; 10214 goto out; 10215 } 10216 break; 10217 10218 case LPFC_SLI_INTF_IF_TYPE_1: 10219 default: 10220 break; 10221 } 10222 10223 out: 10224 /* Catch the not-ready port failure after a port reset. 
*/ 10225 if (rc) { 10226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10227 "3317 HBA not functional: IP Reset Failed " 10228 "try: echo fw_reset > board_mode\n"); 10229 rc = -ENODEV; 10230 } 10231 10232 return rc; 10233 } 10234 10235 /** 10236 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10237 * @phba: pointer to lpfc hba data structure. 10238 * 10239 * This routine is invoked to set up the PCI device memory space for device 10240 * with SLI-4 interface spec. 10241 * 10242 * Return codes 10243 * 0 - successful 10244 * other values - error 10245 **/ 10246 static int 10247 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10248 { 10249 struct pci_dev *pdev = phba->pcidev; 10250 unsigned long bar0map_len, bar1map_len, bar2map_len; 10251 int error; 10252 uint32_t if_type; 10253 10254 if (!pdev) 10255 return -ENODEV; 10256 10257 /* Set the device DMA mask size */ 10258 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10259 if (error) 10260 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10261 if (error) 10262 return error; 10263 10264 /* 10265 * The BARs and register set definitions and offset locations are 10266 * dependent on the if_type. 10267 */ 10268 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10269 &phba->sli4_hba.sli_intf.word0)) { 10270 return -ENODEV; 10271 } 10272 10273 /* There is no SLI3 failback for SLI4 devices. */ 10274 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10275 LPFC_SLI_INTF_VALID) { 10276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10277 "2894 SLI_INTF reg contents invalid " 10278 "sli_intf reg 0x%x\n", 10279 phba->sli4_hba.sli_intf.word0); 10280 return -ENODEV; 10281 } 10282 10283 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10284 /* 10285 * Get the bus address of SLI4 device Bar regions and the 10286 * number of bytes required by each mapping. The mapping of the 10287 * particular PCI BARs regions is dependent on the type of 10288 * SLI4 device. 
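 *
 * As established by the code below, the mapping works out to:
 *	BAR0 (PCI_64BIT_BAR0): SLI4 configuration registers, all if_types
 *	BAR2 (PCI_64BIT_BAR2): control registers on if_type 0,
 *	                       doorbell registers on if_type 6
 *	BAR4 (PCI_64BIT_BAR4): doorbell registers on if_type 0,
 *	                       DPP registers on if_type 6
 * if_type 2 ports use only the BAR0 configuration space mapping.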
10289 */ 10290 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10291 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10292 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10293 10294 /* 10295 * Map SLI4 PCI Config Space Register base to a kernel virtual 10296 * addr 10297 */ 10298 phba->sli4_hba.conf_regs_memmap_p = 10299 ioremap(phba->pci_bar0_map, bar0map_len); 10300 if (!phba->sli4_hba.conf_regs_memmap_p) { 10301 dev_printk(KERN_ERR, &pdev->dev, 10302 "ioremap failed for SLI4 PCI config " 10303 "registers.\n"); 10304 return -ENODEV; 10305 } 10306 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10307 /* Set up BAR0 PCI config space register memory map */ 10308 lpfc_sli4_bar0_register_memmap(phba, if_type); 10309 } else { 10310 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10311 bar0map_len = pci_resource_len(pdev, 1); 10312 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10313 dev_printk(KERN_ERR, &pdev->dev, 10314 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10315 return -ENODEV; 10316 } 10317 phba->sli4_hba.conf_regs_memmap_p = 10318 ioremap(phba->pci_bar0_map, bar0map_len); 10319 if (!phba->sli4_hba.conf_regs_memmap_p) { 10320 dev_printk(KERN_ERR, &pdev->dev, 10321 "ioremap failed for SLI4 PCI config " 10322 "registers.\n"); 10323 return -ENODEV; 10324 } 10325 lpfc_sli4_bar0_register_memmap(phba, if_type); 10326 } 10327 10328 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10329 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10330 /* 10331 * Map SLI4 if type 0 HBA Control Register base to a 10332 * kernel virtual address and setup the registers. 10333 */ 10334 phba->pci_bar1_map = pci_resource_start(pdev, 10335 PCI_64BIT_BAR2); 10336 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10337 phba->sli4_hba.ctrl_regs_memmap_p = 10338 ioremap(phba->pci_bar1_map, 10339 bar1map_len); 10340 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10341 dev_err(&pdev->dev, 10342 "ioremap failed for SLI4 HBA " 10343 "control registers.\n"); 10344 error = -ENOMEM; 10345 goto out_iounmap_conf; 10346 } 10347 phba->pci_bar2_memmap_p = 10348 phba->sli4_hba.ctrl_regs_memmap_p; 10349 lpfc_sli4_bar1_register_memmap(phba, if_type); 10350 } else { 10351 error = -ENOMEM; 10352 goto out_iounmap_conf; 10353 } 10354 } 10355 10356 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10357 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10358 /* 10359 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10360 * virtual address and setup the registers. 10361 */ 10362 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10363 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10364 phba->sli4_hba.drbl_regs_memmap_p = 10365 ioremap(phba->pci_bar1_map, bar1map_len); 10366 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10367 dev_err(&pdev->dev, 10368 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10369 error = -ENOMEM; 10370 goto out_iounmap_conf; 10371 } 10372 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10373 lpfc_sli4_bar1_register_memmap(phba, if_type); 10374 } 10375 10376 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10377 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10378 /* 10379 * Map SLI4 if type 0 HBA Doorbell Register base to 10380 * a kernel virtual address and setup the registers. 
10381 */ 10382 phba->pci_bar2_map = pci_resource_start(pdev, 10383 PCI_64BIT_BAR4); 10384 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10385 phba->sli4_hba.drbl_regs_memmap_p = 10386 ioremap(phba->pci_bar2_map, 10387 bar2map_len); 10388 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10389 dev_err(&pdev->dev, 10390 "ioremap failed for SLI4 HBA" 10391 " doorbell registers.\n"); 10392 error = -ENOMEM; 10393 goto out_iounmap_ctrl; 10394 } 10395 phba->pci_bar4_memmap_p = 10396 phba->sli4_hba.drbl_regs_memmap_p; 10397 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 10398 if (error) 10399 goto out_iounmap_all; 10400 } else { 10401 error = -ENOMEM; 10402 goto out_iounmap_all; 10403 } 10404 } 10405 10406 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 10407 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10408 /* 10409 * Map SLI4 if type 6 HBA DPP Register base to a kernel 10410 * virtual address and setup the registers. 10411 */ 10412 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 10413 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10414 phba->sli4_hba.dpp_regs_memmap_p = 10415 ioremap(phba->pci_bar2_map, bar2map_len); 10416 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10417 dev_err(&pdev->dev, 10418 "ioremap failed for SLI4 HBA dpp registers.\n"); 10419 error = -ENOMEM; 10420 goto out_iounmap_ctrl; 10421 } 10422 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10423 } 10424 10425 /* Set up the EQ/CQ register handling functions now */ 10426 switch (if_type) { 10427 case LPFC_SLI_INTF_IF_TYPE_0: 10428 case LPFC_SLI_INTF_IF_TYPE_2: 10429 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 10430 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 10431 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 10432 break; 10433 case LPFC_SLI_INTF_IF_TYPE_6: 10434 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 10435 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 10436 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 10437 break; 10438 default: 10439 break; 10440 } 10441 10442 return 0; 10443 10444 out_iounmap_all: 10445 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10446 out_iounmap_ctrl: 10447 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10448 out_iounmap_conf: 10449 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10450 10451 return error; 10452 } 10453 10454 /** 10455 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 10456 * @phba: pointer to lpfc hba data structure. 10457 * 10458 * This routine is invoked to unset the PCI device memory space for device 10459 * with SLI-4 interface spec.
10460 **/ 10461 static void 10462 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10463 { 10464 uint32_t if_type; 10465 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10466 10467 switch (if_type) { 10468 case LPFC_SLI_INTF_IF_TYPE_0: 10469 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10470 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10471 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10472 break; 10473 case LPFC_SLI_INTF_IF_TYPE_2: 10474 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10475 break; 10476 case LPFC_SLI_INTF_IF_TYPE_6: 10477 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10478 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10479 if (phba->sli4_hba.dpp_regs_memmap_p) 10480 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 10481 break; 10482 case LPFC_SLI_INTF_IF_TYPE_1: 10483 default: 10484 dev_printk(KERN_ERR, &phba->pcidev->dev, 10485 "FATAL - unsupported SLI4 interface type - %d\n", 10486 if_type); 10487 break; 10488 } 10489 } 10490 10491 /** 10492 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10493 * @phba: pointer to lpfc hba data structure. 10494 * 10495 * This routine is invoked to enable the MSI-X interrupt vectors to device 10496 * with SLI-3 interface specs. 10497 * 10498 * Return codes 10499 * 0 - successful 10500 * other values - error 10501 **/ 10502 static int 10503 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10504 { 10505 int rc; 10506 LPFC_MBOXQ_t *pmb; 10507 10508 /* Set up MSI-X multi-message vectors */ 10509 rc = pci_alloc_irq_vectors(phba->pcidev, 10510 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10511 if (rc < 0) { 10512 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10513 "0420 PCI enable MSI-X failed (%d)\n", rc); 10514 goto vec_fail_out; 10515 } 10516 10517 /* 10518 * Assign MSI-X vectors to interrupt handlers 10519 */ 10520 10521 /* vector-0 is associated to slow-path handler */ 10522 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10523 &lpfc_sli_sp_intr_handler, 0, 10524 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10525 if (rc) { 10526 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10527 "0421 MSI-X slow-path request_irq failed " 10528 "(%d)\n", rc); 10529 goto msi_fail_out; 10530 } 10531 10532 /* vector-1 is associated to fast-path handler */ 10533 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10534 &lpfc_sli_fp_intr_handler, 0, 10535 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10536 10537 if (rc) { 10538 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10539 "0429 MSI-X fast-path request_irq failed " 10540 "(%d)\n", rc); 10541 goto irq_fail_out; 10542 } 10543 10544 /* 10545 * Configure HBA MSI-X attention conditions to messages 10546 */ 10547 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10548 10549 if (!pmb) { 10550 rc = -ENOMEM; 10551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10552 "0474 Unable to allocate memory for issuing " 10553 "MBOX_CONFIG_MSI command\n"); 10554 goto mem_fail_out; 10555 } 10556 rc = lpfc_config_msi(phba, pmb); 10557 if (rc) 10558 goto mbx_fail_out; 10559 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10560 if (rc != MBX_SUCCESS) { 10561 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10562 "0351 Config MSI mailbox command failed, " 10563 "mbxCmd x%x, mbxStatus x%x\n", 10564 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10565 goto mbx_fail_out; 10566 } 10567 10568 /* Free memory allocated for mailbox command */ 10569 mempool_free(pmb, phba->mbox_mem_pool); 10570 return rc; 10571 10572 mbx_fail_out: 10573 /* Free memory allocated for mailbox command */ 10574 mempool_free(pmb, 
phba->mbox_mem_pool); 10575 10576 mem_fail_out: 10577 /* free the irq already requested */ 10578 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 10579 10580 irq_fail_out: 10581 /* free the irq already requested */ 10582 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 10583 10584 msi_fail_out: 10585 /* Unconfigure MSI-X capability structure */ 10586 pci_free_irq_vectors(phba->pcidev); 10587 10588 vec_fail_out: 10589 return rc; 10590 } 10591 10592 /** 10593 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 10594 * @phba: pointer to lpfc hba data structure. 10595 * 10596 * This routine is invoked to enable the MSI interrupt mode to device with 10597 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 10598 * enable the MSI vector. The device driver is responsible for calling the 10599 * request_irq() to register the MSI vector with an interrupt handler, which 10600 * is done in this function. 10601 * 10602 * Return codes 10603 * 0 - successful 10604 * other values - error 10605 */ 10606 static int 10607 lpfc_sli_enable_msi(struct lpfc_hba *phba) 10608 { 10609 int rc; 10610 10611 rc = pci_enable_msi(phba->pcidev); 10612 if (!rc) 10613 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10614 "0462 PCI enable MSI mode success.\n"); 10615 else { 10616 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10617 "0471 PCI enable MSI mode failed (%d)\n", rc); 10618 return rc; 10619 } 10620 10621 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10622 0, LPFC_DRIVER_NAME, phba); 10623 if (rc) { 10624 pci_disable_msi(phba->pcidev); 10625 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10626 "0478 MSI request_irq failed (%d)\n", rc); 10627 } 10628 return rc; 10629 } 10630 10631 /** 10632 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 10633 * @phba: pointer to lpfc hba data structure. 10634 * 10635 * This routine is invoked to enable device interrupt and associate driver's 10636 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 10637 * spec. Depending on the interrupt mode configured for the driver, the driver 10638 * will try to fall back from the configured interrupt mode to an interrupt 10639 * mode which is supported by the platform, kernel, and device in the order 10640 * of: 10641 * MSI-X -> MSI -> IRQ.
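 *
 * A minimal usage sketch (illustrative only): the probe path passes the
 * configured mode and checks which mode was actually enabled:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		... no interrupt mode could be enabled ...
 *
 * The return value encodes the enabled mode: 0 for INTx, 1 for MSI and
 * 2 for MSI-X, matching the cfg_mode numbering accepted by this routine.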
10642 * 10643 * Return codes 10644 * 0 - successful 10645 * other values - error 10646 **/ 10647 static uint32_t 10648 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10649 { 10650 uint32_t intr_mode = LPFC_INTR_ERROR; 10651 int retval; 10652 10653 if (cfg_mode == 2) { 10654 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 10655 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 10656 if (!retval) { 10657 /* Now, try to enable MSI-X interrupt mode */ 10658 retval = lpfc_sli_enable_msix(phba); 10659 if (!retval) { 10660 /* Indicate initialization to MSI-X mode */ 10661 phba->intr_type = MSIX; 10662 intr_mode = 2; 10663 } 10664 } 10665 } 10666 10667 /* Fall back to MSI if MSI-X initialization failed */ 10668 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10669 retval = lpfc_sli_enable_msi(phba); 10670 if (!retval) { 10671 /* Indicate initialization to MSI mode */ 10672 phba->intr_type = MSI; 10673 intr_mode = 1; 10674 } 10675 } 10676 10677 /* Fall back to INTx if both MSI-X/MSI initialization failed */ 10678 if (phba->intr_type == NONE) { 10679 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10680 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10681 if (!retval) { 10682 /* Indicate initialization to INTx mode */ 10683 phba->intr_type = INTx; 10684 intr_mode = 0; 10685 } 10686 } 10687 return intr_mode; 10688 } 10689 10690 /** 10691 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 10692 * @phba: pointer to lpfc hba data structure. 10693 * 10694 * This routine is invoked to disable device interrupt and disassociate the 10695 * driver's interrupt handler(s) from interrupt vector(s) to device with 10696 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 10697 * release the interrupt vector(s) for the message signaled interrupt. 10698 **/ 10699 static void 10700 lpfc_sli_disable_intr(struct lpfc_hba *phba) 10701 { 10702 int nr_irqs, i; 10703 10704 if (phba->intr_type == MSIX) 10705 nr_irqs = LPFC_MSIX_VECTORS; 10706 else 10707 nr_irqs = 1; 10708 10709 for (i = 0; i < nr_irqs; i++) 10710 free_irq(pci_irq_vector(phba->pcidev, i), phba); 10711 pci_free_irq_vectors(phba->pcidev); 10712 10713 /* Reset interrupt management states */ 10714 phba->intr_type = NONE; 10715 phba->sli.slistat.sli_intr = 0; 10716 } 10717 10718 /** 10719 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 10720 * @phba: pointer to lpfc hba data structure. 10721 * @id: EQ vector index or Hardware Queue index 10722 * @match: LPFC_FIND_BY_EQ = match by EQ 10723 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 10724 * Return the CPU that matches the selection criteria 10725 */ 10726 static uint16_t 10727 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 10728 { 10729 struct lpfc_vector_map_info *cpup; 10730 int cpu; 10731 10732 /* Loop through all CPUs */ 10733 for_each_present_cpu(cpu) { 10734 cpup = &phba->sli4_hba.cpu_map[cpu]; 10735 10736 /* If we are matching by EQ, there may be multiple CPUs using 10737 * the same vector, so select the one with 10738 * LPFC_CPU_FIRST_IRQ set.
10739 */ 10740 if ((match == LPFC_FIND_BY_EQ) && 10741 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 10742 (cpup->eq == id)) 10743 return cpu; 10744 10745 /* If matching by HDWQ, select the first CPU that matches */ 10746 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10747 return cpu; 10748 } 10749 return 0; 10750 } 10751 10752 #ifdef CONFIG_X86 10753 /** 10754 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10755 * @phba: pointer to lpfc hba data structure. 10756 * @cpu: CPU map index 10757 * @phys_id: CPU package physical id 10758 * @core_id: CPU core id 10759 */ 10760 static int 10761 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10762 uint16_t phys_id, uint16_t core_id) 10763 { 10764 struct lpfc_vector_map_info *cpup; 10765 int idx; 10766 10767 for_each_present_cpu(idx) { 10768 cpup = &phba->sli4_hba.cpu_map[idx]; 10769 /* Does the cpup match the one we are looking for */ 10770 if ((cpup->phys_id == phys_id) && 10771 (cpup->core_id == core_id) && 10772 (cpu != idx)) 10773 return 1; 10774 } 10775 return 0; 10776 } 10777 #endif 10778 10779 /* 10780 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 10781 * @phba: pointer to lpfc hba data structure. 10782 * @eqidx: index for eq and irq vector 10783 * @flag: flags to set for vector_map structure 10784 * @cpu: cpu used to index vector_map structure 10785 * 10786 * The routine assigns eq info into vector_map structure 10787 */ 10788 static inline void 10789 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 10790 unsigned int cpu) 10791 { 10792 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 10793 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 10794 10795 cpup->eq = eqidx; 10796 cpup->flag |= flag; 10797 10798 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10799 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 10800 cpu, eqhdl->irq, cpup->eq, cpup->flag); 10801 } 10802 10803 /** 10804 * lpfc_cpu_map_array_init - Initialize cpu_map structure 10805 * @phba: pointer to lpfc hba data structure. 10806 * 10807 * The routine initializes the cpu_map array structure 10808 */ 10809 static void 10810 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 10811 { 10812 struct lpfc_vector_map_info *cpup; 10813 struct lpfc_eq_intr_info *eqi; 10814 int cpu; 10815 10816 for_each_possible_cpu(cpu) { 10817 cpup = &phba->sli4_hba.cpu_map[cpu]; 10818 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 10819 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 10820 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 10821 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 10822 cpup->flag = 0; 10823 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 10824 INIT_LIST_HEAD(&eqi->list); 10825 eqi->icnt = 0; 10826 } 10827 } 10828 10829 /** 10830 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 10831 * @phba: pointer to lpfc hba data structure. 10832 * 10833 * The routine initializes the hba_eq_hdl array structure 10834 */ 10835 static void 10836 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 10837 { 10838 struct lpfc_hba_eq_hdl *eqhdl; 10839 int i; 10840 10841 for (i = 0; i < phba->cfg_irq_chann; i++) { 10842 eqhdl = lpfc_get_eq_hdl(i); 10843 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 10844 eqhdl->phba = phba; 10845 } 10846 } 10847 10848 /** 10849 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10850 * @phba: pointer to lpfc hba data structure. 10851 * @vectors: number of msix vectors allocated. 10852 * 10853 * The routine will figure out the CPU affinity assignment for every 10854 * MSI-X vector allocated for the HBA. 
10855 * In addition, the CPU to IO channel mapping will be calculated 10856 * and the phba->sli4_hba.cpu_map array will reflect this. 10857 */ 10858 static void 10859 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 10860 { 10861 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 10862 int max_phys_id, min_phys_id; 10863 int max_core_id, min_core_id; 10864 struct lpfc_vector_map_info *cpup; 10865 struct lpfc_vector_map_info *new_cpup; 10866 #ifdef CONFIG_X86 10867 struct cpuinfo_x86 *cpuinfo; 10868 #endif 10869 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 10870 struct lpfc_hdwq_stat *c_stat; 10871 #endif 10872 10873 max_phys_id = 0; 10874 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 10875 max_core_id = 0; 10876 min_core_id = LPFC_VECTOR_MAP_EMPTY; 10877 10878 /* Update CPU map with physical id and core id of each CPU */ 10879 for_each_present_cpu(cpu) { 10880 cpup = &phba->sli4_hba.cpu_map[cpu]; 10881 #ifdef CONFIG_X86 10882 cpuinfo = &cpu_data(cpu); 10883 cpup->phys_id = cpuinfo->phys_proc_id; 10884 cpup->core_id = cpuinfo->cpu_core_id; 10885 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 10886 cpup->flag |= LPFC_CPU_MAP_HYPER; 10887 #else 10888 /* No distinction between CPUs for other platforms */ 10889 cpup->phys_id = 0; 10890 cpup->core_id = cpu; 10891 #endif 10892 10893 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10894 "3328 CPU %d physid %d coreid %d flag x%x\n", 10895 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 10896 10897 if (cpup->phys_id > max_phys_id) 10898 max_phys_id = cpup->phys_id; 10899 if (cpup->phys_id < min_phys_id) 10900 min_phys_id = cpup->phys_id; 10901 10902 if (cpup->core_id > max_core_id) 10903 max_core_id = cpup->core_id; 10904 if (cpup->core_id < min_core_id) 10905 min_core_id = cpup->core_id; 10906 } 10907 10908 /* After looking at each irq vector assigned to this pcidev, its 10909 * possible to see that not ALL CPUs have been accounted for. 10910 * Next we will set any unassigned (unaffinitized) cpu map 10911 * entries to a IRQ on the same phys_id. 10912 */ 10913 first_cpu = cpumask_first(cpu_present_mask); 10914 start_cpu = first_cpu; 10915 10916 for_each_present_cpu(cpu) { 10917 cpup = &phba->sli4_hba.cpu_map[cpu]; 10918 10919 /* Is this CPU entry unassigned */ 10920 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 10921 /* Mark CPU as IRQ not assigned by the kernel */ 10922 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 10923 10924 /* If so, find a new_cpup thats on the the SAME 10925 * phys_id as cpup. start_cpu will start where we 10926 * left off so all unassigned entries don't get assgined 10927 * the IRQ of the first entry. 10928 */ 10929 new_cpu = start_cpu; 10930 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10931 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10932 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 10933 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 10934 (new_cpup->phys_id == cpup->phys_id)) 10935 goto found_same; 10936 new_cpu = cpumask_next( 10937 new_cpu, cpu_present_mask); 10938 if (new_cpu == nr_cpumask_bits) 10939 new_cpu = first_cpu; 10940 } 10941 /* At this point, we leave the CPU as unassigned */ 10942 continue; 10943 found_same: 10944 /* We found a matching phys_id, so copy the IRQ info */ 10945 cpup->eq = new_cpup->eq; 10946 10947 /* Bump start_cpu to the next slot to minmize the 10948 * chance of having multiple unassigned CPU entries 10949 * selecting the same IRQ. 
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3337 Set Affinity: CPU %d "
					"eq %d from peer cpu %d same "
					"phys_id (%d)\n",
					cpu, cpup->eq, new_cpu,
					cpup->phys_id);
		}
	}

	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark it as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on ANY phys_id
			 * as the cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
					goto found_any;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* We should never leave an entry unassigned */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpu, cpup->eq);
			continue;
found_any:
			/* We found an available entry, copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3338 Set Affinity: CPU %d "
					"eq %d from peer cpu %d (%d/%d)\n",
					cpu, cpup->eq, new_cpu,
					new_cpup->phys_id, new_cpup->core_id);
		}
	}

	/* Assign hdwq indices that are unique across all cpus in the map
	 * that are also FIRST_CPUs.
	 */
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Only FIRST IRQs get a hdwq index assignment. */
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
		cpup->hdwq = idx;
		idx++;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3333 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flg x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}
	/* Associate a hdwq with each cpu_map entry
	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
	 * hardware queues than CPUs. In that case we will just round-robin
	 * the available hardware queues as they get assigned to CPUs.
	 * The next_idx is the idx from the FIRST_CPU loop above to account
	 * for irq_chann < hdwq. The idx is used for round-robin assignments
	 * and needs to start at 0.
	 */
	next_idx = idx;
	start_cpu = 0;
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* FIRST cpus are already mapped.
*/ 11052 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 11053 continue; 11054 11055 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 11056 * of the unassigned cpus to the next idx so that all 11057 * hdw queues are fully utilized. 11058 */ 11059 if (next_idx < phba->cfg_hdw_queue) { 11060 cpup->hdwq = next_idx; 11061 next_idx++; 11062 continue; 11063 } 11064 11065 /* Not a First CPU and all hdw_queues are used. Reuse a 11066 * Hardware Queue for another CPU, so be smart about it 11067 * and pick one that has its IRQ/EQ mapped to the same phys_id 11068 * (CPU package) and core_id. 11069 */ 11070 new_cpu = start_cpu; 11071 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11072 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11073 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11074 new_cpup->phys_id == cpup->phys_id && 11075 new_cpup->core_id == cpup->core_id) { 11076 goto found_hdwq; 11077 } 11078 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11079 if (new_cpu == nr_cpumask_bits) 11080 new_cpu = first_cpu; 11081 } 11082 11083 /* If we can't match both phys_id and core_id, 11084 * settle for just a phys_id match. 11085 */ 11086 new_cpu = start_cpu; 11087 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11088 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11089 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11090 new_cpup->phys_id == cpup->phys_id) 11091 goto found_hdwq; 11092 11093 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11094 if (new_cpu == nr_cpumask_bits) 11095 new_cpu = first_cpu; 11096 } 11097 11098 /* Otherwise just round robin on cfg_hdw_queue */ 11099 cpup->hdwq = idx % phba->cfg_hdw_queue; 11100 idx++; 11101 goto logit; 11102 found_hdwq: 11103 /* We found an available entry, copy the IRQ info */ 11104 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 11105 if (start_cpu == nr_cpumask_bits) 11106 start_cpu = first_cpu; 11107 cpup->hdwq = new_cpup->hdwq; 11108 logit: 11109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11110 "3335 Set Affinity: CPU %d (phys %d core %d): " 11111 "hdwq %d eq %d flg x%x\n", 11112 cpu, cpup->phys_id, cpup->core_id, 11113 cpup->hdwq, cpup->eq, cpup->flag); 11114 } 11115 11116 /* 11117 * Initialize the cpu_map slots for not-present cpus in case 11118 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 11119 */ 11120 idx = 0; 11121 for_each_possible_cpu(cpu) { 11122 cpup = &phba->sli4_hba.cpu_map[cpu]; 11123 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11124 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 11125 c_stat->hdwq_no = cpup->hdwq; 11126 #endif 11127 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 11128 continue; 11129 11130 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 11131 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 11132 c_stat->hdwq_no = cpup->hdwq; 11133 #endif 11134 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11135 "3340 Set Affinity: not present " 11136 "CPU %d hdwq %d\n", 11137 cpu, cpup->hdwq); 11138 } 11139 11140 /* The cpu_map array will be used later during initialization 11141 * when EQ / CQ / WQs are allocated and configured. 11142 */ 11143 return; 11144 } 11145 11146 /** 11147 * lpfc_cpuhp_get_eq 11148 * 11149 * @phba: pointer to lpfc hba data structure. 
 * @cpu: cpu going offline
 * @eqlist: list head used to collect the eqs that need to be polled once
 *          @cpu goes offline
 *
 * Collect, on @eqlist, every eq whose only remaining online CPU in its IRQ
 * affinity mask is @cpu, so that the caller can switch those eqs to software
 * polling before the CPU goes away.
 */
static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	const struct cpumask *maskp;
	struct lpfc_queue *eq;
	struct cpumask *tmp;
	u16 idx;

	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/*
		 * If the irq is not affinitized to the cpu going offline,
		 * then we don't need to poll the eq attached to it.
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;
		/* Get the cpus that are online and are affinitized to this
		 * irq vector. If the count is more than 1 then cpuhp is not
		 * going to shut down this vector. Since this cpu has not
		 * gone offline yet, we need >1.
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		/* Now that we have an irq to shut down, get the eq
		 * mapped to this irq. Note: multiple hdwq's in
		 * the software can share an eq, but eventually
		 * only 1 eq will be mapped to this vector
		 */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
	}
	kfree(tmp);
	return 0;
}

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
					    &phba->cpuhp);
	/*
	 * Unregistering the instance doesn't stop the polling
	 * timer. Wait for the poll timer to retire.
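	 * synchronize_rcu() below waits for any RCU read-side section that
	 * might still re-arm the timer (e.g. lpfc_cpuhp_add() below re-arms
	 * it under rcu_read_lock()), after which del_timer_sync() can retire
	 * it for good.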
11209 */ 11210 synchronize_rcu(); 11211 del_timer_sync(&phba->cpuhp_poll_timer); 11212 } 11213 11214 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 11215 { 11216 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 11217 return; 11218 11219 __lpfc_cpuhp_remove(phba); 11220 } 11221 11222 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 11223 { 11224 if (phba->sli_rev != LPFC_SLI_REV4) 11225 return; 11226 11227 rcu_read_lock(); 11228 11229 if (!list_empty(&phba->poll_list)) 11230 mod_timer(&phba->cpuhp_poll_timer, 11231 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 11232 11233 rcu_read_unlock(); 11234 11235 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 11236 &phba->cpuhp); 11237 } 11238 11239 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 11240 { 11241 if (phba->pport->load_flag & FC_UNLOADING) { 11242 *retval = -EAGAIN; 11243 return true; 11244 } 11245 11246 if (phba->sli_rev != LPFC_SLI_REV4) { 11247 *retval = 0; 11248 return true; 11249 } 11250 11251 /* proceed with the hotplug */ 11252 return false; 11253 } 11254 11255 /** 11256 * lpfc_irq_set_aff - set IRQ affinity 11257 * @eqhdl: EQ handle 11258 * @cpu: cpu to set affinity 11259 * 11260 **/ 11261 static inline void 11262 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 11263 { 11264 cpumask_clear(&eqhdl->aff_mask); 11265 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 11266 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11267 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 11268 } 11269 11270 /** 11271 * lpfc_irq_clear_aff - clear IRQ affinity 11272 * @eqhdl: EQ handle 11273 * 11274 **/ 11275 static inline void 11276 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 11277 { 11278 cpumask_clear(&eqhdl->aff_mask); 11279 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11280 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 11281 } 11282 11283 /** 11284 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 11285 * @phba: pointer to HBA context object. 11286 * @cpu: cpu going offline/online 11287 * @offline: true, cpu is going offline. false, cpu is coming online. 11288 * 11289 * If cpu is going offline, we'll try our best effort to find the next 11290 * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities. 11291 * 11292 * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu. 11293 * 11294 * Note: Call only if cfg_irq_numa is enabled, otherwise rely on 11295 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
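 *
 * The only callers in this file are lpfc_cpu_offline() and lpfc_cpu_online()
 * below, which run as the driver's CPU hotplug callbacks.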
11296 * 11297 **/ 11298 static void 11299 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 11300 { 11301 struct lpfc_vector_map_info *cpup; 11302 struct cpumask *aff_mask; 11303 unsigned int cpu_select, cpu_next, idx; 11304 const struct cpumask *numa_mask; 11305 11306 if (!phba->cfg_irq_numa) 11307 return; 11308 11309 numa_mask = &phba->sli4_hba.numa_mask; 11310 11311 if (!cpumask_test_cpu(cpu, numa_mask)) 11312 return; 11313 11314 cpup = &phba->sli4_hba.cpu_map[cpu]; 11315 11316 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11317 return; 11318 11319 if (offline) { 11320 /* Find next online CPU on NUMA node */ 11321 cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true); 11322 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next); 11323 11324 /* Found a valid CPU */ 11325 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 11326 /* Go through each eqhdl and ensure offlining 11327 * cpu aff_mask is migrated 11328 */ 11329 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11330 aff_mask = lpfc_get_aff_mask(idx); 11331 11332 /* Migrate affinity */ 11333 if (cpumask_test_cpu(cpu, aff_mask)) 11334 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 11335 cpu_select); 11336 } 11337 } else { 11338 /* Rely on irqbalance if no online CPUs left on NUMA */ 11339 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 11340 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 11341 } 11342 } else { 11343 /* Migrate affinity back to this CPU */ 11344 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 11345 } 11346 } 11347 11348 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 11349 { 11350 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11351 struct lpfc_queue *eq, *next; 11352 LIST_HEAD(eqlist); 11353 int retval; 11354 11355 if (!phba) { 11356 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11357 return 0; 11358 } 11359 11360 if (__lpfc_cpuhp_checks(phba, &retval)) 11361 return retval; 11362 11363 lpfc_irq_rebalance(phba, cpu, true); 11364 11365 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 11366 if (retval) 11367 return retval; 11368 11369 /* start polling on these eq's */ 11370 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 11371 list_del_init(&eq->_poll_list); 11372 lpfc_sli4_start_polling(eq); 11373 } 11374 11375 return 0; 11376 } 11377 11378 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 11379 { 11380 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11381 struct lpfc_queue *eq, *next; 11382 unsigned int n; 11383 int retval; 11384 11385 if (!phba) { 11386 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11387 return 0; 11388 } 11389 11390 if (__lpfc_cpuhp_checks(phba, &retval)) 11391 return retval; 11392 11393 lpfc_irq_rebalance(phba, cpu, false); 11394 11395 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 11396 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 11397 if (n == cpu) 11398 lpfc_sli4_stop_polling(eq); 11399 } 11400 11401 return 0; 11402 } 11403 11404 /** 11405 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 11406 * @phba: pointer to lpfc hba data structure. 11407 * 11408 * This routine is invoked to enable the MSI-X interrupt vectors to device 11409 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 11410 * to cpus on the system. 
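 *
 * (Note for orientation: each granted vector gets its own handler name,
 * built from LPFC_DRIVER_HANDLER_NAME"%d", and its own request_irq()
 * registration, so every EQ appears as a separate named entry in
 * /proc/interrupts.)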
11411 * 11412 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 11413 * the number of cpus on the same numa node as this adapter. The vectors are 11414 * allocated without requesting OS affinity mapping. A vector will be 11415 * allocated and assigned to each online and offline cpu. If the cpu is 11416 * online, then affinity will be set to that cpu. If the cpu is offline, then 11417 * affinity will be set to the nearest peer cpu within the numa node that is 11418 * online. If there are no online cpus within the numa node, affinity is not 11419 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 11420 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 11421 * configured. 11422 * 11423 * If numa mode is not enabled and there is more than 1 vector allocated, then 11424 * the driver relies on the managed irq interface where the OS assigns vector to 11425 * cpu affinity. The driver will then use that affinity mapping to setup its 11426 * cpu mapping table. 11427 * 11428 * Return codes 11429 * 0 - successful 11430 * other values - error 11431 **/ 11432 static int 11433 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 11434 { 11435 int vectors, rc, index; 11436 char *name; 11437 const struct cpumask *numa_mask = NULL; 11438 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 11439 struct lpfc_hba_eq_hdl *eqhdl; 11440 const struct cpumask *maskp; 11441 bool first; 11442 unsigned int flags = PCI_IRQ_MSIX; 11443 11444 /* Set up MSI-X multi-message vectors */ 11445 vectors = phba->cfg_irq_chann; 11446 11447 if (phba->cfg_irq_numa) { 11448 numa_mask = &phba->sli4_hba.numa_mask; 11449 cpu_cnt = cpumask_weight(numa_mask); 11450 vectors = min(phba->cfg_irq_chann, cpu_cnt); 11451 11452 /* cpu: iterates over numa_mask including offline or online 11453 * cpu_select: iterates over online numa_mask to set affinity 11454 */ 11455 cpu = cpumask_first(numa_mask); 11456 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu); 11457 } else { 11458 flags |= PCI_IRQ_AFFINITY; 11459 } 11460 11461 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 11462 if (rc < 0) { 11463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11464 "0484 PCI enable MSI-X failed (%d)\n", rc); 11465 goto vec_fail_out; 11466 } 11467 vectors = rc; 11468 11469 /* Assign MSI-X vectors to interrupt handlers */ 11470 for (index = 0; index < vectors; index++) { 11471 eqhdl = lpfc_get_eq_hdl(index); 11472 name = eqhdl->handler_name; 11473 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 11474 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 11475 LPFC_DRIVER_HANDLER_NAME"%d", index); 11476 11477 eqhdl->idx = index; 11478 rc = request_irq(pci_irq_vector(phba->pcidev, index), 11479 &lpfc_sli4_hba_intr_handler, 0, 11480 name, eqhdl); 11481 if (rc) { 11482 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11483 "0486 MSI-X fast-path (%d) " 11484 "request_irq failed (%d)\n", index, rc); 11485 goto cfg_fail_out; 11486 } 11487 11488 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 11489 11490 if (phba->cfg_irq_numa) { 11491 /* If found a neighboring online cpu, set affinity */ 11492 if (cpu_select < nr_cpu_ids) 11493 lpfc_irq_set_aff(eqhdl, cpu_select); 11494 11495 /* Assign EQ to cpu_map */ 11496 lpfc_assign_eq_map_info(phba, index, 11497 LPFC_CPU_FIRST_IRQ, 11498 cpu); 11499 11500 /* Iterate to next offline or online cpu in numa_mask */ 11501 cpu = cpumask_next(cpu, numa_mask); 11502 11503 /* Find next online cpu in numa_mask to set affinity */ 11504 cpu_select = 
lpfc_next_online_numa_cpu(numa_mask, cpu); 11505 } else if (vectors == 1) { 11506 cpu = cpumask_first(cpu_present_mask); 11507 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 11508 cpu); 11509 } else { 11510 maskp = pci_irq_get_affinity(phba->pcidev, index); 11511 11512 first = true; 11513 /* Loop through all CPUs associated with vector index */ 11514 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 11515 /* If this is the first CPU thats assigned to 11516 * this vector, set LPFC_CPU_FIRST_IRQ. 11517 */ 11518 lpfc_assign_eq_map_info(phba, index, 11519 first ? 11520 LPFC_CPU_FIRST_IRQ : 0, 11521 cpu); 11522 if (first) 11523 first = false; 11524 } 11525 } 11526 } 11527 11528 if (vectors != phba->cfg_irq_chann) { 11529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11530 "3238 Reducing IO channels to match number of " 11531 "MSI-X vectors, requested %d got %d\n", 11532 phba->cfg_irq_chann, vectors); 11533 if (phba->cfg_irq_chann > vectors) 11534 phba->cfg_irq_chann = vectors; 11535 } 11536 11537 return rc; 11538 11539 cfg_fail_out: 11540 /* free the irq already requested */ 11541 for (--index; index >= 0; index--) { 11542 eqhdl = lpfc_get_eq_hdl(index); 11543 lpfc_irq_clear_aff(eqhdl); 11544 irq_set_affinity_hint(eqhdl->irq, NULL); 11545 free_irq(eqhdl->irq, eqhdl); 11546 } 11547 11548 /* Unconfigure MSI-X capability structure */ 11549 pci_free_irq_vectors(phba->pcidev); 11550 11551 vec_fail_out: 11552 return rc; 11553 } 11554 11555 /** 11556 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 11557 * @phba: pointer to lpfc hba data structure. 11558 * 11559 * This routine is invoked to enable the MSI interrupt mode to device with 11560 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 11561 * called to enable the MSI vector. The device driver is responsible for 11562 * calling the request_irq() to register MSI vector with a interrupt the 11563 * handler, which is done in this function. 11564 * 11565 * Return codes 11566 * 0 - successful 11567 * other values - error 11568 **/ 11569 static int 11570 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 11571 { 11572 int rc, index; 11573 unsigned int cpu; 11574 struct lpfc_hba_eq_hdl *eqhdl; 11575 11576 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 11577 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 11578 if (rc > 0) 11579 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11580 "0487 PCI enable MSI mode success.\n"); 11581 else { 11582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11583 "0488 PCI enable MSI mode failed (%d)\n", rc); 11584 return rc ? rc : -1; 11585 } 11586 11587 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11588 0, LPFC_DRIVER_NAME, phba); 11589 if (rc) { 11590 pci_free_irq_vectors(phba->pcidev); 11591 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11592 "0490 MSI request_irq failed (%d)\n", rc); 11593 return rc; 11594 } 11595 11596 eqhdl = lpfc_get_eq_hdl(0); 11597 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 11598 11599 cpu = cpumask_first(cpu_present_mask); 11600 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 11601 11602 for (index = 0; index < phba->cfg_irq_chann; index++) { 11603 eqhdl = lpfc_get_eq_hdl(index); 11604 eqhdl->idx = index; 11605 } 11606 11607 return 0; 11608 } 11609 11610 /** 11611 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 11612 * @phba: pointer to lpfc hba data structure. 
11613 * 11614 * This routine is invoked to enable device interrupt and associate driver's 11615 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 11616 * interface spec. Depends on the interrupt mode configured to the driver, 11617 * the driver will try to fallback from the configured interrupt mode to an 11618 * interrupt mode which is supported by the platform, kernel, and device in 11619 * the order of: 11620 * MSI-X -> MSI -> IRQ. 11621 * 11622 * Return codes 11623 * 0 - successful 11624 * other values - error 11625 **/ 11626 static uint32_t 11627 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 11628 { 11629 uint32_t intr_mode = LPFC_INTR_ERROR; 11630 int retval, idx; 11631 11632 if (cfg_mode == 2) { 11633 /* Preparation before conf_msi mbox cmd */ 11634 retval = 0; 11635 if (!retval) { 11636 /* Now, try to enable MSI-X interrupt mode */ 11637 retval = lpfc_sli4_enable_msix(phba); 11638 if (!retval) { 11639 /* Indicate initialization to MSI-X mode */ 11640 phba->intr_type = MSIX; 11641 intr_mode = 2; 11642 } 11643 } 11644 } 11645 11646 /* Fallback to MSI if MSI-X initialization failed */ 11647 if (cfg_mode >= 1 && phba->intr_type == NONE) { 11648 retval = lpfc_sli4_enable_msi(phba); 11649 if (!retval) { 11650 /* Indicate initialization to MSI mode */ 11651 phba->intr_type = MSI; 11652 intr_mode = 1; 11653 } 11654 } 11655 11656 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 11657 if (phba->intr_type == NONE) { 11658 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11659 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 11660 if (!retval) { 11661 struct lpfc_hba_eq_hdl *eqhdl; 11662 unsigned int cpu; 11663 11664 /* Indicate initialization to INTx mode */ 11665 phba->intr_type = INTx; 11666 intr_mode = 0; 11667 11668 eqhdl = lpfc_get_eq_hdl(0); 11669 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 11670 11671 cpu = cpumask_first(cpu_present_mask); 11672 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 11673 cpu); 11674 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11675 eqhdl = lpfc_get_eq_hdl(idx); 11676 eqhdl->idx = idx; 11677 } 11678 } 11679 } 11680 return intr_mode; 11681 } 11682 11683 /** 11684 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 11685 * @phba: pointer to lpfc hba data structure. 11686 * 11687 * This routine is invoked to disable device interrupt and disassociate 11688 * the driver's interrupt handler(s) from interrupt vector(s) to device 11689 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 11690 * will release the interrupt vector(s) for the message signaled interrupt. 11691 **/ 11692 static void 11693 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 11694 { 11695 /* Disable the currently initialized interrupt mode */ 11696 if (phba->intr_type == MSIX) { 11697 int index; 11698 struct lpfc_hba_eq_hdl *eqhdl; 11699 11700 /* Free up MSI-X multi-message vectors */ 11701 for (index = 0; index < phba->cfg_irq_chann; index++) { 11702 eqhdl = lpfc_get_eq_hdl(index); 11703 lpfc_irq_clear_aff(eqhdl); 11704 irq_set_affinity_hint(eqhdl->irq, NULL); 11705 free_irq(eqhdl->irq, eqhdl); 11706 } 11707 } else { 11708 free_irq(phba->pcidev->irq, phba); 11709 } 11710 11711 pci_free_irq_vectors(phba->pcidev); 11712 11713 /* Reset interrupt management states */ 11714 phba->intr_type = NONE; 11715 phba->sli.slistat.sli_intr = 0; 11716 } 11717 11718 /** 11719 * lpfc_unset_hba - Unset SLI3 hba device initialization 11720 * @phba: pointer to lpfc hba data structure. 
11721 * 11722 * This routine is invoked to unset the HBA device initialization steps to 11723 * a device with SLI-3 interface spec. 11724 **/ 11725 static void 11726 lpfc_unset_hba(struct lpfc_hba *phba) 11727 { 11728 struct lpfc_vport *vport = phba->pport; 11729 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11730 11731 spin_lock_irq(shost->host_lock); 11732 vport->load_flag |= FC_UNLOADING; 11733 spin_unlock_irq(shost->host_lock); 11734 11735 kfree(phba->vpi_bmask); 11736 kfree(phba->vpi_ids); 11737 11738 lpfc_stop_hba_timers(phba); 11739 11740 phba->pport->work_port_events = 0; 11741 11742 lpfc_sli_hba_down(phba); 11743 11744 lpfc_sli_brdrestart(phba); 11745 11746 lpfc_sli_disable_intr(phba); 11747 11748 return; 11749 } 11750 11751 /** 11752 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 11753 * @phba: Pointer to HBA context object. 11754 * 11755 * This function is called in the SLI4 code path to wait for completion 11756 * of device's XRIs exchange busy. It will check the XRI exchange busy 11757 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 11758 * that, it will check the XRI exchange busy on outstanding FCP and ELS 11759 * I/Os every 30 seconds, log error message, and wait forever. Only when 11760 * all XRI exchange busy complete, the driver unload shall proceed with 11761 * invoking the function reset ioctl mailbox command to the CNA and the 11762 * the rest of the driver unload resource release. 11763 **/ 11764 static void 11765 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 11766 { 11767 struct lpfc_sli4_hdw_queue *qp; 11768 int idx, ccnt; 11769 int wait_time = 0; 11770 int io_xri_cmpl = 1; 11771 int nvmet_xri_cmpl = 1; 11772 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11773 11774 /* Driver just aborted IOs during the hba_unset process. Pause 11775 * here to give the HBA time to complete the IO and get entries 11776 * into the abts lists. 11777 */ 11778 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 11779 11780 /* Wait for NVME pending IO to flush back to transport. 
*/ 11781 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11782 lpfc_nvme_wait_for_io_drain(phba); 11783 11784 ccnt = 0; 11785 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11786 qp = &phba->sli4_hba.hdwq[idx]; 11787 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 11788 if (!io_xri_cmpl) /* if list is NOT empty */ 11789 ccnt++; 11790 } 11791 if (ccnt) 11792 io_xri_cmpl = 0; 11793 11794 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11795 nvmet_xri_cmpl = 11796 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11797 } 11798 11799 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 11800 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11801 if (!nvmet_xri_cmpl) 11802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11803 "6424 NVMET XRI exchange busy " 11804 "wait time: %d seconds.\n", 11805 wait_time/1000); 11806 if (!io_xri_cmpl) 11807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11808 "6100 IO XRI exchange busy " 11809 "wait time: %d seconds.\n", 11810 wait_time/1000); 11811 if (!els_xri_cmpl) 11812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11813 "2878 ELS XRI exchange busy " 11814 "wait time: %d seconds.\n", 11815 wait_time/1000); 11816 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 11817 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 11818 } else { 11819 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 11820 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 11821 } 11822 11823 ccnt = 0; 11824 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11825 qp = &phba->sli4_hba.hdwq[idx]; 11826 io_xri_cmpl = list_empty( 11827 &qp->lpfc_abts_io_buf_list); 11828 if (!io_xri_cmpl) /* if list is NOT empty */ 11829 ccnt++; 11830 } 11831 if (ccnt) 11832 io_xri_cmpl = 0; 11833 11834 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11835 nvmet_xri_cmpl = list_empty( 11836 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11837 } 11838 els_xri_cmpl = 11839 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11840 11841 } 11842 } 11843 11844 /** 11845 * lpfc_sli4_hba_unset - Unset the fcoe hba 11846 * @phba: Pointer to HBA context object. 11847 * 11848 * This function is called in the SLI4 code path to reset the HBA's FCoE 11849 * function. The caller is not required to hold any lock. This routine 11850 * issues PCI function reset mailbox command to reset the FCoE function. 11851 * At the end of the function, it calls lpfc_hba_down_post function to 11852 * free any pending commands. 11853 **/ 11854 static void 11855 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 11856 { 11857 int wait_cnt = 0; 11858 LPFC_MBOXQ_t *mboxq; 11859 struct pci_dev *pdev = phba->pcidev; 11860 11861 lpfc_stop_hba_timers(phba); 11862 if (phba->pport) 11863 phba->sli4_hba.intr_enable = 0; 11864 11865 /* 11866 * Gracefully wait out the potential current outstanding asynchronous 11867 * mailbox command. 
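	 * The pattern below: (1) block any new async mailbox command from
	 * being posted, (2) poll for up to LPFC_ACTIVE_MBOX_WAIT_CNT * 10 ms
	 * for the active command to finish, and (3) if it still has not
	 * completed, force its completion with MBX_NOT_FINISHED so the
	 * unload can make progress.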
11868 */ 11869 11870 /* First, block any pending async mailbox command from posted */ 11871 spin_lock_irq(&phba->hbalock); 11872 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11873 spin_unlock_irq(&phba->hbalock); 11874 /* Now, trying to wait it out if we can */ 11875 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11876 msleep(10); 11877 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 11878 break; 11879 } 11880 /* Forcefully release the outstanding mailbox command if timed out */ 11881 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11882 spin_lock_irq(&phba->hbalock); 11883 mboxq = phba->sli.mbox_active; 11884 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 11885 __lpfc_mbox_cmpl_put(phba, mboxq); 11886 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11887 phba->sli.mbox_active = NULL; 11888 spin_unlock_irq(&phba->hbalock); 11889 } 11890 11891 /* Abort all iocbs associated with the hba */ 11892 lpfc_sli_hba_iocb_abort(phba); 11893 11894 /* Wait for completion of device XRI exchange busy */ 11895 lpfc_sli4_xri_exchange_busy_wait(phba); 11896 11897 /* per-phba callback de-registration for hotplug event */ 11898 lpfc_cpuhp_remove(phba); 11899 11900 /* Disable PCI subsystem interrupt */ 11901 lpfc_sli4_disable_intr(phba); 11902 11903 /* Disable SR-IOV if enabled */ 11904 if (phba->cfg_sriov_nr_virtfn) 11905 pci_disable_sriov(pdev); 11906 11907 /* Stop kthread signal shall trigger work_done one more time */ 11908 kthread_stop(phba->worker_thread); 11909 11910 /* Disable FW logging to host memory */ 11911 lpfc_ras_stop_fwlog(phba); 11912 11913 /* Unset the queues shared with the hardware then release all 11914 * allocated resources. 11915 */ 11916 lpfc_sli4_queue_unset(phba); 11917 lpfc_sli4_queue_destroy(phba); 11918 11919 /* Reset SLI4 HBA FCoE function */ 11920 lpfc_pci_function_reset(phba); 11921 11922 /* Free RAS DMA memory */ 11923 if (phba->ras_fwlog.ras_enabled) 11924 lpfc_sli4_ras_dma_free(phba); 11925 11926 /* Stop the SLI4 device port */ 11927 if (phba->pport) 11928 phba->pport->work_port_events = 0; 11929 } 11930 11931 /** 11932 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 11933 * @phba: Pointer to HBA context object. 11934 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11935 * 11936 * This function is called in the SLI4 code path to read the port's 11937 * sli4 capabilities. 11938 * 11939 * This function may be be called from any context that can block-wait 11940 * for the completion. The expectation is that this routine is called 11941 * typically from probe_one or from the online routine. 
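 *
 * Purely illustrative call sketch (error handling elided; not the driver's
 * literal caller):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mboxq) {
 *		rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	}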
11942 **/ 11943 int 11944 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11945 { 11946 int rc; 11947 struct lpfc_mqe *mqe; 11948 struct lpfc_pc_sli4_params *sli4_params; 11949 uint32_t mbox_tmo; 11950 11951 rc = 0; 11952 mqe = &mboxq->u.mqe; 11953 11954 /* Read the port's SLI4 Parameters port capabilities */ 11955 lpfc_pc_sli4_params(mboxq); 11956 if (!phba->sli4_hba.intr_enable) 11957 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11958 else { 11959 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11960 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11961 } 11962 11963 if (unlikely(rc)) 11964 return 1; 11965 11966 sli4_params = &phba->sli4_hba.pc_sli4_params; 11967 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 11968 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 11969 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 11970 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 11971 &mqe->un.sli4_params); 11972 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 11973 &mqe->un.sli4_params); 11974 sli4_params->proto_types = mqe->un.sli4_params.word3; 11975 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 11976 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 11977 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 11978 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 11979 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 11980 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 11981 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 11982 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 11983 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 11984 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 11985 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 11986 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 11987 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 11988 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 11989 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 11990 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 11991 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 11992 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 11993 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 11994 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 11995 11996 /* Make sure that sge_supp_len can be handled by the driver */ 11997 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11998 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11999 12000 return rc; 12001 } 12002 12003 /** 12004 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 12005 * @phba: Pointer to HBA context object. 12006 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 12007 * 12008 * This function is called in the SLI4 code path to read the port's 12009 * sli4 capabilities. 12010 * 12011 * This function may be be called from any context that can block-wait 12012 * for the completion. The expectation is that this routine is called 12013 * typically from probe_one or from the online routine. 
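 *
 * Unlike lpfc_pc_sli4_params_get() above, this variant issues the
 * GET_SLI4_PARAMETERS common-subsystem config mailbox and, based on the
 * reported capabilities, also decides the driver's FC4 (FCP/NVMe) support,
 * PBDE usage, and Suppress Response handling.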
12014 **/ 12015 int 12016 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 12017 { 12018 int rc; 12019 struct lpfc_mqe *mqe = &mboxq->u.mqe; 12020 struct lpfc_pc_sli4_params *sli4_params; 12021 uint32_t mbox_tmo; 12022 int length; 12023 bool exp_wqcq_pages = true; 12024 struct lpfc_sli4_parameters *mbx_sli4_parameters; 12025 12026 /* 12027 * By default, the driver assumes the SLI4 port requires RPI 12028 * header postings. The SLI4_PARAM response will correct this 12029 * assumption. 12030 */ 12031 phba->sli4_hba.rpi_hdrs_in_use = 1; 12032 12033 /* Read the port's SLI4 Config Parameters */ 12034 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 12035 sizeof(struct lpfc_sli4_cfg_mhdr)); 12036 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 12037 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 12038 length, LPFC_SLI4_MBX_EMBED); 12039 if (!phba->sli4_hba.intr_enable) 12040 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 12041 else { 12042 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 12043 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 12044 } 12045 if (unlikely(rc)) 12046 return rc; 12047 sli4_params = &phba->sli4_hba.pc_sli4_params; 12048 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 12049 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 12050 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 12051 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 12052 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 12053 mbx_sli4_parameters); 12054 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 12055 mbx_sli4_parameters); 12056 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 12057 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 12058 else 12059 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 12060 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 12061 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 12062 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 12063 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 12064 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 12065 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 12066 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 12067 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 12068 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 12069 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 12070 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 12071 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 12072 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 12073 mbx_sli4_parameters); 12074 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 12075 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 12076 mbx_sli4_parameters); 12077 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 12078 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 12079 12080 /* Check for Extended Pre-Registered SGL support */ 12081 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 12082 12083 /* Check for firmware nvme support */ 12084 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 12085 bf_get(cfg_xib, mbx_sli4_parameters)); 12086 12087 if (rc) { 12088 /* Save this to indicate the Firmware supports NVME */ 12089 sli4_params->nvme = 1; 12090 12091 /* Firmware NVME support, check driver FC4 NVME support */ 12092 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 12093 lpfc_printf_log(phba, 
KERN_INFO, LOG_INIT | LOG_NVME, 12094 "6133 Disabling NVME support: " 12095 "FC4 type not supported: x%x\n", 12096 phba->cfg_enable_fc4_type); 12097 goto fcponly; 12098 } 12099 } else { 12100 /* No firmware NVME support, check driver FC4 NVME support */ 12101 sli4_params->nvme = 0; 12102 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 12104 "6101 Disabling NVME support: Not " 12105 "supported by firmware (%d %d) x%x\n", 12106 bf_get(cfg_nvme, mbx_sli4_parameters), 12107 bf_get(cfg_xib, mbx_sli4_parameters), 12108 phba->cfg_enable_fc4_type); 12109 fcponly: 12110 phba->nvme_support = 0; 12111 phba->nvmet_support = 0; 12112 phba->cfg_nvmet_mrq = 0; 12113 phba->cfg_nvme_seg_cnt = 0; 12114 12115 /* If no FC4 type support, move to just SCSI support */ 12116 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 12117 return -ENODEV; 12118 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 12119 } 12120 } 12121 12122 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 12123 * accommodate 512K and 1M IOs in a single nvme buf. 12124 */ 12125 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12126 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 12127 12128 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ 12129 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 12130 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) 12131 phba->cfg_enable_pbde = 0; 12132 12133 /* 12134 * To support Suppress Response feature we must satisfy 3 conditions. 12135 * lpfc_suppress_rsp module parameter must be set (default). 12136 * In SLI4-Parameters Descriptor: 12137 * Extended Inline Buffers (XIB) must be supported. 12138 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 12139 * (double negative). 12140 */ 12141 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 12142 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 12143 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 12144 else 12145 phba->cfg_suppress_rsp = 0; 12146 12147 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 12148 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 12149 12150 /* Make sure that sge_supp_len can be handled by the driver */ 12151 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 12152 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 12153 12154 /* 12155 * Check whether the adapter supports an embedded copy of the 12156 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 12157 * to use this option, 128-byte WQEs must be used. 
12158 */ 12159 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 12160 phba->fcp_embed_io = 1; 12161 else 12162 phba->fcp_embed_io = 0; 12163 12164 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 12165 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 12166 bf_get(cfg_xib, mbx_sli4_parameters), 12167 phba->cfg_enable_pbde, 12168 phba->fcp_embed_io, phba->nvme_support, 12169 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 12170 12171 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 12172 LPFC_SLI_INTF_IF_TYPE_2) && 12173 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 12174 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 12175 exp_wqcq_pages = false; 12176 12177 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 12178 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 12179 exp_wqcq_pages && 12180 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 12181 phba->enab_exp_wqcq_pages = 1; 12182 else 12183 phba->enab_exp_wqcq_pages = 0; 12184 /* 12185 * Check if the SLI port supports MDS Diagnostics 12186 */ 12187 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 12188 phba->mds_diags_support = 1; 12189 else 12190 phba->mds_diags_support = 0; 12191 12192 /* 12193 * Check if the SLI port supports NSLER 12194 */ 12195 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 12196 phba->nsler = 1; 12197 else 12198 phba->nsler = 0; 12199 12200 return 0; 12201 } 12202 12203 /** 12204 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 12205 * @pdev: pointer to PCI device 12206 * @pid: pointer to PCI device identifier 12207 * 12208 * This routine is to be called to attach a device with SLI-3 interface spec 12209 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 12210 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 12211 * information of the device and driver to see if the driver state that it can 12212 * support this kind of device. If the match is successful, the driver core 12213 * invokes this routine. If this routine determines it can claim the HBA, it 12214 * does all the initialization that it needs to do to handle the HBA properly. 
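 *
 * For orientation, the steps performed below are: enable the PCI device,
 * set up the SLI-3 API jump table and PCI memory space, set up driver
 * resources and the iocb list, create the SCSI host and sysfs attributes,
 * then enable interrupts (falling back to simpler modes if the active
 * interrupt test fails), and finally run the SLI-3 HBA setup and
 * post-initialization setup.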
12215 * 12216 * Return code 12217 * 0 - driver can claim the device 12218 * negative value - driver can not claim the device 12219 **/ 12220 static int 12221 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 12222 { 12223 struct lpfc_hba *phba; 12224 struct lpfc_vport *vport = NULL; 12225 struct Scsi_Host *shost = NULL; 12226 int error; 12227 uint32_t cfg_mode, intr_mode; 12228 12229 /* Allocate memory for HBA structure */ 12230 phba = lpfc_hba_alloc(pdev); 12231 if (!phba) 12232 return -ENOMEM; 12233 12234 /* Perform generic PCI device enabling operation */ 12235 error = lpfc_enable_pci_dev(phba); 12236 if (error) 12237 goto out_free_phba; 12238 12239 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 12240 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 12241 if (error) 12242 goto out_disable_pci_dev; 12243 12244 /* Set up SLI-3 specific device PCI memory space */ 12245 error = lpfc_sli_pci_mem_setup(phba); 12246 if (error) { 12247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12248 "1402 Failed to set up pci memory space.\n"); 12249 goto out_disable_pci_dev; 12250 } 12251 12252 /* Set up SLI-3 specific device driver resources */ 12253 error = lpfc_sli_driver_resource_setup(phba); 12254 if (error) { 12255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12256 "1404 Failed to set up driver resource.\n"); 12257 goto out_unset_pci_mem_s3; 12258 } 12259 12260 /* Initialize and populate the iocb list per host */ 12261 12262 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 12263 if (error) { 12264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12265 "1405 Failed to initialize iocb list.\n"); 12266 goto out_unset_driver_resource_s3; 12267 } 12268 12269 /* Set up common device driver resources */ 12270 error = lpfc_setup_driver_resource_phase2(phba); 12271 if (error) { 12272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12273 "1406 Failed to set up driver resource.\n"); 12274 goto out_free_iocb_list; 12275 } 12276 12277 /* Get the default values for Model Name and Description */ 12278 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12279 12280 /* Create SCSI host to the physical port */ 12281 error = lpfc_create_shost(phba); 12282 if (error) { 12283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12284 "1407 Failed to create scsi host.\n"); 12285 goto out_unset_driver_resource; 12286 } 12287 12288 /* Configure sysfs attributes */ 12289 vport = phba->pport; 12290 error = lpfc_alloc_sysfs_attr(vport); 12291 if (error) { 12292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12293 "1476 Failed to allocate sysfs attr\n"); 12294 goto out_destroy_shost; 12295 } 12296 12297 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12298 /* Now, trying to enable interrupt and bring up the device */ 12299 cfg_mode = phba->cfg_use_msi; 12300 while (true) { 12301 /* Put device to a known state before enabling interrupt */ 12302 lpfc_stop_port(phba); 12303 /* Configure and enable interrupt */ 12304 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 12305 if (intr_mode == LPFC_INTR_ERROR) { 12306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12307 "0431 Failed to enable interrupt.\n"); 12308 error = -ENODEV; 12309 goto out_free_sysfs_attr; 12310 } 12311 /* SLI-3 HBA setup */ 12312 if (lpfc_sli_hba_setup(phba)) { 12313 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12314 "1477 Failed to set up hba\n"); 12315 error = -ENODEV; 12316 goto out_remove_device; 12317 } 12318 12319 /* Wait 50ms for the interrupts of previous mailbox commands */ 12320 msleep(50); 12321 /* 
Check active interrupts on message signaled interrupts */ 12322 if (intr_mode == 0 || 12323 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 12324 /* Log the current active interrupt mode */ 12325 phba->intr_mode = intr_mode; 12326 lpfc_log_intr_mode(phba, intr_mode); 12327 break; 12328 } else { 12329 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12330 "0447 Configure interrupt mode (%d) " 12331 "failed active interrupt test.\n", 12332 intr_mode); 12333 /* Disable the current interrupt mode */ 12334 lpfc_sli_disable_intr(phba); 12335 /* Try next level of interrupt mode */ 12336 cfg_mode = --intr_mode; 12337 } 12338 } 12339 12340 /* Perform post initialization setup */ 12341 lpfc_post_init_setup(phba); 12342 12343 /* Check if there are static vports to be created. */ 12344 lpfc_create_static_vport(phba); 12345 12346 return 0; 12347 12348 out_remove_device: 12349 lpfc_unset_hba(phba); 12350 out_free_sysfs_attr: 12351 lpfc_free_sysfs_attr(vport); 12352 out_destroy_shost: 12353 lpfc_destroy_shost(phba); 12354 out_unset_driver_resource: 12355 lpfc_unset_driver_resource_phase2(phba); 12356 out_free_iocb_list: 12357 lpfc_free_iocb_list(phba); 12358 out_unset_driver_resource_s3: 12359 lpfc_sli_driver_resource_unset(phba); 12360 out_unset_pci_mem_s3: 12361 lpfc_sli_pci_mem_unset(phba); 12362 out_disable_pci_dev: 12363 lpfc_disable_pci_dev(phba); 12364 if (shost) 12365 scsi_host_put(shost); 12366 out_free_phba: 12367 lpfc_hba_free(phba); 12368 return error; 12369 } 12370 12371 /** 12372 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 12373 * @pdev: pointer to PCI device 12374 * 12375 * This routine is to be called to disattach a device with SLI-3 interface 12376 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 12377 * removed from PCI bus, it performs all the necessary cleanup for the HBA 12378 * device to be removed from the PCI subsystem properly. 12379 **/ 12380 static void 12381 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 12382 { 12383 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12384 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 12385 struct lpfc_vport **vports; 12386 struct lpfc_hba *phba = vport->phba; 12387 int i; 12388 12389 spin_lock_irq(&phba->hbalock); 12390 vport->load_flag |= FC_UNLOADING; 12391 spin_unlock_irq(&phba->hbalock); 12392 12393 lpfc_free_sysfs_attr(vport); 12394 12395 /* Release all the vports against this physical port */ 12396 vports = lpfc_create_vport_work_array(phba); 12397 if (vports != NULL) 12398 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 12399 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 12400 continue; 12401 fc_vport_terminate(vports[i]->fc_vport); 12402 } 12403 lpfc_destroy_vport_work_array(phba, vports); 12404 12405 /* Remove FC host and then SCSI host with the physical port */ 12406 fc_remove_host(shost); 12407 scsi_remove_host(shost); 12408 12409 lpfc_cleanup(vport); 12410 12411 /* 12412 * Bring down the SLI Layer. This step disable all interrupts, 12413 * clears the rings, discards all mailbox commands, and resets 12414 * the HBA. 
12415 */ 12416 12417 /* HBA interrupt will be disabled after this call */ 12418 lpfc_sli_hba_down(phba); 12419 /* Stop kthread signal shall trigger work_done one more time */ 12420 kthread_stop(phba->worker_thread); 12421 /* Final cleanup of txcmplq and reset the HBA */ 12422 lpfc_sli_brdrestart(phba); 12423 12424 kfree(phba->vpi_bmask); 12425 kfree(phba->vpi_ids); 12426 12427 lpfc_stop_hba_timers(phba); 12428 spin_lock_irq(&phba->port_list_lock); 12429 list_del_init(&vport->listentry); 12430 spin_unlock_irq(&phba->port_list_lock); 12431 12432 lpfc_debugfs_terminate(vport); 12433 12434 /* Disable SR-IOV if enabled */ 12435 if (phba->cfg_sriov_nr_virtfn) 12436 pci_disable_sriov(pdev); 12437 12438 /* Disable interrupt */ 12439 lpfc_sli_disable_intr(phba); 12440 12441 scsi_host_put(shost); 12442 12443 /* 12444 * Call scsi_free before mem_free since scsi bufs are released to their 12445 * corresponding pools here. 12446 */ 12447 lpfc_scsi_free(phba); 12448 lpfc_free_iocb_list(phba); 12449 12450 lpfc_mem_free_all(phba); 12451 12452 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 12453 phba->hbqslimp.virt, phba->hbqslimp.phys); 12454 12455 /* Free resources associated with SLI2 interface */ 12456 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 12457 phba->slim2p.virt, phba->slim2p.phys); 12458 12459 /* unmap adapter SLIM and Control Registers */ 12460 iounmap(phba->ctrl_regs_memmap_p); 12461 iounmap(phba->slim_memmap_p); 12462 12463 lpfc_hba_free(phba); 12464 12465 pci_release_mem_regions(pdev); 12466 pci_disable_device(pdev); 12467 } 12468 12469 /** 12470 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 12471 * @pdev: pointer to PCI device 12472 * @msg: power management message 12473 * 12474 * This routine is to be called from the kernel's PCI subsystem to support 12475 * system Power Management (PM) to device with SLI-3 interface spec. When 12476 * PM invokes this method, it quiesces the device by stopping the driver's 12477 * worker thread for the device, turning off device's interrupt and DMA, 12478 * and bring the device offline. Note that as the driver implements the 12479 * minimum PM requirements to a power-aware driver's PM support for the 12480 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 12481 * to the suspend() method call will be treated as SUSPEND and the driver will 12482 * fully reinitialize its device during resume() method call, the driver will 12483 * set device to PCI_D3hot state in PCI config space instead of setting it 12484 * according to the @msg provided by the PM. 
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter.
*/ 12558 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12559 "lpfc_worker_%d", phba->brd_no); 12560 if (IS_ERR(phba->worker_thread)) { 12561 error = PTR_ERR(phba->worker_thread); 12562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12563 "0434 PM resume failed to start worker " 12564 "thread: error=x%x.\n", error); 12565 return error; 12566 } 12567 12568 /* Configure and enable interrupt */ 12569 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12570 if (intr_mode == LPFC_INTR_ERROR) { 12571 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12572 "0430 PM resume Failed to enable interrupt\n"); 12573 return -EIO; 12574 } else 12575 phba->intr_mode = intr_mode; 12576 12577 /* Restart HBA and bring it online */ 12578 lpfc_sli_brdrestart(phba); 12579 lpfc_online(phba); 12580 12581 /* Log the current active interrupt mode */ 12582 lpfc_log_intr_mode(phba, phba->intr_mode); 12583 12584 return 0; 12585 } 12586 12587 /** 12588 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 12589 * @phba: pointer to lpfc hba data structure. 12590 * 12591 * This routine is called to prepare the SLI3 device for PCI slot recover. It 12592 * aborts all the outstanding SCSI I/Os to the pci device. 12593 **/ 12594 static void 12595 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 12596 { 12597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12598 "2723 PCI channel I/O abort preparing for recovery\n"); 12599 12600 /* 12601 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12602 * and let the SCSI mid-layer to retry them to recover. 12603 */ 12604 lpfc_sli_abort_fcp_rings(phba); 12605 } 12606 12607 /** 12608 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 12609 * @phba: pointer to lpfc hba data structure. 12610 * 12611 * This routine is called to prepare the SLI3 device for PCI slot reset. It 12612 * disables the device interrupt and pci device, and aborts the internal FCP 12613 * pending I/Os. 12614 **/ 12615 static void 12616 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 12617 { 12618 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12619 "2710 PCI channel disable preparing for reset\n"); 12620 12621 /* Block any management I/Os to the device */ 12622 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 12623 12624 /* Block all SCSI devices' I/Os on the host */ 12625 lpfc_scsi_dev_block(phba); 12626 12627 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12628 lpfc_sli_flush_io_rings(phba); 12629 12630 /* stop all timers */ 12631 lpfc_stop_hba_timers(phba); 12632 12633 /* Disable interrupt and pci device */ 12634 lpfc_sli_disable_intr(phba); 12635 pci_disable_device(phba->pcidev); 12636 } 12637 12638 /** 12639 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 12640 * @phba: pointer to lpfc hba data structure. 12641 * 12642 * This routine is called to prepare the SLI3 device for PCI slot permanently 12643 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12644 * pending I/Os. 
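 * Unlike the reset-preparation path, the device interrupt and the PCI
 * device itself are not touched here, since the slot is about to be
 * permanently disabled.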
12645 **/ 12646 static void 12647 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12648 { 12649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12650 "2711 PCI channel permanent disable for failure\n"); 12651 /* Block all SCSI devices' I/Os on the host */ 12652 lpfc_scsi_dev_block(phba); 12653 12654 /* stop all timers */ 12655 lpfc_stop_hba_timers(phba); 12656 12657 /* Clean up all driver's outstanding SCSI I/Os */ 12658 lpfc_sli_flush_io_rings(phba); 12659 } 12660 12661 /** 12662 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 12663 * @pdev: pointer to PCI device. 12664 * @state: the current PCI connection state. 12665 * 12666 * This routine is called from the PCI subsystem for I/O error handling to 12667 * device with SLI-3 interface spec. This function is called by the PCI 12668 * subsystem after a PCI bus error affecting this device has been detected. 12669 * When this function is invoked, it will need to stop all the I/Os and 12670 * interrupt(s) to the device. Once that is done, it will return 12671 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 12672 * as desired. 12673 * 12674 * Return codes 12675 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 12676 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12677 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12678 **/ 12679 static pci_ers_result_t 12680 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 12681 { 12682 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12683 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12684 12685 switch (state) { 12686 case pci_channel_io_normal: 12687 /* Non-fatal error, prepare for recovery */ 12688 lpfc_sli_prep_dev_for_recover(phba); 12689 return PCI_ERS_RESULT_CAN_RECOVER; 12690 case pci_channel_io_frozen: 12691 /* Fatal error, prepare for slot reset */ 12692 lpfc_sli_prep_dev_for_reset(phba); 12693 return PCI_ERS_RESULT_NEED_RESET; 12694 case pci_channel_io_perm_failure: 12695 /* Permanent failure, prepare for device down */ 12696 lpfc_sli_prep_dev_for_perm_failure(phba); 12697 return PCI_ERS_RESULT_DISCONNECT; 12698 default: 12699 /* Unknown state, prepare and request slot reset */ 12700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12701 "0472 Unknown PCI error state: x%x\n", state); 12702 lpfc_sli_prep_dev_for_reset(phba); 12703 return PCI_ERS_RESULT_NEED_RESET; 12704 } 12705 } 12706 12707 /** 12708 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 12709 * @pdev: pointer to PCI device. 12710 * 12711 * This routine is called from the PCI subsystem for error handling to 12712 * device with SLI-3 interface spec. This is called after PCI bus has been 12713 * reset to restart the PCI card from scratch, as if from a cold-boot. 12714 * During the PCI subsystem error recovery, after driver returns 12715 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12716 * recovery and then call this routine before calling the .resume method 12717 * to recover the device. This function will initialize the HBA device, 12718 * enable the interrupt, but it will just put the HBA to offline state 12719 * without passing any I/O traffic. 
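 * The subsequent .resume callback (lpfc_io_resume_s3) is what finally
 * brings the recovered port back online.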
12720 * 12721 * Return codes 12722 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12723 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12724 */ 12725 static pci_ers_result_t 12726 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 12727 { 12728 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12729 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12730 struct lpfc_sli *psli = &phba->sli; 12731 uint32_t intr_mode; 12732 12733 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12734 if (pci_enable_device_mem(pdev)) { 12735 printk(KERN_ERR "lpfc: Cannot re-enable " 12736 "PCI device after reset.\n"); 12737 return PCI_ERS_RESULT_DISCONNECT; 12738 } 12739 12740 pci_restore_state(pdev); 12741 12742 /* 12743 * As the new kernel behavior of pci_restore_state() API call clears 12744 * device saved_state flag, need to save the restored state again. 12745 */ 12746 pci_save_state(pdev); 12747 12748 if (pdev->is_busmaster) 12749 pci_set_master(pdev); 12750 12751 spin_lock_irq(&phba->hbalock); 12752 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12753 spin_unlock_irq(&phba->hbalock); 12754 12755 /* Configure and enable interrupt */ 12756 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12757 if (intr_mode == LPFC_INTR_ERROR) { 12758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12759 "0427 Cannot re-enable interrupt after " 12760 "slot reset.\n"); 12761 return PCI_ERS_RESULT_DISCONNECT; 12762 } else 12763 phba->intr_mode = intr_mode; 12764 12765 /* Take device offline, it will perform cleanup */ 12766 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12767 lpfc_offline(phba); 12768 lpfc_sli_brdrestart(phba); 12769 12770 /* Log the current active interrupt mode */ 12771 lpfc_log_intr_mode(phba, phba->intr_mode); 12772 12773 return PCI_ERS_RESULT_RECOVERED; 12774 } 12775 12776 /** 12777 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 12778 * @pdev: pointer to PCI device 12779 * 12780 * This routine is called from the PCI subsystem for error handling to device 12781 * with SLI-3 interface spec. It is called when kernel error recovery tells 12782 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12783 * error recovery. After this call, traffic can start to flow from this device 12784 * again. 12785 */ 12786 static void 12787 lpfc_io_resume_s3(struct pci_dev *pdev) 12788 { 12789 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12790 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12791 12792 /* Bring device online, it will be no-op for non-fatal error resume */ 12793 lpfc_online(phba); 12794 } 12795 12796 /** 12797 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 12798 * @phba: pointer to lpfc hba data structure. 12799 * 12800 * returns the number of ELS/CT IOCBs to reserve 12801 **/ 12802 int 12803 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 12804 { 12805 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 12806 12807 if (phba->sli_rev == LPFC_SLI_REV4) { 12808 if (max_xri <= 100) 12809 return 10; 12810 else if (max_xri <= 256) 12811 return 25; 12812 else if (max_xri <= 512) 12813 return 50; 12814 else if (max_xri <= 1024) 12815 return 100; 12816 else if (max_xri <= 1536) 12817 return 150; 12818 else if (max_xri <= 2048) 12819 return 200; 12820 else 12821 return 250; 12822 } else 12823 return 0; 12824 } 12825 12826 /** 12827 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12828 * @phba: pointer to lpfc hba data structure. 
 *
 * returns the number of ELS/CT + NVMET IOCBs to reserve
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}


static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	int rc;

	/* Three cases: (1) FW was not supported on the detected adapter.
	 * (2) FW update has been locked out administratively.
	 * (3) Some other error during FW update.
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
	     magic_number != MAGIC_NUMBER_G7)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to lpfc hba data structure (driver context passed to
 *	the firmware request).
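 *
 * The routine compares the image revision against the running firmware and
 * skips the update when they match; otherwise the image is staged through a
 * list of SLI4_PAGE_SIZE DMA buffers and written to the port in chunks with
 * lpfc_wr_object().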
12894 * 12895 **/ 12896 static void 12897 lpfc_write_firmware(const struct firmware *fw, void *context) 12898 { 12899 struct lpfc_hba *phba = (struct lpfc_hba *)context; 12900 char fwrev[FW_REV_STR_SIZE]; 12901 struct lpfc_grp_hdr *image; 12902 struct list_head dma_buffer_list; 12903 int i, rc = 0; 12904 struct lpfc_dmabuf *dmabuf, *next; 12905 uint32_t offset = 0, temp_offset = 0; 12906 uint32_t magic_number, ftype, fid, fsize; 12907 12908 /* It can be null in no-wait mode, sanity check */ 12909 if (!fw) { 12910 rc = -ENXIO; 12911 goto out; 12912 } 12913 image = (struct lpfc_grp_hdr *)fw->data; 12914 12915 magic_number = be32_to_cpu(image->magic_number); 12916 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 12917 fid = bf_get_be32(lpfc_grp_hdr_id, image); 12918 fsize = be32_to_cpu(image->size); 12919 12920 INIT_LIST_HEAD(&dma_buffer_list); 12921 lpfc_decode_firmware_rev(phba, fwrev, 1); 12922 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 12923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12924 "3023 Updating Firmware, Current Version:%s " 12925 "New Version:%s\n", 12926 fwrev, image->revision); 12927 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 12928 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 12929 GFP_KERNEL); 12930 if (!dmabuf) { 12931 rc = -ENOMEM; 12932 goto release_out; 12933 } 12934 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12935 SLI4_PAGE_SIZE, 12936 &dmabuf->phys, 12937 GFP_KERNEL); 12938 if (!dmabuf->virt) { 12939 kfree(dmabuf); 12940 rc = -ENOMEM; 12941 goto release_out; 12942 } 12943 list_add_tail(&dmabuf->list, &dma_buffer_list); 12944 } 12945 while (offset < fw->size) { 12946 temp_offset = offset; 12947 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 12948 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 12949 memcpy(dmabuf->virt, 12950 fw->data + temp_offset, 12951 fw->size - temp_offset); 12952 temp_offset = fw->size; 12953 break; 12954 } 12955 memcpy(dmabuf->virt, fw->data + temp_offset, 12956 SLI4_PAGE_SIZE); 12957 temp_offset += SLI4_PAGE_SIZE; 12958 } 12959 rc = lpfc_wr_object(phba, &dma_buffer_list, 12960 (fw->size - offset), &offset); 12961 if (rc) { 12962 rc = lpfc_log_write_firmware_error(phba, offset, 12963 magic_number, 12964 ftype, 12965 fid, 12966 fsize, 12967 fw); 12968 goto release_out; 12969 } 12970 } 12971 rc = offset; 12972 } else 12973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12974 "3029 Skipped Firmware update, Current " 12975 "Version:%s New Version:%s\n", 12976 fwrev, image->revision); 12977 12978 release_out: 12979 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 12980 list_del(&dmabuf->list); 12981 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 12982 dmabuf->virt, dmabuf->phys); 12983 kfree(dmabuf); 12984 } 12985 release_firmware(fw); 12986 out: 12987 if (rc < 0) 12988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12989 "3062 Firmware update error, status %d.\n", rc); 12990 else 12991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12992 "3024 Firmware update success: size %d.\n", rc); 12993 } 12994 12995 /** 12996 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 12997 * @phba: pointer to lpfc hba data structure. 12998 * 12999 * This routine is called to perform Linux generic firmware upgrade on device 13000 * that supports such feature. 
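 * @fw_upgrade: INT_FW_UPGRADE requests the image asynchronously via
 * request_firmware_nowait(); RUN_FW_UPGRADE fetches it synchronously with
 * request_firmware() and writes it immediately.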
13001 **/ 13002 int 13003 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 13004 { 13005 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 13006 int ret; 13007 const struct firmware *fw; 13008 13009 /* Only supported on SLI4 interface type 2 for now */ 13010 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 13011 LPFC_SLI_INTF_IF_TYPE_2) 13012 return -EPERM; 13013 13014 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 13015 13016 if (fw_upgrade == INT_FW_UPGRADE) { 13017 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 13018 file_name, &phba->pcidev->dev, 13019 GFP_KERNEL, (void *)phba, 13020 lpfc_write_firmware); 13021 } else if (fw_upgrade == RUN_FW_UPGRADE) { 13022 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 13023 if (!ret) 13024 lpfc_write_firmware(fw, (void *)phba); 13025 } else { 13026 ret = -EINVAL; 13027 } 13028 13029 return ret; 13030 } 13031 13032 /** 13033 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 13034 * @pdev: pointer to PCI device 13035 * @pid: pointer to PCI device identifier 13036 * 13037 * This routine is called from the kernel's PCI subsystem to device with 13038 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 13039 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13040 * information of the device and driver to see if the driver state that it 13041 * can support this kind of device. If the match is successful, the driver 13042 * core invokes this routine. If this routine determines it can claim the HBA, 13043 * it does all the initialization that it needs to do to handle the HBA 13044 * properly. 13045 * 13046 * Return code 13047 * 0 - driver can claim the device 13048 * negative value - driver can not claim the device 13049 **/ 13050 static int 13051 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 13052 { 13053 struct lpfc_hba *phba; 13054 struct lpfc_vport *vport = NULL; 13055 struct Scsi_Host *shost = NULL; 13056 int error; 13057 uint32_t cfg_mode, intr_mode; 13058 13059 /* Allocate memory for HBA structure */ 13060 phba = lpfc_hba_alloc(pdev); 13061 if (!phba) 13062 return -ENOMEM; 13063 13064 /* Perform generic PCI device enabling operation */ 13065 error = lpfc_enable_pci_dev(phba); 13066 if (error) 13067 goto out_free_phba; 13068 13069 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 13070 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 13071 if (error) 13072 goto out_disable_pci_dev; 13073 13074 /* Set up SLI-4 specific device PCI memory space */ 13075 error = lpfc_sli4_pci_mem_setup(phba); 13076 if (error) { 13077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13078 "1410 Failed to set up pci memory space.\n"); 13079 goto out_disable_pci_dev; 13080 } 13081 13082 /* Set up SLI-4 Specific device driver resources */ 13083 error = lpfc_sli4_driver_resource_setup(phba); 13084 if (error) { 13085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13086 "1412 Failed to set up driver resource.\n"); 13087 goto out_unset_pci_mem_s4; 13088 } 13089 13090 INIT_LIST_HEAD(&phba->active_rrq_list); 13091 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 13092 13093 /* Set up common device driver resources */ 13094 error = lpfc_setup_driver_resource_phase2(phba); 13095 if (error) { 13096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13097 "1414 Failed to set up driver resource.\n"); 13098 goto out_unset_driver_resource_s4; 13099 } 13100 13101 /* Get the default values for Model Name and 
Description */ 13102 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13103 13104 /* Now, trying to enable interrupt and bring up the device */ 13105 cfg_mode = phba->cfg_use_msi; 13106 13107 /* Put device to a known state before enabling interrupt */ 13108 phba->pport = NULL; 13109 lpfc_stop_port(phba); 13110 13111 /* Init cpu_map array */ 13112 lpfc_cpu_map_array_init(phba); 13113 13114 /* Init hba_eq_hdl array */ 13115 lpfc_hba_eq_hdl_array_init(phba); 13116 13117 /* Configure and enable interrupt */ 13118 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 13119 if (intr_mode == LPFC_INTR_ERROR) { 13120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13121 "0426 Failed to enable interrupt.\n"); 13122 error = -ENODEV; 13123 goto out_unset_driver_resource; 13124 } 13125 /* Default to single EQ for non-MSI-X */ 13126 if (phba->intr_type != MSIX) { 13127 phba->cfg_irq_chann = 1; 13128 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13129 if (phba->nvmet_support) 13130 phba->cfg_nvmet_mrq = 1; 13131 } 13132 } 13133 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 13134 13135 /* Create SCSI host to the physical port */ 13136 error = lpfc_create_shost(phba); 13137 if (error) { 13138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13139 "1415 Failed to create scsi host.\n"); 13140 goto out_disable_intr; 13141 } 13142 vport = phba->pport; 13143 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13144 13145 /* Configure sysfs attributes */ 13146 error = lpfc_alloc_sysfs_attr(vport); 13147 if (error) { 13148 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13149 "1416 Failed to allocate sysfs attr\n"); 13150 goto out_destroy_shost; 13151 } 13152 13153 /* Set up SLI-4 HBA */ 13154 if (lpfc_sli4_hba_setup(phba)) { 13155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13156 "1421 Failed to set up hba\n"); 13157 error = -ENODEV; 13158 goto out_free_sysfs_attr; 13159 } 13160 13161 /* Log the current active interrupt mode */ 13162 phba->intr_mode = intr_mode; 13163 lpfc_log_intr_mode(phba, intr_mode); 13164 13165 /* Perform post initialization setup */ 13166 lpfc_post_init_setup(phba); 13167 13168 /* NVME support in FW earlier in the driver load corrects the 13169 * FC4 type making a check for nvme_support unnecessary. 13170 */ 13171 if (phba->nvmet_support == 0) { 13172 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13173 /* Create NVME binding with nvme_fc_transport. This 13174 * ensures the vport is initialized. If the localport 13175 * create fails, it should not unload the driver to 13176 * support field issues. 13177 */ 13178 error = lpfc_nvme_create_localport(vport); 13179 if (error) { 13180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13181 "6004 NVME registration " 13182 "failed, error x%x\n", 13183 error); 13184 } 13185 } 13186 } 13187 13188 /* check for firmware upgrade or downgrade */ 13189 if (phba->cfg_request_firmware_upgrade) 13190 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 13191 13192 /* Check if there are static vports to be created. 
*/ 13193 lpfc_create_static_vport(phba); 13194 13195 /* Enable RAS FW log support */ 13196 lpfc_sli4_ras_setup(phba); 13197 13198 INIT_LIST_HEAD(&phba->poll_list); 13199 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 13200 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 13201 13202 return 0; 13203 13204 out_free_sysfs_attr: 13205 lpfc_free_sysfs_attr(vport); 13206 out_destroy_shost: 13207 lpfc_destroy_shost(phba); 13208 out_disable_intr: 13209 lpfc_sli4_disable_intr(phba); 13210 out_unset_driver_resource: 13211 lpfc_unset_driver_resource_phase2(phba); 13212 out_unset_driver_resource_s4: 13213 lpfc_sli4_driver_resource_unset(phba); 13214 out_unset_pci_mem_s4: 13215 lpfc_sli4_pci_mem_unset(phba); 13216 out_disable_pci_dev: 13217 lpfc_disable_pci_dev(phba); 13218 if (shost) 13219 scsi_host_put(shost); 13220 out_free_phba: 13221 lpfc_hba_free(phba); 13222 return error; 13223 } 13224 13225 /** 13226 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 13227 * @pdev: pointer to PCI device 13228 * 13229 * This routine is called from the kernel's PCI subsystem to device with 13230 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 13231 * removed from PCI bus, it performs all the necessary cleanup for the HBA 13232 * device to be removed from the PCI subsystem properly. 13233 **/ 13234 static void 13235 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 13236 { 13237 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13238 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 13239 struct lpfc_vport **vports; 13240 struct lpfc_hba *phba = vport->phba; 13241 int i; 13242 13243 /* Mark the device unloading flag */ 13244 spin_lock_irq(&phba->hbalock); 13245 vport->load_flag |= FC_UNLOADING; 13246 spin_unlock_irq(&phba->hbalock); 13247 13248 /* Free the HBA sysfs attributes */ 13249 lpfc_free_sysfs_attr(vport); 13250 13251 /* Release all the vports against this physical port */ 13252 vports = lpfc_create_vport_work_array(phba); 13253 if (vports != NULL) 13254 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 13255 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 13256 continue; 13257 fc_vport_terminate(vports[i]->fc_vport); 13258 } 13259 lpfc_destroy_vport_work_array(phba, vports); 13260 13261 /* Remove FC host and then SCSI host with the physical port */ 13262 fc_remove_host(shost); 13263 scsi_remove_host(shost); 13264 13265 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 13266 * localports are destroyed after to cleanup all transport memory. 13267 */ 13268 lpfc_cleanup(vport); 13269 lpfc_nvmet_destroy_targetport(phba); 13270 lpfc_nvme_destroy_localport(vport); 13271 13272 /* De-allocate multi-XRI pools */ 13273 if (phba->cfg_xri_rebalancing) 13274 lpfc_destroy_multixri_pools(phba); 13275 13276 /* 13277 * Bring down the SLI Layer. This step disables all interrupts, 13278 * clears the rings, discards all mailbox commands, and resets 13279 * the HBA FCoE function. 13280 */ 13281 lpfc_debugfs_terminate(vport); 13282 13283 lpfc_stop_hba_timers(phba); 13284 spin_lock_irq(&phba->port_list_lock); 13285 list_del_init(&vport->listentry); 13286 spin_unlock_irq(&phba->port_list_lock); 13287 13288 /* Perform scsi free before driver resource_unset since scsi 13289 * buffers are released to their corresponding pools here. 
13290 */ 13291 lpfc_io_free(phba); 13292 lpfc_free_iocb_list(phba); 13293 lpfc_sli4_hba_unset(phba); 13294 13295 lpfc_unset_driver_resource_phase2(phba); 13296 lpfc_sli4_driver_resource_unset(phba); 13297 13298 /* Unmap adapter Control and Doorbell registers */ 13299 lpfc_sli4_pci_mem_unset(phba); 13300 13301 /* Release PCI resources and disable device's PCI function */ 13302 scsi_host_put(shost); 13303 lpfc_disable_pci_dev(phba); 13304 13305 /* Finally, free the driver's device data structure */ 13306 lpfc_hba_free(phba); 13307 13308 return; 13309 } 13310 13311 /** 13312 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 13313 * @pdev: pointer to PCI device 13314 * @msg: power management message 13315 * 13316 * This routine is called from the kernel's PCI subsystem to support system 13317 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 13318 * this method, it quiesces the device by stopping the driver's worker 13319 * thread for the device, turning off device's interrupt and DMA, and bring 13320 * the device offline. Note that as the driver implements the minimum PM 13321 * requirements to a power-aware driver's PM support for suspend/resume -- all 13322 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 13323 * method call will be treated as SUSPEND and the driver will fully 13324 * reinitialize its device during resume() method call, the driver will set 13325 * device to PCI_D3hot state in PCI config space instead of setting it 13326 * according to the @msg provided by the PM. 13327 * 13328 * Return code 13329 * 0 - driver suspended the device 13330 * Error otherwise 13331 **/ 13332 static int 13333 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 13334 { 13335 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13336 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13337 13338 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13339 "2843 PCI device Power Management suspend.\n"); 13340 13341 /* Bring down the device */ 13342 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13343 lpfc_offline(phba); 13344 kthread_stop(phba->worker_thread); 13345 13346 /* Disable interrupt from device */ 13347 lpfc_sli4_disable_intr(phba); 13348 lpfc_sli4_queue_destroy(phba); 13349 13350 /* Save device state to PCI config space */ 13351 pci_save_state(pdev); 13352 pci_set_power_state(pdev, PCI_D3hot); 13353 13354 return 0; 13355 } 13356 13357 /** 13358 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 13359 * @pdev: pointer to PCI device 13360 * 13361 * This routine is called from the kernel's PCI subsystem to support system 13362 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 13363 * this method, it restores the device's PCI config space state and fully 13364 * reinitializes the device and brings it online. Note that as the driver 13365 * implements the minimum PM requirements to a power-aware driver's PM for 13366 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 13367 * to the suspend() method call will be treated as SUSPEND and the driver 13368 * will fully reinitialize its device during resume() method call, the device 13369 * will be set to PCI_D0 directly in PCI config space before restoring the 13370 * state. 
13371 * 13372 * Return code 13373 * 0 - driver suspended the device 13374 * Error otherwise 13375 **/ 13376 static int 13377 lpfc_pci_resume_one_s4(struct pci_dev *pdev) 13378 { 13379 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13380 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13381 uint32_t intr_mode; 13382 int error; 13383 13384 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13385 "0292 PCI device Power Management resume.\n"); 13386 13387 /* Restore device state from PCI config space */ 13388 pci_set_power_state(pdev, PCI_D0); 13389 pci_restore_state(pdev); 13390 13391 /* 13392 * As the new kernel behavior of pci_restore_state() API call clears 13393 * device saved_state flag, need to save the restored state again. 13394 */ 13395 pci_save_state(pdev); 13396 13397 if (pdev->is_busmaster) 13398 pci_set_master(pdev); 13399 13400 /* Startup the kernel thread for this host adapter. */ 13401 phba->worker_thread = kthread_run(lpfc_do_work, phba, 13402 "lpfc_worker_%d", phba->brd_no); 13403 if (IS_ERR(phba->worker_thread)) { 13404 error = PTR_ERR(phba->worker_thread); 13405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13406 "0293 PM resume failed to start worker " 13407 "thread: error=x%x.\n", error); 13408 return error; 13409 } 13410 13411 /* Configure and enable interrupt */ 13412 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13413 if (intr_mode == LPFC_INTR_ERROR) { 13414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13415 "0294 PM resume Failed to enable interrupt\n"); 13416 return -EIO; 13417 } else 13418 phba->intr_mode = intr_mode; 13419 13420 /* Restart HBA and bring it online */ 13421 lpfc_sli_brdrestart(phba); 13422 lpfc_online(phba); 13423 13424 /* Log the current active interrupt mode */ 13425 lpfc_log_intr_mode(phba, phba->intr_mode); 13426 13427 return 0; 13428 } 13429 13430 /** 13431 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 13432 * @phba: pointer to lpfc hba data structure. 13433 * 13434 * This routine is called to prepare the SLI4 device for PCI slot recover. It 13435 * aborts all the outstanding SCSI I/Os to the pci device. 13436 **/ 13437 static void 13438 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 13439 { 13440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13441 "2828 PCI channel I/O abort preparing for recovery\n"); 13442 /* 13443 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 13444 * and let the SCSI mid-layer to retry them to recover. 13445 */ 13446 lpfc_sli_abort_fcp_rings(phba); 13447 } 13448 13449 /** 13450 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 13451 * @phba: pointer to lpfc hba data structure. 13452 * 13453 * This routine is called to prepare the SLI4 device for PCI slot reset. It 13454 * disables the device interrupt and pci device, and aborts the internal FCP 13455 * pending I/Os. 
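 * In addition to what the SLI-3 path does, the SLI4 variant also destroys
 * the queues via lpfc_sli4_queue_destroy() before disabling the PCI device.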
13456 **/ 13457 static void 13458 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 13459 { 13460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13461 "2826 PCI channel disable preparing for reset\n"); 13462 13463 /* Block any management I/Os to the device */ 13464 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 13465 13466 /* Block all SCSI devices' I/Os on the host */ 13467 lpfc_scsi_dev_block(phba); 13468 13469 /* Flush all driver's outstanding I/Os as we are to reset */ 13470 lpfc_sli_flush_io_rings(phba); 13471 13472 /* stop all timers */ 13473 lpfc_stop_hba_timers(phba); 13474 13475 /* Disable interrupt and pci device */ 13476 lpfc_sli4_disable_intr(phba); 13477 lpfc_sli4_queue_destroy(phba); 13478 pci_disable_device(phba->pcidev); 13479 } 13480 13481 /** 13482 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 13483 * @phba: pointer to lpfc hba data structure. 13484 * 13485 * This routine is called to prepare the SLI4 device for PCI slot permanently 13486 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 13487 * pending I/Os. 13488 **/ 13489 static void 13490 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 13491 { 13492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13493 "2827 PCI channel permanent disable for failure\n"); 13494 13495 /* Block all SCSI devices' I/Os on the host */ 13496 lpfc_scsi_dev_block(phba); 13497 13498 /* stop all timers */ 13499 lpfc_stop_hba_timers(phba); 13500 13501 /* Clean up all driver's outstanding I/Os */ 13502 lpfc_sli_flush_io_rings(phba); 13503 } 13504 13505 /** 13506 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 13507 * @pdev: pointer to PCI device. 13508 * @state: the current PCI connection state. 13509 * 13510 * This routine is called from the PCI subsystem for error handling to device 13511 * with SLI-4 interface spec. This function is called by the PCI subsystem 13512 * after a PCI bus error affecting this device has been detected. When this 13513 * function is invoked, it will need to stop all the I/Os and interrupt(s) 13514 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 13515 * for the PCI subsystem to perform proper recovery as desired. 
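 * For a non-fatal (pci_channel_io_normal) error the routine instead returns
 * PCI_ERS_RESULT_CAN_RECOVER, and for a permanent failure it returns
 * PCI_ERS_RESULT_DISCONNECT.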
13516 * 13517 * Return codes 13518 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 13519 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13520 **/ 13521 static pci_ers_result_t 13522 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 13523 { 13524 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13525 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13526 13527 switch (state) { 13528 case pci_channel_io_normal: 13529 /* Non-fatal error, prepare for recovery */ 13530 lpfc_sli4_prep_dev_for_recover(phba); 13531 return PCI_ERS_RESULT_CAN_RECOVER; 13532 case pci_channel_io_frozen: 13533 /* Fatal error, prepare for slot reset */ 13534 lpfc_sli4_prep_dev_for_reset(phba); 13535 return PCI_ERS_RESULT_NEED_RESET; 13536 case pci_channel_io_perm_failure: 13537 /* Permanent failure, prepare for device down */ 13538 lpfc_sli4_prep_dev_for_perm_failure(phba); 13539 return PCI_ERS_RESULT_DISCONNECT; 13540 default: 13541 /* Unknown state, prepare and request slot reset */ 13542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13543 "2825 Unknown PCI error state: x%x\n", state); 13544 lpfc_sli4_prep_dev_for_reset(phba); 13545 return PCI_ERS_RESULT_NEED_RESET; 13546 } 13547 } 13548 13549 /** 13550 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 13551 * @pdev: pointer to PCI device. 13552 * 13553 * This routine is called from the PCI subsystem for error handling to device 13554 * with SLI-4 interface spec. It is called after PCI bus has been reset to 13555 * restart the PCI card from scratch, as if from a cold-boot. During the 13556 * PCI subsystem error recovery, after the driver returns 13557 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 13558 * recovery and then call this routine before calling the .resume method to 13559 * recover the device. This function will initialize the HBA device, enable 13560 * the interrupt, but it will just put the HBA to offline state without 13561 * passing any I/O traffic. 13562 * 13563 * Return codes 13564 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13565 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13566 */ 13567 static pci_ers_result_t 13568 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 13569 { 13570 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13571 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13572 struct lpfc_sli *psli = &phba->sli; 13573 uint32_t intr_mode; 13574 13575 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 13576 if (pci_enable_device_mem(pdev)) { 13577 printk(KERN_ERR "lpfc: Cannot re-enable " 13578 "PCI device after reset.\n"); 13579 return PCI_ERS_RESULT_DISCONNECT; 13580 } 13581 13582 pci_restore_state(pdev); 13583 13584 /* 13585 * As the new kernel behavior of pci_restore_state() API call clears 13586 * device saved_state flag, need to save the restored state again. 
13587 */ 13588 pci_save_state(pdev); 13589 13590 if (pdev->is_busmaster) 13591 pci_set_master(pdev); 13592 13593 spin_lock_irq(&phba->hbalock); 13594 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 13595 spin_unlock_irq(&phba->hbalock); 13596 13597 /* Configure and enable interrupt */ 13598 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13599 if (intr_mode == LPFC_INTR_ERROR) { 13600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13601 "2824 Cannot re-enable interrupt after " 13602 "slot reset.\n"); 13603 return PCI_ERS_RESULT_DISCONNECT; 13604 } else 13605 phba->intr_mode = intr_mode; 13606 13607 /* Log the current active interrupt mode */ 13608 lpfc_log_intr_mode(phba, phba->intr_mode); 13609 13610 return PCI_ERS_RESULT_RECOVERED; 13611 } 13612 13613 /** 13614 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 13615 * @pdev: pointer to PCI device 13616 * 13617 * This routine is called from the PCI subsystem for error handling to device 13618 * with SLI-4 interface spec. It is called when kernel error recovery tells 13619 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 13620 * error recovery. After this call, traffic can start to flow from this device 13621 * again. 13622 **/ 13623 static void 13624 lpfc_io_resume_s4(struct pci_dev *pdev) 13625 { 13626 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13627 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13628 13629 /* 13630 * In case of slot reset, as function reset is performed through 13631 * mailbox command which needs DMA to be enabled, this operation 13632 * has to be moved to the io resume phase. Taking device offline 13633 * will perform the necessary cleanup. 13634 */ 13635 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 13636 /* Perform device reset */ 13637 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13638 lpfc_offline(phba); 13639 lpfc_sli_brdrestart(phba); 13640 /* Bring the device back online */ 13641 lpfc_online(phba); 13642 } 13643 } 13644 13645 /** 13646 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 13647 * @pdev: pointer to PCI device 13648 * @pid: pointer to PCI device identifier 13649 * 13650 * This routine is to be registered to the kernel's PCI subsystem. When an 13651 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 13652 * at PCI device-specific information of the device and driver to see if the 13653 * driver state that it can support this kind of device. If the match is 13654 * successful, the driver core invokes this routine. This routine dispatches 13655 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 13656 * do all the initialization that it needs to do to handle the HBA device 13657 * properly. 
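 * The dispatch decision is made by reading the LPFC_SLI_INTF register from
 * PCI config space: a valid SLI-4 interface revision routes the probe to
 * lpfc_pci_probe_one_s4(), anything else to lpfc_pci_probe_one_s3().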
13658 * 13659 * Return code 13660 * 0 - driver can claim the device 13661 * negative value - driver can not claim the device 13662 **/ 13663 static int 13664 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 13665 { 13666 int rc; 13667 struct lpfc_sli_intf intf; 13668 13669 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 13670 return -ENODEV; 13671 13672 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 13673 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 13674 rc = lpfc_pci_probe_one_s4(pdev, pid); 13675 else 13676 rc = lpfc_pci_probe_one_s3(pdev, pid); 13677 13678 return rc; 13679 } 13680 13681 /** 13682 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 13683 * @pdev: pointer to PCI device 13684 * 13685 * This routine is to be registered to the kernel's PCI subsystem. When an 13686 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 13687 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 13688 * remove routine, which will perform all the necessary cleanup for the 13689 * device to be removed from the PCI subsystem properly. 13690 **/ 13691 static void 13692 lpfc_pci_remove_one(struct pci_dev *pdev) 13693 { 13694 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13695 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13696 13697 switch (phba->pci_dev_grp) { 13698 case LPFC_PCI_DEV_LP: 13699 lpfc_pci_remove_one_s3(pdev); 13700 break; 13701 case LPFC_PCI_DEV_OC: 13702 lpfc_pci_remove_one_s4(pdev); 13703 break; 13704 default: 13705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13706 "1424 Invalid PCI device group: 0x%x\n", 13707 phba->pci_dev_grp); 13708 break; 13709 } 13710 return; 13711 } 13712 13713 /** 13714 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 13715 * @pdev: pointer to PCI device 13716 * @msg: power management message 13717 * 13718 * This routine is to be registered to the kernel's PCI subsystem to support 13719 * system Power Management (PM). When PM invokes this method, it dispatches 13720 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 13721 * suspend the device. 13722 * 13723 * Return code 13724 * 0 - driver suspended the device 13725 * Error otherwise 13726 **/ 13727 static int 13728 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 13729 { 13730 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13731 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13732 int rc = -ENODEV; 13733 13734 switch (phba->pci_dev_grp) { 13735 case LPFC_PCI_DEV_LP: 13736 rc = lpfc_pci_suspend_one_s3(pdev, msg); 13737 break; 13738 case LPFC_PCI_DEV_OC: 13739 rc = lpfc_pci_suspend_one_s4(pdev, msg); 13740 break; 13741 default: 13742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13743 "1425 Invalid PCI device group: 0x%x\n", 13744 phba->pci_dev_grp); 13745 break; 13746 } 13747 return rc; 13748 } 13749 13750 /** 13751 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 13752 * @pdev: pointer to PCI device 13753 * 13754 * This routine is to be registered to the kernel's PCI subsystem to support 13755 * system Power Management (PM). When PM invokes this method, it dispatches 13756 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 13757 * resume the device. 
13758 * 13759 * Return code 13760 * 0 - driver suspended the device 13761 * Error otherwise 13762 **/ 13763 static int 13764 lpfc_pci_resume_one(struct pci_dev *pdev) 13765 { 13766 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13767 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13768 int rc = -ENODEV; 13769 13770 switch (phba->pci_dev_grp) { 13771 case LPFC_PCI_DEV_LP: 13772 rc = lpfc_pci_resume_one_s3(pdev); 13773 break; 13774 case LPFC_PCI_DEV_OC: 13775 rc = lpfc_pci_resume_one_s4(pdev); 13776 break; 13777 default: 13778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13779 "1426 Invalid PCI device group: 0x%x\n", 13780 phba->pci_dev_grp); 13781 break; 13782 } 13783 return rc; 13784 } 13785 13786 /** 13787 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 13788 * @pdev: pointer to PCI device. 13789 * @state: the current PCI connection state. 13790 * 13791 * This routine is registered to the PCI subsystem for error handling. This 13792 * function is called by the PCI subsystem after a PCI bus error affecting 13793 * this device has been detected. When this routine is invoked, it dispatches 13794 * the action to the proper SLI-3 or SLI-4 device error detected handling 13795 * routine, which will perform the proper error detected operation. 13796 * 13797 * Return codes 13798 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 13799 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13800 **/ 13801 static pci_ers_result_t 13802 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 13803 { 13804 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13805 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13806 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13807 13808 switch (phba->pci_dev_grp) { 13809 case LPFC_PCI_DEV_LP: 13810 rc = lpfc_io_error_detected_s3(pdev, state); 13811 break; 13812 case LPFC_PCI_DEV_OC: 13813 rc = lpfc_io_error_detected_s4(pdev, state); 13814 break; 13815 default: 13816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13817 "1427 Invalid PCI device group: 0x%x\n", 13818 phba->pci_dev_grp); 13819 break; 13820 } 13821 return rc; 13822 } 13823 13824 /** 13825 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 13826 * @pdev: pointer to PCI device. 13827 * 13828 * This routine is registered to the PCI subsystem for error handling. This 13829 * function is called after PCI bus has been reset to restart the PCI card 13830 * from scratch, as if from a cold-boot. When this routine is invoked, it 13831 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 13832 * routine, which will perform the proper device reset. 
13833 * 13834 * Return codes 13835 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13836 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13837 **/ 13838 static pci_ers_result_t 13839 lpfc_io_slot_reset(struct pci_dev *pdev) 13840 { 13841 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13842 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13843 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13844 13845 switch (phba->pci_dev_grp) { 13846 case LPFC_PCI_DEV_LP: 13847 rc = lpfc_io_slot_reset_s3(pdev); 13848 break; 13849 case LPFC_PCI_DEV_OC: 13850 rc = lpfc_io_slot_reset_s4(pdev); 13851 break; 13852 default: 13853 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13854 "1428 Invalid PCI device group: 0x%x\n", 13855 phba->pci_dev_grp); 13856 break; 13857 } 13858 return rc; 13859 } 13860 13861 /** 13862 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 13863 * @pdev: pointer to PCI device 13864 * 13865 * This routine is registered to the PCI subsystem for error handling. It 13866 * is called when kernel error recovery tells the lpfc driver that it is 13867 * OK to resume normal PCI operation after PCI bus error recovery. When 13868 * this routine is invoked, it dispatches the action to the proper SLI-3 13869 * or SLI-4 device io_resume routine, which will resume the device operation. 13870 **/ 13871 static void 13872 lpfc_io_resume(struct pci_dev *pdev) 13873 { 13874 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13875 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13876 13877 switch (phba->pci_dev_grp) { 13878 case LPFC_PCI_DEV_LP: 13879 lpfc_io_resume_s3(pdev); 13880 break; 13881 case LPFC_PCI_DEV_OC: 13882 lpfc_io_resume_s4(pdev); 13883 break; 13884 default: 13885 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13886 "1429 Invalid PCI device group: 0x%x\n", 13887 phba->pci_dev_grp); 13888 break; 13889 } 13890 return; 13891 } 13892 13893 /** 13894 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 13895 * @phba: pointer to lpfc hba data structure. 13896 * 13897 * This routine checks to see if OAS is supported for this adapter. If 13898 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 13899 * the enable oas flag is cleared and the pool created for OAS device data 13900 * is destroyed. 13901 * 13902 **/ 13903 static void 13904 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 13905 { 13906 13907 if (!phba->cfg_EnableXLane) 13908 return; 13909 13910 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 13911 phba->cfg_fof = 1; 13912 } else { 13913 phba->cfg_fof = 0; 13914 mempool_destroy(phba->device_data_mem_pool); 13915 phba->device_data_mem_pool = NULL; 13916 } 13917 13918 return; 13919 } 13920 13921 /** 13922 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 13923 * @phba: pointer to lpfc hba data structure. 13924 * 13925 * This routine checks to see if RAS is supported by the adapter. Check the 13926 * function through which RAS support enablement is to be done. 
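 * RAS firmware logging is only flagged as hardware-supported on Lancer G6
 * and G7 adapters; it is enabled when the configured logging function
 * matches this PCI function and a log buffer size has been configured.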
13927 **/ 13928 void 13929 lpfc_sli4_ras_init(struct lpfc_hba *phba) 13930 { 13931 switch (phba->pcidev->device) { 13932 case PCI_DEVICE_ID_LANCER_G6_FC: 13933 case PCI_DEVICE_ID_LANCER_G7_FC: 13934 phba->ras_fwlog.ras_hwsupport = true; 13935 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 13936 phba->cfg_ras_fwlog_buffsize) 13937 phba->ras_fwlog.ras_enabled = true; 13938 else 13939 phba->ras_fwlog.ras_enabled = false; 13940 break; 13941 default: 13942 phba->ras_fwlog.ras_hwsupport = false; 13943 } 13944 } 13945 13946 13947 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 13948 13949 static const struct pci_error_handlers lpfc_err_handler = { 13950 .error_detected = lpfc_io_error_detected, 13951 .slot_reset = lpfc_io_slot_reset, 13952 .resume = lpfc_io_resume, 13953 }; 13954 13955 static struct pci_driver lpfc_driver = { 13956 .name = LPFC_DRIVER_NAME, 13957 .id_table = lpfc_id_table, 13958 .probe = lpfc_pci_probe_one, 13959 .remove = lpfc_pci_remove_one, 13960 .shutdown = lpfc_pci_remove_one, 13961 .suspend = lpfc_pci_suspend_one, 13962 .resume = lpfc_pci_resume_one, 13963 .err_handler = &lpfc_err_handler, 13964 }; 13965 13966 static const struct file_operations lpfc_mgmt_fop = { 13967 .owner = THIS_MODULE, 13968 }; 13969 13970 static struct miscdevice lpfc_mgmt_dev = { 13971 .minor = MISC_DYNAMIC_MINOR, 13972 .name = "lpfcmgmt", 13973 .fops = &lpfc_mgmt_fop, 13974 }; 13975 13976 /** 13977 * lpfc_init - lpfc module initialization routine 13978 * 13979 * This routine is to be invoked when the lpfc module is loaded into the 13980 * kernel. The special kernel macro module_init() is used to indicate the 13981 * role of this routine to the kernel as lpfc module entry point. 13982 * 13983 * Return codes 13984 * 0 - successful 13985 * -ENOMEM - FC attach transport failed 13986 * all others - failed 13987 */ 13988 static int __init 13989 lpfc_init(void) 13990 { 13991 int error = 0; 13992 13993 printk(LPFC_MODULE_DESC "\n"); 13994 printk(LPFC_COPYRIGHT "\n"); 13995 13996 error = misc_register(&lpfc_mgmt_dev); 13997 if (error) 13998 printk(KERN_ERR "Could not register lpfcmgmt device, " 13999 "misc_register returned with status %d", error); 14000 14001 lpfc_transport_functions.vport_create = lpfc_vport_create; 14002 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 14003 lpfc_transport_template = 14004 fc_attach_transport(&lpfc_transport_functions); 14005 if (lpfc_transport_template == NULL) 14006 return -ENOMEM; 14007 lpfc_vport_transport_template = 14008 fc_attach_transport(&lpfc_vport_transport_functions); 14009 if (lpfc_vport_transport_template == NULL) { 14010 fc_release_transport(lpfc_transport_template); 14011 return -ENOMEM; 14012 } 14013 lpfc_nvme_cmd_template(); 14014 lpfc_nvmet_cmd_template(); 14015 14016 /* Initialize in case vector mapping is needed */ 14017 lpfc_present_cpu = num_present_cpus(); 14018 14019 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 14020 "lpfc/sli4:online", 14021 lpfc_cpu_online, lpfc_cpu_offline); 14022 if (error < 0) 14023 goto cpuhp_failure; 14024 lpfc_cpuhp_state = error; 14025 14026 error = pci_register_driver(&lpfc_driver); 14027 if (error) 14028 goto unwind; 14029 14030 return error; 14031 14032 unwind: 14033 cpuhp_remove_multi_state(lpfc_cpuhp_state); 14034 cpuhp_failure: 14035 fc_release_transport(lpfc_transport_template); 14036 fc_release_transport(lpfc_vport_transport_template); 14037 14038 return error; 14039 } 14040 14041 /** 14042 * lpfc_exit - lpfc module removal routine 14043 * 14044 * This routine is invoked when the lpfc 
module is removed from the kernel. 14045 * The special kernel macro module_exit() is used to indicate the role of 14046 * this routine to the kernel as lpfc module exit point. 14047 */ 14048 static void __exit 14049 lpfc_exit(void) 14050 { 14051 misc_deregister(&lpfc_mgmt_dev); 14052 pci_unregister_driver(&lpfc_driver); 14053 cpuhp_remove_multi_state(lpfc_cpuhp_state); 14054 fc_release_transport(lpfc_transport_template); 14055 fc_release_transport(lpfc_vport_transport_template); 14056 idr_destroy(&lpfc_hba_index); 14057 } 14058 14059 module_init(lpfc_init); 14060 module_exit(lpfc_exit); 14061 MODULE_LICENSE("GPL"); 14062 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 14063 MODULE_AUTHOR("Broadcom"); 14064 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 14065