1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 #include <linux/cpu.h> 44 #include <linux/cpuhotplug.h> 45 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_host.h> 49 #include <scsi/scsi_transport_fc.h> 50 #include <scsi/scsi_tcq.h> 51 #include <scsi/fc/fc_fs.h> 52 53 #include <linux/nvme-fc-driver.h> 54 55 #include "lpfc_hw4.h" 56 #include "lpfc_hw.h" 57 #include "lpfc_sli.h" 58 #include "lpfc_sli4.h" 59 #include "lpfc_nl.h" 60 #include "lpfc_disc.h" 61 #include "lpfc.h" 62 #include "lpfc_scsi.h" 63 #include "lpfc_nvme.h" 64 #include "lpfc_nvmet.h" 65 #include "lpfc_logmsg.h" 66 #include "lpfc_crtn.h" 67 #include "lpfc_vport.h" 68 #include "lpfc_version.h" 69 #include "lpfc_ids.h" 70 71 static enum cpuhp_state lpfc_cpuhp_state; 72 /* Used when mapping IRQ vectors in a driver centric manner */ 73 static uint32_t lpfc_present_cpu; 74 75 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 76 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 77 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 78 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 79 static int lpfc_post_rcv_buf(struct lpfc_hba *); 80 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 81 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 82 static int lpfc_setup_endian_order(struct lpfc_hba *); 83 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 84 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 85 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 86 static void lpfc_init_sgl_list(struct lpfc_hba *); 87 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 88 static void lpfc_free_active_sgl(struct lpfc_hba *); 89 static int lpfc_hba_down_post_s3(struct 
lpfc_hba *phba); 90 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 91 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 92 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 93 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 94 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 95 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 96 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 97 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 98 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 99 100 static struct scsi_transport_template *lpfc_transport_template = NULL; 101 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 102 static DEFINE_IDR(lpfc_hba_index); 103 #define LPFC_NVMET_BUF_POST 254 104 105 /** 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 107 * @phba: pointer to lpfc hba data structure. 108 * 109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 110 * mailbox command. It retrieves the revision information from the HBA and 111 * collects the Vital Product Data (VPD) about the HBA for preparing the 112 * configuration of the HBA. 113 * 114 * Return codes: 115 * 0 - success. 116 * -ERESTART - requests the SLI layer to reset the HBA and try again. 117 * Any other value - indicates an error. 118 **/ 119 int 120 lpfc_config_port_prep(struct lpfc_hba *phba) 121 { 122 lpfc_vpd_t *vp = &phba->vpd; 123 int i = 0, rc; 124 LPFC_MBOXQ_t *pmb; 125 MAILBOX_t *mb; 126 char *lpfc_vpd_data = NULL; 127 uint16_t offset = 0; 128 static char licensed[56] = 129 "key unlock for use with gnu public licensed code only\0"; 130 static int init_key = 1; 131 132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 133 if (!pmb) { 134 phba->link_state = LPFC_HBA_ERROR; 135 return -ENOMEM; 136 } 137 138 mb = &pmb->u.mb; 139 phba->link_state = LPFC_INIT_MBX_CMDS; 140 141 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 142 if (init_key) { 143 uint32_t *ptext = (uint32_t *) licensed; 144 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 146 *ptext = cpu_to_be32(*ptext); 147 init_key = 0; 148 } 149 150 lpfc_read_nv(phba, pmb); 151 memset((char*)mb->un.varRDnvp.rsvd3, 0, 152 sizeof (mb->un.varRDnvp.rsvd3)); 153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 154 sizeof (licensed)); 155 156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 157 158 if (rc != MBX_SUCCESS) { 159 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 160 "0324 Config Port initialization " 161 "error, mbxCmd x%x READ_NVPARM, " 162 "mbxStatus x%x\n", 163 mb->mbxCommand, mb->mbxStatus); 164 mempool_free(pmb, phba->mbox_mem_pool); 165 return -ERESTART; 166 } 167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 168 sizeof(phba->wwnn)); 169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 170 sizeof(phba->wwpn)); 171 } 172 173 /* 174 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 175 * which was already set in lpfc_get_cfgparam() 176 */ 177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 178 179 /* Setup and issue mailbox READ REV command */ 180 lpfc_read_rev(phba, pmb); 181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 182 if (rc != MBX_SUCCESS) { 183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 184 "0439 Adapter failed to init, mbxCmd x%x " 185 "READ_REV, mbxStatus x%x\n", 186 mb->mbxCommand, mb->mbxStatus); 187 mempool_free( pmb, phba->mbox_mem_pool); 188 return -ERESTART; 189 } 190 191 192 /* 193 * The value of rr must be 1 since the 
driver set the cv field to 1. 194 * This setting requires the FW to set all revision fields. 195 */ 196 if (mb->un.varRdRev.rr == 0) { 197 vp->rev.rBit = 0; 198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 199 "0440 Adapter failed to init, READ_REV has " 200 "missing revision information.\n"); 201 mempool_free(pmb, phba->mbox_mem_pool); 202 return -ERESTART; 203 } 204 205 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 206 mempool_free(pmb, phba->mbox_mem_pool); 207 return -EINVAL; 208 } 209 210 /* Save information as VPD data */ 211 vp->rev.rBit = 1; 212 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 213 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 214 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 215 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 216 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 217 vp->rev.biuRev = mb->un.varRdRev.biuRev; 218 vp->rev.smRev = mb->un.varRdRev.smRev; 219 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 220 vp->rev.endecRev = mb->un.varRdRev.endecRev; 221 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 222 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 223 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 224 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 225 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 226 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 227 228 /* If the sli feature level is less then 9, we must 229 * tear down all RPIs and VPIs on link down if NPIV 230 * is enabled. 231 */ 232 if (vp->rev.feaLevelHigh < 9) 233 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 234 235 if (lpfc_is_LC_HBA(phba->pcidev->device)) 236 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 237 sizeof (phba->RandomData)); 238 239 /* Get adapter VPD information */ 240 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 241 if (!lpfc_vpd_data) 242 goto out_free_mbox; 243 do { 244 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 245 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 246 247 if (rc != MBX_SUCCESS) { 248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 249 "0441 VPD not present on adapter, " 250 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 251 mb->mbxCommand, mb->mbxStatus); 252 mb->un.varDmp.word_cnt = 0; 253 } 254 /* dump mem may return a zero when finished or we got a 255 * mailbox error, either way we are done. 256 */ 257 if (mb->un.varDmp.word_cnt == 0) 258 break; 259 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 260 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 261 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 262 lpfc_vpd_data + offset, 263 mb->un.varDmp.word_cnt); 264 offset += mb->un.varDmp.word_cnt; 265 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 266 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 267 268 kfree(lpfc_vpd_data); 269 out_free_mbox: 270 mempool_free(pmb, phba->mbox_mem_pool); 271 return 0; 272 } 273 274 /** 275 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 276 * @phba: pointer to lpfc hba data structure. 277 * @pmboxq: pointer to the driver internal queue element for mailbox command. 278 * 279 * This is the completion handler for driver's configuring asynchronous event 280 * mailbox command to the device. If the mailbox command returns successfully, 281 * it will set internal async event support flag to 1; otherwise, it will 282 * set internal async event support flag to 0. 
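 * The internal flag referred to here is phba->temp_sensor_support, as set
 * by the completion handler below.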
283 **/ 284 static void 285 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 286 { 287 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 288 phba->temp_sensor_support = 1; 289 else 290 phba->temp_sensor_support = 0; 291 mempool_free(pmboxq, phba->mbox_mem_pool); 292 return; 293 } 294 295 /** 296 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 297 * @phba: pointer to lpfc hba data structure. 298 * @pmboxq: pointer to the driver internal queue element for mailbox command. 299 * 300 * This is the completion handler for the dump mailbox command for getting 301 * wake up parameters. When this command completes, the response contains the 302 * Option rom version of the HBA. This function translates the version number 303 * into a human readable string and stores it in OptionROMVersion. 304 **/ 305 static void 306 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 307 { 308 struct prog_id *prg; 309 uint32_t prog_id_word; 310 char dist = ' '; 311 /* character array used for decoding dist type. */ 312 char dist_char[] = "nabx"; 313 314 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 315 mempool_free(pmboxq, phba->mbox_mem_pool); 316 return; 317 } 318 319 prg = (struct prog_id *) &prog_id_word; 320 321 /* word 7 contains the option rom version */ 322 prog_id_word = pmboxq->u.mb.un.varWords[7]; 323 324 /* Decode the Option rom version word to a readable string */ 325 if (prg->dist < 4) 326 dist = dist_char[prg->dist]; 327 328 if ((prg->dist == 3) && (prg->num == 0)) 329 snprintf(phba->OptionROMVersion, 32, "%d.%d%d", 330 prg->ver, prg->rev, prg->lev); 331 else 332 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", 333 prg->ver, prg->rev, prg->lev, 334 dist, prg->num); 335 mempool_free(pmboxq, phba->mbox_mem_pool); 336 return; 337 } 338 339 /** 340 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 341 * cfg_soft_wwnn, cfg_soft_wwpn 342 * @vport: pointer to lpfc vport data structure. 343 * 344 * 345 * Return codes 346 * None.
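 * Note on the precedence applied by the routine below: user-configured soft
 * WWNN/WWPN values overwrite the names read from the service parameters;
 * otherwise a name is copied from the service parameters only when it is
 * still unset, when the fabric-assigned WWPN vendor key is present, or when
 * FAWWPN_SET is flagged. A changed port name also sets FAWWPN_PARAM_CHG so
 * that the existing login can be unregistered.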
347 **/ 348 void 349 lpfc_update_vport_wwn(struct lpfc_vport *vport) 350 { 351 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 352 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 353 354 /* If the soft name exists then update it using the service params */ 355 if (vport->phba->cfg_soft_wwnn) 356 u64_to_wwn(vport->phba->cfg_soft_wwnn, 357 vport->fc_sparam.nodeName.u.wwn); 358 if (vport->phba->cfg_soft_wwpn) 359 u64_to_wwn(vport->phba->cfg_soft_wwpn, 360 vport->fc_sparam.portName.u.wwn); 361 362 /* 363 * If the name is empty or there exists a soft name 364 * then copy the service params name, otherwise use the fc name 365 */ 366 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 367 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 368 sizeof(struct lpfc_name)); 369 else 370 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 371 sizeof(struct lpfc_name)); 372 373 /* 374 * If the port name has changed, then set the Param changes flag 375 * to unreg the login 376 */ 377 if (vport->fc_portname.u.wwn[0] != 0 && 378 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 379 sizeof(struct lpfc_name))) 380 vport->vport_flag |= FAWWPN_PARAM_CHG; 381 382 if (vport->fc_portname.u.wwn[0] == 0 || 383 vport->phba->cfg_soft_wwpn || 384 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 385 vport->vport_flag & FAWWPN_SET) { 386 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 387 sizeof(struct lpfc_name)); 388 vport->vport_flag &= ~FAWWPN_SET; 389 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 390 vport->vport_flag |= FAWWPN_SET; 391 } 392 else 393 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 394 sizeof(struct lpfc_name)); 395 } 396 397 /** 398 * lpfc_config_port_post - Perform lpfc initialization after config port 399 * @phba: pointer to lpfc hba data structure. 400 * 401 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 402 * command call. It performs all internal resource and state setups on the 403 * port: post IOCB buffers, enable appropriate host interrupt attentions, 404 * ELS ring timers, etc. 405 * 406 * Return codes 407 * 0 - success. 408 * Any other value - error. 409 **/ 410 int 411 lpfc_config_port_post(struct lpfc_hba *phba) 412 { 413 struct lpfc_vport *vport = phba->pport; 414 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 415 LPFC_MBOXQ_t *pmb; 416 MAILBOX_t *mb; 417 struct lpfc_dmabuf *mp; 418 struct lpfc_sli *psli = &phba->sli; 419 uint32_t status, timeout; 420 int i, j; 421 int rc; 422 423 spin_lock_irq(&phba->hbalock); 424 /* 425 * If the Config port completed correctly the HBA is not 426 * over heated any more. 427 */ 428 if (phba->over_temp_state == HBA_OVER_TEMP) 429 phba->over_temp_state = HBA_NORMAL_TEMP; 430 spin_unlock_irq(&phba->hbalock); 431 432 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 433 if (!pmb) { 434 phba->link_state = LPFC_HBA_ERROR; 435 return -ENOMEM; 436 } 437 mb = &pmb->u.mb; 438 439 /* Get login parameters for NID. 
*/ 440 rc = lpfc_read_sparam(phba, pmb, 0); 441 if (rc) { 442 mempool_free(pmb, phba->mbox_mem_pool); 443 return -ENOMEM; 444 } 445 446 pmb->vport = vport; 447 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 449 "0448 Adapter failed init, mbxCmd x%x " 450 "READ_SPARM mbxStatus x%x\n", 451 mb->mbxCommand, mb->mbxStatus); 452 phba->link_state = LPFC_HBA_ERROR; 453 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 454 mempool_free(pmb, phba->mbox_mem_pool); 455 lpfc_mbuf_free(phba, mp->virt, mp->phys); 456 kfree(mp); 457 return -EIO; 458 } 459 460 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 461 462 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 463 lpfc_mbuf_free(phba, mp->virt, mp->phys); 464 kfree(mp); 465 pmb->ctx_buf = NULL; 466 lpfc_update_vport_wwn(vport); 467 468 /* Update the fc_host data structures with new wwn. */ 469 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 470 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 471 fc_host_max_npiv_vports(shost) = phba->max_vpi; 472 473 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 474 /* This should be consolidated into parse_vpd ? - mr */ 475 if (phba->SerialNumber[0] == 0) { 476 uint8_t *outptr; 477 478 outptr = &vport->fc_nodename.u.s.IEEE[0]; 479 for (i = 0; i < 12; i++) { 480 status = *outptr++; 481 j = ((status & 0xf0) >> 4); 482 if (j <= 9) 483 phba->SerialNumber[i] = 484 (char)((uint8_t) 0x30 + (uint8_t) j); 485 else 486 phba->SerialNumber[i] = 487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 488 i++; 489 j = (status & 0xf); 490 if (j <= 9) 491 phba->SerialNumber[i] = 492 (char)((uint8_t) 0x30 + (uint8_t) j); 493 else 494 phba->SerialNumber[i] = 495 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 496 } 497 } 498 499 lpfc_read_config(phba, pmb); 500 pmb->vport = vport; 501 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 503 "0453 Adapter failed to init, mbxCmd x%x " 504 "READ_CONFIG, mbxStatus x%x\n", 505 mb->mbxCommand, mb->mbxStatus); 506 phba->link_state = LPFC_HBA_ERROR; 507 mempool_free( pmb, phba->mbox_mem_pool); 508 return -EIO; 509 } 510 511 /* Check if the port is disabled */ 512 lpfc_sli_read_link_ste(phba); 513 514 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 515 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 516 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 517 "3359 HBA queue depth changed from %d to %d\n", 518 phba->cfg_hba_queue_depth, 519 mb->un.varRdConfig.max_xri); 520 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 521 } 522 523 phba->lmt = mb->un.varRdConfig.lmt; 524 525 /* Get the default values for Model Name and Description */ 526 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 527 528 phba->link_state = LPFC_LINK_DOWN; 529 530 /* Only process IOCBs on ELS ring till hba_state is READY */ 531 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 532 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 533 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 534 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 535 536 /* Post receive buffers for desired rings */ 537 if (phba->sli_rev != 3) 538 lpfc_post_rcv_buf(phba); 539 540 /* 541 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 542 */ 543 if (phba->intr_type == MSIX) { 544 rc = lpfc_config_msi(phba, pmb); 545 if (rc) { 546 mempool_free(pmb, phba->mbox_mem_pool); 547 return -EIO; 548 } 549 rc = 
lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 550 if (rc != MBX_SUCCESS) { 551 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 552 "0352 Config MSI mailbox command " 553 "failed, mbxCmd x%x, mbxStatus x%x\n", 554 pmb->u.mb.mbxCommand, 555 pmb->u.mb.mbxStatus); 556 mempool_free(pmb, phba->mbox_mem_pool); 557 return -EIO; 558 } 559 } 560 561 spin_lock_irq(&phba->hbalock); 562 /* Initialize ERATT handling flag */ 563 phba->hba_flag &= ~HBA_ERATT_HANDLED; 564 565 /* Enable appropriate host interrupts */ 566 if (lpfc_readl(phba->HCregaddr, &status)) { 567 spin_unlock_irq(&phba->hbalock); 568 return -EIO; 569 } 570 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 571 if (psli->num_rings > 0) 572 status |= HC_R0INT_ENA; 573 if (psli->num_rings > 1) 574 status |= HC_R1INT_ENA; 575 if (psli->num_rings > 2) 576 status |= HC_R2INT_ENA; 577 if (psli->num_rings > 3) 578 status |= HC_R3INT_ENA; 579 580 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 581 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 582 status &= ~(HC_R0INT_ENA); 583 584 writel(status, phba->HCregaddr); 585 readl(phba->HCregaddr); /* flush */ 586 spin_unlock_irq(&phba->hbalock); 587 588 /* Set up ring-0 (ELS) timer */ 589 timeout = phba->fc_ratov * 2; 590 mod_timer(&vport->els_tmofunc, 591 jiffies + msecs_to_jiffies(1000 * timeout)); 592 /* Set up heart beat (HB) timer */ 593 mod_timer(&phba->hb_tmofunc, 594 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 595 phba->hb_outstanding = 0; 596 phba->last_completion_time = jiffies; 597 /* Set up error attention (ERATT) polling timer */ 598 mod_timer(&phba->eratt_poll, 599 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 600 601 if (phba->hba_flag & LINK_DISABLED) { 602 lpfc_printf_log(phba, 603 KERN_ERR, LOG_INIT, 604 "2598 Adapter Link is disabled.\n"); 605 lpfc_down_link(phba, pmb); 606 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 607 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 608 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 609 lpfc_printf_log(phba, 610 KERN_ERR, LOG_INIT, 611 "2599 Adapter failed to issue DOWN_LINK" 612 " mbox command rc 0x%x\n", rc); 613 614 mempool_free(pmb, phba->mbox_mem_pool); 615 return -EIO; 616 } 617 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 618 mempool_free(pmb, phba->mbox_mem_pool); 619 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 620 if (rc) 621 return rc; 622 } 623 /* MBOX buffer will be freed in mbox compl */ 624 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 625 if (!pmb) { 626 phba->link_state = LPFC_HBA_ERROR; 627 return -ENOMEM; 628 } 629 630 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 631 pmb->mbox_cmpl = lpfc_config_async_cmpl; 632 pmb->vport = phba->pport; 633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 634 635 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 636 lpfc_printf_log(phba, 637 KERN_ERR, 638 LOG_INIT, 639 "0456 Adapter failed to issue " 640 "ASYNCEVT_ENABLE mbox status x%x\n", 641 rc); 642 mempool_free(pmb, phba->mbox_mem_pool); 643 } 644 645 /* Get Option rom version */ 646 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 647 if (!pmb) { 648 phba->link_state = LPFC_HBA_ERROR; 649 return -ENOMEM; 650 } 651 652 lpfc_dump_wakeup_param(phba, pmb); 653 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 654 pmb->vport = phba->pport; 655 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 656 657 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 659 "to get Option ROM version status x%x\n", rc); 660 mempool_free(pmb, 
phba->mbox_mem_pool); 661 } 662 663 return 0; 664 } 665 666 /** 667 * lpfc_hba_init_link - Initialize the FC link 668 * @phba: pointer to lpfc hba data structure. 669 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 670 * 671 * This routine will issue the INIT_LINK mailbox command call. 672 * It is available to other drivers through the lpfc_hba data 673 * structure for use as a delayed link up mechanism with the 674 * module parameter lpfc_suppress_link_up. 675 * 676 * Return code 677 * 0 - success 678 * Any other value - error 679 **/ 680 static int 681 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 682 { 683 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 684 } 685 686 /** 687 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 688 * @phba: pointer to lpfc hba data structure. 689 * @fc_topology: desired fc topology. 690 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 691 * 692 * This routine will issue the INIT_LINK mailbox command call. 693 * It is available to other drivers through the lpfc_hba data 694 * structure for use as a delayed link up mechanism with the 695 * module parameter lpfc_suppress_link_up. 696 * 697 * Return code 698 * 0 - success 699 * Any other value - error 700 **/ 701 int 702 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 703 uint32_t flag) 704 { 705 struct lpfc_vport *vport = phba->pport; 706 LPFC_MBOXQ_t *pmb; 707 MAILBOX_t *mb; 708 int rc; 709 710 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 711 if (!pmb) { 712 phba->link_state = LPFC_HBA_ERROR; 713 return -ENOMEM; 714 } 715 mb = &pmb->u.mb; 716 pmb->vport = vport; 717 718 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 719 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 720 !(phba->lmt & LMT_1Gb)) || 721 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 722 !(phba->lmt & LMT_2Gb)) || 723 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 724 !(phba->lmt & LMT_4Gb)) || 725 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 726 !(phba->lmt & LMT_8Gb)) || 727 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 728 !(phba->lmt & LMT_10Gb)) || 729 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 730 !(phba->lmt & LMT_16Gb)) || 731 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 732 !(phba->lmt & LMT_32Gb)) || 733 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 734 !(phba->lmt & LMT_64Gb))) { 735 /* Reset link speed to auto */ 736 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 737 "1302 Invalid speed for this board:%d " 738 "Reset link speed to auto.\n", 739 phba->cfg_link_speed); 740 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 741 } 742 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 743 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 744 if (phba->sli_rev < LPFC_SLI_REV4) 745 lpfc_set_loopback_flag(phba); 746 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 747 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 749 "0498 Adapter failed to init, mbxCmd x%x " 750 "INIT_LINK, mbxStatus x%x\n", 751 mb->mbxCommand, mb->mbxStatus); 752 if (phba->sli_rev <= LPFC_SLI_REV3) { 753 /* Clear all interrupt enable conditions */ 754 writel(0, phba->HCregaddr); 755 readl(phba->HCregaddr); /* flush */ 756 /* Clear all pending interrupts */ 757 writel(0xffffffff, phba->HAregaddr); 758 readl(phba->HAregaddr); /* flush */ 759 } 760 phba->link_state = LPFC_HBA_ERROR; 761 if (rc != MBX_BUSY || flag == 
MBX_POLL) 762 mempool_free(pmb, phba->mbox_mem_pool); 763 return -EIO; 764 } 765 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 766 if (flag == MBX_POLL) 767 mempool_free(pmb, phba->mbox_mem_pool); 768 769 return 0; 770 } 771 772 /** 773 * lpfc_hba_down_link - this routine downs the FC link 774 * @phba: pointer to lpfc hba data structure. 775 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 776 * 777 * This routine will issue the DOWN_LINK mailbox command call. 778 * It is available to other drivers through the lpfc_hba data 779 * structure for use to stop the link. 780 * 781 * Return code 782 * 0 - success 783 * Any other value - error 784 **/ 785 static int 786 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 787 { 788 LPFC_MBOXQ_t *pmb; 789 int rc; 790 791 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 792 if (!pmb) { 793 phba->link_state = LPFC_HBA_ERROR; 794 return -ENOMEM; 795 } 796 797 lpfc_printf_log(phba, 798 KERN_ERR, LOG_INIT, 799 "0491 Adapter Link is disabled.\n"); 800 lpfc_down_link(phba, pmb); 801 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 802 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 803 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 804 lpfc_printf_log(phba, 805 KERN_ERR, LOG_INIT, 806 "2522 Adapter failed to issue DOWN_LINK" 807 " mbox command rc 0x%x\n", rc); 808 809 mempool_free(pmb, phba->mbox_mem_pool); 810 return -EIO; 811 } 812 if (flag == MBX_POLL) 813 mempool_free(pmb, phba->mbox_mem_pool); 814 815 return 0; 816 } 817 818 /** 819 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 820 * @phba: pointer to lpfc HBA data structure. 821 * 822 * This routine will do LPFC uninitialization before the HBA is reset when 823 * bringing down the SLI Layer. 824 * 825 * Return codes 826 * 0 - success. 827 * Any other value - error. 828 **/ 829 int 830 lpfc_hba_down_prep(struct lpfc_hba *phba) 831 { 832 struct lpfc_vport **vports; 833 int i; 834 835 if (phba->sli_rev <= LPFC_SLI_REV3) { 836 /* Disable interrupts */ 837 writel(0, phba->HCregaddr); 838 readl(phba->HCregaddr); /* flush */ 839 } 840 841 if (phba->pport->load_flag & FC_UNLOADING) 842 lpfc_cleanup_discovery_resources(phba->pport); 843 else { 844 vports = lpfc_create_vport_work_array(phba); 845 if (vports != NULL) 846 for (i = 0; i <= phba->max_vports && 847 vports[i] != NULL; i++) 848 lpfc_cleanup_discovery_resources(vports[i]); 849 lpfc_destroy_vport_work_array(phba, vports); 850 } 851 return 0; 852 } 853 854 /** 855 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 856 * rspiocb which got deferred 857 * 858 * @phba: pointer to lpfc HBA data structure. 859 * 860 * This routine will cleanup completed slow path events after HBA is reset 861 * when bringing down the SLI Layer. 862 * 863 * 864 * Return codes 865 * void. 
866 **/ 867 static void 868 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 869 { 870 struct lpfc_iocbq *rspiocbq; 871 struct hbq_dmabuf *dmabuf; 872 struct lpfc_cq_event *cq_event; 873 874 spin_lock_irq(&phba->hbalock); 875 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 876 spin_unlock_irq(&phba->hbalock); 877 878 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 879 /* Get the response iocb from the head of work queue */ 880 spin_lock_irq(&phba->hbalock); 881 list_remove_head(&phba->sli4_hba.sp_queue_event, 882 cq_event, struct lpfc_cq_event, list); 883 spin_unlock_irq(&phba->hbalock); 884 885 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 886 case CQE_CODE_COMPL_WQE: 887 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 888 cq_event); 889 lpfc_sli_release_iocbq(phba, rspiocbq); 890 break; 891 case CQE_CODE_RECEIVE: 892 case CQE_CODE_RECEIVE_V1: 893 dmabuf = container_of(cq_event, struct hbq_dmabuf, 894 cq_event); 895 lpfc_in_buf_free(phba, &dmabuf->dbuf); 896 } 897 } 898 } 899 900 /** 901 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 902 * @phba: pointer to lpfc HBA data structure. 903 * 904 * This routine will cleanup posted ELS buffers after the HBA is reset 905 * when bringing down the SLI Layer. 906 * 907 * 908 * Return codes 909 * void. 910 **/ 911 static void 912 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 913 { 914 struct lpfc_sli *psli = &phba->sli; 915 struct lpfc_sli_ring *pring; 916 struct lpfc_dmabuf *mp, *next_mp; 917 LIST_HEAD(buflist); 918 int count; 919 920 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 921 lpfc_sli_hbqbuf_free_all(phba); 922 else { 923 /* Cleanup preposted buffers on the ELS ring */ 924 pring = &psli->sli3_ring[LPFC_ELS_RING]; 925 spin_lock_irq(&phba->hbalock); 926 list_splice_init(&pring->postbufq, &buflist); 927 spin_unlock_irq(&phba->hbalock); 928 929 count = 0; 930 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 931 list_del(&mp->list); 932 count++; 933 lpfc_mbuf_free(phba, mp->virt, mp->phys); 934 kfree(mp); 935 } 936 937 spin_lock_irq(&phba->hbalock); 938 pring->postbufq_cnt -= count; 939 spin_unlock_irq(&phba->hbalock); 940 } 941 } 942 943 /** 944 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 945 * @phba: pointer to lpfc HBA data structure. 946 * 947 * This routine will cleanup the txcmplq after the HBA is reset when bringing 948 * down the SLI Layer. 949 * 950 * Return codes 951 * void 952 **/ 953 static void 954 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 955 { 956 struct lpfc_sli *psli = &phba->sli; 957 struct lpfc_queue *qp = NULL; 958 struct lpfc_sli_ring *pring; 959 LIST_HEAD(completions); 960 int i; 961 struct lpfc_iocbq *piocb, *next_iocb; 962 963 if (phba->sli_rev != LPFC_SLI_REV4) { 964 for (i = 0; i < psli->num_rings; i++) { 965 pring = &psli->sli3_ring[i]; 966 spin_lock_irq(&phba->hbalock); 967 /* At this point in time the HBA is either reset or DOA 968 * Nothing should be on txcmplq as it will 969 * NEVER complete. 
970 */ 971 list_splice_init(&pring->txcmplq, &completions); 972 pring->txcmplq_cnt = 0; 973 spin_unlock_irq(&phba->hbalock); 974 975 lpfc_sli_abort_iocb_ring(phba, pring); 976 } 977 /* Cancel all the IOCBs from the completions list */ 978 lpfc_sli_cancel_iocbs(phba, &completions, 979 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 980 return; 981 } 982 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 983 pring = qp->pring; 984 if (!pring) 985 continue; 986 spin_lock_irq(&pring->ring_lock); 987 list_for_each_entry_safe(piocb, next_iocb, 988 &pring->txcmplq, list) 989 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 990 list_splice_init(&pring->txcmplq, &completions); 991 pring->txcmplq_cnt = 0; 992 spin_unlock_irq(&pring->ring_lock); 993 lpfc_sli_abort_iocb_ring(phba, pring); 994 } 995 /* Cancel all the IOCBs from the completions list */ 996 lpfc_sli_cancel_iocbs(phba, &completions, 997 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 998 } 999 1000 /** 1001 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 1002 * 1003 * @phba: pointer to lpfc HBA data structure. 1004 * 1005 * This routine will do uninitialization after the HBA is reset when bringing 1006 * down the SLI Layer. 1007 * 1008 * Return codes 1009 * 0 - success. 1010 * Any other value - error. 1011 **/ 1012 static int 1013 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1014 { 1015 lpfc_hba_free_post_buf(phba); 1016 lpfc_hba_clean_txcmplq(phba); 1017 return 0; 1018 } 1019 1020 /** 1021 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1022 * @phba: pointer to lpfc HBA data structure. 1023 * 1024 * This routine will do uninitialization after the HBA is reset when bringing 1025 * down the SLI Layer. 1026 * 1027 * Return codes 1028 * 0 - success. 1029 * Any other value - error. 1030 **/ 1031 static int 1032 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1033 { 1034 struct lpfc_io_buf *psb, *psb_next; 1035 struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next; 1036 struct lpfc_sli4_hdw_queue *qp; 1037 LIST_HEAD(aborts); 1038 LIST_HEAD(nvme_aborts); 1039 LIST_HEAD(nvmet_aborts); 1040 struct lpfc_sglq *sglq_entry = NULL; 1041 int cnt, idx; 1042 1043 1044 lpfc_sli_hbqbuf_free_all(phba); 1045 lpfc_hba_clean_txcmplq(phba); 1046 1047 /* At this point in time the HBA is either reset or DOA. Either 1048 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1049 * on the lpfc_els_sgl_list so that it can either be freed if the 1050 * driver is unloading or reposted if the driver is restarting 1051 * the port. 1052 */ 1053 spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */ 1054 /* scsi_buf_list */ 1055 /* sgl_list_lock required because worker thread uses this 1056 * list. 1057 */ 1058 spin_lock(&phba->sli4_hba.sgl_list_lock); 1059 list_for_each_entry(sglq_entry, 1060 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1061 sglq_entry->state = SGL_FREED; 1062 1063 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1064 &phba->sli4_hba.lpfc_els_sgl_list); 1065 1066 1067 spin_unlock(&phba->sli4_hba.sgl_list_lock); 1068 1069 /* abts_xxxx_buf_list_lock required because worker thread uses this 1070 * list.
1071 */ 1072 cnt = 0; 1073 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1074 qp = &phba->sli4_hba.hdwq[idx]; 1075 1076 spin_lock(&qp->abts_io_buf_list_lock); 1077 list_splice_init(&qp->lpfc_abts_io_buf_list, 1078 &aborts); 1079 1080 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1081 psb->pCmd = NULL; 1082 psb->status = IOSTAT_SUCCESS; 1083 cnt++; 1084 } 1085 spin_lock(&qp->io_buf_list_put_lock); 1086 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1087 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1088 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1089 qp->abts_scsi_io_bufs = 0; 1090 qp->abts_nvme_io_bufs = 0; 1091 spin_unlock(&qp->io_buf_list_put_lock); 1092 spin_unlock(&qp->abts_io_buf_list_lock); 1093 } 1094 spin_unlock_irq(&phba->hbalock); 1095 1096 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1097 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1098 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1099 &nvmet_aborts); 1100 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1101 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1102 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1103 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1104 } 1105 } 1106 1107 lpfc_sli4_free_sp_events(phba); 1108 return cnt; 1109 } 1110 1111 /** 1112 * lpfc_hba_down_post - Wrapper func for hba down post routine 1113 * @phba: pointer to lpfc HBA data structure. 1114 * 1115 * This routine wraps the actual SLI3 or SLI4 routine for performing 1116 * uninitialization after the HBA is reset when bring down the SLI Layer. 1117 * 1118 * Return codes 1119 * 0 - success. 1120 * Any other value - error. 1121 **/ 1122 int 1123 lpfc_hba_down_post(struct lpfc_hba *phba) 1124 { 1125 return (*phba->lpfc_hba_down_post)(phba); 1126 } 1127 1128 /** 1129 * lpfc_hb_timeout - The HBA-timer timeout handler 1130 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1131 * 1132 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1133 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1134 * work-port-events bitmap and the worker thread is notified. This timeout 1135 * event will be used by the worker thread to invoke the actual timeout 1136 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1137 * be performed in the timeout handler and the HBA timeout event bit shall 1138 * be cleared by the worker thread after it has taken the event bitmap out. 1139 **/ 1140 static void 1141 lpfc_hb_timeout(struct timer_list *t) 1142 { 1143 struct lpfc_hba *phba; 1144 uint32_t tmo_posted; 1145 unsigned long iflag; 1146 1147 phba = from_timer(phba, t, hb_tmofunc); 1148 1149 /* Check for heart beat timeout conditions */ 1150 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1151 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1152 if (!tmo_posted) 1153 phba->pport->work_port_events |= WORKER_HB_TMO; 1154 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1155 1156 /* Tell the worker thread there is work to do */ 1157 if (!tmo_posted) 1158 lpfc_worker_wake_up(phba); 1159 return; 1160 } 1161 1162 /** 1163 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1164 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1165 * 1166 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1167 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1168 * work-port-events bitmap and the worker thread is notified. 
This timeout 1169 * event will be used by the worker thread to invoke the actual timeout 1170 * handler routine, lpfc_rrq_handler. Any periodical operations will 1171 * be performed in the timeout handler and the RRQ timeout event bit shall 1172 * be cleared by the worker thread after it has taken the event bitmap out. 1173 **/ 1174 static void 1175 lpfc_rrq_timeout(struct timer_list *t) 1176 { 1177 struct lpfc_hba *phba; 1178 unsigned long iflag; 1179 1180 phba = from_timer(phba, t, rrq_tmr); 1181 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1182 if (!(phba->pport->load_flag & FC_UNLOADING)) 1183 phba->hba_flag |= HBA_RRQ_ACTIVE; 1184 else 1185 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1186 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1187 1188 if (!(phba->pport->load_flag & FC_UNLOADING)) 1189 lpfc_worker_wake_up(phba); 1190 } 1191 1192 /** 1193 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1194 * @phba: pointer to lpfc hba data structure. 1195 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1196 * 1197 * This is the callback function to the lpfc heart-beat mailbox command. 1198 * If configured, the lpfc driver issues the heart-beat mailbox command to 1199 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1200 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1201 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1202 * heart-beat outstanding state. Once the mailbox command comes back and 1203 * no error conditions detected, the heart-beat mailbox command timer is 1204 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1205 * state is cleared for the next heart-beat. If the timer expired with the 1206 * heart-beat outstanding state set, the driver will put the HBA offline. 
1207 **/ 1208 static void 1209 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1210 { 1211 unsigned long drvr_flag; 1212 1213 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1214 phba->hb_outstanding = 0; 1215 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1216 1217 /* Check and reset heart-beat timer is necessary */ 1218 mempool_free(pmboxq, phba->mbox_mem_pool); 1219 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1220 !(phba->link_state == LPFC_HBA_ERROR) && 1221 !(phba->pport->load_flag & FC_UNLOADING)) 1222 mod_timer(&phba->hb_tmofunc, 1223 jiffies + 1224 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1225 return; 1226 } 1227 1228 static void 1229 lpfc_hb_eq_delay_work(struct work_struct *work) 1230 { 1231 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1232 struct lpfc_hba, eq_delay_work); 1233 struct lpfc_eq_intr_info *eqi, *eqi_new; 1234 struct lpfc_queue *eq, *eq_next; 1235 unsigned char *ena_delay = NULL; 1236 uint32_t usdelay; 1237 int i; 1238 1239 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1240 return; 1241 1242 if (phba->link_state == LPFC_HBA_ERROR || 1243 phba->pport->fc_flag & FC_OFFLINE_MODE) 1244 goto requeue; 1245 1246 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1247 GFP_KERNEL); 1248 if (!ena_delay) 1249 goto requeue; 1250 1251 for (i = 0; i < phba->cfg_irq_chann; i++) { 1252 /* Get the EQ corresponding to the IRQ vector */ 1253 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1254 if (!eq) 1255 continue; 1256 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1257 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1258 ena_delay[eq->last_cpu] = 1; 1259 } 1260 } 1261 1262 for_each_present_cpu(i) { 1263 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1264 if (ena_delay[i]) { 1265 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1266 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1267 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1268 } else { 1269 usdelay = 0; 1270 } 1271 1272 eqi->icnt = 0; 1273 1274 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1275 if (unlikely(eq->last_cpu != i)) { 1276 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1277 eq->last_cpu); 1278 list_move_tail(&eq->cpu_list, &eqi_new->list); 1279 continue; 1280 } 1281 if (usdelay != eq->q_mode) 1282 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1283 usdelay); 1284 } 1285 } 1286 1287 kfree(ena_delay); 1288 1289 requeue: 1290 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1291 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1292 } 1293 1294 /** 1295 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1296 * @phba: pointer to lpfc hba data structure. 1297 * 1298 * For each heartbeat, this routine does some heuristic methods to adjust 1299 * XRI distribution. The goal is to fully utilize free XRIs. 1300 **/ 1301 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1302 { 1303 u32 i; 1304 u32 hwq_count; 1305 1306 hwq_count = phba->cfg_hdw_queue; 1307 for (i = 0; i < hwq_count; i++) { 1308 /* Adjust XRIs in private pool */ 1309 lpfc_adjust_pvt_pool_count(phba, i); 1310 1311 /* Adjust high watermark */ 1312 lpfc_adjust_high_watermark(phba, i); 1313 1314 #ifdef LPFC_MXP_STAT 1315 /* Snapshot pbl, pvt and busy count */ 1316 lpfc_snapshot_mxp(phba, i); 1317 #endif 1318 } 1319 } 1320 1321 /** 1322 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1323 * @phba: pointer to lpfc hba data structure. 
1324 * 1325 * This is the actual HBA-timer timeout handler to be invoked by the worker 1326 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1327 * handler performs any periodic operations needed for the device. If such 1328 * periodic event has already been attended to either in the interrupt handler 1329 * or by processing slow-ring or fast-ring events within the HBA-timer 1330 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1331 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1332 * is configured and there is no heart-beat mailbox command outstanding, a 1333 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1334 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1335 * to offline. 1336 **/ 1337 void 1338 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1339 { 1340 struct lpfc_vport **vports; 1341 LPFC_MBOXQ_t *pmboxq; 1342 struct lpfc_dmabuf *buf_ptr; 1343 int retval, i; 1344 struct lpfc_sli *psli = &phba->sli; 1345 LIST_HEAD(completions); 1346 1347 if (phba->cfg_xri_rebalancing) { 1348 /* Multi-XRI pools handler */ 1349 lpfc_hb_mxp_handler(phba); 1350 } 1351 1352 vports = lpfc_create_vport_work_array(phba); 1353 if (vports != NULL) 1354 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1355 lpfc_rcv_seq_check_edtov(vports[i]); 1356 lpfc_fdmi_change_check(vports[i]); 1357 } 1358 lpfc_destroy_vport_work_array(phba, vports); 1359 1360 if ((phba->link_state == LPFC_HBA_ERROR) || 1361 (phba->pport->load_flag & FC_UNLOADING) || 1362 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1363 return; 1364 1365 spin_lock_irq(&phba->pport->work_port_lock); 1366 1367 if (time_after(phba->last_completion_time + 1368 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1369 jiffies)) { 1370 spin_unlock_irq(&phba->pport->work_port_lock); 1371 if (!phba->hb_outstanding) 1372 mod_timer(&phba->hb_tmofunc, 1373 jiffies + 1374 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1375 else 1376 mod_timer(&phba->hb_tmofunc, 1377 jiffies + 1378 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1379 return; 1380 } 1381 spin_unlock_irq(&phba->pport->work_port_lock); 1382 1383 if (phba->elsbuf_cnt && 1384 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1385 spin_lock_irq(&phba->hbalock); 1386 list_splice_init(&phba->elsbuf, &completions); 1387 phba->elsbuf_cnt = 0; 1388 phba->elsbuf_prev_cnt = 0; 1389 spin_unlock_irq(&phba->hbalock); 1390 1391 while (!list_empty(&completions)) { 1392 list_remove_head(&completions, buf_ptr, 1393 struct lpfc_dmabuf, list); 1394 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1395 kfree(buf_ptr); 1396 } 1397 } 1398 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1399 1400 /* If there is no heart beat outstanding, issue a heartbeat command */ 1401 if (phba->cfg_enable_hba_heartbeat) { 1402 if (!phba->hb_outstanding) { 1403 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1404 (list_empty(&psli->mboxq))) { 1405 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1406 GFP_KERNEL); 1407 if (!pmboxq) { 1408 mod_timer(&phba->hb_tmofunc, 1409 jiffies + 1410 msecs_to_jiffies(1000 * 1411 LPFC_HB_MBOX_INTERVAL)); 1412 return; 1413 } 1414 1415 lpfc_heart_beat(phba, pmboxq); 1416 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1417 pmboxq->vport = phba->pport; 1418 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1419 MBX_NOWAIT); 1420 1421 if (retval != MBX_BUSY && 1422 retval != MBX_SUCCESS) { 1423 mempool_free(pmboxq, 1424 phba->mbox_mem_pool); 1425 mod_timer(&phba->hb_tmofunc, 1426 jiffies + 1427 
msecs_to_jiffies(1000 * 1428 LPFC_HB_MBOX_INTERVAL)); 1429 return; 1430 } 1431 phba->skipped_hb = 0; 1432 phba->hb_outstanding = 1; 1433 } else if (time_before_eq(phba->last_completion_time, 1434 phba->skipped_hb)) { 1435 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1436 "2857 Last completion time not " 1437 " updated in %d ms\n", 1438 jiffies_to_msecs(jiffies 1439 - phba->last_completion_time)); 1440 } else 1441 phba->skipped_hb = jiffies; 1442 1443 mod_timer(&phba->hb_tmofunc, 1444 jiffies + 1445 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1446 return; 1447 } else { 1448 /* 1449 * If heart beat timeout called with hb_outstanding set 1450 * we need to give the hb mailbox cmd a chance to 1451 * complete or TMO. 1452 */ 1453 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1454 "0459 Adapter heartbeat still out" 1455 "standing:last compl time was %d ms.\n", 1456 jiffies_to_msecs(jiffies 1457 - phba->last_completion_time)); 1458 mod_timer(&phba->hb_tmofunc, 1459 jiffies + 1460 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1461 } 1462 } else { 1463 mod_timer(&phba->hb_tmofunc, 1464 jiffies + 1465 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1466 } 1467 } 1468 1469 /** 1470 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1471 * @phba: pointer to lpfc hba data structure. 1472 * 1473 * This routine is called to bring the HBA offline when HBA hardware error 1474 * other than Port Error 6 has been detected. 1475 **/ 1476 static void 1477 lpfc_offline_eratt(struct lpfc_hba *phba) 1478 { 1479 struct lpfc_sli *psli = &phba->sli; 1480 1481 spin_lock_irq(&phba->hbalock); 1482 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1483 spin_unlock_irq(&phba->hbalock); 1484 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1485 1486 lpfc_offline(phba); 1487 lpfc_reset_barrier(phba); 1488 spin_lock_irq(&phba->hbalock); 1489 lpfc_sli_brdreset(phba); 1490 spin_unlock_irq(&phba->hbalock); 1491 lpfc_hba_down_post(phba); 1492 lpfc_sli_brdready(phba, HS_MBRDY); 1493 lpfc_unblock_mgmt_io(phba); 1494 phba->link_state = LPFC_HBA_ERROR; 1495 return; 1496 } 1497 1498 /** 1499 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1500 * @phba: pointer to lpfc hba data structure. 1501 * 1502 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1503 * other than Port Error 6 has been detected. 1504 **/ 1505 void 1506 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1507 { 1508 spin_lock_irq(&phba->hbalock); 1509 phba->link_state = LPFC_HBA_ERROR; 1510 spin_unlock_irq(&phba->hbalock); 1511 1512 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1513 lpfc_sli_flush_io_rings(phba); 1514 lpfc_offline(phba); 1515 lpfc_hba_down_post(phba); 1516 lpfc_unblock_mgmt_io(phba); 1517 } 1518 1519 /** 1520 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1521 * @phba: pointer to lpfc hba data structure. 1522 * 1523 * This routine is invoked to handle the deferred HBA hardware error 1524 * conditions. This type of error is indicated by HBA by setting ER1 1525 * and another ER bit in the host status register. The driver will 1526 * wait until the ER1 bit clears before handling the error condition. 1527 **/ 1528 static void 1529 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1530 { 1531 uint32_t old_host_status = phba->work_hs; 1532 struct lpfc_sli *psli = &phba->sli; 1533 1534 /* If the pci channel is offline, ignore possible errors, 1535 * since we cannot communicate with the pci card anyway. 
1536 */ 1537 if (pci_channel_offline(phba->pcidev)) { 1538 spin_lock_irq(&phba->hbalock); 1539 phba->hba_flag &= ~DEFER_ERATT; 1540 spin_unlock_irq(&phba->hbalock); 1541 return; 1542 } 1543 1544 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1545 "0479 Deferred Adapter Hardware Error " 1546 "Data: x%x x%x x%x\n", 1547 phba->work_hs, 1548 phba->work_status[0], phba->work_status[1]); 1549 1550 spin_lock_irq(&phba->hbalock); 1551 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1552 spin_unlock_irq(&phba->hbalock); 1553 1554 1555 /* 1556 * Firmware stops when it triggers erratt. That could cause I/Os to be 1557 * dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq and 1558 * let the SCSI layer retry them after re-establishing the link. 1559 */ 1560 lpfc_sli_abort_fcp_rings(phba); 1561 1562 /* 1563 * There was a firmware error. Take the hba offline and then 1564 * attempt to restart it. 1565 */ 1566 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1567 lpfc_offline(phba); 1568 1569 /* Wait for the ER1 bit to clear.*/ 1570 while (phba->work_hs & HS_FFER1) { 1571 msleep(100); 1572 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1573 phba->work_hs = UNPLUG_ERR; 1574 break; 1575 } 1576 /* If driver is unloading let the worker thread continue */ 1577 if (phba->pport->load_flag & FC_UNLOADING) { 1578 phba->work_hs = 0; 1579 break; 1580 } 1581 } 1582 1583 /* 1584 * This is to protect against a race condition in which 1585 * the first write to the host attention register clears the 1586 * host status register. 1587 */ 1588 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1589 phba->work_hs = old_host_status & ~HS_FFER1; 1590 1591 spin_lock_irq(&phba->hbalock); 1592 phba->hba_flag &= ~DEFER_ERATT; 1593 spin_unlock_irq(&phba->hbalock); 1594 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1595 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1596 } 1597 1598 static void 1599 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1600 { 1601 struct lpfc_board_event_header board_event; 1602 struct Scsi_Host *shost; 1603 1604 board_event.event_type = FC_REG_BOARD_EVENT; 1605 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1606 shost = lpfc_shost_from_vport(phba->pport); 1607 fc_host_post_vendor_event(shost, fc_get_event_number(), 1608 sizeof(board_event), 1609 (char *) &board_event, 1610 LPFC_NL_VENDOR_ID); 1611 } 1612 1613 /** 1614 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1615 * @phba: pointer to lpfc hba data structure. 1616 * 1617 * This routine is invoked to handle the following HBA hardware error 1618 * conditions: 1619 * 1 - HBA error attention interrupt 1620 * 2 - DMA ring index out of range 1621 * 3 - Mailbox command came back as unknown 1622 **/ 1623 static void 1624 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1625 { 1626 struct lpfc_vport *vport = phba->pport; 1627 struct lpfc_sli *psli = &phba->sli; 1628 uint32_t event_data; 1629 unsigned long temperature; 1630 struct temp_event temp_event_data; 1631 struct Scsi_Host *shost; 1632 1633 /* If the pci channel is offline, ignore possible errors, 1634 * since we cannot communicate with the pci card anyway.
1635 */ 1636 if (pci_channel_offline(phba->pcidev)) { 1637 spin_lock_irq(&phba->hbalock); 1638 phba->hba_flag &= ~DEFER_ERATT; 1639 spin_unlock_irq(&phba->hbalock); 1640 return; 1641 } 1642 1643 /* If resets are disabled then leave the HBA alone and return */ 1644 if (!phba->cfg_enable_hba_reset) 1645 return; 1646 1647 /* Send an internal error event to mgmt application */ 1648 lpfc_board_errevt_to_mgmt(phba); 1649 1650 if (phba->hba_flag & DEFER_ERATT) 1651 lpfc_handle_deferred_eratt(phba); 1652 1653 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1654 if (phba->work_hs & HS_FFER6) 1655 /* Re-establishing Link */ 1656 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1657 "1301 Re-establishing Link " 1658 "Data: x%x x%x x%x\n", 1659 phba->work_hs, phba->work_status[0], 1660 phba->work_status[1]); 1661 if (phba->work_hs & HS_FFER8) 1662 /* Device Zeroization */ 1663 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1664 "2861 Host Authentication device " 1665 "zeroization Data:x%x x%x x%x\n", 1666 phba->work_hs, phba->work_status[0], 1667 phba->work_status[1]); 1668 1669 spin_lock_irq(&phba->hbalock); 1670 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1671 spin_unlock_irq(&phba->hbalock); 1672 1673 /* 1674 * Firmware stops when it triggers erratt with HS_FFER6. 1675 * That could cause I/Os to be dropped by the firmware. 1676 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI 1677 * layer retry them after re-establishing the link. 1678 */ 1679 lpfc_sli_abort_fcp_rings(phba); 1680 1681 /* 1682 * There was a firmware error. Take the hba offline and then 1683 * attempt to restart it. 1684 */ 1685 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1686 lpfc_offline(phba); 1687 lpfc_sli_brdrestart(phba); 1688 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1689 lpfc_unblock_mgmt_io(phba); 1690 return; 1691 } 1692 lpfc_unblock_mgmt_io(phba); 1693 } else if (phba->work_hs & HS_CRIT_TEMP) { 1694 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1695 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1696 temp_event_data.event_code = LPFC_CRIT_TEMP; 1697 temp_event_data.data = (uint32_t)temperature; 1698 1699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1700 "0406 Adapter maximum temperature exceeded " 1701 "(%ld), taking this port offline " 1702 "Data: x%x x%x x%x\n", 1703 temperature, phba->work_hs, 1704 phba->work_status[0], phba->work_status[1]); 1705 1706 shost = lpfc_shost_from_vport(phba->pport); 1707 fc_host_post_vendor_event(shost, fc_get_event_number(), 1708 sizeof(temp_event_data), 1709 (char *) &temp_event_data, 1710 SCSI_NL_VID_TYPE_PCI 1711 | PCI_VENDOR_ID_EMULEX); 1712 1713 spin_lock_irq(&phba->hbalock); 1714 phba->over_temp_state = HBA_OVER_TEMP; 1715 spin_unlock_irq(&phba->hbalock); 1716 lpfc_offline_eratt(phba); 1717 1718 } else { 1719 /* The if clause above forces this code path when the status 1720 * failure is a value other than FFER6. Do not call the offline 1721 * twice. This is the adapter hardware error path.
1722 */ 1723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1724 "0457 Adapter Hardware Error " 1725 "Data: x%x x%x x%x\n", 1726 phba->work_hs, 1727 phba->work_status[0], phba->work_status[1]); 1728 1729 event_data = FC_REG_DUMP_EVENT; 1730 shost = lpfc_shost_from_vport(vport); 1731 fc_host_post_vendor_event(shost, fc_get_event_number(), 1732 sizeof(event_data), (char *) &event_data, 1733 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1734 1735 lpfc_offline_eratt(phba); 1736 } 1737 return; 1738 } 1739 1740 /** 1741 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1742 * @phba: pointer to lpfc hba data structure. 1743 * @mbx_action: flag for mailbox shutdown action. 1744 * 1745 * This routine is invoked to perform an SLI4 port PCI function reset in 1746 * response to port status register polling attention. It waits for port 1747 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1748 * During this process, interrupt vectors are freed and later requested 1749 * for handling possible port resource change. 1750 **/ 1751 static int 1752 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1753 bool en_rn_msg) 1754 { 1755 int rc; 1756 uint32_t intr_mode; 1757 1758 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1759 LPFC_SLI_INTF_IF_TYPE_2) { 1760 /* 1761 * On error status condition, driver need to wait for port 1762 * ready before performing reset. 1763 */ 1764 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1765 if (rc) 1766 return rc; 1767 } 1768 1769 /* need reset: attempt for port recovery */ 1770 if (en_rn_msg) 1771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1772 "2887 Reset Needed: Attempting Port " 1773 "Recovery...\n"); 1774 lpfc_offline_prep(phba, mbx_action); 1775 lpfc_sli_flush_io_rings(phba); 1776 lpfc_offline(phba); 1777 /* release interrupt for possible resource change */ 1778 lpfc_sli4_disable_intr(phba); 1779 rc = lpfc_sli_brdrestart(phba); 1780 if (rc) { 1781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1782 "6309 Failed to restart board\n"); 1783 return rc; 1784 } 1785 /* request and enable interrupt */ 1786 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1787 if (intr_mode == LPFC_INTR_ERROR) { 1788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1789 "3175 Failed to enable interrupt\n"); 1790 return -EIO; 1791 } 1792 phba->intr_mode = intr_mode; 1793 rc = lpfc_online(phba); 1794 if (rc == 0) 1795 lpfc_unblock_mgmt_io(phba); 1796 1797 return rc; 1798 } 1799 1800 /** 1801 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1802 * @phba: pointer to lpfc hba data structure. 1803 * 1804 * This routine is invoked to handle the SLI4 HBA hardware error attention 1805 * conditions. 1806 **/ 1807 static void 1808 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1809 { 1810 struct lpfc_vport *vport = phba->pport; 1811 uint32_t event_data; 1812 struct Scsi_Host *shost; 1813 uint32_t if_type; 1814 struct lpfc_register portstat_reg = {0}; 1815 uint32_t reg_err1, reg_err2; 1816 uint32_t uerrlo_reg, uemasklo_reg; 1817 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1818 bool en_rn_msg = true; 1819 struct temp_event temp_event_data; 1820 struct lpfc_register portsmphr_reg; 1821 int rc, i; 1822 1823 /* If the pci channel is offline, ignore possible errors, since 1824 * we cannot communicate with the pci card anyway. 
1825 */ 1826 if (pci_channel_offline(phba->pcidev)) { 1827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1828 "3166 pci channel is offline\n"); 1829 lpfc_sli4_offline_eratt(phba); 1830 return; 1831 } 1832 1833 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1834 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1835 switch (if_type) { 1836 case LPFC_SLI_INTF_IF_TYPE_0: 1837 pci_rd_rc1 = lpfc_readl( 1838 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1839 &uerrlo_reg); 1840 pci_rd_rc2 = lpfc_readl( 1841 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1842 &uemasklo_reg); 1843 /* consider PCI bus read error as pci_channel_offline */ 1844 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1845 return; 1846 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1847 lpfc_sli4_offline_eratt(phba); 1848 return; 1849 } 1850 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1851 "7623 Checking UE recoverable"); 1852 1853 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1854 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1855 &portsmphr_reg.word0)) 1856 continue; 1857 1858 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1859 &portsmphr_reg); 1860 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1861 LPFC_PORT_SEM_UE_RECOVERABLE) 1862 break; 1863 /*Sleep for 1Sec, before checking SEMAPHORE */ 1864 msleep(1000); 1865 } 1866 1867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1868 "4827 smphr_port_status x%x : Waited %dSec", 1869 smphr_port_status, i); 1870 1871 /* Recoverable UE, reset the HBA device */ 1872 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1873 LPFC_PORT_SEM_UE_RECOVERABLE) { 1874 for (i = 0; i < 20; i++) { 1875 msleep(1000); 1876 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1877 &portsmphr_reg.word0) && 1878 (LPFC_POST_STAGE_PORT_READY == 1879 bf_get(lpfc_port_smphr_port_status, 1880 &portsmphr_reg))) { 1881 rc = lpfc_sli4_port_sta_fn_reset(phba, 1882 LPFC_MBX_NO_WAIT, en_rn_msg); 1883 if (rc == 0) 1884 return; 1885 lpfc_printf_log(phba, 1886 KERN_ERR, LOG_INIT, 1887 "4215 Failed to recover UE"); 1888 break; 1889 } 1890 } 1891 } 1892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1893 "7624 Firmware not ready: Failing UE recovery," 1894 " waited %dSec", i); 1895 phba->link_state = LPFC_HBA_ERROR; 1896 break; 1897 1898 case LPFC_SLI_INTF_IF_TYPE_2: 1899 case LPFC_SLI_INTF_IF_TYPE_6: 1900 pci_rd_rc1 = lpfc_readl( 1901 phba->sli4_hba.u.if_type2.STATUSregaddr, 1902 &portstat_reg.word0); 1903 /* consider PCI bus read error as pci_channel_offline */ 1904 if (pci_rd_rc1 == -EIO) { 1905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1906 "3151 PCI bus read access failure: x%x\n", 1907 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1908 lpfc_sli4_offline_eratt(phba); 1909 return; 1910 } 1911 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1912 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1913 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1915 "2889 Port Overtemperature event, " 1916 "taking port offline Data: x%x x%x\n", 1917 reg_err1, reg_err2); 1918 1919 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1920 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1921 temp_event_data.event_code = LPFC_CRIT_TEMP; 1922 temp_event_data.data = 0xFFFFFFFF; 1923 1924 shost = lpfc_shost_from_vport(phba->pport); 1925 fc_host_post_vendor_event(shost, fc_get_event_number(), 1926 sizeof(temp_event_data), 1927 (char *)&temp_event_data, 1928 SCSI_NL_VID_TYPE_PCI 1929 | PCI_VENDOR_ID_EMULEX); 1930 1931 spin_lock_irq(&phba->hbalock); 1932 
phba->over_temp_state = HBA_OVER_TEMP; 1933 spin_unlock_irq(&phba->hbalock); 1934 lpfc_sli4_offline_eratt(phba); 1935 return; 1936 } 1937 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1938 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1940 "3143 Port Down: Firmware Update " 1941 "Detected\n"); 1942 en_rn_msg = false; 1943 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1944 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1946 "3144 Port Down: Debug Dump\n"); 1947 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1948 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1950 "3145 Port Down: Provisioning\n"); 1951 1952 /* If resets are disabled then leave the HBA alone and return */ 1953 if (!phba->cfg_enable_hba_reset) 1954 return; 1955 1956 /* Check port status register for function reset */ 1957 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1958 en_rn_msg); 1959 if (rc == 0) { 1960 /* don't report event on forced debug dump */ 1961 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1962 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1963 return; 1964 else 1965 break; 1966 } 1967 /* fall through for not able to recover */ 1968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1969 "3152 Unrecoverable error\n"); 1970 phba->link_state = LPFC_HBA_ERROR; 1971 break; 1972 case LPFC_SLI_INTF_IF_TYPE_1: 1973 default: 1974 break; 1975 } 1976 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1977 "3123 Report dump event to upper layer\n"); 1978 /* Send an internal error event to mgmt application */ 1979 lpfc_board_errevt_to_mgmt(phba); 1980 1981 event_data = FC_REG_DUMP_EVENT; 1982 shost = lpfc_shost_from_vport(vport); 1983 fc_host_post_vendor_event(shost, fc_get_event_number(), 1984 sizeof(event_data), (char *) &event_data, 1985 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1986 } 1987 1988 /** 1989 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1990 * @phba: pointer to lpfc HBA data structure. 1991 * 1992 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1993 * routine from the API jump table function pointer from the lpfc_hba struct. 1994 * 1995 * Return codes 1996 * 0 - success. 1997 * Any other value - error. 1998 **/ 1999 void 2000 lpfc_handle_eratt(struct lpfc_hba *phba) 2001 { 2002 (*phba->lpfc_handle_eratt)(phba); 2003 } 2004 2005 /** 2006 * lpfc_handle_latt - The HBA link event handler 2007 * @phba: pointer to lpfc hba data structure. 2008 * 2009 * This routine is invoked from the worker thread to handle a HBA host 2010 * attention link event. SLI3 only. 
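 * It issues a READ_TOPOLOGY mailbox command (completed by
 * lpfc_mbx_cmpl_read_topology()) and blocks further ELS IOCBs on the ELS
 * ring until that mailbox command finishes. On any allocation or mailbox
 * failure the link is taken down, link-attention interrupts are re-enabled,
 * and the HBA is marked in error.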
2011 **/ 2012 void 2013 lpfc_handle_latt(struct lpfc_hba *phba) 2014 { 2015 struct lpfc_vport *vport = phba->pport; 2016 struct lpfc_sli *psli = &phba->sli; 2017 LPFC_MBOXQ_t *pmb; 2018 volatile uint32_t control; 2019 struct lpfc_dmabuf *mp; 2020 int rc = 0; 2021 2022 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2023 if (!pmb) { 2024 rc = 1; 2025 goto lpfc_handle_latt_err_exit; 2026 } 2027 2028 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2029 if (!mp) { 2030 rc = 2; 2031 goto lpfc_handle_latt_free_pmb; 2032 } 2033 2034 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2035 if (!mp->virt) { 2036 rc = 3; 2037 goto lpfc_handle_latt_free_mp; 2038 } 2039 2040 /* Cleanup any outstanding ELS commands */ 2041 lpfc_els_flush_all_cmd(phba); 2042 2043 psli->slistat.link_event++; 2044 lpfc_read_topology(phba, pmb, mp); 2045 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2046 pmb->vport = vport; 2047 /* Block ELS IOCBs until we have processed this mbox command */ 2048 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2049 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2050 if (rc == MBX_NOT_FINISHED) { 2051 rc = 4; 2052 goto lpfc_handle_latt_free_mbuf; 2053 } 2054 2055 /* Clear Link Attention in HA REG */ 2056 spin_lock_irq(&phba->hbalock); 2057 writel(HA_LATT, phba->HAregaddr); 2058 readl(phba->HAregaddr); /* flush */ 2059 spin_unlock_irq(&phba->hbalock); 2060 2061 return; 2062 2063 lpfc_handle_latt_free_mbuf: 2064 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2065 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2066 lpfc_handle_latt_free_mp: 2067 kfree(mp); 2068 lpfc_handle_latt_free_pmb: 2069 mempool_free(pmb, phba->mbox_mem_pool); 2070 lpfc_handle_latt_err_exit: 2071 /* Enable Link attention interrupts */ 2072 spin_lock_irq(&phba->hbalock); 2073 psli->sli_flag |= LPFC_PROCESS_LA; 2074 control = readl(phba->HCregaddr); 2075 control |= HC_LAINT_ENA; 2076 writel(control, phba->HCregaddr); 2077 readl(phba->HCregaddr); /* flush */ 2078 2079 /* Clear Link Attention in HA REG */ 2080 writel(HA_LATT, phba->HAregaddr); 2081 readl(phba->HAregaddr); /* flush */ 2082 spin_unlock_irq(&phba->hbalock); 2083 lpfc_linkdown(phba); 2084 phba->link_state = LPFC_HBA_ERROR; 2085 2086 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2087 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2088 2089 return; 2090 } 2091 2092 /** 2093 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2094 * @phba: pointer to lpfc hba data structure. 2095 * @vpd: pointer to the vital product data. 2096 * @len: length of the vital product data in bytes. 2097 * 2098 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2099 * an array of characters. In this routine, the ModelName, ProgramType, and 2100 * ModelDesc, etc. fields of the phba data structure will be populated. 
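 * The parse walks the VPD tag by tag: identifier-string tags (0x82, 0x91)
 * are skipped, the read-only area (0x90) is scanned for the SN and V1-V4
 * keywords (serial number, model description, model name, program type and
 * port), and the end tag (0x78) terminates the walk.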
2101 * 2102 * Return codes 2103 * 0 - pointer to the VPD passed in is NULL 2104 * 1 - success 2105 **/ 2106 int 2107 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2108 { 2109 uint8_t lenlo, lenhi; 2110 int Length; 2111 int i, j; 2112 int finished = 0; 2113 int index = 0; 2114 2115 if (!vpd) 2116 return 0; 2117 2118 /* Vital Product */ 2119 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2120 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2121 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2122 (uint32_t) vpd[3]); 2123 while (!finished && (index < (len - 4))) { 2124 switch (vpd[index]) { 2125 case 0x82: 2126 case 0x91: 2127 index += 1; 2128 lenlo = vpd[index]; 2129 index += 1; 2130 lenhi = vpd[index]; 2131 index += 1; 2132 i = ((((unsigned short)lenhi) << 8) + lenlo); 2133 index += i; 2134 break; 2135 case 0x90: 2136 index += 1; 2137 lenlo = vpd[index]; 2138 index += 1; 2139 lenhi = vpd[index]; 2140 index += 1; 2141 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2142 if (Length > len - index) 2143 Length = len - index; 2144 while (Length > 0) { 2145 /* Look for Serial Number */ 2146 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2147 index += 2; 2148 i = vpd[index]; 2149 index += 1; 2150 j = 0; 2151 Length -= (3+i); 2152 while(i--) { 2153 phba->SerialNumber[j++] = vpd[index++]; 2154 if (j == 31) 2155 break; 2156 } 2157 phba->SerialNumber[j] = 0; 2158 continue; 2159 } 2160 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2161 phba->vpd_flag |= VPD_MODEL_DESC; 2162 index += 2; 2163 i = vpd[index]; 2164 index += 1; 2165 j = 0; 2166 Length -= (3+i); 2167 while(i--) { 2168 phba->ModelDesc[j++] = vpd[index++]; 2169 if (j == 255) 2170 break; 2171 } 2172 phba->ModelDesc[j] = 0; 2173 continue; 2174 } 2175 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2176 phba->vpd_flag |= VPD_MODEL_NAME; 2177 index += 2; 2178 i = vpd[index]; 2179 index += 1; 2180 j = 0; 2181 Length -= (3+i); 2182 while(i--) { 2183 phba->ModelName[j++] = vpd[index++]; 2184 if (j == 79) 2185 break; 2186 } 2187 phba->ModelName[j] = 0; 2188 continue; 2189 } 2190 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2191 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2192 index += 2; 2193 i = vpd[index]; 2194 index += 1; 2195 j = 0; 2196 Length -= (3+i); 2197 while(i--) { 2198 phba->ProgramType[j++] = vpd[index++]; 2199 if (j == 255) 2200 break; 2201 } 2202 phba->ProgramType[j] = 0; 2203 continue; 2204 } 2205 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2206 phba->vpd_flag |= VPD_PORT; 2207 index += 2; 2208 i = vpd[index]; 2209 index += 1; 2210 j = 0; 2211 Length -= (3+i); 2212 while(i--) { 2213 if ((phba->sli_rev == LPFC_SLI_REV4) && 2214 (phba->sli4_hba.pport_name_sta == 2215 LPFC_SLI4_PPNAME_GET)) { 2216 j++; 2217 index++; 2218 } else 2219 phba->Port[j++] = vpd[index++]; 2220 if (j == 19) 2221 break; 2222 } 2223 if ((phba->sli_rev != LPFC_SLI_REV4) || 2224 (phba->sli4_hba.pport_name_sta == 2225 LPFC_SLI4_PPNAME_NON)) 2226 phba->Port[j] = 0; 2227 continue; 2228 } 2229 else { 2230 index += 2; 2231 i = vpd[index]; 2232 index += 1; 2233 index += i; 2234 Length -= (3 + i); 2235 } 2236 } 2237 finished = 0; 2238 break; 2239 case 0x78: 2240 finished = 1; 2241 break; 2242 default: 2243 index ++; 2244 break; 2245 } 2246 } 2247 2248 return(1); 2249 } 2250 2251 /** 2252 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2253 * @phba: pointer to lpfc hba data structure. 2254 * @mdp: pointer to the data structure to hold the derived model name. 
2255 * @descp: pointer to the data structure to hold the derived description. 2256 * 2257 * This routine retrieves HBA's description based on its registered PCI device 2258 * ID. The @descp passed into this function points to an array of 256 chars. It 2259 * shall be returned with the model name, maximum speed, and the host bus type. 2260 * The @mdp passed into this function points to an array of 80 chars. When the 2261 * function returns, the @mdp will be filled with the model name. 2262 **/ 2263 static void 2264 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2265 { 2266 lpfc_vpd_t *vp; 2267 uint16_t dev_id = phba->pcidev->device; 2268 int max_speed; 2269 int GE = 0; 2270 int oneConnect = 0; /* default is not a oneConnect */ 2271 struct { 2272 char *name; 2273 char *bus; 2274 char *function; 2275 } m = {"<Unknown>", "", ""}; 2276 2277 if (mdp && mdp[0] != '\0' 2278 && descp && descp[0] != '\0') 2279 return; 2280 2281 if (phba->lmt & LMT_64Gb) 2282 max_speed = 64; 2283 else if (phba->lmt & LMT_32Gb) 2284 max_speed = 32; 2285 else if (phba->lmt & LMT_16Gb) 2286 max_speed = 16; 2287 else if (phba->lmt & LMT_10Gb) 2288 max_speed = 10; 2289 else if (phba->lmt & LMT_8Gb) 2290 max_speed = 8; 2291 else if (phba->lmt & LMT_4Gb) 2292 max_speed = 4; 2293 else if (phba->lmt & LMT_2Gb) 2294 max_speed = 2; 2295 else if (phba->lmt & LMT_1Gb) 2296 max_speed = 1; 2297 else 2298 max_speed = 0; 2299 2300 vp = &phba->vpd; 2301 2302 switch (dev_id) { 2303 case PCI_DEVICE_ID_FIREFLY: 2304 m = (typeof(m)){"LP6000", "PCI", 2305 "Obsolete, Unsupported Fibre Channel Adapter"}; 2306 break; 2307 case PCI_DEVICE_ID_SUPERFLY: 2308 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2309 m = (typeof(m)){"LP7000", "PCI", ""}; 2310 else 2311 m = (typeof(m)){"LP7000E", "PCI", ""}; 2312 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2313 break; 2314 case PCI_DEVICE_ID_DRAGONFLY: 2315 m = (typeof(m)){"LP8000", "PCI", 2316 "Obsolete, Unsupported Fibre Channel Adapter"}; 2317 break; 2318 case PCI_DEVICE_ID_CENTAUR: 2319 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2320 m = (typeof(m)){"LP9002", "PCI", ""}; 2321 else 2322 m = (typeof(m)){"LP9000", "PCI", ""}; 2323 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2324 break; 2325 case PCI_DEVICE_ID_RFLY: 2326 m = (typeof(m)){"LP952", "PCI", 2327 "Obsolete, Unsupported Fibre Channel Adapter"}; 2328 break; 2329 case PCI_DEVICE_ID_PEGASUS: 2330 m = (typeof(m)){"LP9802", "PCI-X", 2331 "Obsolete, Unsupported Fibre Channel Adapter"}; 2332 break; 2333 case PCI_DEVICE_ID_THOR: 2334 m = (typeof(m)){"LP10000", "PCI-X", 2335 "Obsolete, Unsupported Fibre Channel Adapter"}; 2336 break; 2337 case PCI_DEVICE_ID_VIPER: 2338 m = (typeof(m)){"LPX1000", "PCI-X", 2339 "Obsolete, Unsupported Fibre Channel Adapter"}; 2340 break; 2341 case PCI_DEVICE_ID_PFLY: 2342 m = (typeof(m)){"LP982", "PCI-X", 2343 "Obsolete, Unsupported Fibre Channel Adapter"}; 2344 break; 2345 case PCI_DEVICE_ID_TFLY: 2346 m = (typeof(m)){"LP1050", "PCI-X", 2347 "Obsolete, Unsupported Fibre Channel Adapter"}; 2348 break; 2349 case PCI_DEVICE_ID_HELIOS: 2350 m = (typeof(m)){"LP11000", "PCI-X2", 2351 "Obsolete, Unsupported Fibre Channel Adapter"}; 2352 break; 2353 case PCI_DEVICE_ID_HELIOS_SCSP: 2354 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2355 "Obsolete, Unsupported Fibre Channel Adapter"}; 2356 break; 2357 case PCI_DEVICE_ID_HELIOS_DCSP: 2358 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2359 "Obsolete, Unsupported Fibre Channel Adapter"}; 2360 break; 2361 case 
PCI_DEVICE_ID_NEPTUNE: 2362 m = (typeof(m)){"LPe1000", "PCIe", 2363 "Obsolete, Unsupported Fibre Channel Adapter"}; 2364 break; 2365 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2366 m = (typeof(m)){"LPe1000-SP", "PCIe", 2367 "Obsolete, Unsupported Fibre Channel Adapter"}; 2368 break; 2369 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2370 m = (typeof(m)){"LPe1002-SP", "PCIe", 2371 "Obsolete, Unsupported Fibre Channel Adapter"}; 2372 break; 2373 case PCI_DEVICE_ID_BMID: 2374 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2375 break; 2376 case PCI_DEVICE_ID_BSMB: 2377 m = (typeof(m)){"LP111", "PCI-X2", 2378 "Obsolete, Unsupported Fibre Channel Adapter"}; 2379 break; 2380 case PCI_DEVICE_ID_ZEPHYR: 2381 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2382 break; 2383 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2384 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2385 break; 2386 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2387 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2388 GE = 1; 2389 break; 2390 case PCI_DEVICE_ID_ZMID: 2391 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2392 break; 2393 case PCI_DEVICE_ID_ZSMB: 2394 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2395 break; 2396 case PCI_DEVICE_ID_LP101: 2397 m = (typeof(m)){"LP101", "PCI-X", 2398 "Obsolete, Unsupported Fibre Channel Adapter"}; 2399 break; 2400 case PCI_DEVICE_ID_LP10000S: 2401 m = (typeof(m)){"LP10000-S", "PCI", 2402 "Obsolete, Unsupported Fibre Channel Adapter"}; 2403 break; 2404 case PCI_DEVICE_ID_LP11000S: 2405 m = (typeof(m)){"LP11000-S", "PCI-X2", 2406 "Obsolete, Unsupported Fibre Channel Adapter"}; 2407 break; 2408 case PCI_DEVICE_ID_LPE11000S: 2409 m = (typeof(m)){"LPe11000-S", "PCIe", 2410 "Obsolete, Unsupported Fibre Channel Adapter"}; 2411 break; 2412 case PCI_DEVICE_ID_SAT: 2413 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2414 break; 2415 case PCI_DEVICE_ID_SAT_MID: 2416 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2417 break; 2418 case PCI_DEVICE_ID_SAT_SMB: 2419 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2420 break; 2421 case PCI_DEVICE_ID_SAT_DCSP: 2422 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2423 break; 2424 case PCI_DEVICE_ID_SAT_SCSP: 2425 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2426 break; 2427 case PCI_DEVICE_ID_SAT_S: 2428 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2429 break; 2430 case PCI_DEVICE_ID_HORNET: 2431 m = (typeof(m)){"LP21000", "PCIe", 2432 "Obsolete, Unsupported FCoE Adapter"}; 2433 GE = 1; 2434 break; 2435 case PCI_DEVICE_ID_PROTEUS_VF: 2436 m = (typeof(m)){"LPev12000", "PCIe IOV", 2437 "Obsolete, Unsupported Fibre Channel Adapter"}; 2438 break; 2439 case PCI_DEVICE_ID_PROTEUS_PF: 2440 m = (typeof(m)){"LPev12000", "PCIe IOV", 2441 "Obsolete, Unsupported Fibre Channel Adapter"}; 2442 break; 2443 case PCI_DEVICE_ID_PROTEUS_S: 2444 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2445 "Obsolete, Unsupported Fibre Channel Adapter"}; 2446 break; 2447 case PCI_DEVICE_ID_TIGERSHARK: 2448 oneConnect = 1; 2449 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2450 break; 2451 case PCI_DEVICE_ID_TOMCAT: 2452 oneConnect = 1; 2453 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2454 break; 2455 case PCI_DEVICE_ID_FALCON: 2456 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2457 "EmulexSecure Fibre"}; 2458 break; 2459 case PCI_DEVICE_ID_BALIUS: 2460 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2461 "Obsolete, Unsupported Fibre Channel Adapter"}; 2462 break; 2463 
case PCI_DEVICE_ID_LANCER_FC: 2464 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2465 break; 2466 case PCI_DEVICE_ID_LANCER_FC_VF: 2467 m = (typeof(m)){"LPe16000", "PCIe", 2468 "Obsolete, Unsupported Fibre Channel Adapter"}; 2469 break; 2470 case PCI_DEVICE_ID_LANCER_FCOE: 2471 oneConnect = 1; 2472 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2473 break; 2474 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2475 oneConnect = 1; 2476 m = (typeof(m)){"OCe15100", "PCIe", 2477 "Obsolete, Unsupported FCoE"}; 2478 break; 2479 case PCI_DEVICE_ID_LANCER_G6_FC: 2480 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2481 break; 2482 case PCI_DEVICE_ID_LANCER_G7_FC: 2483 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2484 break; 2485 case PCI_DEVICE_ID_SKYHAWK: 2486 case PCI_DEVICE_ID_SKYHAWK_VF: 2487 oneConnect = 1; 2488 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2489 break; 2490 default: 2491 m = (typeof(m)){"Unknown", "", ""}; 2492 break; 2493 } 2494 2495 if (mdp && mdp[0] == '\0') 2496 snprintf(mdp, 79,"%s", m.name); 2497 /* 2498 * oneConnect hba requires special processing, they are all initiators 2499 * and we put the port number on the end 2500 */ 2501 if (descp && descp[0] == '\0') { 2502 if (oneConnect) 2503 snprintf(descp, 255, 2504 "Emulex OneConnect %s, %s Initiator %s", 2505 m.name, m.function, 2506 phba->Port); 2507 else if (max_speed == 0) 2508 snprintf(descp, 255, 2509 "Emulex %s %s %s", 2510 m.name, m.bus, m.function); 2511 else 2512 snprintf(descp, 255, 2513 "Emulex %s %d%s %s %s", 2514 m.name, max_speed, (GE) ? "GE" : "Gb", 2515 m.bus, m.function); 2516 } 2517 } 2518 2519 /** 2520 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2521 * @phba: pointer to lpfc hba data structure. 2522 * @pring: pointer to a IOCB ring. 2523 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2524 * 2525 * This routine posts a given number of IOCBs with the associated DMA buffer 2526 * descriptors specified by the cnt argument to the given IOCB ring. 2527 * 2528 * Return codes 2529 * The number of IOCBs NOT able to be posted to the IOCB ring. 
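 * Up to two buffers are chained per CMD_QUE_RING_BUF64_CN iocb. Buffers
 * that cannot be posted (allocation or issue failure) are accounted for in
 * pring->missbufcnt and retried on the next call.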
2530 **/ 2531 int 2532 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2533 { 2534 IOCB_t *icmd; 2535 struct lpfc_iocbq *iocb; 2536 struct lpfc_dmabuf *mp1, *mp2; 2537 2538 cnt += pring->missbufcnt; 2539 2540 /* While there are buffers to post */ 2541 while (cnt > 0) { 2542 /* Allocate buffer for command iocb */ 2543 iocb = lpfc_sli_get_iocbq(phba); 2544 if (iocb == NULL) { 2545 pring->missbufcnt = cnt; 2546 return cnt; 2547 } 2548 icmd = &iocb->iocb; 2549 2550 /* 2 buffers can be posted per command */ 2551 /* Allocate buffer to post */ 2552 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2553 if (mp1) 2554 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2555 if (!mp1 || !mp1->virt) { 2556 kfree(mp1); 2557 lpfc_sli_release_iocbq(phba, iocb); 2558 pring->missbufcnt = cnt; 2559 return cnt; 2560 } 2561 2562 INIT_LIST_HEAD(&mp1->list); 2563 /* Allocate buffer to post */ 2564 if (cnt > 1) { 2565 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2566 if (mp2) 2567 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2568 &mp2->phys); 2569 if (!mp2 || !mp2->virt) { 2570 kfree(mp2); 2571 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2572 kfree(mp1); 2573 lpfc_sli_release_iocbq(phba, iocb); 2574 pring->missbufcnt = cnt; 2575 return cnt; 2576 } 2577 2578 INIT_LIST_HEAD(&mp2->list); 2579 } else { 2580 mp2 = NULL; 2581 } 2582 2583 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2584 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2585 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2586 icmd->ulpBdeCount = 1; 2587 cnt--; 2588 if (mp2) { 2589 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2590 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2591 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2592 cnt--; 2593 icmd->ulpBdeCount = 2; 2594 } 2595 2596 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2597 icmd->ulpLe = 1; 2598 2599 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2600 IOCB_ERROR) { 2601 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2602 kfree(mp1); 2603 cnt++; 2604 if (mp2) { 2605 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2606 kfree(mp2); 2607 cnt++; 2608 } 2609 lpfc_sli_release_iocbq(phba, iocb); 2610 pring->missbufcnt = cnt; 2611 return cnt; 2612 } 2613 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2614 if (mp2) 2615 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2616 } 2617 pring->missbufcnt = 0; 2618 return 0; 2619 } 2620 2621 /** 2622 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2623 * @phba: pointer to lpfc hba data structure. 2624 * 2625 * This routine posts initial receive IOCB buffers to the ELS ring. The 2626 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2627 * set to 64 IOCBs. SLI3 only. 2628 * 2629 * Return codes 2630 * 0 - success (currently always success) 2631 **/ 2632 static int 2633 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2634 { 2635 struct lpfc_sli *psli = &phba->sli; 2636 2637 /* Ring 0, ELS / CT buffers */ 2638 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2639 /* Ring 2 - FCP no buffers needed */ 2640 2641 return 0; 2642 } 2643 2644 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2645 2646 /** 2647 * lpfc_sha_init - Set up initial array of hash table entries 2648 * @HashResultPointer: pointer to an array as hash table. 2649 * 2650 * This routine sets up the initial values to the array of hash table entries 2651 * for the LC HBAs. 
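 * The five words written here are the standard SHA-1 initial hash values
 * (H0-H4); lpfc_sha_iterate() below then runs a SHA-1 style compression
 * round over an 80-word working buffer built with the S(N, V) rotate-left
 * macro above.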
2652 **/ 2653 static void 2654 lpfc_sha_init(uint32_t * HashResultPointer) 2655 { 2656 HashResultPointer[0] = 0x67452301; 2657 HashResultPointer[1] = 0xEFCDAB89; 2658 HashResultPointer[2] = 0x98BADCFE; 2659 HashResultPointer[3] = 0x10325476; 2660 HashResultPointer[4] = 0xC3D2E1F0; 2661 } 2662 2663 /** 2664 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2665 * @HashResultPointer: pointer to an initial/result hash table. 2666 * @HashWorkingPointer: pointer to a working hash table. 2667 * 2668 * This routine iterates an initial hash table pointed to by @HashResultPointer 2669 * with the values from the working hash table pointed to by @HashWorkingPointer. 2670 * The results are put back into the initial hash table and returned through 2671 * @HashResultPointer as the result hash table. 2672 **/ 2673 static void 2674 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2675 { 2676 int t; 2677 uint32_t TEMP; 2678 uint32_t A, B, C, D, E; 2679 t = 16; 2680 do { 2681 HashWorkingPointer[t] = 2682 S(1, 2683 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2684 8] ^ 2685 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2686 } while (++t <= 79); 2687 t = 0; 2688 A = HashResultPointer[0]; 2689 B = HashResultPointer[1]; 2690 C = HashResultPointer[2]; 2691 D = HashResultPointer[3]; 2692 E = HashResultPointer[4]; 2693 2694 do { 2695 if (t < 20) { 2696 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2697 } else if (t < 40) { 2698 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2699 } else if (t < 60) { 2700 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2701 } else { 2702 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2703 } 2704 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2705 E = D; 2706 D = C; 2707 C = S(30, B); 2708 B = A; 2709 A = TEMP; 2710 } while (++t <= 79); 2711 2712 HashResultPointer[0] += A; 2713 HashResultPointer[1] += B; 2714 HashResultPointer[2] += C; 2715 HashResultPointer[3] += D; 2716 HashResultPointer[4] += E; 2717 2718 } 2719 2720 /** 2721 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2722 * @RandomChallenge: pointer to the entry of host challenge random number array. 2723 * @HashWorking: pointer to the entry of the working hash array. 2724 * 2725 * This routine calculates the working hash array referred to by @HashWorking 2726 * from the challenge random numbers associated with the host, referred to by 2727 * @RandomChallenge. The result is put into the entry of the working hash 2728 * array and returned by reference through @HashWorking. 2729 **/ 2730 static void 2731 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2732 { 2733 *HashWorking = (*RandomChallenge ^ *HashWorking); 2734 } 2735 2736 /** 2737 * lpfc_hba_init - Perform special handling for LC HBA initialization 2738 * @phba: pointer to lpfc hba data structure. 2739 * @hbainit: pointer to an array of unsigned 32-bit integers. 2740 * 2741 * This routine performs the special handling for LC HBA initialization.
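 * The 80-word working array is seeded from the adapter WWNN, its leading
 * words are XORed with the phba->RandomData challenge via
 * lpfc_challenge_key(), and lpfc_sha_init()/lpfc_sha_iterate() fold the
 * result into the five-word digest returned through @hbainit.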
2742 **/ 2743 void 2744 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2745 { 2746 int t; 2747 uint32_t *HashWorking; 2748 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2749 2750 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2751 if (!HashWorking) 2752 return; 2753 2754 HashWorking[0] = HashWorking[78] = *pwwnn++; 2755 HashWorking[1] = HashWorking[79] = *pwwnn; 2756 2757 for (t = 0; t < 7; t++) 2758 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2759 2760 lpfc_sha_init(hbainit); 2761 lpfc_sha_iterate(hbainit, HashWorking); 2762 kfree(HashWorking); 2763 } 2764 2765 /** 2766 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2767 * @vport: pointer to a virtual N_Port data structure. 2768 * 2769 * This routine performs the necessary cleanups before deleting the @vport. 2770 * It invokes the discovery state machine to perform necessary state 2771 * transitions and to release the ndlps associated with the @vport. Note, 2772 * the physical port is treated as @vport 0. 2773 **/ 2774 void 2775 lpfc_cleanup(struct lpfc_vport *vport) 2776 { 2777 struct lpfc_hba *phba = vport->phba; 2778 struct lpfc_nodelist *ndlp, *next_ndlp; 2779 int i = 0; 2780 2781 if (phba->link_state > LPFC_LINK_DOWN) 2782 lpfc_port_link_failure(vport); 2783 2784 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2785 if (!NLP_CHK_NODE_ACT(ndlp)) { 2786 ndlp = lpfc_enable_node(vport, ndlp, 2787 NLP_STE_UNUSED_NODE); 2788 if (!ndlp) 2789 continue; 2790 spin_lock_irq(&phba->ndlp_lock); 2791 NLP_SET_FREE_REQ(ndlp); 2792 spin_unlock_irq(&phba->ndlp_lock); 2793 /* Trigger the release of the ndlp memory */ 2794 lpfc_nlp_put(ndlp); 2795 continue; 2796 } 2797 spin_lock_irq(&phba->ndlp_lock); 2798 if (NLP_CHK_FREE_REQ(ndlp)) { 2799 /* The ndlp should not be in memory free mode already */ 2800 spin_unlock_irq(&phba->ndlp_lock); 2801 continue; 2802 } else 2803 /* Indicate request for freeing ndlp memory */ 2804 NLP_SET_FREE_REQ(ndlp); 2805 spin_unlock_irq(&phba->ndlp_lock); 2806 2807 if (vport->port_type != LPFC_PHYSICAL_PORT && 2808 ndlp->nlp_DID == Fabric_DID) { 2809 /* Just free up ndlp with Fabric_DID for vports */ 2810 lpfc_nlp_put(ndlp); 2811 continue; 2812 } 2813 2814 /* take care of nodes in unused state before the state 2815 * machine taking action. 2816 */ 2817 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2818 lpfc_nlp_put(ndlp); 2819 continue; 2820 } 2821 2822 if (ndlp->nlp_type & NLP_FABRIC) 2823 lpfc_disc_state_machine(vport, ndlp, NULL, 2824 NLP_EVT_DEVICE_RECOVERY); 2825 2826 lpfc_disc_state_machine(vport, ndlp, NULL, 2827 NLP_EVT_DEVICE_RM); 2828 } 2829 2830 /* At this point, ALL ndlp's should be gone 2831 * because of the previous NLP_EVT_DEVICE_RM. 2832 * Lets wait for this to happen, if needed. 2833 */ 2834 while (!list_empty(&vport->fc_nodes)) { 2835 if (i++ > 3000) { 2836 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2837 "0233 Nodelist not empty\n"); 2838 list_for_each_entry_safe(ndlp, next_ndlp, 2839 &vport->fc_nodes, nlp_listp) { 2840 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2841 LOG_NODE, 2842 "0282 did:x%x ndlp:x%px " 2843 "usgmap:x%x refcnt:%d\n", 2844 ndlp->nlp_DID, (void *)ndlp, 2845 ndlp->nlp_usg_map, 2846 kref_read(&ndlp->kref)); 2847 } 2848 break; 2849 } 2850 2851 /* Wait for any activity on ndlps to settle */ 2852 msleep(10); 2853 } 2854 lpfc_cleanup_vports_rrqs(vport, NULL); 2855 } 2856 2857 /** 2858 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2859 * @vport: pointer to a virtual N_Port data structure. 
2860 * 2861 * This routine stops all the timers associated with a @vport. This function 2862 * is invoked before disabling or deleting a @vport. Note that the physical 2863 * port is treated as @vport 0. 2864 **/ 2865 void 2866 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2867 { 2868 del_timer_sync(&vport->els_tmofunc); 2869 del_timer_sync(&vport->delayed_disc_tmo); 2870 lpfc_can_disctmo(vport); 2871 return; 2872 } 2873 2874 /** 2875 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2876 * @phba: pointer to lpfc hba data structure. 2877 * 2878 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. The 2879 * caller of this routine should already hold the host lock. 2880 **/ 2881 void 2882 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2883 { 2884 /* Clear pending FCF rediscovery wait flag */ 2885 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2886 2887 /* Now, try to stop the timer */ 2888 del_timer(&phba->fcf.redisc_wait); 2889 } 2890 2891 /** 2892 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2893 * @phba: pointer to lpfc hba data structure. 2894 * 2895 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. It 2896 * checks whether the FCF rediscovery wait timer is pending with the host 2897 * lock held before proceeding with disabling the timer and clearing the 2898 * wait timer pending flag. 2899 **/ 2900 void 2901 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2902 { 2903 spin_lock_irq(&phba->hbalock); 2904 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2905 /* FCF rediscovery timer already fired or stopped */ 2906 spin_unlock_irq(&phba->hbalock); 2907 return; 2908 } 2909 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2910 /* Clear failover in progress flags */ 2911 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2912 spin_unlock_irq(&phba->hbalock); 2913 } 2914 2915 /** 2916 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2917 * @phba: pointer to lpfc hba data structure. 2918 * 2919 * This routine stops all the timers associated with an HBA. This function is 2920 * invoked before either putting an HBA offline or unloading the driver. 2921 **/ 2922 void 2923 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2924 { 2925 if (phba->pport) 2926 lpfc_stop_vport_timers(phba->pport); 2927 cancel_delayed_work_sync(&phba->eq_delay_work); 2928 del_timer_sync(&phba->sli.mbox_tmo); 2929 del_timer_sync(&phba->fabric_block_timer); 2930 del_timer_sync(&phba->eratt_poll); 2931 del_timer_sync(&phba->hb_tmofunc); 2932 if (phba->sli_rev == LPFC_SLI_REV4) { 2933 del_timer_sync(&phba->rrq_tmr); 2934 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2935 } 2936 phba->hb_outstanding = 0; 2937 2938 switch (phba->pci_dev_grp) { 2939 case LPFC_PCI_DEV_LP: 2940 /* Stop any LightPulse device specific driver timers */ 2941 del_timer_sync(&phba->fcp_poll_timer); 2942 break; 2943 case LPFC_PCI_DEV_OC: 2944 /* Stop any OneConnect device specific driver timers */ 2945 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2946 break; 2947 default: 2948 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2949 "0297 Invalid device group (x%x)\n", 2950 phba->pci_dev_grp); 2951 break; 2952 } 2953 return; 2954 } 2955 2956 /** 2957 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked 2958 * @phba: pointer to lpfc hba data structure. 2959 * @mbx_action: flag for mailbox shutdown action. 2960 * 2961 * This routine marks an HBA's management interface as blocked.
Once the HBA's 2962 * management interface is marked as blocked, all user space access to 2963 * the HBA, whether from the sysfs interface or the libdfc interface, will 2964 * be blocked. The HBA is set to block the management interface when the 2965 * driver prepares the HBA interface for online or offline. 2966 **/ 2967 static void 2968 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2969 { 2970 unsigned long iflag; 2971 uint8_t actcmd = MBX_HEARTBEAT; 2972 unsigned long timeout; 2973 2974 spin_lock_irqsave(&phba->hbalock, iflag); 2975 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2976 spin_unlock_irqrestore(&phba->hbalock, iflag); 2977 if (mbx_action == LPFC_MBX_NO_WAIT) 2978 return; 2979 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2980 spin_lock_irqsave(&phba->hbalock, iflag); 2981 if (phba->sli.mbox_active) { 2982 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2983 /* Determine how long we might wait for the active mailbox 2984 * command to be gracefully completed by firmware. 2985 */ 2986 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2987 phba->sli.mbox_active) * 1000) + jiffies; 2988 } 2989 spin_unlock_irqrestore(&phba->hbalock, iflag); 2990 2991 /* Wait for the outstanding mailbox command to complete */ 2992 while (phba->sli.mbox_active) { 2993 /* Check active mailbox complete status every 2ms */ 2994 msleep(2); 2995 if (time_after(jiffies, timeout)) { 2996 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2997 "2813 Mgmt IO is Blocked %x " 2998 "- mbox cmd %x still active\n", 2999 phba->sli.sli_flag, actcmd); 3000 break; 3001 } 3002 } 3003 } 3004 3005 /** 3006 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3007 * @phba: pointer to lpfc hba data structure. 3008 * 3009 * Allocate RPIs for all active remote nodes. This is needed whenever 3010 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3011 * is to fix up the temporary RPI assignments. 3012 **/ 3013 void 3014 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3015 { 3016 struct lpfc_nodelist *ndlp, *next_ndlp; 3017 struct lpfc_vport **vports; 3018 int i, rpi; 3019 unsigned long flags; 3020 3021 if (phba->sli_rev != LPFC_SLI_REV4) 3022 return; 3023 3024 vports = lpfc_create_vport_work_array(phba); 3025 if (vports == NULL) 3026 return; 3027 3028 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3029 if (vports[i]->load_flag & FC_UNLOADING) 3030 continue; 3031 3032 list_for_each_entry_safe(ndlp, next_ndlp, 3033 &vports[i]->fc_nodes, 3034 nlp_listp) { 3035 if (!NLP_CHK_NODE_ACT(ndlp)) 3036 continue; 3037 rpi = lpfc_sli4_alloc_rpi(phba); 3038 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3039 spin_lock_irqsave(&phba->ndlp_lock, flags); 3040 NLP_CLR_NODE_ACT(ndlp); 3041 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3042 continue; 3043 } 3044 ndlp->nlp_rpi = rpi; 3045 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3046 LOG_NODE | LOG_DISCOVERY, 3047 "0009 Assign RPI x%x to ndlp x%px " 3048 "DID:x%06x flg:x%x map:x%x\n", 3049 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, 3050 ndlp->nlp_flag, ndlp->nlp_usg_map); 3051 } 3052 } 3053 lpfc_destroy_vport_work_array(phba, vports); 3054 } 3055 3056 /** 3057 * lpfc_create_expedite_pool - create expedite pool 3058 * @phba: pointer to lpfc hba data structure. 3059 * 3060 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3061 * to the expedite pool and marks them as expedite.
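 * At most XRI_BATCH buffers are taken from HWQ 0's put list and flagged
 * with ->expedite so that a small reserve remains available for expedited
 * commands; lpfc_destroy_expedite_pool() returns them when the pools are
 * torn down.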
3061 **/ 3062 static void lpfc_create_expedite_pool(struct lpfc_hba *phba) 3063 { 3064 struct lpfc_sli4_hdw_queue *qp; 3065 struct lpfc_io_buf *lpfc_ncmd; 3066 struct lpfc_io_buf *lpfc_ncmd_next; 3067 struct lpfc_epd_pool *epd_pool; 3068 unsigned long iflag; 3069 3070 epd_pool = &phba->epd_pool; 3071 qp = &phba->sli4_hba.hdwq[0]; 3072 3073 spin_lock_init(&epd_pool->lock); 3074 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3075 spin_lock(&epd_pool->lock); 3076 INIT_LIST_HEAD(&epd_pool->list); 3077 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3078 &qp->lpfc_io_buf_list_put, list) { 3079 list_move_tail(&lpfc_ncmd->list, &epd_pool->list); 3080 lpfc_ncmd->expedite = true; 3081 qp->put_io_bufs--; 3082 epd_pool->count++; 3083 if (epd_pool->count >= XRI_BATCH) 3084 break; 3085 } 3086 spin_unlock(&epd_pool->lock); 3087 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3088 } 3089 3090 /** 3091 * lpfc_destroy_expedite_pool - destroy expedite pool 3092 * @phba: pointer to lpfc hba data structure. 3093 * 3094 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put 3095 * of HWQ 0 and clears the expedite mark. 3096 **/ 3097 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) 3098 { 3099 struct lpfc_sli4_hdw_queue *qp; 3100 struct lpfc_io_buf *lpfc_ncmd; 3101 struct lpfc_io_buf *lpfc_ncmd_next; 3102 struct lpfc_epd_pool *epd_pool; 3103 unsigned long iflag; 3104 3105 epd_pool = &phba->epd_pool; 3106 qp = &phba->sli4_hba.hdwq[0]; 3107 3108 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3109 spin_lock(&epd_pool->lock); 3110 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3111 &epd_pool->list, list) { 3112 list_move_tail(&lpfc_ncmd->list, 3113 &qp->lpfc_io_buf_list_put); 3114 lpfc_ncmd->flags = false; 3115 qp->put_io_bufs++; 3116 epd_pool->count--; 3117 } 3118 spin_unlock(&epd_pool->lock); 3119 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3120 } 3121 3122 /** 3123 * lpfc_create_multixri_pools - create multi-XRI pools 3124 * @phba: pointer to lpfc hba data structure. 3125 * 3126 * This routine initializes the public and private pools for each HWQ. It then 3127 * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low 3128 * watermarks are also initialized.
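 * Each HWQ's xri_limit is io_xri_cnt / cfg_hdw_queue; the private pool
 * high watermark is half of that limit and the low watermark is XRI_BATCH.
 * For example (illustrative numbers only), 2048 common XRIs spread across
 * 16 hardware queues give an xri_limit of 128 per queue and a pvt_pool
 * high watermark of 64.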
3129 **/ 3130 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3131 { 3132 u32 i, j; 3133 u32 hwq_count; 3134 u32 count_per_hwq; 3135 struct lpfc_io_buf *lpfc_ncmd; 3136 struct lpfc_io_buf *lpfc_ncmd_next; 3137 unsigned long iflag; 3138 struct lpfc_sli4_hdw_queue *qp; 3139 struct lpfc_multixri_pool *multixri_pool; 3140 struct lpfc_pbl_pool *pbl_pool; 3141 struct lpfc_pvt_pool *pvt_pool; 3142 3143 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3144 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3145 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3146 phba->sli4_hba.io_xri_cnt); 3147 3148 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3149 lpfc_create_expedite_pool(phba); 3150 3151 hwq_count = phba->cfg_hdw_queue; 3152 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3153 3154 for (i = 0; i < hwq_count; i++) { 3155 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3156 3157 if (!multixri_pool) { 3158 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3159 "1238 Failed to allocate memory for " 3160 "multixri_pool\n"); 3161 3162 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3163 lpfc_destroy_expedite_pool(phba); 3164 3165 j = 0; 3166 while (j < i) { 3167 qp = &phba->sli4_hba.hdwq[j]; 3168 kfree(qp->p_multixri_pool); 3169 j++; 3170 } 3171 phba->cfg_xri_rebalancing = 0; 3172 return; 3173 } 3174 3175 qp = &phba->sli4_hba.hdwq[i]; 3176 qp->p_multixri_pool = multixri_pool; 3177 3178 multixri_pool->xri_limit = count_per_hwq; 3179 multixri_pool->rrb_next_hwqid = i; 3180 3181 /* Deal with public free xri pool */ 3182 pbl_pool = &multixri_pool->pbl_pool; 3183 spin_lock_init(&pbl_pool->lock); 3184 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3185 spin_lock(&pbl_pool->lock); 3186 INIT_LIST_HEAD(&pbl_pool->list); 3187 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3188 &qp->lpfc_io_buf_list_put, list) { 3189 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3190 qp->put_io_bufs--; 3191 pbl_pool->count++; 3192 } 3193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3194 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3195 pbl_pool->count, i); 3196 spin_unlock(&pbl_pool->lock); 3197 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3198 3199 /* Deal with private free xri pool */ 3200 pvt_pool = &multixri_pool->pvt_pool; 3201 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3202 pvt_pool->low_watermark = XRI_BATCH; 3203 spin_lock_init(&pvt_pool->lock); 3204 spin_lock_irqsave(&pvt_pool->lock, iflag); 3205 INIT_LIST_HEAD(&pvt_pool->list); 3206 pvt_pool->count = 0; 3207 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3208 } 3209 } 3210 3211 /** 3212 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3213 * @phba: pointer to lpfc hba data structure. 3214 * 3215 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
3216 **/ 3217 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3218 { 3219 u32 i; 3220 u32 hwq_count; 3221 struct lpfc_io_buf *lpfc_ncmd; 3222 struct lpfc_io_buf *lpfc_ncmd_next; 3223 unsigned long iflag; 3224 struct lpfc_sli4_hdw_queue *qp; 3225 struct lpfc_multixri_pool *multixri_pool; 3226 struct lpfc_pbl_pool *pbl_pool; 3227 struct lpfc_pvt_pool *pvt_pool; 3228 3229 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3230 lpfc_destroy_expedite_pool(phba); 3231 3232 if (!(phba->pport->load_flag & FC_UNLOADING)) 3233 lpfc_sli_flush_io_rings(phba); 3234 3235 hwq_count = phba->cfg_hdw_queue; 3236 3237 for (i = 0; i < hwq_count; i++) { 3238 qp = &phba->sli4_hba.hdwq[i]; 3239 multixri_pool = qp->p_multixri_pool; 3240 if (!multixri_pool) 3241 continue; 3242 3243 qp->p_multixri_pool = NULL; 3244 3245 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3246 3247 /* Deal with public free xri pool */ 3248 pbl_pool = &multixri_pool->pbl_pool; 3249 spin_lock(&pbl_pool->lock); 3250 3251 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3252 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3253 pbl_pool->count, i); 3254 3255 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3256 &pbl_pool->list, list) { 3257 list_move_tail(&lpfc_ncmd->list, 3258 &qp->lpfc_io_buf_list_put); 3259 qp->put_io_bufs++; 3260 pbl_pool->count--; 3261 } 3262 3263 INIT_LIST_HEAD(&pbl_pool->list); 3264 pbl_pool->count = 0; 3265 3266 spin_unlock(&pbl_pool->lock); 3267 3268 /* Deal with private free xri pool */ 3269 pvt_pool = &multixri_pool->pvt_pool; 3270 spin_lock(&pvt_pool->lock); 3271 3272 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3273 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3274 pvt_pool->count, i); 3275 3276 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3277 &pvt_pool->list, list) { 3278 list_move_tail(&lpfc_ncmd->list, 3279 &qp->lpfc_io_buf_list_put); 3280 qp->put_io_bufs++; 3281 pvt_pool->count--; 3282 } 3283 3284 INIT_LIST_HEAD(&pvt_pool->list); 3285 pvt_pool->count = 0; 3286 3287 spin_unlock(&pvt_pool->lock); 3288 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3289 3290 kfree(multixri_pool); 3291 } 3292 } 3293 3294 /** 3295 * lpfc_online - Initialize and bring a HBA online 3296 * @phba: pointer to lpfc hba data structure. 3297 * 3298 * This routine initializes the HBA and brings a HBA online. During this 3299 * process, the management interface is blocked to prevent user space access 3300 * to the HBA interfering with the driver initialization. 3301 * 3302 * Return codes 3303 * 0 - successful 3304 * 1 - failed 3305 **/ 3306 int 3307 lpfc_online(struct lpfc_hba *phba) 3308 { 3309 struct lpfc_vport *vport; 3310 struct lpfc_vport **vports; 3311 int i, error = 0; 3312 bool vpis_cleared = false; 3313 3314 if (!phba) 3315 return 0; 3316 vport = phba->pport; 3317 3318 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3319 return 0; 3320 3321 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3322 "0458 Bring Adapter online\n"); 3323 3324 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3325 3326 if (phba->sli_rev == LPFC_SLI_REV4) { 3327 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3328 lpfc_unblock_mgmt_io(phba); 3329 return 1; 3330 } 3331 spin_lock_irq(&phba->hbalock); 3332 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3333 vpis_cleared = true; 3334 spin_unlock_irq(&phba->hbalock); 3335 3336 /* Reestablish the local initiator port. 3337 * The offline process destroyed the previous lport. 
3338 */ 3339 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3340 !phba->nvmet_support) { 3341 error = lpfc_nvme_create_localport(phba->pport); 3342 if (error) 3343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3344 "6132 NVME restore reg failed " 3345 "on nvmei error x%x\n", error); 3346 } 3347 } else { 3348 lpfc_sli_queue_init(phba); 3349 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3350 lpfc_unblock_mgmt_io(phba); 3351 return 1; 3352 } 3353 } 3354 3355 vports = lpfc_create_vport_work_array(phba); 3356 if (vports != NULL) { 3357 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3358 struct Scsi_Host *shost; 3359 shost = lpfc_shost_from_vport(vports[i]); 3360 spin_lock_irq(shost->host_lock); 3361 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3362 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3363 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3364 if (phba->sli_rev == LPFC_SLI_REV4) { 3365 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3366 if ((vpis_cleared) && 3367 (vports[i]->port_type != 3368 LPFC_PHYSICAL_PORT)) 3369 vports[i]->vpi = 0; 3370 } 3371 spin_unlock_irq(shost->host_lock); 3372 } 3373 } 3374 lpfc_destroy_vport_work_array(phba, vports); 3375 3376 if (phba->cfg_xri_rebalancing) 3377 lpfc_create_multixri_pools(phba); 3378 3379 lpfc_cpuhp_add(phba); 3380 3381 lpfc_unblock_mgmt_io(phba); 3382 return 0; 3383 } 3384 3385 /** 3386 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3387 * @phba: pointer to lpfc hba data structure. 3388 * 3389 * This routine marks a HBA's management interface as not blocked. Once the 3390 * HBA's management interface is marked as not blocked, all the user space 3391 * access to the HBA, whether they are from sysfs interface or libdfc 3392 * interface will be allowed. The HBA is set to block the management interface 3393 * when the driver prepares the HBA interface for online or offline and then 3394 * set to unblock the management interface afterwards. 3395 **/ 3396 void 3397 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3398 { 3399 unsigned long iflag; 3400 3401 spin_lock_irqsave(&phba->hbalock, iflag); 3402 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3403 spin_unlock_irqrestore(&phba->hbalock, iflag); 3404 } 3405 3406 /** 3407 * lpfc_offline_prep - Prepare a HBA to be brought offline 3408 * @phba: pointer to lpfc hba data structure. 3409 * 3410 * This routine is invoked to prepare a HBA to be brought offline. It performs 3411 * unregistration login to all the nodes on all vports and flushes the mailbox 3412 * queue to make it ready to be brought offline. 
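 * For SLI4 ports the RPI of every active node is freed here with
 * lpfc_sli4_free_rpi() and marked invalid; fresh RPIs are assigned by
 * lpfc_sli4_node_prep() when the adapter port comes back online.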
3413 **/ 3414 void 3415 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3416 { 3417 struct lpfc_vport *vport = phba->pport; 3418 struct lpfc_nodelist *ndlp, *next_ndlp; 3419 struct lpfc_vport **vports; 3420 struct Scsi_Host *shost; 3421 int i; 3422 3423 if (vport->fc_flag & FC_OFFLINE_MODE) 3424 return; 3425 3426 lpfc_block_mgmt_io(phba, mbx_action); 3427 3428 lpfc_linkdown(phba); 3429 3430 /* Issue an unreg_login to all nodes on all vports */ 3431 vports = lpfc_create_vport_work_array(phba); 3432 if (vports != NULL) { 3433 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3434 if (vports[i]->load_flag & FC_UNLOADING) 3435 continue; 3436 shost = lpfc_shost_from_vport(vports[i]); 3437 spin_lock_irq(shost->host_lock); 3438 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3439 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3440 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3441 spin_unlock_irq(shost->host_lock); 3442 3443 shost = lpfc_shost_from_vport(vports[i]); 3444 list_for_each_entry_safe(ndlp, next_ndlp, 3445 &vports[i]->fc_nodes, 3446 nlp_listp) { 3447 if ((!NLP_CHK_NODE_ACT(ndlp)) || 3448 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 3449 /* Driver must assume RPI is invalid for 3450 * any unused or inactive node. 3451 */ 3452 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3453 continue; 3454 } 3455 3456 if (ndlp->nlp_type & NLP_FABRIC) { 3457 lpfc_disc_state_machine(vports[i], ndlp, 3458 NULL, NLP_EVT_DEVICE_RECOVERY); 3459 lpfc_disc_state_machine(vports[i], ndlp, 3460 NULL, NLP_EVT_DEVICE_RM); 3461 } 3462 spin_lock_irq(shost->host_lock); 3463 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3464 spin_unlock_irq(shost->host_lock); 3465 /* 3466 * Whenever an SLI4 port goes offline, free the 3467 * RPI. Get a new RPI when the adapter port 3468 * comes back online. 3469 */ 3470 if (phba->sli_rev == LPFC_SLI_REV4) { 3471 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3472 LOG_NODE | LOG_DISCOVERY, 3473 "0011 Free RPI x%x on " 3474 "ndlp:x%px did x%x " 3475 "usgmap:x%x\n", 3476 ndlp->nlp_rpi, ndlp, 3477 ndlp->nlp_DID, 3478 ndlp->nlp_usg_map); 3479 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3480 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3481 } 3482 lpfc_unreg_rpi(vports[i], ndlp); 3483 } 3484 } 3485 } 3486 lpfc_destroy_vport_work_array(phba, vports); 3487 3488 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3489 3490 if (phba->wq) 3491 flush_workqueue(phba->wq); 3492 } 3493 3494 /** 3495 * lpfc_offline - Bring a HBA offline 3496 * @phba: pointer to lpfc hba data structure. 3497 * 3498 * This routine actually brings a HBA offline. It stops all the timers 3499 * associated with the HBA, brings down the SLI layer, and eventually 3500 * marks the HBA as in offline state for the upper layer protocol. 3501 **/ 3502 void 3503 lpfc_offline(struct lpfc_hba *phba) 3504 { 3505 struct Scsi_Host *shost; 3506 struct lpfc_vport **vports; 3507 int i; 3508 3509 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3510 return; 3511 3512 /* stop port and all timers associated with this hba */ 3513 lpfc_stop_port(phba); 3514 3515 /* Tear down the local and target port registrations. The 3516 * nvme transports need to cleanup. 
3517 */ 3518 lpfc_nvmet_destroy_targetport(phba); 3519 lpfc_nvme_destroy_localport(phba->pport); 3520 3521 vports = lpfc_create_vport_work_array(phba); 3522 if (vports != NULL) 3523 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3524 lpfc_stop_vport_timers(vports[i]); 3525 lpfc_destroy_vport_work_array(phba, vports); 3526 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3527 "0460 Bring Adapter offline\n"); 3528 /* Bring down the SLI Layer and cleanup. The HBA is offline 3529 now. */ 3530 lpfc_sli_hba_down(phba); 3531 spin_lock_irq(&phba->hbalock); 3532 phba->work_ha = 0; 3533 spin_unlock_irq(&phba->hbalock); 3534 vports = lpfc_create_vport_work_array(phba); 3535 if (vports != NULL) 3536 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3537 shost = lpfc_shost_from_vport(vports[i]); 3538 spin_lock_irq(shost->host_lock); 3539 vports[i]->work_port_events = 0; 3540 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3541 spin_unlock_irq(shost->host_lock); 3542 } 3543 lpfc_destroy_vport_work_array(phba, vports); 3544 __lpfc_cpuhp_remove(phba); 3545 3546 if (phba->cfg_xri_rebalancing) 3547 lpfc_destroy_multixri_pools(phba); 3548 } 3549 3550 /** 3551 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3552 * @phba: pointer to lpfc hba data structure. 3553 * 3554 * This routine is to free all the SCSI buffers and IOCBs from the driver 3555 * list back to kernel. It is called from lpfc_pci_remove_one to free 3556 * the internal resources before the device is removed from the system. 3557 **/ 3558 static void 3559 lpfc_scsi_free(struct lpfc_hba *phba) 3560 { 3561 struct lpfc_io_buf *sb, *sb_next; 3562 3563 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3564 return; 3565 3566 spin_lock_irq(&phba->hbalock); 3567 3568 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3569 3570 spin_lock(&phba->scsi_buf_list_put_lock); 3571 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3572 list) { 3573 list_del(&sb->list); 3574 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3575 sb->dma_handle); 3576 kfree(sb); 3577 phba->total_scsi_bufs--; 3578 } 3579 spin_unlock(&phba->scsi_buf_list_put_lock); 3580 3581 spin_lock(&phba->scsi_buf_list_get_lock); 3582 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3583 list) { 3584 list_del(&sb->list); 3585 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3586 sb->dma_handle); 3587 kfree(sb); 3588 phba->total_scsi_bufs--; 3589 } 3590 spin_unlock(&phba->scsi_buf_list_get_lock); 3591 spin_unlock_irq(&phba->hbalock); 3592 } 3593 3594 /** 3595 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3596 * @phba: pointer to lpfc hba data structure. 3597 * 3598 * This routine is to free all the IO buffers and IOCBs from the driver 3599 * list back to kernel. It is called from lpfc_pci_remove_one to free 3600 * the internal resources before the device is removed from the system. 3601 **/ 3602 void 3603 lpfc_io_free(struct lpfc_hba *phba) 3604 { 3605 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3606 struct lpfc_sli4_hdw_queue *qp; 3607 int idx; 3608 3609 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3610 qp = &phba->sli4_hba.hdwq[idx]; 3611 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3612 spin_lock(&qp->io_buf_list_put_lock); 3613 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3614 &qp->lpfc_io_buf_list_put, 3615 list) { 3616 list_del(&lpfc_ncmd->list); 3617 qp->put_io_bufs--; 3618 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3619 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3620 if (phba->cfg_xpsgl && !phba->nvmet_support) 3621 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3622 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3623 kfree(lpfc_ncmd); 3624 qp->total_io_bufs--; 3625 } 3626 spin_unlock(&qp->io_buf_list_put_lock); 3627 3628 spin_lock(&qp->io_buf_list_get_lock); 3629 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3630 &qp->lpfc_io_buf_list_get, 3631 list) { 3632 list_del(&lpfc_ncmd->list); 3633 qp->get_io_bufs--; 3634 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3635 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3636 if (phba->cfg_xpsgl && !phba->nvmet_support) 3637 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3638 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3639 kfree(lpfc_ncmd); 3640 qp->total_io_bufs--; 3641 } 3642 spin_unlock(&qp->io_buf_list_get_lock); 3643 } 3644 } 3645 3646 /** 3647 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3648 * @phba: pointer to lpfc hba data structure. 3649 * 3650 * This routine first calculates the sizes of the current els and allocated 3651 * scsi sgl lists, and then goes through all sgls to updates the physical 3652 * XRIs assigned due to port function reset. During port initialization, the 3653 * current els and allocated scsi sgl lists are 0s. 3654 * 3655 * Return codes 3656 * 0 - successful (for now, it always returns 0) 3657 **/ 3658 int 3659 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3660 { 3661 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3662 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3663 LIST_HEAD(els_sgl_list); 3664 int rc; 3665 3666 /* 3667 * update on pci function's els xri-sgl list 3668 */ 3669 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3670 3671 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3672 /* els xri-sgl expanded */ 3673 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3674 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3675 "3157 ELS xri-sgl count increased from " 3676 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3677 els_xri_cnt); 3678 /* allocate the additional els sgls */ 3679 for (i = 0; i < xri_cnt; i++) { 3680 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3681 GFP_KERNEL); 3682 if (sglq_entry == NULL) { 3683 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3684 "2562 Failure to allocate an " 3685 "ELS sgl entry:%d\n", i); 3686 rc = -ENOMEM; 3687 goto out_free_mem; 3688 } 3689 sglq_entry->buff_type = GEN_BUFF_TYPE; 3690 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3691 &sglq_entry->phys); 3692 if (sglq_entry->virt == NULL) { 3693 kfree(sglq_entry); 3694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3695 "2563 Failure to allocate an " 3696 "ELS mbuf:%d\n", i); 3697 rc = -ENOMEM; 3698 goto out_free_mem; 3699 } 3700 sglq_entry->sgl = sglq_entry->virt; 3701 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3702 sglq_entry->state = SGL_FREED; 3703 list_add_tail(&sglq_entry->list, &els_sgl_list); 3704 } 3705 spin_lock_irq(&phba->hbalock); 3706 spin_lock(&phba->sli4_hba.sgl_list_lock); 3707 list_splice_init(&els_sgl_list, 3708 &phba->sli4_hba.lpfc_els_sgl_list); 3709 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3710 spin_unlock_irq(&phba->hbalock); 3711 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3712 /* els xri-sgl shrinked */ 3713 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 
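/* e.g. a drop from 256 to 200 required ELS XRIs leaves xri_cnt = 56
 * surplus sglq entries to be unhooked and freed below
 */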
3714 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3715 "3158 ELS xri-sgl count decreased from "
3716 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3717 els_xri_cnt);
3718 spin_lock_irq(&phba->hbalock);
3719 spin_lock(&phba->sli4_hba.sgl_list_lock);
3720 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3721 &els_sgl_list);
3722 /* release extra els sgls from list */
3723 for (i = 0; i < xri_cnt; i++) {
3724 list_remove_head(&els_sgl_list,
3725 sglq_entry, struct lpfc_sglq, list);
3726 if (sglq_entry) {
3727 __lpfc_mbuf_free(phba, sglq_entry->virt,
3728 sglq_entry->phys);
3729 kfree(sglq_entry);
3730 }
3731 }
3732 list_splice_init(&els_sgl_list,
3733 &phba->sli4_hba.lpfc_els_sgl_list);
3734 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3735 spin_unlock_irq(&phba->hbalock);
3736 } else
3737 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3738 "3163 ELS xri-sgl count unchanged: %d\n",
3739 els_xri_cnt);
3740 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3741
3742 /* update xris to els sgls on the list */
3743 sglq_entry = NULL;
3744 sglq_entry_next = NULL;
3745 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3746 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3747 lxri = lpfc_sli4_next_xritag(phba);
3748 if (lxri == NO_XRI) {
3749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3750 "2400 Failed to allocate xri for "
3751 "ELS sgl\n");
3752 rc = -ENOMEM;
3753 goto out_free_mem;
3754 }
3755 sglq_entry->sli4_lxritag = lxri;
3756 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3757 }
3758 return 0;
3759
3760 out_free_mem:
3761 lpfc_free_els_sgl_list(phba);
3762 return rc;
3763 }
3764
3765 /**
3766 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3767 * @phba: pointer to lpfc hba data structure.
3768 *
3769 * This routine first calculates the size of the current nvmet xri-sgl list,
3770 * and then goes through all sgls to update the physical XRIs assigned due
3771 * to port function reset. During port initialization, the current nvmet
3772 * sgl list is empty.
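 * For NVMET, every XRI not reserved for ELS processing is dedicated to IO,
 * i.e. nvmet_xri_cnt = max_cfg_param.max_xri - els_xri_cnt.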
3773 * 3774 * Return codes 3775 * 0 - successful (for now, it always returns 0) 3776 **/ 3777 int 3778 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3779 { 3780 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3781 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3782 uint16_t nvmet_xri_cnt; 3783 LIST_HEAD(nvmet_sgl_list); 3784 int rc; 3785 3786 /* 3787 * update on pci function's nvmet xri-sgl list 3788 */ 3789 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3790 3791 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3792 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3793 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3794 /* els xri-sgl expanded */ 3795 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3796 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3797 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3798 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3799 /* allocate the additional nvmet sgls */ 3800 for (i = 0; i < xri_cnt; i++) { 3801 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3802 GFP_KERNEL); 3803 if (sglq_entry == NULL) { 3804 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3805 "6303 Failure to allocate an " 3806 "NVMET sgl entry:%d\n", i); 3807 rc = -ENOMEM; 3808 goto out_free_mem; 3809 } 3810 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3811 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3812 &sglq_entry->phys); 3813 if (sglq_entry->virt == NULL) { 3814 kfree(sglq_entry); 3815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3816 "6304 Failure to allocate an " 3817 "NVMET buf:%d\n", i); 3818 rc = -ENOMEM; 3819 goto out_free_mem; 3820 } 3821 sglq_entry->sgl = sglq_entry->virt; 3822 memset(sglq_entry->sgl, 0, 3823 phba->cfg_sg_dma_buf_size); 3824 sglq_entry->state = SGL_FREED; 3825 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3826 } 3827 spin_lock_irq(&phba->hbalock); 3828 spin_lock(&phba->sli4_hba.sgl_list_lock); 3829 list_splice_init(&nvmet_sgl_list, 3830 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3831 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3832 spin_unlock_irq(&phba->hbalock); 3833 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3834 /* nvmet xri-sgl shrunk */ 3835 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3836 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3837 "6305 NVMET xri-sgl count decreased from " 3838 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3839 nvmet_xri_cnt); 3840 spin_lock_irq(&phba->hbalock); 3841 spin_lock(&phba->sli4_hba.sgl_list_lock); 3842 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3843 &nvmet_sgl_list); 3844 /* release extra nvmet sgls from list */ 3845 for (i = 0; i < xri_cnt; i++) { 3846 list_remove_head(&nvmet_sgl_list, 3847 sglq_entry, struct lpfc_sglq, list); 3848 if (sglq_entry) { 3849 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3850 sglq_entry->phys); 3851 kfree(sglq_entry); 3852 } 3853 } 3854 list_splice_init(&nvmet_sgl_list, 3855 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3856 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3857 spin_unlock_irq(&phba->hbalock); 3858 } else 3859 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3860 "6306 NVMET xri-sgl count unchanged: %d\n", 3861 nvmet_xri_cnt); 3862 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3863 3864 /* update xris to nvmet sgls on the list */ 3865 sglq_entry = NULL; 3866 sglq_entry_next = NULL; 3867 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3868 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3869 lxri = lpfc_sli4_next_xritag(phba); 3870 if (lxri == NO_XRI) { 3871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3872 "6307 Failed 
to allocate xri for " 3873 "NVMET sgl\n"); 3874 rc = -ENOMEM; 3875 goto out_free_mem; 3876 } 3877 sglq_entry->sli4_lxritag = lxri; 3878 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3879 } 3880 return 0; 3881 3882 out_free_mem: 3883 lpfc_free_nvmet_sgl_list(phba); 3884 return rc; 3885 } 3886 3887 int 3888 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 3889 { 3890 LIST_HEAD(blist); 3891 struct lpfc_sli4_hdw_queue *qp; 3892 struct lpfc_io_buf *lpfc_cmd; 3893 struct lpfc_io_buf *iobufp, *prev_iobufp; 3894 int idx, cnt, xri, inserted; 3895 3896 cnt = 0; 3897 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3898 qp = &phba->sli4_hba.hdwq[idx]; 3899 spin_lock_irq(&qp->io_buf_list_get_lock); 3900 spin_lock(&qp->io_buf_list_put_lock); 3901 3902 /* Take everything off the get and put lists */ 3903 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 3904 list_splice(&qp->lpfc_io_buf_list_put, &blist); 3905 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 3906 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 3907 cnt += qp->get_io_bufs + qp->put_io_bufs; 3908 qp->get_io_bufs = 0; 3909 qp->put_io_bufs = 0; 3910 qp->total_io_bufs = 0; 3911 spin_unlock(&qp->io_buf_list_put_lock); 3912 spin_unlock_irq(&qp->io_buf_list_get_lock); 3913 } 3914 3915 /* 3916 * Take IO buffers off blist and put on cbuf sorted by XRI. 3917 * This is because POST_SGL takes a sequential range of XRIs 3918 * to post to the firmware. 3919 */ 3920 for (idx = 0; idx < cnt; idx++) { 3921 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 3922 if (!lpfc_cmd) 3923 return cnt; 3924 if (idx == 0) { 3925 list_add_tail(&lpfc_cmd->list, cbuf); 3926 continue; 3927 } 3928 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 3929 inserted = 0; 3930 prev_iobufp = NULL; 3931 list_for_each_entry(iobufp, cbuf, list) { 3932 if (xri < iobufp->cur_iocbq.sli4_xritag) { 3933 if (prev_iobufp) 3934 list_add(&lpfc_cmd->list, 3935 &prev_iobufp->list); 3936 else 3937 list_add(&lpfc_cmd->list, cbuf); 3938 inserted = 1; 3939 break; 3940 } 3941 prev_iobufp = iobufp; 3942 } 3943 if (!inserted) 3944 list_add_tail(&lpfc_cmd->list, cbuf); 3945 } 3946 return cnt; 3947 } 3948 3949 int 3950 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 3951 { 3952 struct lpfc_sli4_hdw_queue *qp; 3953 struct lpfc_io_buf *lpfc_cmd; 3954 int idx, cnt; 3955 3956 qp = phba->sli4_hba.hdwq; 3957 cnt = 0; 3958 while (!list_empty(cbuf)) { 3959 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3960 list_remove_head(cbuf, lpfc_cmd, 3961 struct lpfc_io_buf, list); 3962 if (!lpfc_cmd) 3963 return cnt; 3964 cnt++; 3965 qp = &phba->sli4_hba.hdwq[idx]; 3966 lpfc_cmd->hdwq_no = idx; 3967 lpfc_cmd->hdwq = qp; 3968 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 3969 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 3970 spin_lock(&qp->io_buf_list_put_lock); 3971 list_add_tail(&lpfc_cmd->list, 3972 &qp->lpfc_io_buf_list_put); 3973 qp->put_io_bufs++; 3974 qp->total_io_bufs++; 3975 spin_unlock(&qp->io_buf_list_put_lock); 3976 } 3977 } 3978 return cnt; 3979 } 3980 3981 /** 3982 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 3983 * @phba: pointer to lpfc hba data structure. 3984 * 3985 * This routine first calculates the sizes of the current els and allocated 3986 * scsi sgl lists, and then goes through all sgls to updates the physical 3987 * XRIs assigned due to port function reset. During port initialization, the 3988 * current els and allocated scsi sgl lists are 0s. 
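 * The flow implemented below is roughly:
 *
 *	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);	(gather, XRI-sorted)
 *	... assign a fresh XRI to each buffer ...
 *	lpfc_io_buf_replenish(phba, &io_sgl_list);	(hand back to hdwqs)
 *
 * lpfc_io_buf_flush() keeps the list sorted by XRI because POST_SGL posts
 * sequential XRI ranges to the firmware; lpfc_io_buf_replenish() then
 * redistributes the buffers round-robin across the hardware queues.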
3989 * 3990 * Return codes 3991 * 0 - successful (for now, it always returns 0) 3992 **/ 3993 int 3994 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) 3995 { 3996 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 3997 uint16_t i, lxri, els_xri_cnt; 3998 uint16_t io_xri_cnt, io_xri_max; 3999 LIST_HEAD(io_sgl_list); 4000 int rc, cnt; 4001 4002 /* 4003 * update on pci function's allocated nvme xri-sgl list 4004 */ 4005 4006 /* maximum number of xris available for nvme buffers */ 4007 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4008 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4009 phba->sli4_hba.io_xri_max = io_xri_max; 4010 4011 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4012 "6074 Current allocated XRI sgl count:%d, " 4013 "maximum XRI count:%d\n", 4014 phba->sli4_hba.io_xri_cnt, 4015 phba->sli4_hba.io_xri_max); 4016 4017 cnt = lpfc_io_buf_flush(phba, &io_sgl_list); 4018 4019 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { 4020 /* max nvme xri shrunk below the allocated nvme buffers */ 4021 io_xri_cnt = phba->sli4_hba.io_xri_cnt - 4022 phba->sli4_hba.io_xri_max; 4023 /* release the extra allocated nvme buffers */ 4024 for (i = 0; i < io_xri_cnt; i++) { 4025 list_remove_head(&io_sgl_list, lpfc_ncmd, 4026 struct lpfc_io_buf, list); 4027 if (lpfc_ncmd) { 4028 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4029 lpfc_ncmd->data, 4030 lpfc_ncmd->dma_handle); 4031 kfree(lpfc_ncmd); 4032 } 4033 } 4034 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; 4035 } 4036 4037 /* update xris associated to remaining allocated nvme buffers */ 4038 lpfc_ncmd = NULL; 4039 lpfc_ncmd_next = NULL; 4040 phba->sli4_hba.io_xri_cnt = cnt; 4041 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 4042 &io_sgl_list, list) { 4043 lxri = lpfc_sli4_next_xritag(phba); 4044 if (lxri == NO_XRI) { 4045 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4046 "6075 Failed to allocate xri for " 4047 "nvme buffer\n"); 4048 rc = -ENOMEM; 4049 goto out_free_mem; 4050 } 4051 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 4052 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4053 } 4054 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); 4055 return 0; 4056 4057 out_free_mem: 4058 lpfc_io_free(phba); 4059 return rc; 4060 } 4061 4062 /** 4063 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec 4064 * @vport: The virtual port for which this call being executed. 4065 * @num_to_allocate: The requested number of buffers to allocate. 4066 * 4067 * This routine allocates nvme buffers for device with SLI-4 interface spec, 4068 * the nvme buffer contains all the necessary information needed to initiate 4069 * an I/O. After allocating up to @num_to_allocate IO buffers and put 4070 * them on a list, it post them to the port by using SGL block post. 4071 * 4072 * Return codes: 4073 * int - number of IO buffers that were allocated and posted. 4074 * 0 = failure, less than num_to_alloc is a partial failure. 4075 **/ 4076 int 4077 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4078 { 4079 struct lpfc_io_buf *lpfc_ncmd; 4080 struct lpfc_iocbq *pwqeq; 4081 uint16_t iotag, lxri = 0; 4082 int bcnt, num_posted; 4083 LIST_HEAD(prep_nblist); 4084 LIST_HEAD(post_nblist); 4085 LIST_HEAD(nvme_nblist); 4086 4087 phba->sli4_hba.io_xri_cnt = 0; 4088 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4089 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); 4090 if (!lpfc_ncmd) 4091 break; 4092 /* 4093 * Get memory from the pci pool to map the virt space to 4094 * pci bus space for an I/O. 
The DMA buffer includes the 4095 * number of SGE's necessary to support the sg_tablesize. 4096 */ 4097 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4098 GFP_KERNEL, 4099 &lpfc_ncmd->dma_handle); 4100 if (!lpfc_ncmd->data) { 4101 kfree(lpfc_ncmd); 4102 break; 4103 } 4104 4105 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4106 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4107 } else { 4108 /* 4109 * 4K Page alignment is CRITICAL to BlockGuard, double 4110 * check to be sure. 4111 */ 4112 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4113 (((unsigned long)(lpfc_ncmd->data) & 4114 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4115 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4116 "3369 Memory alignment err: " 4117 "addr=%lx\n", 4118 (unsigned long)lpfc_ncmd->data); 4119 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4120 lpfc_ncmd->data, 4121 lpfc_ncmd->dma_handle); 4122 kfree(lpfc_ncmd); 4123 break; 4124 } 4125 } 4126 4127 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4128 4129 lxri = lpfc_sli4_next_xritag(phba); 4130 if (lxri == NO_XRI) { 4131 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4132 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4133 kfree(lpfc_ncmd); 4134 break; 4135 } 4136 pwqeq = &lpfc_ncmd->cur_iocbq; 4137 4138 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4139 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4140 if (iotag == 0) { 4141 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4142 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4143 kfree(lpfc_ncmd); 4144 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 4145 "6121 Failed to allocate IOTAG for" 4146 " XRI:0x%x\n", lxri); 4147 lpfc_sli4_free_xri(phba, lxri); 4148 break; 4149 } 4150 pwqeq->sli4_lxritag = lxri; 4151 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4152 pwqeq->context1 = lpfc_ncmd; 4153 4154 /* Initialize local short-hand pointers. 
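 * dma_sgl and dma_phys_sgl simply alias the SGL area just allocated from
 * lpfc_sg_dma_buf_pool.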
*/ 4155 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4156 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4157 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4158 spin_lock_init(&lpfc_ncmd->buf_lock); 4159 4160 /* add the nvme buffer to a post list */ 4161 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4162 phba->sli4_hba.io_xri_cnt++; 4163 } 4164 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4165 "6114 Allocate %d out of %d requested new NVME " 4166 "buffers\n", bcnt, num_to_alloc); 4167 4168 /* post the list of nvme buffer sgls to port if available */ 4169 if (!list_empty(&post_nblist)) 4170 num_posted = lpfc_sli4_post_io_sgl_list( 4171 phba, &post_nblist, bcnt); 4172 else 4173 num_posted = 0; 4174 4175 return num_posted; 4176 } 4177 4178 static uint64_t 4179 lpfc_get_wwpn(struct lpfc_hba *phba) 4180 { 4181 uint64_t wwn; 4182 int rc; 4183 LPFC_MBOXQ_t *mboxq; 4184 MAILBOX_t *mb; 4185 4186 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4187 GFP_KERNEL); 4188 if (!mboxq) 4189 return (uint64_t)-1; 4190 4191 /* First get WWN of HBA instance */ 4192 lpfc_read_nv(phba, mboxq); 4193 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4194 if (rc != MBX_SUCCESS) { 4195 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4196 "6019 Mailbox failed , mbxCmd x%x " 4197 "READ_NV, mbxStatus x%x\n", 4198 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4199 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4200 mempool_free(mboxq, phba->mbox_mem_pool); 4201 return (uint64_t) -1; 4202 } 4203 mb = &mboxq->u.mb; 4204 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4205 /* wwn is WWPN of HBA instance */ 4206 mempool_free(mboxq, phba->mbox_mem_pool); 4207 if (phba->sli_rev == LPFC_SLI_REV4) 4208 return be64_to_cpu(wwn); 4209 else 4210 return rol64(wwn, 32); 4211 } 4212 4213 /** 4214 * lpfc_create_port - Create an FC port 4215 * @phba: pointer to lpfc hba data structure. 4216 * @instance: a unique integer ID to this FC port. 4217 * @dev: pointer to the device data structure. 4218 * 4219 * This routine creates a FC port for the upper layer protocol. The FC port 4220 * can be created on top of either a physical port or a virtual port provided 4221 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4222 * and associates the FC port created before adding the shost into the SCSI 4223 * layer. 4224 * 4225 * Return codes 4226 * @vport - pointer to the virtual N_Port data structure. 4227 * NULL - port create failed. 
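 *
 * The shost template is selected from the enabled FC4 types and the
 * lpfc_no_hba_reset WWPN list; shost->can_queue is seeded from
 * cfg_hba_queue_depth - 10 and is resized later, once the port's max XRI
 * count is known.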
4228 **/ 4229 struct lpfc_vport * 4230 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4231 { 4232 struct lpfc_vport *vport; 4233 struct Scsi_Host *shost = NULL; 4234 int error = 0; 4235 int i; 4236 uint64_t wwn; 4237 bool use_no_reset_hba = false; 4238 int rc; 4239 4240 if (lpfc_no_hba_reset_cnt) { 4241 if (phba->sli_rev < LPFC_SLI_REV4 && 4242 dev == &phba->pcidev->dev) { 4243 /* Reset the port first */ 4244 lpfc_sli_brdrestart(phba); 4245 rc = lpfc_sli_chipset_init(phba); 4246 if (rc) 4247 return NULL; 4248 } 4249 wwn = lpfc_get_wwpn(phba); 4250 } 4251 4252 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4253 if (wwn == lpfc_no_hba_reset[i]) { 4254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4255 "6020 Setting use_no_reset port=%llx\n", 4256 wwn); 4257 use_no_reset_hba = true; 4258 break; 4259 } 4260 } 4261 4262 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4263 if (dev != &phba->pcidev->dev) { 4264 shost = scsi_host_alloc(&lpfc_vport_template, 4265 sizeof(struct lpfc_vport)); 4266 } else { 4267 if (!use_no_reset_hba) 4268 shost = scsi_host_alloc(&lpfc_template, 4269 sizeof(struct lpfc_vport)); 4270 else 4271 shost = scsi_host_alloc(&lpfc_template_no_hr, 4272 sizeof(struct lpfc_vport)); 4273 } 4274 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 4275 shost = scsi_host_alloc(&lpfc_template_nvme, 4276 sizeof(struct lpfc_vport)); 4277 } 4278 if (!shost) 4279 goto out; 4280 4281 vport = (struct lpfc_vport *) shost->hostdata; 4282 vport->phba = phba; 4283 vport->load_flag |= FC_LOADING; 4284 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4285 vport->fc_rscn_flush = 0; 4286 lpfc_get_vport_cfgparam(vport); 4287 4288 /* Adjust value in vport */ 4289 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4290 4291 shost->unique_id = instance; 4292 shost->max_id = LPFC_MAX_TARGET; 4293 shost->max_lun = vport->cfg_max_luns; 4294 shost->this_id = -1; 4295 shost->max_cmd_len = 16; 4296 4297 if (phba->sli_rev == LPFC_SLI_REV4) { 4298 if (!phba->cfg_fcp_mq_threshold || 4299 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4300 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4301 4302 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4303 phba->cfg_fcp_mq_threshold); 4304 4305 shost->dma_boundary = 4306 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4307 4308 if (phba->cfg_xpsgl && !phba->nvmet_support) 4309 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4310 else 4311 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4312 } else 4313 /* SLI-3 has a limited number of hardware queues (3), 4314 * thus there is only one for FCP processing. 4315 */ 4316 shost->nr_hw_queues = 1; 4317 4318 /* 4319 * Set initial can_queue value since 0 is no longer supported and 4320 * scsi_add_host will fail. This will be adjusted later based on the 4321 * max xri value determined in hba setup. 4322 */ 4323 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4324 if (dev != &phba->pcidev->dev) { 4325 shost->transportt = lpfc_vport_transport_template; 4326 vport->port_type = LPFC_NPIV_PORT; 4327 } else { 4328 shost->transportt = lpfc_transport_template; 4329 vport->port_type = LPFC_PHYSICAL_PORT; 4330 } 4331 4332 /* Initialize all internally managed lists. 
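 * (fc_nodes and rcv_buffer_list), along with the per-vport discovery, ELS
 * and delayed-disc timers set up just below.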
*/ 4333 INIT_LIST_HEAD(&vport->fc_nodes); 4334 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4335 spin_lock_init(&vport->work_port_lock); 4336 4337 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4338 4339 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4340 4341 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4342 4343 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4344 lpfc_setup_bg(phba, shost); 4345 4346 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4347 if (error) 4348 goto out_put_shost; 4349 4350 spin_lock_irq(&phba->port_list_lock); 4351 list_add_tail(&vport->listentry, &phba->port_list); 4352 spin_unlock_irq(&phba->port_list_lock); 4353 return vport; 4354 4355 out_put_shost: 4356 scsi_host_put(shost); 4357 out: 4358 return NULL; 4359 } 4360 4361 /** 4362 * destroy_port - destroy an FC port 4363 * @vport: pointer to an lpfc virtual N_Port data structure. 4364 * 4365 * This routine destroys a FC port from the upper layer protocol. All the 4366 * resources associated with the port are released. 4367 **/ 4368 void 4369 destroy_port(struct lpfc_vport *vport) 4370 { 4371 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4372 struct lpfc_hba *phba = vport->phba; 4373 4374 lpfc_debugfs_terminate(vport); 4375 fc_remove_host(shost); 4376 scsi_remove_host(shost); 4377 4378 spin_lock_irq(&phba->port_list_lock); 4379 list_del_init(&vport->listentry); 4380 spin_unlock_irq(&phba->port_list_lock); 4381 4382 lpfc_cleanup(vport); 4383 return; 4384 } 4385 4386 /** 4387 * lpfc_get_instance - Get a unique integer ID 4388 * 4389 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4390 * uses the kernel idr facility to perform the task. 4391 * 4392 * Return codes: 4393 * instance - a unique integer ID allocated as the new instance. 4394 * -1 - lpfc get instance failed. 4395 **/ 4396 int 4397 lpfc_get_instance(void) 4398 { 4399 int ret; 4400 4401 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4402 return ret < 0 ? -1 : ret; 4403 } 4404 4405 /** 4406 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4407 * @shost: pointer to SCSI host data structure. 4408 * @time: elapsed time of the scan in jiffies. 4409 * 4410 * This routine is called by the SCSI layer with a SCSI host to determine 4411 * whether the scan host is finished. 4412 * 4413 * Note: there is no scan_start function as adapter initialization will have 4414 * asynchronously kicked off the link initialization. 4415 * 4416 * Return codes 4417 * 0 - SCSI host scan is not over yet. 4418 * 1 - SCSI host scan is over. 4419 **/ 4420 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4421 { 4422 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4423 struct lpfc_hba *phba = vport->phba; 4424 int stat = 0; 4425 4426 spin_lock_irq(shost->host_lock); 4427 4428 if (vport->load_flag & FC_UNLOADING) { 4429 stat = 1; 4430 goto finished; 4431 } 4432 if (time >= msecs_to_jiffies(30 * 1000)) { 4433 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4434 "0461 Scanning longer than 30 " 4435 "seconds. Continuing initialization\n"); 4436 stat = 1; 4437 goto finished; 4438 } 4439 if (time >= msecs_to_jiffies(15 * 1000) && 4440 phba->link_state <= LPFC_LINK_DOWN) { 4441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4442 "0465 Link down longer than 15 " 4443 "seconds. 
Continuing initialization\n"); 4444 stat = 1; 4445 goto finished; 4446 } 4447 4448 if (vport->port_state != LPFC_VPORT_READY) 4449 goto finished; 4450 if (vport->num_disc_nodes || vport->fc_prli_sent) 4451 goto finished; 4452 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4453 goto finished; 4454 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4455 goto finished; 4456 4457 stat = 1; 4458 4459 finished: 4460 spin_unlock_irq(shost->host_lock); 4461 return stat; 4462 } 4463 4464 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4465 { 4466 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4467 struct lpfc_hba *phba = vport->phba; 4468 4469 fc_host_supported_speeds(shost) = 0; 4470 if (phba->lmt & LMT_128Gb) 4471 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4472 if (phba->lmt & LMT_64Gb) 4473 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4474 if (phba->lmt & LMT_32Gb) 4475 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4476 if (phba->lmt & LMT_16Gb) 4477 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4478 if (phba->lmt & LMT_10Gb) 4479 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4480 if (phba->lmt & LMT_8Gb) 4481 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4482 if (phba->lmt & LMT_4Gb) 4483 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4484 if (phba->lmt & LMT_2Gb) 4485 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4486 if (phba->lmt & LMT_1Gb) 4487 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4488 } 4489 4490 /** 4491 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4492 * @shost: pointer to SCSI host data structure. 4493 * 4494 * This routine initializes a given SCSI host attributes on a FC port. The 4495 * SCSI host can be either on top of a physical port or a virtual port. 4496 **/ 4497 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4498 { 4499 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4500 struct lpfc_hba *phba = vport->phba; 4501 /* 4502 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4503 */ 4504 4505 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4506 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4507 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4508 4509 memset(fc_host_supported_fc4s(shost), 0, 4510 sizeof(fc_host_supported_fc4s(shost))); 4511 fc_host_supported_fc4s(shost)[2] = 1; 4512 fc_host_supported_fc4s(shost)[7] = 1; 4513 4514 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4515 sizeof fc_host_symbolic_name(shost)); 4516 4517 lpfc_host_supported_speeds_set(shost); 4518 4519 fc_host_maxframe_size(shost) = 4520 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4521 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4522 4523 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4524 4525 /* This value is also unchanging */ 4526 memset(fc_host_active_fc4s(shost), 0, 4527 sizeof(fc_host_active_fc4s(shost))); 4528 fc_host_active_fc4s(shost)[2] = 1; 4529 fc_host_active_fc4s(shost)[7] = 1; 4530 4531 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4532 spin_lock_irq(shost->host_lock); 4533 vport->load_flag &= ~FC_LOADING; 4534 spin_unlock_irq(shost->host_lock); 4535 } 4536 4537 /** 4538 * lpfc_stop_port_s3 - Stop SLI3 device port 4539 * @phba: pointer to lpfc hba data structure. 
4540 * 4541 * This routine is invoked to stop an SLI3 device port, it stops the device 4542 * from generating interrupts and stops the device driver's timers for the 4543 * device. 4544 **/ 4545 static void 4546 lpfc_stop_port_s3(struct lpfc_hba *phba) 4547 { 4548 /* Clear all interrupt enable conditions */ 4549 writel(0, phba->HCregaddr); 4550 readl(phba->HCregaddr); /* flush */ 4551 /* Clear all pending interrupts */ 4552 writel(0xffffffff, phba->HAregaddr); 4553 readl(phba->HAregaddr); /* flush */ 4554 4555 /* Reset some HBA SLI setup states */ 4556 lpfc_stop_hba_timers(phba); 4557 phba->pport->work_port_events = 0; 4558 } 4559 4560 /** 4561 * lpfc_stop_port_s4 - Stop SLI4 device port 4562 * @phba: pointer to lpfc hba data structure. 4563 * 4564 * This routine is invoked to stop an SLI4 device port, it stops the device 4565 * from generating interrupts and stops the device driver's timers for the 4566 * device. 4567 **/ 4568 static void 4569 lpfc_stop_port_s4(struct lpfc_hba *phba) 4570 { 4571 /* Reset some HBA SLI4 setup states */ 4572 lpfc_stop_hba_timers(phba); 4573 if (phba->pport) 4574 phba->pport->work_port_events = 0; 4575 phba->sli4_hba.intr_enable = 0; 4576 } 4577 4578 /** 4579 * lpfc_stop_port - Wrapper function for stopping hba port 4580 * @phba: Pointer to HBA context object. 4581 * 4582 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4583 * the API jump table function pointer from the lpfc_hba struct. 4584 **/ 4585 void 4586 lpfc_stop_port(struct lpfc_hba *phba) 4587 { 4588 phba->lpfc_stop_port(phba); 4589 4590 if (phba->wq) 4591 flush_workqueue(phba->wq); 4592 } 4593 4594 /** 4595 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4596 * @phba: Pointer to hba for which this call is being executed. 4597 * 4598 * This routine starts the timer waiting for the FCF rediscovery to complete. 4599 **/ 4600 void 4601 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4602 { 4603 unsigned long fcf_redisc_wait_tmo = 4604 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4605 /* Start fcf rediscovery wait period timer */ 4606 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4607 spin_lock_irq(&phba->hbalock); 4608 /* Allow action to new fcf asynchronous event */ 4609 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4610 /* Mark the FCF rediscovery pending state */ 4611 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4612 spin_unlock_irq(&phba->hbalock); 4613 } 4614 4615 /** 4616 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 4617 * @ptr: Map to lpfc_hba data structure pointer. 4618 * 4619 * This routine is invoked when waiting for FCF table rediscover has been 4620 * timed out. If new FCF record(s) has (have) been discovered during the 4621 * wait period, a new FCF event shall be added to the FCOE async event 4622 * list, and then worker thread shall be waked up for processing from the 4623 * worker thread context. 
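 * If the timer has been cancelled (FCF_REDISC_PEND already cleared) the
 * handler returns without queuing anything; otherwise it sets
 * FCF_REDISC_EVT and wakes the worker thread.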
4624 **/ 4625 static void 4626 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4627 { 4628 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4629 4630 /* Don't send FCF rediscovery event if timer cancelled */ 4631 spin_lock_irq(&phba->hbalock); 4632 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4633 spin_unlock_irq(&phba->hbalock); 4634 return; 4635 } 4636 /* Clear FCF rediscovery timer pending flag */ 4637 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4638 /* FCF rediscovery event to worker thread */ 4639 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4640 spin_unlock_irq(&phba->hbalock); 4641 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4642 "2776 FCF rediscover quiescent timer expired\n"); 4643 /* wake up worker thread */ 4644 lpfc_worker_wake_up(phba); 4645 } 4646 4647 /** 4648 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4649 * @phba: pointer to lpfc hba data structure. 4650 * @acqe_link: pointer to the async link completion queue entry. 4651 * 4652 * This routine is to parse the SLI4 link-attention link fault code. 4653 **/ 4654 static void 4655 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4656 struct lpfc_acqe_link *acqe_link) 4657 { 4658 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4659 case LPFC_ASYNC_LINK_FAULT_NONE: 4660 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4661 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4662 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4663 break; 4664 default: 4665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4666 "0398 Unknown link fault code: x%x\n", 4667 bf_get(lpfc_acqe_link_fault, acqe_link)); 4668 break; 4669 } 4670 } 4671 4672 /** 4673 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4674 * @phba: pointer to lpfc hba data structure. 4675 * @acqe_link: pointer to the async link completion queue entry. 4676 * 4677 * This routine is to parse the SLI4 link attention type and translate it 4678 * into the base driver's link attention type coding. 4679 * 4680 * Return: Link attention type in terms of base driver's coding. 4681 **/ 4682 static uint8_t 4683 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4684 struct lpfc_acqe_link *acqe_link) 4685 { 4686 uint8_t att_type; 4687 4688 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4689 case LPFC_ASYNC_LINK_STATUS_DOWN: 4690 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4691 att_type = LPFC_ATT_LINK_DOWN; 4692 break; 4693 case LPFC_ASYNC_LINK_STATUS_UP: 4694 /* Ignore physical link up events - wait for logical link up */ 4695 att_type = LPFC_ATT_RESERVED; 4696 break; 4697 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4698 att_type = LPFC_ATT_LINK_UP; 4699 break; 4700 default: 4701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4702 "0399 Invalid link attention type: x%x\n", 4703 bf_get(lpfc_acqe_link_status, acqe_link)); 4704 att_type = LPFC_ATT_RESERVED; 4705 break; 4706 } 4707 return att_type; 4708 } 4709 4710 /** 4711 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4712 * @phba: pointer to lpfc hba data structure. 4713 * 4714 * This routine is to get an SLI3 FC port's link speed in Mbps. 4715 * 4716 * Return: link speed in terms of Mbps. 
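 * For example, LPFC_LINK_SPEED_8GHZ is reported as 8000 and
 * LPFC_LINK_SPEED_16GHZ as 16000; on SLI4 the logical link speed is
 * returned instead whenever it is non-zero, falling back to the physical
 * speed otherwise.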
4717 **/ 4718 uint32_t 4719 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4720 { 4721 uint32_t link_speed; 4722 4723 if (!lpfc_is_link_up(phba)) 4724 return 0; 4725 4726 if (phba->sli_rev <= LPFC_SLI_REV3) { 4727 switch (phba->fc_linkspeed) { 4728 case LPFC_LINK_SPEED_1GHZ: 4729 link_speed = 1000; 4730 break; 4731 case LPFC_LINK_SPEED_2GHZ: 4732 link_speed = 2000; 4733 break; 4734 case LPFC_LINK_SPEED_4GHZ: 4735 link_speed = 4000; 4736 break; 4737 case LPFC_LINK_SPEED_8GHZ: 4738 link_speed = 8000; 4739 break; 4740 case LPFC_LINK_SPEED_10GHZ: 4741 link_speed = 10000; 4742 break; 4743 case LPFC_LINK_SPEED_16GHZ: 4744 link_speed = 16000; 4745 break; 4746 default: 4747 link_speed = 0; 4748 } 4749 } else { 4750 if (phba->sli4_hba.link_state.logical_speed) 4751 link_speed = 4752 phba->sli4_hba.link_state.logical_speed; 4753 else 4754 link_speed = phba->sli4_hba.link_state.speed; 4755 } 4756 return link_speed; 4757 } 4758 4759 /** 4760 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4761 * @phba: pointer to lpfc hba data structure. 4762 * @evt_code: asynchronous event code. 4763 * @speed_code: asynchronous event link speed code. 4764 * 4765 * This routine is to parse the giving SLI4 async event link speed code into 4766 * value of Mbps for the link speed. 4767 * 4768 * Return: link speed in terms of Mbps. 4769 **/ 4770 static uint32_t 4771 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4772 uint8_t speed_code) 4773 { 4774 uint32_t port_speed; 4775 4776 switch (evt_code) { 4777 case LPFC_TRAILER_CODE_LINK: 4778 switch (speed_code) { 4779 case LPFC_ASYNC_LINK_SPEED_ZERO: 4780 port_speed = 0; 4781 break; 4782 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4783 port_speed = 10; 4784 break; 4785 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4786 port_speed = 100; 4787 break; 4788 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4789 port_speed = 1000; 4790 break; 4791 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4792 port_speed = 10000; 4793 break; 4794 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4795 port_speed = 20000; 4796 break; 4797 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4798 port_speed = 25000; 4799 break; 4800 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4801 port_speed = 40000; 4802 break; 4803 default: 4804 port_speed = 0; 4805 } 4806 break; 4807 case LPFC_TRAILER_CODE_FC: 4808 switch (speed_code) { 4809 case LPFC_FC_LA_SPEED_UNKNOWN: 4810 port_speed = 0; 4811 break; 4812 case LPFC_FC_LA_SPEED_1G: 4813 port_speed = 1000; 4814 break; 4815 case LPFC_FC_LA_SPEED_2G: 4816 port_speed = 2000; 4817 break; 4818 case LPFC_FC_LA_SPEED_4G: 4819 port_speed = 4000; 4820 break; 4821 case LPFC_FC_LA_SPEED_8G: 4822 port_speed = 8000; 4823 break; 4824 case LPFC_FC_LA_SPEED_10G: 4825 port_speed = 10000; 4826 break; 4827 case LPFC_FC_LA_SPEED_16G: 4828 port_speed = 16000; 4829 break; 4830 case LPFC_FC_LA_SPEED_32G: 4831 port_speed = 32000; 4832 break; 4833 case LPFC_FC_LA_SPEED_64G: 4834 port_speed = 64000; 4835 break; 4836 case LPFC_FC_LA_SPEED_128G: 4837 port_speed = 128000; 4838 break; 4839 default: 4840 port_speed = 0; 4841 } 4842 break; 4843 default: 4844 port_speed = 0; 4845 } 4846 return port_speed; 4847 } 4848 4849 /** 4850 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4851 * @phba: pointer to lpfc hba data structure. 4852 * @acqe_link: pointer to the async link completion queue entry. 4853 * 4854 * This routine is to handle the SLI4 asynchronous FCoE link event. 
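 * A READ_TOPOLOGY mailbox command is built from the ACQE; in FC mode it is
 * actually issued to the port, while in FCoE mode the link attention fields
 * are faked and the READ_TOPOLOGY completion handler is invoked directly.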
4855 **/ 4856 static void 4857 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4858 struct lpfc_acqe_link *acqe_link) 4859 { 4860 struct lpfc_dmabuf *mp; 4861 LPFC_MBOXQ_t *pmb; 4862 MAILBOX_t *mb; 4863 struct lpfc_mbx_read_top *la; 4864 uint8_t att_type; 4865 int rc; 4866 4867 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4868 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4869 return; 4870 phba->fcoe_eventtag = acqe_link->event_tag; 4871 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4872 if (!pmb) { 4873 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4874 "0395 The mboxq allocation failed\n"); 4875 return; 4876 } 4877 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4878 if (!mp) { 4879 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4880 "0396 The lpfc_dmabuf allocation failed\n"); 4881 goto out_free_pmb; 4882 } 4883 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4884 if (!mp->virt) { 4885 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4886 "0397 The mbuf allocation failed\n"); 4887 goto out_free_dmabuf; 4888 } 4889 4890 /* Cleanup any outstanding ELS commands */ 4891 lpfc_els_flush_all_cmd(phba); 4892 4893 /* Block ELS IOCBs until we have done process link event */ 4894 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4895 4896 /* Update link event statistics */ 4897 phba->sli.slistat.link_event++; 4898 4899 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4900 lpfc_read_topology(phba, pmb, mp); 4901 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4902 pmb->vport = phba->pport; 4903 4904 /* Keep the link status for extra SLI4 state machine reference */ 4905 phba->sli4_hba.link_state.speed = 4906 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4907 bf_get(lpfc_acqe_link_speed, acqe_link)); 4908 phba->sli4_hba.link_state.duplex = 4909 bf_get(lpfc_acqe_link_duplex, acqe_link); 4910 phba->sli4_hba.link_state.status = 4911 bf_get(lpfc_acqe_link_status, acqe_link); 4912 phba->sli4_hba.link_state.type = 4913 bf_get(lpfc_acqe_link_type, acqe_link); 4914 phba->sli4_hba.link_state.number = 4915 bf_get(lpfc_acqe_link_number, acqe_link); 4916 phba->sli4_hba.link_state.fault = 4917 bf_get(lpfc_acqe_link_fault, acqe_link); 4918 phba->sli4_hba.link_state.logical_speed = 4919 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4920 4921 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4922 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4923 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4924 "Logical speed:%dMbps Fault:%d\n", 4925 phba->sli4_hba.link_state.speed, 4926 phba->sli4_hba.link_state.topology, 4927 phba->sli4_hba.link_state.status, 4928 phba->sli4_hba.link_state.type, 4929 phba->sli4_hba.link_state.number, 4930 phba->sli4_hba.link_state.logical_speed, 4931 phba->sli4_hba.link_state.fault); 4932 /* 4933 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4934 * topology info. Note: Optional for non FC-AL ports. 4935 */ 4936 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4937 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4938 if (rc == MBX_NOT_FINISHED) 4939 goto out_free_dmabuf; 4940 return; 4941 } 4942 /* 4943 * For FCoE Mode: fill in all the topology information we need and call 4944 * the READ_TOPOLOGY completion routine to continue without actually 4945 * sending the READ_TOPOLOGY mailbox command to the port. 
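 * Only the attention type and link speed carried in the ACQE matter here;
 * the remaining READ_TOPOLOGY fields are stubbed out below (topology forced
 * to PT_PT, ALPA/IL/PB/FA/MM zeroed).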
4946 */
4947 /* Initialize completion status */
4948 mb = &pmb->u.mb;
4949 mb->mbxStatus = MBX_SUCCESS;
4950
4951 /* Parse port fault information field */
4952 lpfc_sli4_parse_latt_fault(phba, acqe_link);
4953
4954 /* Parse and translate link attention fields */
4955 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4956 la->eventTag = acqe_link->event_tag;
4957 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4958 bf_set(lpfc_mbx_read_top_link_spd, la,
4959 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4960
4961 /* Fake the following irrelevant fields */
4962 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4963 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4964 bf_set(lpfc_mbx_read_top_il, la, 0);
4965 bf_set(lpfc_mbx_read_top_pb, la, 0);
4966 bf_set(lpfc_mbx_read_top_fa, la, 0);
4967 bf_set(lpfc_mbx_read_top_mm, la, 0);
4968
4969 /* Invoke the lpfc_handle_latt mailbox command callback function */
4970 lpfc_mbx_cmpl_read_topology(phba, pmb);
4971
4972 return;
4973
4974 out_free_dmabuf:
4975 kfree(mp);
4976 out_free_pmb:
4977 mempool_free(pmb, phba->mbox_mem_pool);
4978 }
4979
4980 /**
4981 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
4982 * topology.
4983 * @phba: pointer to lpfc hba data structure.
4985 * @speed_code: asynchronous event link speed code.
4986 *
4987 * This routine is to parse the given SLI4 async event link speed code into
4988 * the Read Topology link speed encoding.
4989 *
4990 * Return: link speed in terms of Read topology.
4991 **/
4992 static uint8_t
4993 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
4994 {
4995 uint8_t port_speed;
4996
4997 switch (speed_code) {
4998 case LPFC_FC_LA_SPEED_1G:
4999 port_speed = LPFC_LINK_SPEED_1GHZ;
5000 break;
5001 case LPFC_FC_LA_SPEED_2G:
5002 port_speed = LPFC_LINK_SPEED_2GHZ;
5003 break;
5004 case LPFC_FC_LA_SPEED_4G:
5005 port_speed = LPFC_LINK_SPEED_4GHZ;
5006 break;
5007 case LPFC_FC_LA_SPEED_8G:
5008 port_speed = LPFC_LINK_SPEED_8GHZ;
5009 break;
5010 case LPFC_FC_LA_SPEED_16G:
5011 port_speed = LPFC_LINK_SPEED_16GHZ;
5012 break;
5013 case LPFC_FC_LA_SPEED_32G:
5014 port_speed = LPFC_LINK_SPEED_32GHZ;
5015 break;
5016 case LPFC_FC_LA_SPEED_64G:
5017 port_speed = LPFC_LINK_SPEED_64GHZ;
5018 break;
5019 case LPFC_FC_LA_SPEED_128G:
5020 port_speed = LPFC_LINK_SPEED_128GHZ;
5021 break;
5022 case LPFC_FC_LA_SPEED_256G:
5023 port_speed = LPFC_LINK_SPEED_256GHZ;
5024 break;
5025 default:
5026 port_speed = 0;
5027 break;
5028 }
5029
5030 return port_speed;
5031 }
5032
5033 #define trunk_link_status(__idx)\
5034 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5035 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5036 "Link up" : "Link down") : "NA"
5037 /* Did port __idx report an error? */
5038 #define trunk_port_fault(__idx)\
5039 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5040 (port_fault & (1 << __idx) ?
"YES" : "NO") : "NA" 5041 5042 static void 5043 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5044 struct lpfc_acqe_fc_la *acqe_fc) 5045 { 5046 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5047 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5048 5049 phba->sli4_hba.link_state.speed = 5050 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5051 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5052 5053 phba->sli4_hba.link_state.logical_speed = 5054 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5055 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5056 phba->fc_linkspeed = 5057 lpfc_async_link_speed_to_read_top( 5058 phba, 5059 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5060 5061 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5062 phba->trunk_link.link0.state = 5063 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5064 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5065 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5066 } 5067 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5068 phba->trunk_link.link1.state = 5069 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5070 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5071 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5072 } 5073 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5074 phba->trunk_link.link2.state = 5075 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5076 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5077 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5078 } 5079 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5080 phba->trunk_link.link3.state = 5081 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5082 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5083 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5084 } 5085 5086 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5087 "2910 Async FC Trunking Event - Speed:%d\n" 5088 "\tLogical speed:%d " 5089 "port0: %s port1: %s port2: %s port3: %s\n", 5090 phba->sli4_hba.link_state.speed, 5091 phba->sli4_hba.link_state.logical_speed, 5092 trunk_link_status(0), trunk_link_status(1), 5093 trunk_link_status(2), trunk_link_status(3)); 5094 5095 if (port_fault) 5096 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5097 "3202 trunk error:0x%x (%s) seen on port0:%s " 5098 /* 5099 * SLI-4: We have only 0xA error codes 5100 * defined as of now. print an appropriate 5101 * message in case driver needs to be updated. 5102 */ 5103 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5104 "UNDEFINED. update driver." : trunk_errmsg[err], 5105 trunk_port_fault(0), trunk_port_fault(1), 5106 trunk_port_fault(2), trunk_port_fault(3)); 5107 } 5108 5109 5110 /** 5111 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5112 * @phba: pointer to lpfc hba data structure. 5113 * @acqe_fc: pointer to the async fc completion queue entry. 5114 * 5115 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5116 * that the event was received and then issue a read_topology mailbox command so 5117 * that the rest of the driver will treat it the same as SLI3. 
5118 **/ 5119 static void 5120 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 5121 { 5122 struct lpfc_dmabuf *mp; 5123 LPFC_MBOXQ_t *pmb; 5124 MAILBOX_t *mb; 5125 struct lpfc_mbx_read_top *la; 5126 int rc; 5127 5128 if (bf_get(lpfc_trailer_type, acqe_fc) != 5129 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5130 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5131 "2895 Non FC link Event detected.(%d)\n", 5132 bf_get(lpfc_trailer_type, acqe_fc)); 5133 return; 5134 } 5135 5136 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5137 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 5138 lpfc_update_trunk_link_status(phba, acqe_fc); 5139 return; 5140 } 5141 5142 /* Keep the link status for extra SLI4 state machine reference */ 5143 phba->sli4_hba.link_state.speed = 5144 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5145 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5146 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 5147 phba->sli4_hba.link_state.topology = 5148 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 5149 phba->sli4_hba.link_state.status = 5150 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 5151 phba->sli4_hba.link_state.type = 5152 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 5153 phba->sli4_hba.link_state.number = 5154 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 5155 phba->sli4_hba.link_state.fault = 5156 bf_get(lpfc_acqe_link_fault, acqe_fc); 5157 5158 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5159 LPFC_FC_LA_TYPE_LINK_DOWN) 5160 phba->sli4_hba.link_state.logical_speed = 0; 5161 else if (!phba->sli4_hba.conf_trunk) 5162 phba->sli4_hba.link_state.logical_speed = 5163 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5164 5165 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5166 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 5167 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 5168 "%dMbps Fault:%d\n", 5169 phba->sli4_hba.link_state.speed, 5170 phba->sli4_hba.link_state.topology, 5171 phba->sli4_hba.link_state.status, 5172 phba->sli4_hba.link_state.type, 5173 phba->sli4_hba.link_state.number, 5174 phba->sli4_hba.link_state.logical_speed, 5175 phba->sli4_hba.link_state.fault); 5176 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5177 if (!pmb) { 5178 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5179 "2897 The mboxq allocation failed\n"); 5180 return; 5181 } 5182 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5183 if (!mp) { 5184 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5185 "2898 The lpfc_dmabuf allocation failed\n"); 5186 goto out_free_pmb; 5187 } 5188 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5189 if (!mp->virt) { 5190 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5191 "2899 The mbuf allocation failed\n"); 5192 goto out_free_dmabuf; 5193 } 5194 5195 /* Cleanup any outstanding ELS commands */ 5196 lpfc_els_flush_all_cmd(phba); 5197 5198 /* Block ELS IOCBs until we have done process link event */ 5199 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5200 5201 /* Update link event statistics */ 5202 phba->sli.slistat.link_event++; 5203 5204 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5205 lpfc_read_topology(phba, pmb, mp); 5206 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5207 pmb->vport = phba->pport; 5208 5209 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 5210 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 5211 5212 switch (phba->sli4_hba.link_state.status) { 5213 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 5214 phba->link_flag |= LS_MDS_LINK_DOWN; 5215 break; 5216 case 
LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5217 phba->link_flag |= LS_MDS_LOOPBACK; 5218 break; 5219 default: 5220 break; 5221 } 5222 5223 /* Initialize completion status */ 5224 mb = &pmb->u.mb; 5225 mb->mbxStatus = MBX_SUCCESS; 5226 5227 /* Parse port fault information field */ 5228 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5229 5230 /* Parse and translate link attention fields */ 5231 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5232 la->eventTag = acqe_fc->event_tag; 5233 5234 if (phba->sli4_hba.link_state.status == 5235 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5236 bf_set(lpfc_mbx_read_top_att_type, la, 5237 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5238 } else { 5239 bf_set(lpfc_mbx_read_top_att_type, la, 5240 LPFC_FC_LA_TYPE_LINK_DOWN); 5241 } 5242 /* Invoke the mailbox command callback function */ 5243 lpfc_mbx_cmpl_read_topology(phba, pmb); 5244 5245 return; 5246 } 5247 5248 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5249 if (rc == MBX_NOT_FINISHED) 5250 goto out_free_dmabuf; 5251 return; 5252 5253 out_free_dmabuf: 5254 kfree(mp); 5255 out_free_pmb: 5256 mempool_free(pmb, phba->mbox_mem_pool); 5257 } 5258 5259 /** 5260 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5261 * @phba: pointer to lpfc hba data structure. 5262 * @acqe_fc: pointer to the async SLI completion queue entry. 5263 * 5264 * This routine is to handle the SLI4 asynchronous SLI events. 5265 **/ 5266 static void 5267 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5268 { 5269 char port_name; 5270 char message[128]; 5271 uint8_t status; 5272 uint8_t evt_type; 5273 uint8_t operational = 0; 5274 struct temp_event temp_event_data; 5275 struct lpfc_acqe_misconfigured_event *misconfigured; 5276 struct Scsi_Host *shost; 5277 struct lpfc_vport **vports; 5278 int rc, i; 5279 5280 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5281 5282 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5283 "2901 Async SLI event - Type:%d, Event Data: x%08x " 5284 "x%08x x%08x x%08x\n", evt_type, 5285 acqe_sli->event_data1, acqe_sli->event_data2, 5286 acqe_sli->reserved, acqe_sli->trailer); 5287 5288 port_name = phba->Port[0]; 5289 if (port_name == 0x00) 5290 port_name = '?'; /* get port name is empty */ 5291 5292 switch (evt_type) { 5293 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5294 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5295 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5296 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5297 5298 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5299 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5300 acqe_sli->event_data1, port_name); 5301 5302 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5303 shost = lpfc_shost_from_vport(phba->pport); 5304 fc_host_post_vendor_event(shost, fc_get_event_number(), 5305 sizeof(temp_event_data), 5306 (char *)&temp_event_data, 5307 SCSI_NL_VID_TYPE_PCI 5308 | PCI_VENDOR_ID_EMULEX); 5309 break; 5310 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5311 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5312 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5313 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5314 5315 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5316 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5317 acqe_sli->event_data1, port_name); 5318 5319 shost = lpfc_shost_from_vport(phba->pport); 5320 fc_host_post_vendor_event(shost, fc_get_event_number(), 5321 sizeof(temp_event_data), 5322 (char *)&temp_event_data, 5323 SCSI_NL_VID_TYPE_PCI 5324 | PCI_VENDOR_ID_EMULEX); 5325 break; 5326 case 
LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5327 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5328 &acqe_sli->event_data1; 5329 5330 /* fetch the status for this port */ 5331 switch (phba->sli4_hba.lnk_info.lnk_no) { 5332 case LPFC_LINK_NUMBER_0: 5333 status = bf_get(lpfc_sli_misconfigured_port0_state, 5334 &misconfigured->theEvent); 5335 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5336 &misconfigured->theEvent); 5337 break; 5338 case LPFC_LINK_NUMBER_1: 5339 status = bf_get(lpfc_sli_misconfigured_port1_state, 5340 &misconfigured->theEvent); 5341 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5342 &misconfigured->theEvent); 5343 break; 5344 case LPFC_LINK_NUMBER_2: 5345 status = bf_get(lpfc_sli_misconfigured_port2_state, 5346 &misconfigured->theEvent); 5347 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5348 &misconfigured->theEvent); 5349 break; 5350 case LPFC_LINK_NUMBER_3: 5351 status = bf_get(lpfc_sli_misconfigured_port3_state, 5352 &misconfigured->theEvent); 5353 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5354 &misconfigured->theEvent); 5355 break; 5356 default: 5357 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5358 "3296 " 5359 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5360 "event: Invalid link %d", 5361 phba->sli4_hba.lnk_info.lnk_no); 5362 return; 5363 } 5364 5365 /* Skip if optic state unchanged */ 5366 if (phba->sli4_hba.lnk_info.optic_state == status) 5367 return; 5368 5369 switch (status) { 5370 case LPFC_SLI_EVENT_STATUS_VALID: 5371 sprintf(message, "Physical Link is functional"); 5372 break; 5373 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5374 sprintf(message, "Optics faulted/incorrectly " 5375 "installed/not installed - Reseat optics, " 5376 "if issue not resolved, replace."); 5377 break; 5378 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5379 sprintf(message, 5380 "Optics of two types installed - Remove one " 5381 "optic or install matching pair of optics."); 5382 break; 5383 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5384 sprintf(message, "Incompatible optics - Replace with " 5385 "compatible optics for card to function."); 5386 break; 5387 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5388 sprintf(message, "Unqualified optics - Replace with " 5389 "Avago optics for Warranty and Technical " 5390 "Support - Link is%s operational", 5391 (operational) ? " not" : ""); 5392 break; 5393 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5394 sprintf(message, "Uncertified optics - Replace with " 5395 "Avago-certified optics to enable link " 5396 "operation - Link is%s operational", 5397 (operational) ? 
" not" : ""); 5398 break; 5399 default: 5400 /* firmware is reporting a status we don't know about */ 5401 sprintf(message, "Unknown event status x%02x", status); 5402 break; 5403 } 5404 5405 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5406 rc = lpfc_sli4_read_config(phba); 5407 if (rc) { 5408 phba->lmt = 0; 5409 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5410 "3194 Unable to retrieve supported " 5411 "speeds, rc = 0x%x\n", rc); 5412 } 5413 vports = lpfc_create_vport_work_array(phba); 5414 if (vports != NULL) { 5415 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5416 i++) { 5417 shost = lpfc_shost_from_vport(vports[i]); 5418 lpfc_host_supported_speeds_set(shost); 5419 } 5420 } 5421 lpfc_destroy_vport_work_array(phba, vports); 5422 5423 phba->sli4_hba.lnk_info.optic_state = status; 5424 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5425 "3176 Port Name %c %s\n", port_name, message); 5426 break; 5427 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5428 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5429 "3192 Remote DPort Test Initiated - " 5430 "Event Data1:x%08x Event Data2: x%08x\n", 5431 acqe_sli->event_data1, acqe_sli->event_data2); 5432 break; 5433 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 5434 /* Misconfigured WWN. Reports that the SLI Port is configured 5435 * to use FA-WWN, but the attached device doesn’t support it. 5436 * No driver action is required. 5437 * Event Data1 - N.A, Event Data2 - N.A 5438 */ 5439 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 5440 "2699 Misconfigured FA-WWN - Attached device does " 5441 "not support FA-WWN\n"); 5442 break; 5443 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 5444 /* EEPROM failure. No driver action is required */ 5445 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5446 "2518 EEPROM failure - " 5447 "Event Data1: x%08x Event Data2: x%08x\n", 5448 acqe_sli->event_data1, acqe_sli->event_data2); 5449 break; 5450 default: 5451 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5452 "3193 Unrecognized SLI event, type: 0x%x", 5453 evt_type); 5454 break; 5455 } 5456 } 5457 5458 /** 5459 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5460 * @vport: pointer to vport data structure. 5461 * 5462 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5463 * response to a CVL event. 5464 * 5465 * Return the pointer to the ndlp with the vport if successful, otherwise 5466 * return NULL. 
5467 **/ 5468 static struct lpfc_nodelist * 5469 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 5470 { 5471 struct lpfc_nodelist *ndlp; 5472 struct Scsi_Host *shost; 5473 struct lpfc_hba *phba; 5474 5475 if (!vport) 5476 return NULL; 5477 phba = vport->phba; 5478 if (!phba) 5479 return NULL; 5480 ndlp = lpfc_findnode_did(vport, Fabric_DID); 5481 if (!ndlp) { 5482 /* Cannot find existing Fabric ndlp, so allocate a new one */ 5483 ndlp = lpfc_nlp_init(vport, Fabric_DID); 5484 if (!ndlp) 5485 return NULL; 5486 /* Set the node type */ 5487 ndlp->nlp_type |= NLP_FABRIC; 5488 /* Put ndlp onto node list */ 5489 lpfc_enqueue_node(vport, ndlp); 5490 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 5491 /* re-setup ndlp without removing from node list */ 5492 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 5493 if (!ndlp) 5494 return NULL; 5495 } 5496 if ((phba->pport->port_state < LPFC_FLOGI) && 5497 (phba->pport->port_state != LPFC_VPORT_FAILED)) 5498 return NULL; 5499 /* If virtual link is not yet instantiated ignore CVL */ 5500 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 5501 && (vport->port_state != LPFC_VPORT_FAILED)) 5502 return NULL; 5503 shost = lpfc_shost_from_vport(vport); 5504 if (!shost) 5505 return NULL; 5506 lpfc_linkdown_port(vport); 5507 lpfc_cleanup_pending_mbox(vport); 5508 spin_lock_irq(shost->host_lock); 5509 vport->fc_flag |= FC_VPORT_CVL_RCVD; 5510 spin_unlock_irq(shost->host_lock); 5511 5512 return ndlp; 5513 } 5514 5515 /** 5516 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 5517 * @phba: pointer to lpfc hba data structure. 5518 * 5519 * This routine is to perform Clear Virtual Link (CVL) on all vports in 5520 * response to an FCF dead event. 5521 **/ 5522 static void 5523 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 5524 { 5525 struct lpfc_vport **vports; 5526 int i; 5527 5528 vports = lpfc_create_vport_work_array(phba); 5529 if (vports) 5530 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 5531 lpfc_sli4_perform_vport_cvl(vports[i]); 5532 lpfc_destroy_vport_work_array(phba, vports); 5533 } 5534 5535 /** 5536 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 5537 * @phba: pointer to lpfc hba data structure. 5538 * @acqe_fip: pointer to the async FCoE FIP completion queue entry. 5539 * 5540 * This routine is to handle the SLI4 asynchronous FCoE FIP events.
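 * The FIP event types handled here are: new FCF, FCF parameter modified,
 * FCF table full, FCF dead, and Clear Virtual Link (CVL).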
5541 **/ 5542 static void 5543 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5544 struct lpfc_acqe_fip *acqe_fip) 5545 { 5546 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5547 int rc; 5548 struct lpfc_vport *vport; 5549 struct lpfc_nodelist *ndlp; 5550 struct Scsi_Host *shost; 5551 int active_vlink_present; 5552 struct lpfc_vport **vports; 5553 int i; 5554 5555 phba->fc_eventTag = acqe_fip->event_tag; 5556 phba->fcoe_eventtag = acqe_fip->event_tag; 5557 switch (event_type) { 5558 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5559 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5560 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5561 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5562 LOG_DISCOVERY, 5563 "2546 New FCF event, evt_tag:x%x, " 5564 "index:x%x\n", 5565 acqe_fip->event_tag, 5566 acqe_fip->index); 5567 else 5568 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5569 LOG_DISCOVERY, 5570 "2788 FCF param modified event, " 5571 "evt_tag:x%x, index:x%x\n", 5572 acqe_fip->event_tag, 5573 acqe_fip->index); 5574 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5575 /* 5576 * During period of FCF discovery, read the FCF 5577 * table record indexed by the event to update 5578 * FCF roundrobin failover eligible FCF bmask. 5579 */ 5580 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5581 LOG_DISCOVERY, 5582 "2779 Read FCF (x%x) for updating " 5583 "roundrobin FCF failover bmask\n", 5584 acqe_fip->index); 5585 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5586 } 5587 5588 /* If the FCF discovery is in progress, do nothing. */ 5589 spin_lock_irq(&phba->hbalock); 5590 if (phba->hba_flag & FCF_TS_INPROG) { 5591 spin_unlock_irq(&phba->hbalock); 5592 break; 5593 } 5594 /* If fast FCF failover rescan event is pending, do nothing */ 5595 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5596 spin_unlock_irq(&phba->hbalock); 5597 break; 5598 } 5599 5600 /* If the FCF has been in discovered state, do nothing. */ 5601 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5602 spin_unlock_irq(&phba->hbalock); 5603 break; 5604 } 5605 spin_unlock_irq(&phba->hbalock); 5606 5607 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5608 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5609 "2770 Start FCF table scan per async FCF " 5610 "event, evt_tag:x%x, index:x%x\n", 5611 acqe_fip->event_tag, acqe_fip->index); 5612 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5613 LPFC_FCOE_FCF_GET_FIRST); 5614 if (rc) 5615 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5616 "2547 Issue FCF scan read FCF mailbox " 5617 "command failed (x%x)\n", rc); 5618 break; 5619 5620 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5621 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5622 "2548 FCF Table full count 0x%x tag 0x%x\n", 5623 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5624 acqe_fip->event_tag); 5625 break; 5626 5627 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5628 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5629 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5630 "2549 FCF (x%x) disconnected from network, " 5631 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5632 /* 5633 * If we are in the middle of FCF failover process, clear 5634 * the corresponding FCF bit in the roundrobin bitmap. 
5635 */ 5636 spin_lock_irq(&phba->hbalock); 5637 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5638 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5639 spin_unlock_irq(&phba->hbalock); 5640 /* Update FLOGI FCF failover eligible FCF bmask */ 5641 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5642 break; 5643 } 5644 spin_unlock_irq(&phba->hbalock); 5645 5646 /* If the event is not for currently used fcf do nothing */ 5647 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5648 break; 5649 5650 /* 5651 * Otherwise, request the port to rediscover the entire FCF 5652 * table for a fast recovery from case that the current FCF 5653 * is no longer valid as we are not in the middle of FCF 5654 * failover process already. 5655 */ 5656 spin_lock_irq(&phba->hbalock); 5657 /* Mark the fast failover process in progress */ 5658 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5659 spin_unlock_irq(&phba->hbalock); 5660 5661 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5662 "2771 Start FCF fast failover process due to " 5663 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5664 "\n", acqe_fip->event_tag, acqe_fip->index); 5665 rc = lpfc_sli4_redisc_fcf_table(phba); 5666 if (rc) { 5667 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5668 LOG_DISCOVERY, 5669 "2772 Issue FCF rediscover mailbox " 5670 "command failed, fail through to FCF " 5671 "dead event\n"); 5672 spin_lock_irq(&phba->hbalock); 5673 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5674 spin_unlock_irq(&phba->hbalock); 5675 /* 5676 * Last resort will fail over by treating this 5677 * as a link down to FCF registration. 5678 */ 5679 lpfc_sli4_fcf_dead_failthrough(phba); 5680 } else { 5681 /* Reset FCF roundrobin bmask for new discovery */ 5682 lpfc_sli4_clear_fcf_rr_bmask(phba); 5683 /* 5684 * Handling fast FCF failover to a DEAD FCF event is 5685 * considered equalivant to receiving CVL to all vports. 5686 */ 5687 lpfc_sli4_perform_all_vport_cvl(phba); 5688 } 5689 break; 5690 case LPFC_FIP_EVENT_TYPE_CVL: 5691 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5692 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5693 "2718 Clear Virtual Link Received for VPI 0x%x" 5694 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5695 5696 vport = lpfc_find_vport_by_vpid(phba, 5697 acqe_fip->index); 5698 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5699 if (!ndlp) 5700 break; 5701 active_vlink_present = 0; 5702 5703 vports = lpfc_create_vport_work_array(phba); 5704 if (vports) { 5705 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5706 i++) { 5707 if ((!(vports[i]->fc_flag & 5708 FC_VPORT_CVL_RCVD)) && 5709 (vports[i]->port_state > LPFC_FDISC)) { 5710 active_vlink_present = 1; 5711 break; 5712 } 5713 } 5714 lpfc_destroy_vport_work_array(phba, vports); 5715 } 5716 5717 /* 5718 * Don't re-instantiate if vport is marked for deletion. 5719 * If we are here first then vport_delete is going to wait 5720 * for discovery to complete. 5721 */ 5722 if (!(vport->load_flag & FC_UNLOADING) && 5723 active_vlink_present) { 5724 /* 5725 * If there are other active VLinks present, 5726 * re-instantiate the Vlink using FDISC. 
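 * A one second delay (NLP_DELAY_TMO) is armed on the ndlp before the
 * FDISC is issued.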
5727 */ 5728 mod_timer(&ndlp->nlp_delayfunc, 5729 jiffies + msecs_to_jiffies(1000)); 5730 shost = lpfc_shost_from_vport(vport); 5731 spin_lock_irq(shost->host_lock); 5732 ndlp->nlp_flag |= NLP_DELAY_TMO; 5733 spin_unlock_irq(shost->host_lock); 5734 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5735 vport->port_state = LPFC_FDISC; 5736 } else { 5737 /* 5738 * Otherwise, we request port to rediscover 5739 * the entire FCF table for a fast recovery 5740 * from possible case that the current FCF 5741 * is no longer valid if we are not already 5742 * in the FCF failover process. 5743 */ 5744 spin_lock_irq(&phba->hbalock); 5745 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5746 spin_unlock_irq(&phba->hbalock); 5747 break; 5748 } 5749 /* Mark the fast failover process in progress */ 5750 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 5751 spin_unlock_irq(&phba->hbalock); 5752 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5753 LOG_DISCOVERY, 5754 "2773 Start FCF failover per CVL, " 5755 "evt_tag:x%x\n", acqe_fip->event_tag); 5756 rc = lpfc_sli4_redisc_fcf_table(phba); 5757 if (rc) { 5758 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5759 LOG_DISCOVERY, 5760 "2774 Issue FCF rediscover " 5761 "mailbox command failed, " 5762 "through to CVL event\n"); 5763 spin_lock_irq(&phba->hbalock); 5764 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5765 spin_unlock_irq(&phba->hbalock); 5766 /* 5767 * Last resort will be re-try on the 5768 * the current registered FCF entry. 5769 */ 5770 lpfc_retry_pport_discovery(phba); 5771 } else 5772 /* 5773 * Reset FCF roundrobin bmask for new 5774 * discovery. 5775 */ 5776 lpfc_sli4_clear_fcf_rr_bmask(phba); 5777 } 5778 break; 5779 default: 5780 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5781 "0288 Unknown FCoE event type 0x%x event tag " 5782 "0x%x\n", event_type, acqe_fip->event_tag); 5783 break; 5784 } 5785 } 5786 5787 /** 5788 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 5789 * @phba: pointer to lpfc hba data structure. 5790 * @acqe_link: pointer to the async dcbx completion queue entry. 5791 * 5792 * This routine is to handle the SLI4 asynchronous dcbx event. 5793 **/ 5794 static void 5795 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 5796 struct lpfc_acqe_dcbx *acqe_dcbx) 5797 { 5798 phba->fc_eventTag = acqe_dcbx->event_tag; 5799 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5800 "0290 The SLI4 DCBX asynchronous event is not " 5801 "handled yet\n"); 5802 } 5803 5804 /** 5805 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 5806 * @phba: pointer to lpfc hba data structure. 5807 * @acqe_link: pointer to the async grp5 completion queue entry. 5808 * 5809 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 5810 * is an asynchronous notified of a logical link speed change. The Port 5811 * reports the logical link speed in units of 10Mbps. 
5812 **/ 5813 static void 5814 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 5815 struct lpfc_acqe_grp5 *acqe_grp5) 5816 { 5817 uint16_t prev_ll_spd; 5818 5819 phba->fc_eventTag = acqe_grp5->event_tag; 5820 phba->fcoe_eventtag = acqe_grp5->event_tag; 5821 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 5822 phba->sli4_hba.link_state.logical_speed = 5823 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 5824 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5825 "2789 GRP5 Async Event: Updating logical link speed " 5826 "from %dMbps to %dMbps\n", prev_ll_spd, 5827 phba->sli4_hba.link_state.logical_speed); 5828 } 5829 5830 /** 5831 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 5832 * @phba: pointer to lpfc hba data structure. 5833 * 5834 * This routine is invoked by the worker thread to process all the pending 5835 * SLI4 asynchronous events. 5836 **/ 5837 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 5838 { 5839 struct lpfc_cq_event *cq_event; 5840 5841 /* First, declare the async event has been handled */ 5842 spin_lock_irq(&phba->hbalock); 5843 phba->hba_flag &= ~ASYNC_EVENT; 5844 spin_unlock_irq(&phba->hbalock); 5845 /* Now, handle all the async events */ 5846 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 5847 /* Get the first event from the head of the event queue */ 5848 spin_lock_irq(&phba->hbalock); 5849 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 5850 cq_event, struct lpfc_cq_event, list); 5851 spin_unlock_irq(&phba->hbalock); 5852 /* Process the asynchronous event */ 5853 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 5854 case LPFC_TRAILER_CODE_LINK: 5855 lpfc_sli4_async_link_evt(phba, 5856 &cq_event->cqe.acqe_link); 5857 break; 5858 case LPFC_TRAILER_CODE_FCOE: 5859 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 5860 break; 5861 case LPFC_TRAILER_CODE_DCBX: 5862 lpfc_sli4_async_dcbx_evt(phba, 5863 &cq_event->cqe.acqe_dcbx); 5864 break; 5865 case LPFC_TRAILER_CODE_GRP5: 5866 lpfc_sli4_async_grp5_evt(phba, 5867 &cq_event->cqe.acqe_grp5); 5868 break; 5869 case LPFC_TRAILER_CODE_FC: 5870 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 5871 break; 5872 case LPFC_TRAILER_CODE_SLI: 5873 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5874 break; 5875 default: 5876 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5877 "1804 Invalid asynchronous event code: " 5878 "x%x\n", bf_get(lpfc_trailer_code, 5879 &cq_event->cqe.mcqe_cmpl)); 5880 break; 5881 } 5882 /* Free the completion event processed to the free pool */ 5883 lpfc_sli4_cq_event_release(phba, cq_event); 5884 } 5885 } 5886 5887 /** 5888 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 5889 * @phba: pointer to lpfc hba data structure. 5890 * 5891 * This routine is invoked by the worker thread to process FCF table 5892 * rediscovery pending completion event. 
5893 **/ 5894 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5895 { 5896 int rc; 5897 5898 spin_lock_irq(&phba->hbalock); 5899 /* Clear FCF rediscovery timeout event */ 5900 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5901 /* Clear driver fast failover FCF record flag */ 5902 phba->fcf.failover_rec.flag = 0; 5903 /* Set state for FCF fast failover */ 5904 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5905 spin_unlock_irq(&phba->hbalock); 5906 5907 /* Scan FCF table from the first entry to re-discover SAN */ 5908 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5909 "2777 Start post-quiescent FCF table scan\n"); 5910 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5911 if (rc) 5912 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5913 "2747 Issue FCF scan read FCF mailbox " 5914 "command failed 0x%x\n", rc); 5915 } 5916 5917 /** 5918 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5919 * @phba: pointer to lpfc hba data structure. 5920 * @dev_grp: The HBA PCI-Device group number. 5921 * 5922 * This routine is invoked to set up the per HBA PCI-Device group function 5923 * API jump table entries. 5924 * 5925 * Return: 0 if success, otherwise -ENODEV 5926 **/ 5927 int 5928 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5929 { 5930 int rc; 5931 5932 /* Set up lpfc PCI-device group */ 5933 phba->pci_dev_grp = dev_grp; 5934 5935 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5936 if (dev_grp == LPFC_PCI_DEV_OC) 5937 phba->sli_rev = LPFC_SLI_REV4; 5938 5939 /* Set up device INIT API function jump table */ 5940 rc = lpfc_init_api_table_setup(phba, dev_grp); 5941 if (rc) 5942 return -ENODEV; 5943 /* Set up SCSI API function jump table */ 5944 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5945 if (rc) 5946 return -ENODEV; 5947 /* Set up SLI API function jump table */ 5948 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5949 if (rc) 5950 return -ENODEV; 5951 /* Set up MBOX API function jump table */ 5952 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5953 if (rc) 5954 return -ENODEV; 5955 5956 return 0; 5957 } 5958 5959 /** 5960 * lpfc_log_intr_mode - Log the active interrupt mode 5961 * @phba: pointer to lpfc hba data structure. 5962 * @intr_mode: active interrupt mode adopted. 5963 * 5964 * This routine is invoked to log the currently used active interrupt mode 5965 * of the device. 5966 **/ 5967 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5968 { 5969 switch (intr_mode) { 5970 case 0: 5971 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5972 "0470 Enable INTx interrupt mode.\n"); 5973 break; 5974 case 1: 5975 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5976 "0481 Enabled MSI interrupt mode.\n"); 5977 break; 5978 case 2: 5979 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5980 "0480 Enabled MSI-X interrupt mode.\n"); 5981 break; 5982 default: 5983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5984 "0482 Illegal interrupt mode.\n"); 5985 break; 5986 } 5987 return; 5988 } 5989 5990 /** 5991 * lpfc_cpumask_of_node_init - initializes cpumask of phba's NUMA node 5992 * @phba: Pointer to HBA context object.
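 *
 * Builds a mask of all CPUs that belong to the NUMA node of the HBA's PCI
 * device; the mask is left empty when the device has no NUMA affinity
 * (NUMA_NO_NODE).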
5993 * 5994 **/ 5995 static void 5996 lpfc_cpumask_of_node_init(struct lpfc_hba *phba) 5997 { 5998 unsigned int cpu, numa_node; 5999 struct cpumask *numa_mask = &phba->sli4_hba.numa_mask; 6000 6001 cpumask_clear(numa_mask); 6002 6003 /* Check if we're a NUMA architecture */ 6004 numa_node = dev_to_node(&phba->pcidev->dev); 6005 if (numa_node == NUMA_NO_NODE) 6006 return; 6007 6008 for_each_possible_cpu(cpu) 6009 if (cpu_to_node(cpu) == numa_node) 6010 cpumask_set_cpu(cpu, numa_mask); 6011 } 6012 6013 /** 6014 * lpfc_enable_pci_dev - Enable a generic PCI device. 6015 * @phba: pointer to lpfc hba data structure. 6016 * 6017 * This routine is invoked to enable the PCI device that is common to all 6018 * PCI devices. 6019 * 6020 * Return codes 6021 * 0 - successful 6022 * other values - error 6023 **/ 6024 static int 6025 lpfc_enable_pci_dev(struct lpfc_hba *phba) 6026 { 6027 struct pci_dev *pdev; 6028 6029 /* Obtain PCI device reference */ 6030 if (!phba->pcidev) 6031 goto out_error; 6032 else 6033 pdev = phba->pcidev; 6034 /* Enable PCI device */ 6035 if (pci_enable_device_mem(pdev)) 6036 goto out_error; 6037 /* Request PCI resource for the device */ 6038 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 6039 goto out_disable_device; 6040 /* Set up device as PCI master and save state for EEH */ 6041 pci_set_master(pdev); 6042 pci_try_set_mwi(pdev); 6043 pci_save_state(pdev); 6044 6045 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 6046 if (pci_is_pcie(pdev)) 6047 pdev->needs_freset = 1; 6048 6049 return 0; 6050 6051 out_disable_device: 6052 pci_disable_device(pdev); 6053 out_error: 6054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6055 "1401 Failed to enable pci device\n"); 6056 return -ENODEV; 6057 } 6058 6059 /** 6060 * lpfc_disable_pci_dev - Disable a generic PCI device. 6061 * @phba: pointer to lpfc hba data structure. 6062 * 6063 * This routine is invoked to disable the PCI device that is common to all 6064 * PCI devices. 6065 **/ 6066 static void 6067 lpfc_disable_pci_dev(struct lpfc_hba *phba) 6068 { 6069 struct pci_dev *pdev; 6070 6071 /* Obtain PCI device reference */ 6072 if (!phba->pcidev) 6073 return; 6074 else 6075 pdev = phba->pcidev; 6076 /* Release PCI resource and disable PCI device */ 6077 pci_release_mem_regions(pdev); 6078 pci_disable_device(pdev); 6079 6080 return; 6081 } 6082 6083 /** 6084 * lpfc_reset_hba - Reset a hba 6085 * @phba: pointer to lpfc hba data structure. 6086 * 6087 * This routine is invoked to reset a hba device. It brings the HBA 6088 * offline, performs a board restart, and then brings the board back 6089 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 6090 * any outstanding mailbox commands. 6091 **/ 6092 void 6093 lpfc_reset_hba(struct lpfc_hba *phba) 6094 { 6095 /* If resets are disabled then set error state and return. */ 6096 if (!phba->cfg_enable_hba_reset) { 6097 phba->link_state = LPFC_HBA_ERROR; 6098 return; 6099 } 6100 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 6101 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6102 else 6103 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 6104 lpfc_offline(phba); 6105 lpfc_sli_brdrestart(phba); 6106 lpfc_online(phba); 6107 lpfc_unblock_mgmt_io(phba); 6108 } 6109 6110 /** 6111 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 6112 * @phba: pointer to lpfc hba data structure. 6113 * 6114 * This function reads the SR-IOV extended capability of the HBA's PCI 6115 * function and returns the TotalVFs value, i.e. the maximum number of 6116 * virtual functions the device supports. A value of 0 is returned when 6117 * the device has no SR-IOV capability; the caller uses this value to 6118 * decide how many virtual functions may be enabled. 6119 **/ 6120 uint16_t 6121 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 6122 { 6123 struct pci_dev *pdev = phba->pcidev; 6124 uint16_t nr_virtfn; 6125 int pos; 6126 6127 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 6128 if (pos == 0) 6129 return 0; 6130 6131 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 6132 return nr_virtfn; 6133 } 6134 6135 /** 6136 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 6137 * @phba: pointer to lpfc hba data structure. 6138 * @nr_vfn: number of virtual functions to be enabled. 6139 * 6140 * This function enables the PCI SR-IOV virtual functions to a physical 6141 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 6142 * enable that number of virtual functions on the physical function. As 6143 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 6144 * API call is not considered an error condition for most devices. 6145 **/ 6146 int 6147 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 6148 { 6149 struct pci_dev *pdev = phba->pcidev; 6150 uint16_t max_nr_vfn; 6151 int rc; 6152 6153 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 6154 if (nr_vfn > max_nr_vfn) { 6155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6156 "3057 Requested vfs (%d) greater than " 6157 "supported vfs (%d)", nr_vfn, max_nr_vfn); 6158 return -EINVAL; 6159 } 6160 6161 rc = pci_enable_sriov(pdev, nr_vfn); 6162 if (rc) { 6163 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6164 "2806 Failed to enable sriov on this device " 6165 "with vfn number nr_vf:%d, rc:%d\n", 6166 nr_vfn, rc); 6167 } else 6168 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6169 "2807 Successful enable sriov on this device " 6170 "with vfn number nr_vf:%d\n", nr_vfn); 6171 return rc; 6172 } 6173 6174 /** 6175 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 6176 * @phba: pointer to lpfc hba data structure. 6177 * 6178 * This routine is invoked to set up the driver internal resources before the 6179 * device specific resource setup to support the HBA device it is attached to. 6180 * 6181 * Return codes 6182 * 0 - successful 6183 * other values - error 6184 **/ 6185 static int 6186 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 6187 { 6188 struct lpfc_sli *psli = &phba->sli; 6189 6190 /* 6191 * Driver resources common to all SLI revisions 6192 */ 6193 atomic_set(&phba->fast_event_count, 0); 6194 spin_lock_init(&phba->hbalock); 6195 6196 /* Initialize ndlp management spinlock */ 6197 spin_lock_init(&phba->ndlp_lock); 6198 6199 /* Initialize port_list spinlock */ 6200 spin_lock_init(&phba->port_list_lock); 6201 INIT_LIST_HEAD(&phba->port_list); 6202 6203 INIT_LIST_HEAD(&phba->work_list); 6204 init_waitqueue_head(&phba->wait_4_mlo_m_q); 6205 6206 /* Initialize the wait queue head for the kernel thread */ 6207 init_waitqueue_head(&phba->work_waitq); 6208 6209 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6210 "1403 Protocols supported %s %s %s\n", 6211 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 6212 "SCSI" : " "), 6213 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 6214 "NVME" : " "), 6215 (phba->nvmet_support ?
"NVMET" : " ")); 6216 6217 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6218 spin_lock_init(&phba->scsi_buf_list_get_lock); 6219 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6220 spin_lock_init(&phba->scsi_buf_list_put_lock); 6221 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6222 6223 /* Initialize the fabric iocb list */ 6224 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6225 6226 /* Initialize list to save ELS buffers */ 6227 INIT_LIST_HEAD(&phba->elsbuf); 6228 6229 /* Initialize FCF connection rec list */ 6230 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6231 6232 /* Initialize OAS configuration list */ 6233 spin_lock_init(&phba->devicelock); 6234 INIT_LIST_HEAD(&phba->luns); 6235 6236 /* MBOX heartbeat timer */ 6237 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6238 /* Fabric block timer */ 6239 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6240 /* EA polling mode timer */ 6241 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6242 /* Heartbeat timer */ 6243 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6244 6245 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6246 6247 return 0; 6248 } 6249 6250 /** 6251 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6252 * @phba: pointer to lpfc hba data structure. 6253 * 6254 * This routine is invoked to set up the driver internal resources specific to 6255 * support the SLI-3 HBA device it attached to. 6256 * 6257 * Return codes 6258 * 0 - successful 6259 * other values - error 6260 **/ 6261 static int 6262 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6263 { 6264 int rc, entry_sz; 6265 6266 /* 6267 * Initialize timers used by driver 6268 */ 6269 6270 /* FCP polling mode timer */ 6271 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6272 6273 /* Host attention work mask setup */ 6274 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6275 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6276 6277 /* Get all the module params for configuring this host */ 6278 lpfc_get_cfgparam(phba); 6279 /* Set up phase-1 common device driver resources */ 6280 6281 rc = lpfc_setup_driver_resource_phase1(phba); 6282 if (rc) 6283 return -ENODEV; 6284 6285 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6286 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6287 /* check for menlo minimum sg count */ 6288 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6289 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6290 } 6291 6292 if (!phba->sli.sli3_ring) 6293 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6294 sizeof(struct lpfc_sli_ring), 6295 GFP_KERNEL); 6296 if (!phba->sli.sli3_ring) 6297 return -ENOMEM; 6298 6299 /* 6300 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6301 * used to create the sg_dma_buf_pool must be dynamically calculated. 6302 */ 6303 6304 /* Initialize the host templates the configured values. */ 6305 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6306 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 6307 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6308 6309 if (phba->sli_rev == LPFC_SLI_REV4) 6310 entry_sz = sizeof(struct sli4_sge); 6311 else 6312 entry_sz = sizeof(struct ulp_bde64); 6313 6314 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6315 if (phba->cfg_enable_bg) { 6316 /* 6317 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6318 * the FCP rsp, and a BDE for each. 
Since we have no control 6319 * over how many protection data segments the SCSI Layer 6320 * will hand us (ie: there could be one for every block 6321 * in the IO), we just allocate enough BDEs to accommodate 6322 * our max amount and we need to limit lpfc_sg_seg_cnt to 6323 * minimize the risk of running out. 6324 */ 6325 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6326 sizeof(struct fcp_rsp) + 6327 (LPFC_MAX_SG_SEG_CNT * entry_sz); 6328 6329 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 6330 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 6331 6332 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 6333 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 6334 } else { 6335 /* 6336 * The scsi_buf for a regular I/O will hold the FCP cmnd, 6337 * the FCP rsp, a BDE for each, and a BDE for up to 6338 * cfg_sg_seg_cnt data segments. 6339 */ 6340 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6341 sizeof(struct fcp_rsp) + 6342 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 6343 6344 /* Total BDEs in BPL for scsi_sg_list */ 6345 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 6346 } 6347 6348 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6349 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6350 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6351 phba->cfg_total_seg_cnt); 6352 6353 phba->max_vpi = LPFC_MAX_VPI; 6354 /* This will be set to correct value after config_port mbox */ 6355 phba->max_vports = 0; 6356 6357 /* 6358 * Initialize the SLI Layer to run with lpfc HBAs. 6359 */ 6360 lpfc_sli_setup(phba); 6361 lpfc_sli_queue_init(phba); 6362 6363 /* Allocate device driver memory */ 6364 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 6365 return -ENOMEM; 6366 6367 phba->lpfc_sg_dma_buf_pool = 6368 dma_pool_create("lpfc_sg_dma_buf_pool", 6369 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 6370 BPL_ALIGN_SZ, 0); 6371 6372 if (!phba->lpfc_sg_dma_buf_pool) 6373 goto fail_free_mem; 6374 6375 phba->lpfc_cmd_rsp_buf_pool = 6376 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6377 &phba->pcidev->dev, 6378 sizeof(struct fcp_cmnd) + 6379 sizeof(struct fcp_rsp), 6380 BPL_ALIGN_SZ, 0); 6381 6382 if (!phba->lpfc_cmd_rsp_buf_pool) 6383 goto fail_free_dma_buf_pool; 6384 6385 /* 6386 * Enable sr-iov virtual functions if supported and configured 6387 * through the module parameter. 6388 */ 6389 if (phba->cfg_sriov_nr_virtfn > 0) { 6390 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6391 phba->cfg_sriov_nr_virtfn); 6392 if (rc) { 6393 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6394 "2808 Requested number of SR-IOV " 6395 "virtual functions (%d) is not " 6396 "supported\n", 6397 phba->cfg_sriov_nr_virtfn); 6398 phba->cfg_sriov_nr_virtfn = 0; 6399 } 6400 } 6401 6402 return 0; 6403 6404 fail_free_dma_buf_pool: 6405 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 6406 phba->lpfc_sg_dma_buf_pool = NULL; 6407 fail_free_mem: 6408 lpfc_mem_free(phba); 6409 return -ENOMEM; 6410 } 6411 6412 /** 6413 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 6414 * @phba: pointer to lpfc hba data structure. 6415 * 6416 * This routine is invoked to unset the driver internal resources set up 6417 * specific for supporting the SLI-3 HBA device it is attached to.
6418 **/ 6419 static void 6420 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 6421 { 6422 /* Free device driver memory allocated */ 6423 lpfc_mem_free_all(phba); 6424 6425 return; 6426 } 6427 6428 /** 6429 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 6430 * @phba: pointer to lpfc hba data structure. 6431 * 6432 * This routine is invoked to set up the driver internal resources specific to 6433 * support the SLI-4 HBA device it attached to. 6434 * 6435 * Return codes 6436 * 0 - successful 6437 * other values - error 6438 **/ 6439 static int 6440 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6441 { 6442 LPFC_MBOXQ_t *mboxq; 6443 MAILBOX_t *mb; 6444 int rc, i, max_buf_size; 6445 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 6446 struct lpfc_mqe *mqe; 6447 int longs; 6448 int extra; 6449 uint64_t wwn; 6450 u32 if_type; 6451 u32 if_fam; 6452 6453 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6454 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 6455 phba->sli4_hba.curr_disp_cpu = 0; 6456 lpfc_cpumask_of_node_init(phba); 6457 6458 /* Get all the module params for configuring this host */ 6459 lpfc_get_cfgparam(phba); 6460 6461 /* Set up phase-1 common device driver resources */ 6462 rc = lpfc_setup_driver_resource_phase1(phba); 6463 if (rc) 6464 return -ENODEV; 6465 6466 /* Before proceed, wait for POST done and device ready */ 6467 rc = lpfc_sli4_post_status_check(phba); 6468 if (rc) 6469 return -ENODEV; 6470 6471 /* Allocate all driver workqueues here */ 6472 6473 /* The lpfc_wq workqueue for deferred irq use */ 6474 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6475 6476 /* 6477 * Initialize timers used by driver 6478 */ 6479 6480 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6481 6482 /* FCF rediscover timer */ 6483 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6484 6485 /* 6486 * Control structure for handling external multi-buffer mailbox 6487 * command pass-through. 6488 */ 6489 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6490 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6491 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6492 6493 phba->max_vpi = LPFC_MAX_VPI; 6494 6495 /* This will be set to correct value after the read_config mbox */ 6496 phba->max_vports = 0; 6497 6498 /* Program the default value of vlan_id and fc_map */ 6499 phba->valid_vlan = 0; 6500 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6501 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6502 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6503 6504 /* 6505 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6506 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6507 * The WQ create will allocate the ring. 6508 */ 6509 6510 /* Initialize buffer queue management fields */ 6511 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6512 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6513 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6514 6515 /* 6516 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
6517 */ 6518 /* Initialize the Abort buffer list used by driver */ 6519 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 6520 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 6521 6522 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6523 /* Initialize the Abort nvme buffer list used by driver */ 6524 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6525 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6526 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6527 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 6528 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 6529 } 6530 6531 /* This abort list used by worker thread */ 6532 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6533 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6534 6535 /* 6536 * Initialize driver internal slow-path work queues 6537 */ 6538 6539 /* Driver internel slow-path CQ Event pool */ 6540 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6541 /* Response IOCB work queue list */ 6542 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6543 /* Asynchronous event CQ Event work queue list */ 6544 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6545 /* Fast-path XRI aborted CQ Event work queue list */ 6546 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 6547 /* Slow-path XRI aborted CQ Event work queue list */ 6548 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6549 /* Receive queue CQ Event work queue list */ 6550 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6551 6552 /* Initialize extent block lists. */ 6553 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6554 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6555 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6556 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6557 6558 /* Initialize mboxq lists. If the early init routines fail 6559 * these lists need to be correctly initialized. 6560 */ 6561 INIT_LIST_HEAD(&phba->sli.mboxq); 6562 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6563 6564 /* initialize optic_state to 0xFF */ 6565 phba->sli4_hba.lnk_info.optic_state = 0xff; 6566 6567 /* Allocate device driver memory */ 6568 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6569 if (rc) 6570 return -ENOMEM; 6571 6572 /* IF Type 2 ports get initialized now. */ 6573 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6574 LPFC_SLI_INTF_IF_TYPE_2) { 6575 rc = lpfc_pci_function_reset(phba); 6576 if (unlikely(rc)) { 6577 rc = -ENODEV; 6578 goto out_free_mem; 6579 } 6580 phba->temp_sensor_support = 1; 6581 } 6582 6583 /* Create the bootstrap mailbox command */ 6584 rc = lpfc_create_bootstrap_mbox(phba); 6585 if (unlikely(rc)) 6586 goto out_free_mem; 6587 6588 /* Set up the host's endian order with the device. */ 6589 rc = lpfc_setup_endian_order(phba); 6590 if (unlikely(rc)) 6591 goto out_free_bsmbx; 6592 6593 /* Set up the hba's configuration parameters. */ 6594 rc = lpfc_sli4_read_config(phba); 6595 if (unlikely(rc)) 6596 goto out_free_bsmbx; 6597 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6598 if (unlikely(rc)) 6599 goto out_free_bsmbx; 6600 6601 /* IF Type 0 ports get initialized now. 
*/ 6602 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6603 LPFC_SLI_INTF_IF_TYPE_0) { 6604 rc = lpfc_pci_function_reset(phba); 6605 if (unlikely(rc)) 6606 goto out_free_bsmbx; 6607 } 6608 6609 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6610 GFP_KERNEL); 6611 if (!mboxq) { 6612 rc = -ENOMEM; 6613 goto out_free_bsmbx; 6614 } 6615 6616 /* Check for NVMET being configured */ 6617 phba->nvmet_support = 0; 6618 if (lpfc_enable_nvmet_cnt) { 6619 6620 /* First get WWN of HBA instance */ 6621 lpfc_read_nv(phba, mboxq); 6622 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6623 if (rc != MBX_SUCCESS) { 6624 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6625 "6016 Mailbox failed , mbxCmd x%x " 6626 "READ_NV, mbxStatus x%x\n", 6627 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6628 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6629 mempool_free(mboxq, phba->mbox_mem_pool); 6630 rc = -EIO; 6631 goto out_free_bsmbx; 6632 } 6633 mb = &mboxq->u.mb; 6634 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6635 sizeof(uint64_t)); 6636 wwn = cpu_to_be64(wwn); 6637 phba->sli4_hba.wwnn.u.name = wwn; 6638 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6639 sizeof(uint64_t)); 6640 /* wwn is WWPN of HBA instance */ 6641 wwn = cpu_to_be64(wwn); 6642 phba->sli4_hba.wwpn.u.name = wwn; 6643 6644 /* Check to see if it matches any module parameter */ 6645 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6646 if (wwn == lpfc_enable_nvmet[i]) { 6647 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6648 if (lpfc_nvmet_mem_alloc(phba)) 6649 break; 6650 6651 phba->nvmet_support = 1; /* a match */ 6652 6653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6654 "6017 NVME Target %016llx\n", 6655 wwn); 6656 #else 6657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6658 "6021 Can't enable NVME Target." 6659 " NVME_TARGET_FC infrastructure" 6660 " is not in kernel\n"); 6661 #endif 6662 /* Not supported for NVMET */ 6663 phba->cfg_xri_rebalancing = 0; 6664 break; 6665 } 6666 } 6667 } 6668 6669 lpfc_nvme_mod_param_dep(phba); 6670 6671 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6672 lpfc_supported_pages(mboxq); 6673 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6674 if (!rc) { 6675 mqe = &mboxq->u.mqe; 6676 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6677 LPFC_MAX_SUPPORTED_PAGES); 6678 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6679 switch (pn_page[i]) { 6680 case LPFC_SLI4_PARAMETERS: 6681 phba->sli4_hba.pc_sli4_params.supported = 1; 6682 break; 6683 default: 6684 break; 6685 } 6686 } 6687 /* Read the port's SLI4 Parameters capabilities if supported. */ 6688 if (phba->sli4_hba.pc_sli4_params.supported) 6689 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6690 if (rc) { 6691 mempool_free(mboxq, phba->mbox_mem_pool); 6692 rc = -EIO; 6693 goto out_free_bsmbx; 6694 } 6695 } 6696 6697 /* 6698 * Get sli4 parameters that override parameters from Port capabilities. 6699 * If this call fails, it isn't critical unless the SLI4 parameters come 6700 * back in conflict. 
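 * On failure, only IF_TYPE_0 / BE2-family ports are allowed to continue,
 * and even those bail out when both extents and RPI headers are in use.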
6701 */ 6702 rc = lpfc_get_sli4_parameters(phba, mboxq); 6703 if (rc) { 6704 if_type = bf_get(lpfc_sli_intf_if_type, 6705 &phba->sli4_hba.sli_intf); 6706 if_fam = bf_get(lpfc_sli_intf_sli_family, 6707 &phba->sli4_hba.sli_intf); 6708 if (phba->sli4_hba.extents_in_use && 6709 phba->sli4_hba.rpi_hdrs_in_use) { 6710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6711 "2999 Unsupported SLI4 Parameters " 6712 "Extents and RPI headers enabled.\n"); 6713 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6714 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6715 mempool_free(mboxq, phba->mbox_mem_pool); 6716 rc = -EIO; 6717 goto out_free_bsmbx; 6718 } 6719 } 6720 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6721 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6722 mempool_free(mboxq, phba->mbox_mem_pool); 6723 rc = -EIO; 6724 goto out_free_bsmbx; 6725 } 6726 } 6727 6728 /* 6729 * 1 for cmd, 1 for rsp, NVME adds an extra one 6730 * for boundary conditions in its max_sgl_segment template. 6731 */ 6732 extra = 2; 6733 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6734 extra++; 6735 6736 /* 6737 * It doesn't matter what family our adapter is in, we are 6738 * limited to 2 Pages, 512 SGEs, for our SGL. 6739 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6740 */ 6741 max_buf_size = (2 * SLI4_PAGE_SIZE); 6742 6743 /* 6744 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6745 * used to create the sg_dma_buf_pool must be calculated. 6746 */ 6747 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6748 /* Both cfg_enable_bg and cfg_external_dif code paths */ 6749 6750 /* 6751 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6752 * the FCP rsp, and a SGE. Sice we have no control 6753 * over how many protection segments the SCSI Layer 6754 * will hand us (ie: there could be one for every block 6755 * in the IO), just allocate enough SGEs to accomidate 6756 * our max amount and we need to limit lpfc_sg_seg_cnt 6757 * to minimize the risk of running out. 6758 */ 6759 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6760 sizeof(struct fcp_rsp) + max_buf_size; 6761 6762 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6763 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6764 6765 /* 6766 * If supporting DIF, reduce the seg count for scsi to 6767 * allow room for the DIF sges. 6768 */ 6769 if (phba->cfg_enable_bg && 6770 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6771 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6772 else 6773 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6774 6775 } else { 6776 /* 6777 * The scsi_buf for a regular I/O holds the FCP cmnd, 6778 * the FCP rsp, a SGE for each, and a SGE for up to 6779 * cfg_sg_seg_cnt data segments. 6780 */ 6781 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6782 sizeof(struct fcp_rsp) + 6783 ((phba->cfg_sg_seg_cnt + extra) * 6784 sizeof(struct sli4_sge)); 6785 6786 /* Total SGEs for scsi_sg_list */ 6787 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 6788 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6789 6790 /* 6791 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 6792 * need to post 1 page for the SGL. 
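 * (With the 16-byte SLI4 SGE this works out to 256 SGEs per 4KB page,
 * consistent with the 2-page / 512-SGE limit noted above.)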
6793 */ 6794 } 6795 6796 if (phba->cfg_xpsgl && !phba->nvmet_support) 6797 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 6798 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 6799 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 6800 else 6801 phba->cfg_sg_dma_buf_size = 6802 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 6803 6804 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 6805 sizeof(struct sli4_sge); 6806 6807 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 6808 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6809 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 6810 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 6811 "6300 Reducing NVME sg segment " 6812 "cnt to %d\n", 6813 LPFC_MAX_NVME_SEG_CNT); 6814 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 6815 } else 6816 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6817 } 6818 6819 /* Initialize the host templates with the updated values. */ 6820 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6821 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6822 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; 6823 6824 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6825 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6826 "total:%d scsi:%d nvme:%d\n", 6827 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6828 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 6829 phba->cfg_nvme_seg_cnt); 6830 6831 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 6832 i = phba->cfg_sg_dma_buf_size; 6833 else 6834 i = SLI4_PAGE_SIZE; 6835 6836 phba->lpfc_sg_dma_buf_pool = 6837 dma_pool_create("lpfc_sg_dma_buf_pool", 6838 &phba->pcidev->dev, 6839 phba->cfg_sg_dma_buf_size, 6840 i, 0); 6841 if (!phba->lpfc_sg_dma_buf_pool) 6842 goto out_free_bsmbx; 6843 6844 phba->lpfc_cmd_rsp_buf_pool = 6845 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6846 &phba->pcidev->dev, 6847 sizeof(struct fcp_cmnd) + 6848 sizeof(struct fcp_rsp), 6849 i, 0); 6850 if (!phba->lpfc_cmd_rsp_buf_pool) 6851 goto out_free_sg_dma_buf; 6852 6853 mempool_free(mboxq, phba->mbox_mem_pool); 6854 6855 /* Verify OAS is supported */ 6856 lpfc_sli4_oas_verify(phba); 6857 6858 /* Verify RAS support on adapter */ 6859 lpfc_sli4_ras_init(phba); 6860 6861 /* Verify all the SLI4 queues */ 6862 rc = lpfc_sli4_queue_verify(phba); 6863 if (rc) 6864 goto out_free_cmd_rsp_buf; 6865 6866 /* Create driver internal CQE event pool */ 6867 rc = lpfc_sli4_cq_event_pool_create(phba); 6868 if (rc) 6869 goto out_free_cmd_rsp_buf; 6870 6871 /* Initialize sgl lists per host */ 6872 lpfc_init_sgl_list(phba); 6873 6874 /* Allocate and initialize active sgl array */ 6875 rc = lpfc_init_active_sgl_array(phba); 6876 if (rc) { 6877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6878 "1430 Failed to initialize sgl list.\n"); 6879 goto out_destroy_cq_event_pool; 6880 } 6881 rc = lpfc_sli4_init_rpi_hdrs(phba); 6882 if (rc) { 6883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6884 "1432 Failed to initialize rpi headers.\n"); 6885 goto out_free_active_sgl; 6886 } 6887 6888 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6889 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6890 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6891 GFP_KERNEL); 6892 if (!phba->fcf.fcf_rr_bmask) { 6893 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6894 "2759 Failed allocate memory for FCF round " 6895 "robin failover bmask\n"); 6896 rc = -ENOMEM; 6897 goto out_remove_rpi_hdrs; 6898 } 6899 6900 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 6901 
sizeof(struct lpfc_hba_eq_hdl), 6902 GFP_KERNEL); 6903 if (!phba->sli4_hba.hba_eq_hdl) { 6904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6905 "2572 Failed allocate memory for " 6906 "fast-path per-EQ handle array\n"); 6907 rc = -ENOMEM; 6908 goto out_free_fcf_rr_bmask; 6909 } 6910 6911 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 6912 sizeof(struct lpfc_vector_map_info), 6913 GFP_KERNEL); 6914 if (!phba->sli4_hba.cpu_map) { 6915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6916 "3327 Failed allocate memory for msi-x " 6917 "interrupt vector mapping\n"); 6918 rc = -ENOMEM; 6919 goto out_free_hba_eq_hdl; 6920 } 6921 6922 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 6923 if (!phba->sli4_hba.eq_info) { 6924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6925 "3321 Failed allocation for per_cpu stats\n"); 6926 rc = -ENOMEM; 6927 goto out_free_hba_cpu_map; 6928 } 6929 /* 6930 * Enable sr-iov virtual functions if supported and configured 6931 * through the module parameter. 6932 */ 6933 if (phba->cfg_sriov_nr_virtfn > 0) { 6934 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6935 phba->cfg_sriov_nr_virtfn); 6936 if (rc) { 6937 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6938 "3020 Requested number of SR-IOV " 6939 "virtual functions (%d) is not " 6940 "supported\n", 6941 phba->cfg_sriov_nr_virtfn); 6942 phba->cfg_sriov_nr_virtfn = 0; 6943 } 6944 } 6945 6946 return 0; 6947 6948 out_free_hba_cpu_map: 6949 kfree(phba->sli4_hba.cpu_map); 6950 out_free_hba_eq_hdl: 6951 kfree(phba->sli4_hba.hba_eq_hdl); 6952 out_free_fcf_rr_bmask: 6953 kfree(phba->fcf.fcf_rr_bmask); 6954 out_remove_rpi_hdrs: 6955 lpfc_sli4_remove_rpi_hdrs(phba); 6956 out_free_active_sgl: 6957 lpfc_free_active_sgl(phba); 6958 out_destroy_cq_event_pool: 6959 lpfc_sli4_cq_event_pool_destroy(phba); 6960 out_free_cmd_rsp_buf: 6961 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 6962 phba->lpfc_cmd_rsp_buf_pool = NULL; 6963 out_free_sg_dma_buf: 6964 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 6965 phba->lpfc_sg_dma_buf_pool = NULL; 6966 out_free_bsmbx: 6967 lpfc_destroy_bootstrap_mbox(phba); 6968 out_free_mem: 6969 lpfc_mem_free(phba); 6970 return rc; 6971 } 6972 6973 /** 6974 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6975 * @phba: pointer to lpfc hba data structure. 6976 * 6977 * This routine is invoked to unset the driver internal resources set up 6978 * specific for supporting the SLI-4 HBA device it attached to. 6979 **/ 6980 static void 6981 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6982 { 6983 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6984 6985 free_percpu(phba->sli4_hba.eq_info); 6986 6987 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6988 kfree(phba->sli4_hba.cpu_map); 6989 phba->sli4_hba.num_possible_cpu = 0; 6990 phba->sli4_hba.num_present_cpu = 0; 6991 phba->sli4_hba.curr_disp_cpu = 0; 6992 cpumask_clear(&phba->sli4_hba.numa_mask); 6993 6994 /* Free memory allocated for fast-path work queue handles */ 6995 kfree(phba->sli4_hba.hba_eq_hdl); 6996 6997 /* Free the allocated rpi headers. 
*/ 6998 lpfc_sli4_remove_rpi_hdrs(phba); 6999 lpfc_sli4_remove_rpis(phba); 7000 7001 /* Free eligible FCF index bmask */ 7002 kfree(phba->fcf.fcf_rr_bmask); 7003 7004 /* Free the ELS sgl list */ 7005 lpfc_free_active_sgl(phba); 7006 lpfc_free_els_sgl_list(phba); 7007 lpfc_free_nvmet_sgl_list(phba); 7008 7009 /* Free the completion queue EQ event pool */ 7010 lpfc_sli4_cq_event_release_all(phba); 7011 lpfc_sli4_cq_event_pool_destroy(phba); 7012 7013 /* Release resource identifiers. */ 7014 lpfc_sli4_dealloc_resource_identifiers(phba); 7015 7016 /* Free the bsmbx region. */ 7017 lpfc_destroy_bootstrap_mbox(phba); 7018 7019 /* Free the SLI Layer memory with SLI4 HBAs */ 7020 lpfc_mem_free_all(phba); 7021 7022 /* Free the current connect table */ 7023 list_for_each_entry_safe(conn_entry, next_conn_entry, 7024 &phba->fcf_conn_rec_list, list) { 7025 list_del_init(&conn_entry->list); 7026 kfree(conn_entry); 7027 } 7028 7029 return; 7030 } 7031 7032 /** 7033 * lpfc_init_api_table_setup - Set up init api function jump table 7034 * @phba: The hba struct for which this call is being executed. 7035 * @dev_grp: The HBA PCI-Device group number. 7036 * 7037 * This routine sets up the device INIT interface API function jump table 7038 * in @phba struct. 7039 * 7040 * Returns: 0 - success, -ENODEV - failure. 7041 **/ 7042 int 7043 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7044 { 7045 phba->lpfc_hba_init_link = lpfc_hba_init_link; 7046 phba->lpfc_hba_down_link = lpfc_hba_down_link; 7047 phba->lpfc_selective_reset = lpfc_selective_reset; 7048 switch (dev_grp) { 7049 case LPFC_PCI_DEV_LP: 7050 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 7051 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 7052 phba->lpfc_stop_port = lpfc_stop_port_s3; 7053 break; 7054 case LPFC_PCI_DEV_OC: 7055 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 7056 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 7057 phba->lpfc_stop_port = lpfc_stop_port_s4; 7058 break; 7059 default: 7060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7061 "1431 Invalid HBA PCI-device group: 0x%x\n", 7062 dev_grp); 7063 return -ENODEV; 7064 break; 7065 } 7066 return 0; 7067 } 7068 7069 /** 7070 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 7071 * @phba: pointer to lpfc hba data structure. 7072 * 7073 * This routine is invoked to set up the driver internal resources after the 7074 * device specific resource setup to support the HBA device it attached to. 7075 * 7076 * Return codes 7077 * 0 - successful 7078 * other values - error 7079 **/ 7080 static int 7081 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 7082 { 7083 int error; 7084 7085 /* Startup the kernel thread for this host adapter. */ 7086 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7087 "lpfc_worker_%d", phba->brd_no); 7088 if (IS_ERR(phba->worker_thread)) { 7089 error = PTR_ERR(phba->worker_thread); 7090 return error; 7091 } 7092 7093 return 0; 7094 } 7095 7096 /** 7097 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 7098 * @phba: pointer to lpfc hba data structure. 7099 * 7100 * This routine is invoked to unset the driver internal resources set up after 7101 * the device specific resource setup for supporting the HBA device it 7102 * attached to. 
7103 **/ 7104 static void 7105 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 7106 { 7107 if (phba->wq) { 7108 flush_workqueue(phba->wq); 7109 destroy_workqueue(phba->wq); 7110 phba->wq = NULL; 7111 } 7112 7113 /* Stop kernel worker thread */ 7114 if (phba->worker_thread) 7115 kthread_stop(phba->worker_thread); 7116 } 7117 7118 /** 7119 * lpfc_free_iocb_list - Free iocb list. 7120 * @phba: pointer to lpfc hba data structure. 7121 * 7122 * This routine is invoked to free the driver's IOCB list and memory. 7123 **/ 7124 void 7125 lpfc_free_iocb_list(struct lpfc_hba *phba) 7126 { 7127 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 7128 7129 spin_lock_irq(&phba->hbalock); 7130 list_for_each_entry_safe(iocbq_entry, iocbq_next, 7131 &phba->lpfc_iocb_list, list) { 7132 list_del(&iocbq_entry->list); 7133 kfree(iocbq_entry); 7134 phba->total_iocbq_bufs--; 7135 } 7136 spin_unlock_irq(&phba->hbalock); 7137 7138 return; 7139 } 7140 7141 /** 7142 * lpfc_init_iocb_list - Allocate and initialize iocb list. 7143 * @phba: pointer to lpfc hba data structure. 7144 * 7145 * This routine is invoked to allocate and initizlize the driver's IOCB 7146 * list and set up the IOCB tag array accordingly. 7147 * 7148 * Return codes 7149 * 0 - successful 7150 * other values - error 7151 **/ 7152 int 7153 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 7154 { 7155 struct lpfc_iocbq *iocbq_entry = NULL; 7156 uint16_t iotag; 7157 int i; 7158 7159 /* Initialize and populate the iocb list per host. */ 7160 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 7161 for (i = 0; i < iocb_count; i++) { 7162 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 7163 if (iocbq_entry == NULL) { 7164 printk(KERN_ERR "%s: only allocated %d iocbs of " 7165 "expected %d count. Unloading driver.\n", 7166 __func__, i, iocb_count); 7167 goto out_free_iocbq; 7168 } 7169 7170 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 7171 if (iotag == 0) { 7172 kfree(iocbq_entry); 7173 printk(KERN_ERR "%s: failed to allocate IOTAG. " 7174 "Unloading driver.\n", __func__); 7175 goto out_free_iocbq; 7176 } 7177 iocbq_entry->sli4_lxritag = NO_XRI; 7178 iocbq_entry->sli4_xritag = NO_XRI; 7179 7180 spin_lock_irq(&phba->hbalock); 7181 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 7182 phba->total_iocbq_bufs++; 7183 spin_unlock_irq(&phba->hbalock); 7184 } 7185 7186 return 0; 7187 7188 out_free_iocbq: 7189 lpfc_free_iocb_list(phba); 7190 7191 return -ENOMEM; 7192 } 7193 7194 /** 7195 * lpfc_free_sgl_list - Free a given sgl list. 7196 * @phba: pointer to lpfc hba data structure. 7197 * @sglq_list: pointer to the head of sgl list. 7198 * 7199 * This routine is invoked to free a give sgl list and memory. 7200 **/ 7201 void 7202 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 7203 { 7204 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7205 7206 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 7207 list_del(&sglq_entry->list); 7208 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 7209 kfree(sglq_entry); 7210 } 7211 } 7212 7213 /** 7214 * lpfc_free_els_sgl_list - Free els sgl list. 7215 * @phba: pointer to lpfc hba data structure. 7216 * 7217 * This routine is invoked to free the driver's els sgl list and memory. 
7218 **/ 7219 static void 7220 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7221 { 7222 LIST_HEAD(sglq_list); 7223 7224 /* Retrieve all els sgls from driver list */ 7225 spin_lock_irq(&phba->hbalock); 7226 spin_lock(&phba->sli4_hba.sgl_list_lock); 7227 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7228 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7229 spin_unlock_irq(&phba->hbalock); 7230 7231 /* Now free the sgl list */ 7232 lpfc_free_sgl_list(phba, &sglq_list); 7233 } 7234 7235 /** 7236 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7237 * @phba: pointer to lpfc hba data structure. 7238 * 7239 * This routine is invoked to free the driver's nvmet sgl list and memory. 7240 **/ 7241 static void 7242 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7243 { 7244 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7245 LIST_HEAD(sglq_list); 7246 7247 /* Retrieve all nvmet sgls from driver list */ 7248 spin_lock_irq(&phba->hbalock); 7249 spin_lock(&phba->sli4_hba.sgl_list_lock); 7250 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7251 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7252 spin_unlock_irq(&phba->hbalock); 7253 7254 /* Now free the sgl list */ 7255 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7256 list_del(&sglq_entry->list); 7257 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7258 kfree(sglq_entry); 7259 } 7260 7261 /* Update the nvmet_xri_cnt to reflect no current sgls. 7262 * The next initialization cycle sets the count and allocates 7263 * the sgls over again. 7264 */ 7265 phba->sli4_hba.nvmet_xri_cnt = 0; 7266 } 7267 7268 /** 7269 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7270 * @phba: pointer to lpfc hba data structure. 7271 * 7272 * This routine is invoked to allocate the driver's active sgl memory. 7273 * This array will hold the sglq_entry's for active IOs. 7274 **/ 7275 static int 7276 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7277 { 7278 int size; 7279 size = sizeof(struct lpfc_sglq *); 7280 size *= phba->sli4_hba.max_cfg_param.max_xri; 7281 7282 phba->sli4_hba.lpfc_sglq_active_list = 7283 kzalloc(size, GFP_KERNEL); 7284 if (!phba->sli4_hba.lpfc_sglq_active_list) 7285 return -ENOMEM; 7286 return 0; 7287 } 7288 7289 /** 7290 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7291 * @phba: pointer to lpfc hba data structure. 7292 * 7293 * This routine is invoked to walk through the array of active sglq entries 7294 * and free all of the resources. 7295 * This is just a place holder for now. 7296 **/ 7297 static void 7298 lpfc_free_active_sgl(struct lpfc_hba *phba) 7299 { 7300 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7301 } 7302 7303 /** 7304 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7305 * @phba: pointer to lpfc hba data structure. 7306 * 7307 * This routine is invoked to allocate and initizlize the driver's sgl 7308 * list and set up the sgl xritag tag array accordingly. 7309 * 7310 **/ 7311 static void 7312 lpfc_init_sgl_list(struct lpfc_hba *phba) 7313 { 7314 /* Initialize and populate the sglq list per host/VF. 
*/ 7315 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7316 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7317 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7318 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7319 7320 /* els xri-sgl book keeping */ 7321 phba->sli4_hba.els_xri_cnt = 0; 7322 7323 /* nvme xri-buffer book keeping */ 7324 phba->sli4_hba.io_xri_cnt = 0; 7325 } 7326 7327 /** 7328 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7329 * @phba: pointer to lpfc hba data structure. 7330 * 7331 * This routine is invoked to post rpi header templates to the 7332 * port for those SLI4 ports that do not support extents. This routine 7333 * posts a PAGE_SIZE memory region to the port to hold up to 7334 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 7335 * and should be called only when interrupts are disabled. 7336 * 7337 * Return codes 7338 * 0 - successful 7339 * -ERROR - otherwise. 7340 **/ 7341 int 7342 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7343 { 7344 int rc = 0; 7345 struct lpfc_rpi_hdr *rpi_hdr; 7346 7347 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7348 if (!phba->sli4_hba.rpi_hdrs_in_use) 7349 return rc; 7350 if (phba->sli4_hba.extents_in_use) 7351 return -EIO; 7352 7353 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7354 if (!rpi_hdr) { 7355 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7356 "0391 Error during rpi post operation\n"); 7357 lpfc_sli4_remove_rpis(phba); 7358 rc = -ENODEV; 7359 } 7360 7361 return rc; 7362 } 7363 7364 /** 7365 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7366 * @phba: pointer to lpfc hba data structure. 7367 * 7368 * This routine is invoked to allocate a single 4KB memory region to 7369 * support rpis and stores them in the phba. This single region 7370 * provides support for up to 64 rpis. The region is used globally 7371 * by the device. 7372 * 7373 * Returns: 7374 * A valid rpi hdr on success. 7375 * A NULL pointer on any failure. 7376 **/ 7377 struct lpfc_rpi_hdr * 7378 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7379 { 7380 uint16_t rpi_limit, curr_rpi_range; 7381 struct lpfc_dmabuf *dmabuf; 7382 struct lpfc_rpi_hdr *rpi_hdr; 7383 7384 /* 7385 * If the SLI4 port supports extents, posting the rpi header isn't 7386 * required. Set the expected maximum count and let the actual value 7387 * get set when extents are fully allocated. 7388 */ 7389 if (!phba->sli4_hba.rpi_hdrs_in_use) 7390 return NULL; 7391 if (phba->sli4_hba.extents_in_use) 7392 return NULL; 7393 7394 /* The limit on the logical index is just the max_rpi count. */ 7395 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7396 7397 spin_lock_irq(&phba->hbalock); 7398 /* 7399 * Establish the starting RPI in this header block. The starting 7400 * rpi is normalized to a zero base because the physical rpi is 7401 * port based. 7402 */ 7403 curr_rpi_range = phba->sli4_hba.next_rpi; 7404 spin_unlock_irq(&phba->hbalock); 7405 7406 /* Reached full RPI range */ 7407 if (curr_rpi_range == rpi_limit) 7408 return NULL; 7409 7410 /* 7411 * First allocate the protocol header region for the port. The 7412 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
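 * The allocation below is checked for LPFC_HDR_TEMPLATE_SIZE alignment
 * with IS_ALIGNED(); each such region covers LPFC_RPI_HDR_COUNT (64) rpi
 * context headers, which is why rpi_hdr->next_rpi is set LPFC_RPI_HDR_COUNT
 * past the current range for every header block added to lpfc_rpi_hdr_list.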
7413 */ 7414 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7415 if (!dmabuf) 7416 return NULL; 7417 7418 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7419 LPFC_HDR_TEMPLATE_SIZE, 7420 &dmabuf->phys, GFP_KERNEL); 7421 if (!dmabuf->virt) { 7422 rpi_hdr = NULL; 7423 goto err_free_dmabuf; 7424 } 7425 7426 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7427 rpi_hdr = NULL; 7428 goto err_free_coherent; 7429 } 7430 7431 /* Save the rpi header data for cleanup later. */ 7432 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7433 if (!rpi_hdr) 7434 goto err_free_coherent; 7435 7436 rpi_hdr->dmabuf = dmabuf; 7437 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7438 rpi_hdr->page_count = 1; 7439 spin_lock_irq(&phba->hbalock); 7440 7441 /* The rpi_hdr stores the logical index only. */ 7442 rpi_hdr->start_rpi = curr_rpi_range; 7443 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7444 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7445 7446 spin_unlock_irq(&phba->hbalock); 7447 return rpi_hdr; 7448 7449 err_free_coherent: 7450 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7451 dmabuf->virt, dmabuf->phys); 7452 err_free_dmabuf: 7453 kfree(dmabuf); 7454 return NULL; 7455 } 7456 7457 /** 7458 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7459 * @phba: pointer to lpfc hba data structure. 7460 * 7461 * This routine is invoked to remove all memory resources allocated 7462 * to support rpis for SLI4 ports not supporting extents. This routine 7463 * presumes the caller has released all rpis consumed by fabric or port 7464 * logins and is prepared to have the header pages removed. 7465 **/ 7466 void 7467 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7468 { 7469 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7470 7471 if (!phba->sli4_hba.rpi_hdrs_in_use) 7472 goto exit; 7473 7474 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7475 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7476 list_del(&rpi_hdr->list); 7477 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7478 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7479 kfree(rpi_hdr->dmabuf); 7480 kfree(rpi_hdr); 7481 } 7482 exit: 7483 /* There are no rpis available to the port now. */ 7484 phba->sli4_hba.next_rpi = 0; 7485 } 7486 7487 /** 7488 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7489 * @pdev: pointer to pci device data structure. 7490 * 7491 * This routine is invoked to allocate the driver hba data structure for an 7492 * HBA device. If the allocation is successful, the phba reference to the 7493 * PCI device data structure is set. 
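 *
 * The board number stored in phba->brd_no is obtained from
 * lpfc_get_instance() and later released in lpfc_hba_free() via
 * idr_remove(&lpfc_hba_index, phba->brd_no).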
7494 * 7495 * Return codes 7496 * pointer to @phba - successful 7497 * NULL - error 7498 **/ 7499 static struct lpfc_hba * 7500 lpfc_hba_alloc(struct pci_dev *pdev) 7501 { 7502 struct lpfc_hba *phba; 7503 7504 /* Allocate memory for HBA structure */ 7505 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7506 if (!phba) { 7507 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7508 return NULL; 7509 } 7510 7511 /* Set reference to PCI device in HBA structure */ 7512 phba->pcidev = pdev; 7513 7514 /* Assign an unused board number */ 7515 phba->brd_no = lpfc_get_instance(); 7516 if (phba->brd_no < 0) { 7517 kfree(phba); 7518 return NULL; 7519 } 7520 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7521 7522 spin_lock_init(&phba->ct_ev_lock); 7523 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7524 7525 return phba; 7526 } 7527 7528 /** 7529 * lpfc_hba_free - Free driver hba data structure with a device. 7530 * @phba: pointer to lpfc hba data structure. 7531 * 7532 * This routine is invoked to free the driver hba data structure with an 7533 * HBA device. 7534 **/ 7535 static void 7536 lpfc_hba_free(struct lpfc_hba *phba) 7537 { 7538 if (phba->sli_rev == LPFC_SLI_REV4) 7539 kfree(phba->sli4_hba.hdwq); 7540 7541 /* Release the driver assigned board number */ 7542 idr_remove(&lpfc_hba_index, phba->brd_no); 7543 7544 /* Free memory allocated with sli3 rings */ 7545 kfree(phba->sli.sli3_ring); 7546 phba->sli.sli3_ring = NULL; 7547 7548 kfree(phba); 7549 return; 7550 } 7551 7552 /** 7553 * lpfc_create_shost - Create hba physical port with associated scsi host. 7554 * @phba: pointer to lpfc hba data structure. 7555 * 7556 * This routine is invoked to create HBA physical port and associate a SCSI 7557 * host with it. 7558 * 7559 * Return codes 7560 * 0 - successful 7561 * other values - error 7562 **/ 7563 static int 7564 lpfc_create_shost(struct lpfc_hba *phba) 7565 { 7566 struct lpfc_vport *vport; 7567 struct Scsi_Host *shost; 7568 7569 /* Initialize HBA FC structure */ 7570 phba->fc_edtov = FF_DEF_EDTOV; 7571 phba->fc_ratov = FF_DEF_RATOV; 7572 phba->fc_altov = FF_DEF_ALTOV; 7573 phba->fc_arbtov = FF_DEF_ARBTOV; 7574 7575 atomic_set(&phba->sdev_cnt, 0); 7576 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7577 if (!vport) 7578 return -ENODEV; 7579 7580 shost = lpfc_shost_from_vport(vport); 7581 phba->pport = vport; 7582 7583 if (phba->nvmet_support) { 7584 /* Only 1 vport (pport) will support NVME target */ 7585 phba->targetport = NULL; 7586 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7587 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 7588 "6076 NVME Target Found\n"); 7589 } 7590 7591 lpfc_debugfs_initialize(vport); 7592 /* Put reference to SCSI host to driver's device private data */ 7593 pci_set_drvdata(phba->pcidev, shost); 7594 7595 /* 7596 * At this point we are fully registered with PSA. In addition, 7597 * any initial discovery should be completed. 7598 */ 7599 vport->load_flag |= FC_ALLOW_FDMI; 7600 if (phba->cfg_enable_SmartSAN || 7601 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7602 7603 /* Setup appropriate attribute masks */ 7604 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7605 if (phba->cfg_enable_SmartSAN) 7606 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7607 else 7608 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7609 } 7610 return 0; 7611 } 7612 7613 /** 7614 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7615 * @phba: pointer to lpfc hba data structure. 
7616 * 7617 * This routine is invoked to destroy HBA physical port and the associated 7618 * SCSI host. 7619 **/ 7620 static void 7621 lpfc_destroy_shost(struct lpfc_hba *phba) 7622 { 7623 struct lpfc_vport *vport = phba->pport; 7624 7625 /* Destroy physical port that associated with the SCSI host */ 7626 destroy_port(vport); 7627 7628 return; 7629 } 7630 7631 /** 7632 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7633 * @phba: pointer to lpfc hba data structure. 7634 * @shost: the shost to be used to detect Block guard settings. 7635 * 7636 * This routine sets up the local Block guard protocol settings for @shost. 7637 * This routine also allocates memory for debugging bg buffers. 7638 **/ 7639 static void 7640 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7641 { 7642 uint32_t old_mask; 7643 uint32_t old_guard; 7644 7645 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7647 "1478 Registering BlockGuard with the " 7648 "SCSI layer\n"); 7649 7650 old_mask = phba->cfg_prot_mask; 7651 old_guard = phba->cfg_prot_guard; 7652 7653 /* Only allow supported values */ 7654 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7655 SHOST_DIX_TYPE0_PROTECTION | 7656 SHOST_DIX_TYPE1_PROTECTION); 7657 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7658 SHOST_DIX_GUARD_CRC); 7659 7660 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7661 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7662 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7663 7664 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7665 if ((old_mask != phba->cfg_prot_mask) || 7666 (old_guard != phba->cfg_prot_guard)) 7667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7668 "1475 Registering BlockGuard with the " 7669 "SCSI layer: mask %d guard %d\n", 7670 phba->cfg_prot_mask, 7671 phba->cfg_prot_guard); 7672 7673 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7674 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7675 } else 7676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7677 "1479 Not Registering BlockGuard with the SCSI " 7678 "layer, Bad protection parameters: %d %d\n", 7679 old_mask, old_guard); 7680 } 7681 } 7682 7683 /** 7684 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7685 * @phba: pointer to lpfc hba data structure. 7686 * 7687 * This routine is invoked to perform all the necessary post initialization 7688 * setup for the device. 7689 **/ 7690 static void 7691 lpfc_post_init_setup(struct lpfc_hba *phba) 7692 { 7693 struct Scsi_Host *shost; 7694 struct lpfc_adapter_event_header adapter_event; 7695 7696 /* Get the default values for Model Name and Description */ 7697 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7698 7699 /* 7700 * hba setup may have changed the hba_queue_depth so we need to 7701 * adjust the value of can_queue. 
7702 */ 7703 shost = pci_get_drvdata(phba->pcidev); 7704 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7705 7706 lpfc_host_attrib_init(shost); 7707 7708 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7709 spin_lock_irq(shost->host_lock); 7710 lpfc_poll_start_timer(phba); 7711 spin_unlock_irq(shost->host_lock); 7712 } 7713 7714 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7715 "0428 Perform SCSI scan\n"); 7716 /* Send board arrival event to upper layer */ 7717 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7718 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7719 fc_host_post_vendor_event(shost, fc_get_event_number(), 7720 sizeof(adapter_event), 7721 (char *) &adapter_event, 7722 LPFC_NL_VENDOR_ID); 7723 return; 7724 } 7725 7726 /** 7727 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7728 * @phba: pointer to lpfc hba data structure. 7729 * 7730 * This routine is invoked to set up the PCI device memory space for device 7731 * with SLI-3 interface spec. 7732 * 7733 * Return codes 7734 * 0 - successful 7735 * other values - error 7736 **/ 7737 static int 7738 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7739 { 7740 struct pci_dev *pdev = phba->pcidev; 7741 unsigned long bar0map_len, bar2map_len; 7742 int i, hbq_count; 7743 void *ptr; 7744 int error; 7745 7746 if (!pdev) 7747 return -ENODEV; 7748 7749 /* Set the device DMA mask size */ 7750 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7751 if (error) 7752 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7753 if (error) 7754 return error; 7755 error = -ENODEV; 7756 7757 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7758 * required by each mapping. 7759 */ 7760 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7761 bar0map_len = pci_resource_len(pdev, 0); 7762 7763 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7764 bar2map_len = pci_resource_len(pdev, 2); 7765 7766 /* Map HBA SLIM to a kernel virtual address. */ 7767 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7768 if (!phba->slim_memmap_p) { 7769 dev_printk(KERN_ERR, &pdev->dev, 7770 "ioremap failed for SLIM memory.\n"); 7771 goto out; 7772 } 7773 7774 /* Map HBA Control Registers to a kernel virtual address. 
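 * For SLI-3 adapters BAR0 (mapped above as slim_memmap_p) holds the SLIM
 * window and BAR2 (mapped here) holds the HA/CA/HS/HC control registers;
 * the out_* labels at the end of the routine unwind these mappings and the
 * coherent allocations in reverse order on failure.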
*/ 7775 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7776 if (!phba->ctrl_regs_memmap_p) { 7777 dev_printk(KERN_ERR, &pdev->dev, 7778 "ioremap failed for HBA control registers.\n"); 7779 goto out_iounmap_slim; 7780 } 7781 7782 /* Allocate memory for SLI-2 structures */ 7783 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7784 &phba->slim2p.phys, GFP_KERNEL); 7785 if (!phba->slim2p.virt) 7786 goto out_iounmap; 7787 7788 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7789 phba->mbox_ext = (phba->slim2p.virt + 7790 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7791 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7792 phba->IOCBs = (phba->slim2p.virt + 7793 offsetof(struct lpfc_sli2_slim, IOCBs)); 7794 7795 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7796 lpfc_sli_hbq_size(), 7797 &phba->hbqslimp.phys, 7798 GFP_KERNEL); 7799 if (!phba->hbqslimp.virt) 7800 goto out_free_slim; 7801 7802 hbq_count = lpfc_sli_hbq_count(); 7803 ptr = phba->hbqslimp.virt; 7804 for (i = 0; i < hbq_count; ++i) { 7805 phba->hbqs[i].hbq_virt = ptr; 7806 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7807 ptr += (lpfc_hbq_defs[i]->entry_count * 7808 sizeof(struct lpfc_hbq_entry)); 7809 } 7810 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7811 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7812 7813 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7814 7815 phba->MBslimaddr = phba->slim_memmap_p; 7816 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7817 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7818 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7819 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7820 7821 return 0; 7822 7823 out_free_slim: 7824 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7825 phba->slim2p.virt, phba->slim2p.phys); 7826 out_iounmap: 7827 iounmap(phba->ctrl_regs_memmap_p); 7828 out_iounmap_slim: 7829 iounmap(phba->slim_memmap_p); 7830 out: 7831 return error; 7832 } 7833 7834 /** 7835 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7836 * @phba: pointer to lpfc hba data structure. 7837 * 7838 * This routine is invoked to unset the PCI device memory space for device 7839 * with SLI-3 interface spec. 7840 **/ 7841 static void 7842 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7843 { 7844 struct pci_dev *pdev; 7845 7846 /* Obtain PCI device reference */ 7847 if (!phba->pcidev) 7848 return; 7849 else 7850 pdev = phba->pcidev; 7851 7852 /* Free coherent DMA memory allocated */ 7853 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7854 phba->hbqslimp.virt, phba->hbqslimp.phys); 7855 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7856 phba->slim2p.virt, phba->slim2p.phys); 7857 7858 /* I/O memory unmap */ 7859 iounmap(phba->ctrl_regs_memmap_p); 7860 iounmap(phba->slim_memmap_p); 7861 7862 return; 7863 } 7864 7865 /** 7866 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7867 * @phba: pointer to lpfc hba data structure. 7868 * 7869 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7870 * done and check status. 7871 * 7872 * Return 0 if successful, otherwise -ENODEV. 
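 *
 * The poll loop below reads the port semaphore register up to 3000 times
 * with msleep(10) between attempts (roughly the 30 second budget noted in
 * the code), stopping early on LPFC_POST_STAGE_PORT_READY or on the fatal
 * POST error bit.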
7873 **/ 7874 int 7875 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 7876 { 7877 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 7878 struct lpfc_register reg_data; 7879 int i, port_error = 0; 7880 uint32_t if_type; 7881 7882 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 7883 memset(&reg_data, 0, sizeof(reg_data)); 7884 if (!phba->sli4_hba.PSMPHRregaddr) 7885 return -ENODEV; 7886 7887 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 7888 for (i = 0; i < 3000; i++) { 7889 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 7890 &portsmphr_reg.word0) || 7891 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 7892 /* Port has a fatal POST error, break out */ 7893 port_error = -ENODEV; 7894 break; 7895 } 7896 if (LPFC_POST_STAGE_PORT_READY == 7897 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 7898 break; 7899 msleep(10); 7900 } 7901 7902 /* 7903 * If there was a port error during POST, then don't proceed with 7904 * other register reads as the data may not be valid. Just exit. 7905 */ 7906 if (port_error) { 7907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7908 "1408 Port Failed POST - portsmphr=0x%x, " 7909 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 7910 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 7911 portsmphr_reg.word0, 7912 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 7913 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 7914 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 7915 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 7916 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 7917 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 7918 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 7919 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 7920 } else { 7921 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7922 "2534 Device Info: SLIFamily=0x%x, " 7923 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 7924 "SLIHint_2=0x%x, FT=0x%x\n", 7925 bf_get(lpfc_sli_intf_sli_family, 7926 &phba->sli4_hba.sli_intf), 7927 bf_get(lpfc_sli_intf_slirev, 7928 &phba->sli4_hba.sli_intf), 7929 bf_get(lpfc_sli_intf_if_type, 7930 &phba->sli4_hba.sli_intf), 7931 bf_get(lpfc_sli_intf_sli_hint1, 7932 &phba->sli4_hba.sli_intf), 7933 bf_get(lpfc_sli_intf_sli_hint2, 7934 &phba->sli4_hba.sli_intf), 7935 bf_get(lpfc_sli_intf_func_type, 7936 &phba->sli4_hba.sli_intf)); 7937 /* 7938 * Check for other Port errors during the initialization 7939 * process. Fail the load if the port did not come up 7940 * correctly. 7941 */ 7942 if_type = bf_get(lpfc_sli_intf_if_type, 7943 &phba->sli4_hba.sli_intf); 7944 switch (if_type) { 7945 case LPFC_SLI_INTF_IF_TYPE_0: 7946 phba->sli4_hba.ue_mask_lo = 7947 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 7948 phba->sli4_hba.ue_mask_hi = 7949 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 7950 uerrlo_reg.word0 = 7951 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 7952 uerrhi_reg.word0 = 7953 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 7954 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 7955 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 7956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7957 "1422 Unrecoverable Error " 7958 "Detected during POST " 7959 "uerr_lo_reg=0x%x, " 7960 "uerr_hi_reg=0x%x, " 7961 "ue_mask_lo_reg=0x%x, " 7962 "ue_mask_hi_reg=0x%x\n", 7963 uerrlo_reg.word0, 7964 uerrhi_reg.word0, 7965 phba->sli4_hba.ue_mask_lo, 7966 phba->sli4_hba.ue_mask_hi); 7967 port_error = -ENODEV; 7968 } 7969 break; 7970 case LPFC_SLI_INTF_IF_TYPE_2: 7971 case LPFC_SLI_INTF_IF_TYPE_6: 7972 /* Final checks. The port status should be clean.
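 * Here "clean" means the sliport status error bit is either not set or is
 * accompanied by the restart-needed (rn) bit; an error without rn is
 * treated as unrecoverable, and ERR1/ERR2 are captured into work_status[]
 * before failing with -ENODEV.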
*/ 7973 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 7974 &reg_data.word0) || 7975 (bf_get(lpfc_sliport_status_err, &reg_data) && 7976 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 7977 phba->work_status[0] = 7978 readl(phba->sli4_hba.u.if_type2. 7979 ERR1regaddr); 7980 phba->work_status[1] = 7981 readl(phba->sli4_hba.u.if_type2. 7982 ERR2regaddr); 7983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7984 "2888 Unrecoverable port error " 7985 "following POST: port status reg " 7986 "0x%x, port_smphr reg 0x%x, " 7987 "error 1=0x%x, error 2=0x%x\n", 7988 reg_data.word0, 7989 portsmphr_reg.word0, 7990 phba->work_status[0], 7991 phba->work_status[1]); 7992 port_error = -ENODEV; 7993 } 7994 break; 7995 case LPFC_SLI_INTF_IF_TYPE_1: 7996 default: 7997 break; 7998 } 7999 } 8000 return port_error; 8001 } 8002 8003 /** 8004 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 8005 * @phba: pointer to lpfc hba data structure. 8006 * @if_type: The SLI4 interface type getting configured. 8007 * 8008 * This routine is invoked to set up SLI4 BAR0 PCI config space register 8009 * memory map. 8010 **/ 8011 static void 8012 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8013 { 8014 switch (if_type) { 8015 case LPFC_SLI_INTF_IF_TYPE_0: 8016 phba->sli4_hba.u.if_type0.UERRLOregaddr = 8017 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 8018 phba->sli4_hba.u.if_type0.UERRHIregaddr = 8019 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 8020 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 8021 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 8022 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 8023 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 8024 phba->sli4_hba.SLIINTFregaddr = 8025 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8026 break; 8027 case LPFC_SLI_INTF_IF_TYPE_2: 8028 phba->sli4_hba.u.if_type2.EQDregaddr = 8029 phba->sli4_hba.conf_regs_memmap_p + 8030 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8031 phba->sli4_hba.u.if_type2.ERR1regaddr = 8032 phba->sli4_hba.conf_regs_memmap_p + 8033 LPFC_CTL_PORT_ER1_OFFSET; 8034 phba->sli4_hba.u.if_type2.ERR2regaddr = 8035 phba->sli4_hba.conf_regs_memmap_p + 8036 LPFC_CTL_PORT_ER2_OFFSET; 8037 phba->sli4_hba.u.if_type2.CTRLregaddr = 8038 phba->sli4_hba.conf_regs_memmap_p + 8039 LPFC_CTL_PORT_CTL_OFFSET; 8040 phba->sli4_hba.u.if_type2.STATUSregaddr = 8041 phba->sli4_hba.conf_regs_memmap_p + 8042 LPFC_CTL_PORT_STA_OFFSET; 8043 phba->sli4_hba.SLIINTFregaddr = 8044 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8045 phba->sli4_hba.PSMPHRregaddr = 8046 phba->sli4_hba.conf_regs_memmap_p + 8047 LPFC_CTL_PORT_SEM_OFFSET; 8048 phba->sli4_hba.RQDBregaddr = 8049 phba->sli4_hba.conf_regs_memmap_p + 8050 LPFC_ULP0_RQ_DOORBELL; 8051 phba->sli4_hba.WQDBregaddr = 8052 phba->sli4_hba.conf_regs_memmap_p + 8053 LPFC_ULP0_WQ_DOORBELL; 8054 phba->sli4_hba.CQDBregaddr = 8055 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 8056 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8057 phba->sli4_hba.MQDBregaddr = 8058 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 8059 phba->sli4_hba.BMBXregaddr = 8060 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8061 break; 8062 case LPFC_SLI_INTF_IF_TYPE_6: 8063 phba->sli4_hba.u.if_type2.EQDregaddr = 8064 phba->sli4_hba.conf_regs_memmap_p + 8065 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8066 phba->sli4_hba.u.if_type2.ERR1regaddr = 8067 phba->sli4_hba.conf_regs_memmap_p + 8068 LPFC_CTL_PORT_ER1_OFFSET; 8069 phba->sli4_hba.u.if_type2.ERR2regaddr = 8070 phba->sli4_hba.conf_regs_memmap_p
+ 8071 LPFC_CTL_PORT_ER2_OFFSET; 8072 phba->sli4_hba.u.if_type2.CTRLregaddr = 8073 phba->sli4_hba.conf_regs_memmap_p + 8074 LPFC_CTL_PORT_CTL_OFFSET; 8075 phba->sli4_hba.u.if_type2.STATUSregaddr = 8076 phba->sli4_hba.conf_regs_memmap_p + 8077 LPFC_CTL_PORT_STA_OFFSET; 8078 phba->sli4_hba.PSMPHRregaddr = 8079 phba->sli4_hba.conf_regs_memmap_p + 8080 LPFC_CTL_PORT_SEM_OFFSET; 8081 phba->sli4_hba.BMBXregaddr = 8082 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8083 break; 8084 case LPFC_SLI_INTF_IF_TYPE_1: 8085 default: 8086 dev_printk(KERN_ERR, &phba->pcidev->dev, 8087 "FATAL - unsupported SLI4 interface type - %d\n", 8088 if_type); 8089 break; 8090 } 8091 } 8092 8093 /** 8094 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 8095 * @phba: pointer to lpfc hba data structure. 8096 * 8097 * This routine is invoked to set up SLI4 BAR1 register memory map. 8098 **/ 8099 static void 8100 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8101 { 8102 switch (if_type) { 8103 case LPFC_SLI_INTF_IF_TYPE_0: 8104 phba->sli4_hba.PSMPHRregaddr = 8105 phba->sli4_hba.ctrl_regs_memmap_p + 8106 LPFC_SLIPORT_IF0_SMPHR; 8107 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8108 LPFC_HST_ISR0; 8109 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8110 LPFC_HST_IMR0; 8111 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8112 LPFC_HST_ISCR0; 8113 break; 8114 case LPFC_SLI_INTF_IF_TYPE_6: 8115 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8116 LPFC_IF6_RQ_DOORBELL; 8117 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8118 LPFC_IF6_WQ_DOORBELL; 8119 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8120 LPFC_IF6_CQ_DOORBELL; 8121 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8122 LPFC_IF6_EQ_DOORBELL; 8123 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8124 LPFC_IF6_MQ_DOORBELL; 8125 break; 8126 case LPFC_SLI_INTF_IF_TYPE_2: 8127 case LPFC_SLI_INTF_IF_TYPE_1: 8128 default: 8129 dev_err(&phba->pcidev->dev, 8130 "FATAL - unsupported SLI4 interface type - %d\n", 8131 if_type); 8132 break; 8133 } 8134 } 8135 8136 /** 8137 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 8138 * @phba: pointer to lpfc hba data structure. 8139 * @vf: virtual function number 8140 * 8141 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 8142 * based on the given viftual function number, @vf. 8143 * 8144 * Return 0 if successful, otherwise -ENODEV. 
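 *
 * Each virtual function owns one doorbell register page inside BAR2, so
 * every address below is formed as
 *	drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + <doorbell offset>
 * For example, assuming a hypothetical 4 KB VF register page, vf 2's RQ
 * doorbell would land 8 KB into the BAR2 mapping plus LPFC_ULP0_RQ_DOORBELL.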
8145 **/ 8146 static int 8147 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 8148 { 8149 if (vf > LPFC_VIR_FUNC_MAX) 8150 return -ENODEV; 8151 8152 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8153 vf * LPFC_VFR_PAGE_SIZE + 8154 LPFC_ULP0_RQ_DOORBELL); 8155 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8156 vf * LPFC_VFR_PAGE_SIZE + 8157 LPFC_ULP0_WQ_DOORBELL); 8158 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8159 vf * LPFC_VFR_PAGE_SIZE + 8160 LPFC_EQCQ_DOORBELL); 8161 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8162 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8163 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 8164 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8165 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 8166 return 0; 8167 } 8168 8169 /** 8170 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 8171 * @phba: pointer to lpfc hba data structure. 8172 * 8173 * This routine is invoked to create the bootstrap mailbox 8174 * region consistent with the SLI-4 interface spec. This 8175 * routine allocates all memory necessary to communicate 8176 * mailbox commands to the port and sets up all alignment 8177 * needs. No locks are expected to be held when calling 8178 * this routine. 8179 * 8180 * Return codes 8181 * 0 - successful 8182 * -ENOMEM - could not allocated memory. 8183 **/ 8184 static int 8185 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 8186 { 8187 uint32_t bmbx_size; 8188 struct lpfc_dmabuf *dmabuf; 8189 struct dma_address *dma_address; 8190 uint32_t pa_addr; 8191 uint64_t phys_addr; 8192 8193 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8194 if (!dmabuf) 8195 return -ENOMEM; 8196 8197 /* 8198 * The bootstrap mailbox region is comprised of 2 parts 8199 * plus an alignment restriction of 16 bytes. 8200 */ 8201 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 8202 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 8203 &dmabuf->phys, GFP_KERNEL); 8204 if (!dmabuf->virt) { 8205 kfree(dmabuf); 8206 return -ENOMEM; 8207 } 8208 8209 /* 8210 * Initialize the bootstrap mailbox pointers now so that the register 8211 * operations are simple later. The mailbox dma address is required 8212 * to be 16-byte aligned. Also align the virtual memory as each 8213 * maibox is copied into the bmbx mailbox region before issuing the 8214 * command to the port. 8215 */ 8216 phba->sli4_hba.bmbx.dmabuf = dmabuf; 8217 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 8218 8219 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 8220 LPFC_ALIGN_16_BYTE); 8221 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 8222 LPFC_ALIGN_16_BYTE); 8223 8224 /* 8225 * Set the high and low physical addresses now. The SLI4 alignment 8226 * requirement is 16 bytes and the mailbox is posted to the port 8227 * as two 30-bit addresses. The other data is a bit marking whether 8228 * the 30-bit address is the high or low address. 8229 * Upcast bmbx aphys to 64bits so shift instruction compiles 8230 * clean on 32 bit machines. 
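 *
 * Worked example (hypothetical value, illustration only): with a 16-byte
 * aligned aphys of 0x400000010 (2^34 + 16), the low word carries bits
 * 33:4, i.e. ((aphys >> 4) & 0x3fffffff) == 0x1, shifted left by 2 and
 * OR'd with LPFC_BMBX_BIT1_ADDR_LO; the high word carries bits 63:34,
 * i.e. ((aphys >> 34) & 0x3fffffff) == 0x1, shifted left by 2 and OR'd
 * with LPFC_BMBX_BIT1_ADDR_HI, matching the shifts performed below.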
8231 */ 8232 dma_address = &phba->sli4_hba.bmbx.dma_address; 8233 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 8234 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 8235 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 8236 LPFC_BMBX_BIT1_ADDR_HI); 8237 8238 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 8239 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 8240 LPFC_BMBX_BIT1_ADDR_LO); 8241 return 0; 8242 } 8243 8244 /** 8245 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 8246 * @phba: pointer to lpfc hba data structure. 8247 * 8248 * This routine is invoked to teardown the bootstrap mailbox 8249 * region and release all host resources. This routine requires 8250 * the caller to ensure all mailbox commands recovered, no 8251 * additional mailbox comands are sent, and interrupts are disabled 8252 * before calling this routine. 8253 * 8254 **/ 8255 static void 8256 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 8257 { 8258 dma_free_coherent(&phba->pcidev->dev, 8259 phba->sli4_hba.bmbx.bmbx_size, 8260 phba->sli4_hba.bmbx.dmabuf->virt, 8261 phba->sli4_hba.bmbx.dmabuf->phys); 8262 8263 kfree(phba->sli4_hba.bmbx.dmabuf); 8264 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 8265 } 8266 8267 static const char * const lpfc_topo_to_str[] = { 8268 "Loop then P2P", 8269 "Loopback", 8270 "P2P Only", 8271 "Unsupported", 8272 "Loop Only", 8273 "Unsupported", 8274 "P2P then Loop", 8275 }; 8276 8277 /** 8278 * lpfc_map_topology - Map the topology read from READ_CONFIG 8279 * @phba: pointer to lpfc hba data structure. 8280 * @rdconf: pointer to read config data 8281 * 8282 * This routine is invoked to map the topology values as read 8283 * from the read config mailbox command. If the persistent 8284 * topology feature is supported, the firmware will provide the 8285 * saved topology information to be used in INIT_LINK 8286 * 8287 **/ 8288 #define LINK_FLAGS_DEF 0x0 8289 #define LINK_FLAGS_P2P 0x1 8290 #define LINK_FLAGS_LOOP 0x2 8291 static void 8292 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 8293 { 8294 u8 ptv, tf, pt; 8295 8296 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 8297 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 8298 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 8299 8300 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8301 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 8302 ptv, tf, pt); 8303 if (!ptv) { 8304 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8305 "2019 FW does not support persistent topology " 8306 "Using driver parameter defined value [%s]", 8307 lpfc_topo_to_str[phba->cfg_topology]); 8308 return; 8309 } 8310 /* FW supports persistent topology - override module parameter value */ 8311 phba->hba_flag |= HBA_PERSISTENT_TOPO; 8312 switch (phba->pcidev->device) { 8313 case PCI_DEVICE_ID_LANCER_G7_FC: 8314 case PCI_DEVICE_ID_LANCER_G6_FC: 8315 if (!tf) { 8316 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 8317 ? FLAGS_TOPOLOGY_MODE_LOOP 8318 : FLAGS_TOPOLOGY_MODE_PT_PT); 8319 } else { 8320 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 8321 } 8322 break; 8323 default: /* G5 */ 8324 if (tf) { 8325 /* If topology failover set - pt is '0' or '1' */ 8326 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 8327 FLAGS_TOPOLOGY_MODE_LOOP_PT); 8328 } else { 8329 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 8330 ? 
FLAGS_TOPOLOGY_MODE_PT_PT 8331 : FLAGS_TOPOLOGY_MODE_LOOP); 8332 } 8333 break; 8334 } 8335 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 8336 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8337 "2020 Using persistent topology value [%s]", 8338 lpfc_topo_to_str[phba->cfg_topology]); 8339 } else { 8340 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8341 "2021 Invalid topology values from FW " 8342 "Using driver parameter defined value [%s]", 8343 lpfc_topo_to_str[phba->cfg_topology]); 8344 } 8345 } 8346 8347 /** 8348 * lpfc_sli4_read_config - Get the config parameters. 8349 * @phba: pointer to lpfc hba data structure. 8350 * 8351 * This routine is invoked to read the configuration parameters from the HBA. 8352 * The configuration parameters are used to set the base and maximum values 8353 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 8354 * allocation for the port. 8355 * 8356 * Return codes 8357 * 0 - successful 8358 * -ENOMEM - No available memory 8359 * -EIO - The mailbox failed to complete successfully. 8360 **/ 8361 int 8362 lpfc_sli4_read_config(struct lpfc_hba *phba) 8363 { 8364 LPFC_MBOXQ_t *pmb; 8365 struct lpfc_mbx_read_config *rd_config; 8366 union lpfc_sli4_cfg_shdr *shdr; 8367 uint32_t shdr_status, shdr_add_status; 8368 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8369 struct lpfc_rsrc_desc_fcfcoe *desc; 8370 char *pdesc_0; 8371 uint16_t forced_link_speed; 8372 uint32_t if_type, qmin; 8373 int length, i, rc = 0, rc2; 8374 8375 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8376 if (!pmb) { 8377 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8378 "2011 Unable to allocate memory for issuing " 8379 "SLI_CONFIG_SPECIAL mailbox command\n"); 8380 return -ENOMEM; 8381 } 8382 8383 lpfc_read_config(phba, pmb); 8384 8385 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8386 if (rc != MBX_SUCCESS) { 8387 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8388 "2012 Mailbox failed , mbxCmd x%x " 8389 "READ_CONFIG, mbxStatus x%x\n", 8390 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8391 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8392 rc = -EIO; 8393 } else { 8394 rd_config = &pmb->u.mqe.un.rd_config; 8395 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8396 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8397 phba->sli4_hba.lnk_info.lnk_tp = 8398 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8399 phba->sli4_hba.lnk_info.lnk_no = 8400 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8401 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8402 "3081 lnk_type:%d, lnk_numb:%d\n", 8403 phba->sli4_hba.lnk_info.lnk_tp, 8404 phba->sli4_hba.lnk_info.lnk_no); 8405 } else 8406 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8407 "3082 Mailbox (x%x) returned ldv:x0\n", 8408 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8409 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8410 phba->bbcredit_support = 1; 8411 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8412 } 8413 8414 phba->sli4_hba.conf_trunk = 8415 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8416 phba->sli4_hba.extents_in_use = 8417 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8418 phba->sli4_hba.max_cfg_param.max_xri = 8419 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8420 /* Reduce resource usage in kdump environment */ 8421 if (is_kdump_kernel() && 8422 phba->sli4_hba.max_cfg_param.max_xri > 512) 8423 phba->sli4_hba.max_cfg_param.max_xri = 512; 8424 phba->sli4_hba.max_cfg_param.xri_base = 8425 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8426 phba->sli4_hba.max_cfg_param.max_vpi = 8427 bf_get(lpfc_mbx_rd_conf_vpi_count, 
rd_config); 8428 /* Limit the max we support */ 8429 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 8430 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 8431 phba->sli4_hba.max_cfg_param.vpi_base = 8432 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 8433 phba->sli4_hba.max_cfg_param.max_rpi = 8434 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 8435 phba->sli4_hba.max_cfg_param.rpi_base = 8436 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 8437 phba->sli4_hba.max_cfg_param.max_vfi = 8438 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 8439 phba->sli4_hba.max_cfg_param.vfi_base = 8440 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 8441 phba->sli4_hba.max_cfg_param.max_fcfi = 8442 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 8443 phba->sli4_hba.max_cfg_param.max_eq = 8444 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 8445 phba->sli4_hba.max_cfg_param.max_rq = 8446 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 8447 phba->sli4_hba.max_cfg_param.max_wq = 8448 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 8449 phba->sli4_hba.max_cfg_param.max_cq = 8450 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 8451 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 8452 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 8453 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 8454 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 8455 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 8456 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 8457 phba->max_vports = phba->max_vpi; 8458 lpfc_map_topology(phba, rd_config); 8459 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8460 "2003 cfg params Extents? %d " 8461 "XRI(B:%d M:%d), " 8462 "VPI(B:%d M:%d) " 8463 "VFI(B:%d M:%d) " 8464 "RPI(B:%d M:%d) " 8465 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 8466 phba->sli4_hba.extents_in_use, 8467 phba->sli4_hba.max_cfg_param.xri_base, 8468 phba->sli4_hba.max_cfg_param.max_xri, 8469 phba->sli4_hba.max_cfg_param.vpi_base, 8470 phba->sli4_hba.max_cfg_param.max_vpi, 8471 phba->sli4_hba.max_cfg_param.vfi_base, 8472 phba->sli4_hba.max_cfg_param.max_vfi, 8473 phba->sli4_hba.max_cfg_param.rpi_base, 8474 phba->sli4_hba.max_cfg_param.max_rpi, 8475 phba->sli4_hba.max_cfg_param.max_fcfi, 8476 phba->sli4_hba.max_cfg_param.max_eq, 8477 phba->sli4_hba.max_cfg_param.max_cq, 8478 phba->sli4_hba.max_cfg_param.max_wq, 8479 phba->sli4_hba.max_cfg_param.max_rq); 8480 8481 /* 8482 * Calculate queue resources based on how 8483 * many WQ/CQ/EQs are available. 8484 */ 8485 qmin = phba->sli4_hba.max_cfg_param.max_wq; 8486 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 8487 qmin = phba->sli4_hba.max_cfg_param.max_cq; 8488 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 8489 qmin = phba->sli4_hba.max_cfg_param.max_eq; 8490 /* 8491 * Whats left after this can go toward NVME / FCP. 8492 * The minus 4 accounts for ELS, NVME LS, MBOX 8493 * plus one extra. When configured for 8494 * NVMET, FCP io channel WQs are not created. 
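 *
 * Illustrative example (hypothetical READ_CONFIG values): with max_wq=128,
 * max_cq=128 and max_eq=64, qmin starts at 64 and drops to 60 after the
 * subtraction below, and cfg_irq_chann/cfg_hdw_queue are then clamped to
 * 60 if the user requested more.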
8495 */ 8496 qmin -= 4; 8497 8498 /* Check to see if there is enough for NVME */ 8499 if ((phba->cfg_irq_chann > qmin) || 8500 (phba->cfg_hdw_queue > qmin)) { 8501 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8502 "2005 Reducing Queues: " 8503 "WQ %d CQ %d EQ %d: min %d: " 8504 "IRQ %d HDWQ %d\n", 8505 phba->sli4_hba.max_cfg_param.max_wq, 8506 phba->sli4_hba.max_cfg_param.max_cq, 8507 phba->sli4_hba.max_cfg_param.max_eq, 8508 qmin, phba->cfg_irq_chann, 8509 phba->cfg_hdw_queue); 8510 8511 if (phba->cfg_irq_chann > qmin) 8512 phba->cfg_irq_chann = qmin; 8513 if (phba->cfg_hdw_queue > qmin) 8514 phba->cfg_hdw_queue = qmin; 8515 } 8516 } 8517 8518 if (rc) 8519 goto read_cfg_out; 8520 8521 /* Update link speed if forced link speed is supported */ 8522 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8523 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8524 forced_link_speed = 8525 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8526 if (forced_link_speed) { 8527 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8528 8529 switch (forced_link_speed) { 8530 case LINK_SPEED_1G: 8531 phba->cfg_link_speed = 8532 LPFC_USER_LINK_SPEED_1G; 8533 break; 8534 case LINK_SPEED_2G: 8535 phba->cfg_link_speed = 8536 LPFC_USER_LINK_SPEED_2G; 8537 break; 8538 case LINK_SPEED_4G: 8539 phba->cfg_link_speed = 8540 LPFC_USER_LINK_SPEED_4G; 8541 break; 8542 case LINK_SPEED_8G: 8543 phba->cfg_link_speed = 8544 LPFC_USER_LINK_SPEED_8G; 8545 break; 8546 case LINK_SPEED_10G: 8547 phba->cfg_link_speed = 8548 LPFC_USER_LINK_SPEED_10G; 8549 break; 8550 case LINK_SPEED_16G: 8551 phba->cfg_link_speed = 8552 LPFC_USER_LINK_SPEED_16G; 8553 break; 8554 case LINK_SPEED_32G: 8555 phba->cfg_link_speed = 8556 LPFC_USER_LINK_SPEED_32G; 8557 break; 8558 case LINK_SPEED_64G: 8559 phba->cfg_link_speed = 8560 LPFC_USER_LINK_SPEED_64G; 8561 break; 8562 case 0xffff: 8563 phba->cfg_link_speed = 8564 LPFC_USER_LINK_SPEED_AUTO; 8565 break; 8566 default: 8567 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8568 "0047 Unrecognized link " 8569 "speed : %d\n", 8570 forced_link_speed); 8571 phba->cfg_link_speed = 8572 LPFC_USER_LINK_SPEED_AUTO; 8573 } 8574 } 8575 } 8576 8577 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8578 length = phba->sli4_hba.max_cfg_param.max_xri - 8579 lpfc_sli4_get_els_iocb_cnt(phba); 8580 if (phba->cfg_hba_queue_depth > length) { 8581 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8582 "3361 HBA queue depth changed from %d to %d\n", 8583 phba->cfg_hba_queue_depth, length); 8584 phba->cfg_hba_queue_depth = length; 8585 } 8586 8587 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8588 LPFC_SLI_INTF_IF_TYPE_2) 8589 goto read_cfg_out; 8590 8591 /* get the pf# and vf# for SLI4 if_type 2 port */ 8592 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8593 sizeof(struct lpfc_sli4_cfg_mhdr)); 8594 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8595 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8596 length, LPFC_SLI4_MBX_EMBED); 8597 8598 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8599 shdr = (union lpfc_sli4_cfg_shdr *) 8600 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8601 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8602 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8603 if (rc2 || shdr_status || shdr_add_status) { 8604 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8605 "3026 Mailbox failed , mbxCmd x%x " 8606 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8607 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8608 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8609 goto read_cfg_out; 8610 } 8611 8612 /* search 
for fc_fcoe resrouce descriptor */ 8613 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8614 8615 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8616 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8617 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8618 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8619 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8620 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8621 goto read_cfg_out; 8622 8623 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8624 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8625 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8626 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8627 phba->sli4_hba.iov.pf_number = 8628 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8629 phba->sli4_hba.iov.vf_number = 8630 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8631 break; 8632 } 8633 } 8634 8635 if (i < LPFC_RSRC_DESC_MAX_NUM) 8636 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8637 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8638 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8639 phba->sli4_hba.iov.vf_number); 8640 else 8641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8642 "3028 GET_FUNCTION_CONFIG: failed to find " 8643 "Resource Descriptor:x%x\n", 8644 LPFC_RSRC_DESC_TYPE_FCFCOE); 8645 8646 read_cfg_out: 8647 mempool_free(pmb, phba->mbox_mem_pool); 8648 return rc; 8649 } 8650 8651 /** 8652 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8653 * @phba: pointer to lpfc hba data structure. 8654 * 8655 * This routine is invoked to setup the port-side endian order when 8656 * the port if_type is 0. This routine has no function for other 8657 * if_types. 8658 * 8659 * Return codes 8660 * 0 - successful 8661 * -ENOMEM - No available memory 8662 * -EIO - The mailbox failed to complete successfully. 8663 **/ 8664 static int 8665 lpfc_setup_endian_order(struct lpfc_hba *phba) 8666 { 8667 LPFC_MBOXQ_t *mboxq; 8668 uint32_t if_type, rc = 0; 8669 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8670 HOST_ENDIAN_HIGH_WORD1}; 8671 8672 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8673 switch (if_type) { 8674 case LPFC_SLI_INTF_IF_TYPE_0: 8675 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8676 GFP_KERNEL); 8677 if (!mboxq) { 8678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8679 "0492 Unable to allocate memory for " 8680 "issuing SLI_CONFIG_SPECIAL mailbox " 8681 "command\n"); 8682 return -ENOMEM; 8683 } 8684 8685 /* 8686 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8687 * two words to contain special data values and no other data. 8688 */ 8689 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8690 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8691 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8692 if (rc != MBX_SUCCESS) { 8693 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8694 "0493 SLI_CONFIG_SPECIAL mailbox " 8695 "failed with status x%x\n", 8696 rc); 8697 rc = -EIO; 8698 } 8699 mempool_free(mboxq, phba->mbox_mem_pool); 8700 break; 8701 case LPFC_SLI_INTF_IF_TYPE_6: 8702 case LPFC_SLI_INTF_IF_TYPE_2: 8703 case LPFC_SLI_INTF_IF_TYPE_1: 8704 default: 8705 break; 8706 } 8707 return rc; 8708 } 8709 8710 /** 8711 * lpfc_sli4_queue_verify - Verify and update EQ counts 8712 * @phba: pointer to lpfc hba data structure. 8713 * 8714 * This routine is invoked to check the user settable queue counts for EQs. 
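 * For NVMET configurations it also clamps cfg_nvmet_mrq to cfg_hdw_queue
 * and caps it at LPFC_NVMET_MRQ_MAX.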
8715 * After this routine is called the counts will be set to valid values that 8716 * adhere to the constraints of the system's interrupt vectors and the port's 8717 * queue resources. 8718 * 8719 * Return codes 8720 * 0 - successful 8721 * -ENOMEM - No available memory 8722 **/ 8723 static int 8724 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8725 { 8726 /* 8727 * Sanity check for configured queue parameters against the run-time 8728 * device parameters 8729 */ 8730 8731 if (phba->nvmet_support) { 8732 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 8733 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 8734 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8735 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8736 } 8737 8738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8739 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8740 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8741 phba->cfg_nvmet_mrq); 8742 8743 /* Get EQ depth from module parameter, fake the default for now */ 8744 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8745 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8746 8747 /* Get CQ depth from module parameter, fake the default for now */ 8748 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8749 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8750 return 0; 8751 } 8752 8753 static int 8754 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 8755 { 8756 struct lpfc_queue *qdesc; 8757 u32 wqesize; 8758 int cpu; 8759 8760 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 8761 /* Create Fast Path IO CQs */ 8762 if (phba->enab_exp_wqcq_pages) 8763 /* Increase the CQ size when WQEs contain an embedded cdb */ 8764 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8765 phba->sli4_hba.cq_esize, 8766 LPFC_CQE_EXP_COUNT, cpu); 8767 8768 else 8769 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8770 phba->sli4_hba.cq_esize, 8771 phba->sli4_hba.cq_ecount, cpu); 8772 if (!qdesc) { 8773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8774 "0499 Failed allocate fast-path IO CQ (%d)\n", idx); 8775 return 1; 8776 } 8777 qdesc->qe_valid = 1; 8778 qdesc->hdwq = idx; 8779 qdesc->chann = cpu; 8780 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 8781 8782 /* Create Fast Path IO WQs */ 8783 if (phba->enab_exp_wqcq_pages) { 8784 /* Increase the WQ size when WQEs contain an embedded cdb */ 8785 wqesize = (phba->fcp_embed_io) ? 8786 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8787 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8788 wqesize, 8789 LPFC_WQE_EXP_COUNT, cpu); 8790 } else 8791 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8792 phba->sli4_hba.wq_esize, 8793 phba->sli4_hba.wq_ecount, cpu); 8794 8795 if (!qdesc) { 8796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8797 "0503 Failed allocate fast-path IO WQ (%d)\n", 8798 idx); 8799 return 1; 8800 } 8801 qdesc->hdwq = idx; 8802 qdesc->chann = cpu; 8803 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 8804 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8805 return 0; 8806 } 8807 8808 /** 8809 * lpfc_sli4_queue_create - Create all the SLI4 queues 8810 * @phba: pointer to lpfc hba data structure. 8811 * 8812 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8813 * operation. For each SLI4 queue type, the parameters such as queue entry 8814 * count (queue depth) shall be taken from the module parameter. For now, 8815 * we just use some constant number as place holder. 8816 * 8817 * Return codes 8818 * 0 - successful 8819 * -ENOMEM - No availble memory 8820 * -EIO - The mailbox failed to complete successfully. 
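 *
 * EQs are allocated per interrupt vector rather than per CPU: the first
 * loop below only creates an EQ for CPUs flagged LPFC_CPU_FIRST_IRQ in
 * the cpu_map, and a second pass points every other hardware queue that
 * shares the same vector at the already-allocated hba_eq.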
8821 **/ 8822 int 8823 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8824 { 8825 struct lpfc_queue *qdesc; 8826 int idx, cpu, eqcpu; 8827 struct lpfc_sli4_hdw_queue *qp; 8828 struct lpfc_vector_map_info *cpup; 8829 struct lpfc_vector_map_info *eqcpup; 8830 struct lpfc_eq_intr_info *eqi; 8831 8832 /* 8833 * Create HBA Record arrays. 8834 * Both NVME and FCP will share that same vectors / EQs 8835 */ 8836 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8837 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8838 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8839 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8840 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8841 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8842 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8843 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8844 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8845 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8846 8847 if (!phba->sli4_hba.hdwq) { 8848 phba->sli4_hba.hdwq = kcalloc( 8849 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8850 GFP_KERNEL); 8851 if (!phba->sli4_hba.hdwq) { 8852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8853 "6427 Failed allocate memory for " 8854 "fast-path Hardware Queue array\n"); 8855 goto out_error; 8856 } 8857 /* Prepare hardware queues to take IO buffers */ 8858 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8859 qp = &phba->sli4_hba.hdwq[idx]; 8860 spin_lock_init(&qp->io_buf_list_get_lock); 8861 spin_lock_init(&qp->io_buf_list_put_lock); 8862 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 8863 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 8864 qp->get_io_bufs = 0; 8865 qp->put_io_bufs = 0; 8866 qp->total_io_bufs = 0; 8867 spin_lock_init(&qp->abts_io_buf_list_lock); 8868 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 8869 qp->abts_scsi_io_bufs = 0; 8870 qp->abts_nvme_io_bufs = 0; 8871 INIT_LIST_HEAD(&qp->sgl_list); 8872 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 8873 spin_lock_init(&qp->hdwq_lock); 8874 } 8875 } 8876 8877 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8878 if (phba->nvmet_support) { 8879 phba->sli4_hba.nvmet_cqset = kcalloc( 8880 phba->cfg_nvmet_mrq, 8881 sizeof(struct lpfc_queue *), 8882 GFP_KERNEL); 8883 if (!phba->sli4_hba.nvmet_cqset) { 8884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8885 "3121 Fail allocate memory for " 8886 "fast-path CQ set array\n"); 8887 goto out_error; 8888 } 8889 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8890 phba->cfg_nvmet_mrq, 8891 sizeof(struct lpfc_queue *), 8892 GFP_KERNEL); 8893 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8895 "3122 Fail allocate memory for " 8896 "fast-path RQ set hdr array\n"); 8897 goto out_error; 8898 } 8899 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8900 phba->cfg_nvmet_mrq, 8901 sizeof(struct lpfc_queue *), 8902 GFP_KERNEL); 8903 if (!phba->sli4_hba.nvmet_mrq_data) { 8904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8905 "3124 Fail allocate memory for " 8906 "fast-path RQ set data array\n"); 8907 goto out_error; 8908 } 8909 } 8910 } 8911 8912 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8913 8914 /* Create HBA Event Queues (EQs) */ 8915 for_each_present_cpu(cpu) { 8916 /* We only want to create 1 EQ per vector, even though 8917 * multiple CPUs might be using that vector. so only 8918 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
8919 */ 8920 cpup = &phba->sli4_hba.cpu_map[cpu]; 8921 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 8922 continue; 8923 8924 /* Get a ptr to the Hardware Queue associated with this CPU */ 8925 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8926 8927 /* Allocate an EQ */ 8928 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8929 phba->sli4_hba.eq_esize, 8930 phba->sli4_hba.eq_ecount, cpu); 8931 if (!qdesc) { 8932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8933 "0497 Failed allocate EQ (%d)\n", 8934 cpup->hdwq); 8935 goto out_error; 8936 } 8937 qdesc->qe_valid = 1; 8938 qdesc->hdwq = cpup->hdwq; 8939 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 8940 qdesc->last_cpu = qdesc->chann; 8941 8942 /* Save the allocated EQ in the Hardware Queue */ 8943 qp->hba_eq = qdesc; 8944 8945 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 8946 list_add(&qdesc->cpu_list, &eqi->list); 8947 } 8948 8949 /* Now we need to populate the other Hardware Queues, that share 8950 * an IRQ vector, with the associated EQ ptr. 8951 */ 8952 for_each_present_cpu(cpu) { 8953 cpup = &phba->sli4_hba.cpu_map[cpu]; 8954 8955 /* Check for EQ already allocated in previous loop */ 8956 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 8957 continue; 8958 8959 /* Check for multiple CPUs per hdwq */ 8960 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8961 if (qp->hba_eq) 8962 continue; 8963 8964 /* We need to share an EQ for this hdwq */ 8965 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 8966 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 8967 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 8968 } 8969 8970 /* Allocate IO Path SLI4 CQ/WQs */ 8971 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8972 if (lpfc_alloc_io_wq_cq(phba, idx)) 8973 goto out_error; 8974 } 8975 8976 if (phba->nvmet_support) { 8977 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8978 cpu = lpfc_find_cpu_handle(phba, idx, 8979 LPFC_FIND_BY_HDWQ); 8980 qdesc = lpfc_sli4_queue_alloc(phba, 8981 LPFC_DEFAULT_PAGE_SIZE, 8982 phba->sli4_hba.cq_esize, 8983 phba->sli4_hba.cq_ecount, 8984 cpu); 8985 if (!qdesc) { 8986 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8987 "3142 Failed allocate NVME " 8988 "CQ Set (%d)\n", idx); 8989 goto out_error; 8990 } 8991 qdesc->qe_valid = 1; 8992 qdesc->hdwq = idx; 8993 qdesc->chann = cpu; 8994 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 8995 } 8996 } 8997 8998 /* 8999 * Create Slow Path Completion Queues (CQs) 9000 */ 9001 9002 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 9003 /* Create slow-path Mailbox Command Complete Queue */ 9004 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9005 phba->sli4_hba.cq_esize, 9006 phba->sli4_hba.cq_ecount, cpu); 9007 if (!qdesc) { 9008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9009 "0500 Failed allocate slow-path mailbox CQ\n"); 9010 goto out_error; 9011 } 9012 qdesc->qe_valid = 1; 9013 phba->sli4_hba.mbx_cq = qdesc; 9014 9015 /* Create slow-path ELS Complete Queue */ 9016 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9017 phba->sli4_hba.cq_esize, 9018 phba->sli4_hba.cq_ecount, cpu); 9019 if (!qdesc) { 9020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9021 "0501 Failed allocate slow-path ELS CQ\n"); 9022 goto out_error; 9023 } 9024 qdesc->qe_valid = 1; 9025 qdesc->chann = cpu; 9026 phba->sli4_hba.els_cq = qdesc; 9027 9028 9029 /* 9030 * Create Slow Path Work Queues (WQs) 9031 */ 9032 9033 /* Create Mailbox Command Queue */ 9034 9035 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9036 phba->sli4_hba.mq_esize, 9037 phba->sli4_hba.mq_ecount, 
cpu); 9038 if (!qdesc) { 9039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9040 "0505 Failed allocate slow-path MQ\n"); 9041 goto out_error; 9042 } 9043 qdesc->chann = cpu; 9044 phba->sli4_hba.mbx_wq = qdesc; 9045 9046 /* 9047 * Create ELS Work Queues 9048 */ 9049 9050 /* Create slow-path ELS Work Queue */ 9051 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9052 phba->sli4_hba.wq_esize, 9053 phba->sli4_hba.wq_ecount, cpu); 9054 if (!qdesc) { 9055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9056 "0504 Failed allocate slow-path ELS WQ\n"); 9057 goto out_error; 9058 } 9059 qdesc->chann = cpu; 9060 phba->sli4_hba.els_wq = qdesc; 9061 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9062 9063 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9064 /* Create NVME LS Complete Queue */ 9065 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9066 phba->sli4_hba.cq_esize, 9067 phba->sli4_hba.cq_ecount, cpu); 9068 if (!qdesc) { 9069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9070 "6079 Failed allocate NVME LS CQ\n"); 9071 goto out_error; 9072 } 9073 qdesc->chann = cpu; 9074 qdesc->qe_valid = 1; 9075 phba->sli4_hba.nvmels_cq = qdesc; 9076 9077 /* Create NVME LS Work Queue */ 9078 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9079 phba->sli4_hba.wq_esize, 9080 phba->sli4_hba.wq_ecount, cpu); 9081 if (!qdesc) { 9082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9083 "6080 Failed allocate NVME LS WQ\n"); 9084 goto out_error; 9085 } 9086 qdesc->chann = cpu; 9087 phba->sli4_hba.nvmels_wq = qdesc; 9088 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9089 } 9090 9091 /* 9092 * Create Receive Queue (RQ) 9093 */ 9094 9095 /* Create Receive Queue for header */ 9096 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9097 phba->sli4_hba.rq_esize, 9098 phba->sli4_hba.rq_ecount, cpu); 9099 if (!qdesc) { 9100 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9101 "0506 Failed allocate receive HRQ\n"); 9102 goto out_error; 9103 } 9104 phba->sli4_hba.hdr_rq = qdesc; 9105 9106 /* Create Receive Queue for data */ 9107 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9108 phba->sli4_hba.rq_esize, 9109 phba->sli4_hba.rq_ecount, cpu); 9110 if (!qdesc) { 9111 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9112 "0507 Failed allocate receive DRQ\n"); 9113 goto out_error; 9114 } 9115 phba->sli4_hba.dat_rq = qdesc; 9116 9117 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9118 phba->nvmet_support) { 9119 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9120 cpu = lpfc_find_cpu_handle(phba, idx, 9121 LPFC_FIND_BY_HDWQ); 9122 /* Create NVMET Receive Queue for header */ 9123 qdesc = lpfc_sli4_queue_alloc(phba, 9124 LPFC_DEFAULT_PAGE_SIZE, 9125 phba->sli4_hba.rq_esize, 9126 LPFC_NVMET_RQE_DEF_COUNT, 9127 cpu); 9128 if (!qdesc) { 9129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9130 "3146 Failed allocate " 9131 "receive HRQ\n"); 9132 goto out_error; 9133 } 9134 qdesc->hdwq = idx; 9135 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9136 9137 /* Only needed for header of RQ pair */ 9138 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 9139 GFP_KERNEL, 9140 cpu_to_node(cpu)); 9141 if (qdesc->rqbp == NULL) { 9142 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9143 "6131 Failed allocate " 9144 "Header RQBP\n"); 9145 goto out_error; 9146 } 9147 9148 /* Put list in known state in case driver load fails. 
*/ 9149 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9150 9151 /* Create NVMET Receive Queue for data */ 9152 qdesc = lpfc_sli4_queue_alloc(phba, 9153 LPFC_DEFAULT_PAGE_SIZE, 9154 phba->sli4_hba.rq_esize, 9155 LPFC_NVMET_RQE_DEF_COUNT, 9156 cpu); 9157 if (!qdesc) { 9158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9159 "3156 Failed allocate " 9160 "receive DRQ\n"); 9161 goto out_error; 9162 } 9163 qdesc->hdwq = idx; 9164 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9165 } 9166 } 9167 9168 /* Clear NVME stats */ 9169 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9170 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9171 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9172 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9173 } 9174 } 9175 9176 /* Clear SCSI stats */ 9177 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9178 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9179 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9180 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9181 } 9182 } 9183 9184 return 0; 9185 9186 out_error: 9187 lpfc_sli4_queue_destroy(phba); 9188 return -ENOMEM; 9189 } 9190 9191 static inline void 9192 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9193 { 9194 if (*qp != NULL) { 9195 lpfc_sli4_queue_free(*qp); 9196 *qp = NULL; 9197 } 9198 } 9199 9200 static inline void 9201 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9202 { 9203 int idx; 9204 9205 if (*qs == NULL) 9206 return; 9207 9208 for (idx = 0; idx < max; idx++) 9209 __lpfc_sli4_release_queue(&(*qs)[idx]); 9210 9211 kfree(*qs); 9212 *qs = NULL; 9213 } 9214 9215 static inline void 9216 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9217 { 9218 struct lpfc_sli4_hdw_queue *hdwq; 9219 struct lpfc_queue *eq; 9220 uint32_t idx; 9221 9222 hdwq = phba->sli4_hba.hdwq; 9223 9224 /* Loop thru all Hardware Queues */ 9225 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9226 /* Free the CQ/WQ corresponding to the Hardware Queue */ 9227 lpfc_sli4_queue_free(hdwq[idx].io_cq); 9228 lpfc_sli4_queue_free(hdwq[idx].io_wq); 9229 hdwq[idx].hba_eq = NULL; 9230 hdwq[idx].io_cq = NULL; 9231 hdwq[idx].io_wq = NULL; 9232 if (phba->cfg_xpsgl && !phba->nvmet_support) 9233 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 9234 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 9235 } 9236 /* Loop thru all IRQ vectors */ 9237 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 9238 /* Free the EQ corresponding to the IRQ vector */ 9239 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 9240 lpfc_sli4_queue_free(eq); 9241 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 9242 } 9243 } 9244 9245 /** 9246 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9247 * @phba: pointer to lpfc hba data structure. 9248 * 9249 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9250 * operation. 9251 * 9252 * Return codes 9253 * 0 - successful 9254 * -ENOMEM - No available memory 9255 * -EIO - The mailbox failed to complete successfully. 9256 **/ 9257 void 9258 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9259 { 9260 /* 9261 * Set FREE_INIT before beginning to free the queues. 9262 * Wait until the users of queues to acknowledge to 9263 * release queues by clearing FREE_WAIT. 
9264 */ 9265 spin_lock_irq(&phba->hbalock); 9266 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 9267 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 9268 spin_unlock_irq(&phba->hbalock); 9269 msleep(20); 9270 spin_lock_irq(&phba->hbalock); 9271 } 9272 spin_unlock_irq(&phba->hbalock); 9273 9274 lpfc_sli4_cleanup_poll_list(phba); 9275 9276 /* Release HBA eqs */ 9277 if (phba->sli4_hba.hdwq) 9278 lpfc_sli4_release_hdwq(phba); 9279 9280 if (phba->nvmet_support) { 9281 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 9282 phba->cfg_nvmet_mrq); 9283 9284 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 9285 phba->cfg_nvmet_mrq); 9286 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 9287 phba->cfg_nvmet_mrq); 9288 } 9289 9290 /* Release mailbox command work queue */ 9291 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 9292 9293 /* Release ELS work queue */ 9294 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 9295 9296 /* Release NVME LS work queue */ 9297 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 9298 9299 /* Release unsolicited receive queue */ 9300 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 9301 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 9302 9303 /* Release ELS complete queue */ 9304 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 9305 9306 /* Release NVME LS complete queue */ 9307 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 9308 9309 /* Release mailbox command complete queue */ 9310 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 9311 9312 /* Everything on this list has been freed */ 9313 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9314 9315 /* Done with freeing the queues */ 9316 spin_lock_irq(&phba->hbalock); 9317 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 9318 spin_unlock_irq(&phba->hbalock); 9319 } 9320 9321 int 9322 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 9323 { 9324 struct lpfc_rqb *rqbp; 9325 struct lpfc_dmabuf *h_buf; 9326 struct rqb_dmabuf *rqb_buffer; 9327 9328 rqbp = rq->rqbp; 9329 while (!list_empty(&rqbp->rqb_buffer_list)) { 9330 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 9331 struct lpfc_dmabuf, list); 9332 9333 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 9334 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 9335 rqbp->buffer_count--; 9336 } 9337 return 1; 9338 } 9339 9340 static int 9341 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 9342 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 9343 int qidx, uint32_t qtype) 9344 { 9345 struct lpfc_sli_ring *pring; 9346 int rc; 9347 9348 if (!eq || !cq || !wq) { 9349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9350 "6085 Fast-path %s (%d) not allocated\n", 9351 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9352 return -ENOMEM; 9353 } 9354 9355 /* create the CQ first */ 9356 rc = lpfc_cq_create(phba, cq, eq, 9357 (qtype == LPFC_MBOX) ?
LPFC_MCQ : LPFC_WCQ, qtype); 9358 if (rc) { 9359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9360 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9361 qidx, (uint32_t)rc); 9362 return rc; 9363 } 9364 9365 if (qtype != LPFC_MBOX) { 9366 /* Setup cq_map for fast lookup */ 9367 if (cq_map) 9368 *cq_map = cq->queue_id; 9369 9370 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9371 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9372 qidx, cq->queue_id, qidx, eq->queue_id); 9373 9374 /* create the wq */ 9375 rc = lpfc_wq_create(phba, wq, cq, qtype); 9376 if (rc) { 9377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9378 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9379 qidx, (uint32_t)rc); 9380 /* no need to tear down cq - caller will do so */ 9381 return rc; 9382 } 9383 9384 /* Bind this CQ/WQ to the NVME ring */ 9385 pring = wq->pring; 9386 pring->sli.sli4.wqp = (void *)wq; 9387 cq->pring = pring; 9388 9389 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9390 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9391 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9392 } else { 9393 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9394 if (rc) { 9395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9396 "0539 Failed setup of slow-path MQ: " 9397 "rc = 0x%x\n", rc); 9398 /* no need to tear down cq - caller will do so */ 9399 return rc; 9400 } 9401 9402 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9403 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9404 phba->sli4_hba.mbx_wq->queue_id, 9405 phba->sli4_hba.mbx_cq->queue_id); 9406 } 9407 9408 return 0; 9409 } 9410 9411 /** 9412 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9413 * @phba: pointer to lpfc hba data structure. 9414 * 9415 * This routine will populate the cq_lookup table by all 9416 * available CQ queue_id's. 9417 **/ 9418 static void 9419 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9420 { 9421 struct lpfc_queue *eq, *childq; 9422 int qidx; 9423 9424 memset(phba->sli4_hba.cq_lookup, 0, 9425 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9426 /* Loop thru all IRQ vectors */ 9427 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9428 /* Get the EQ corresponding to the IRQ vector */ 9429 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9430 if (!eq) 9431 continue; 9432 /* Loop through all CQs associated with that EQ */ 9433 list_for_each_entry(childq, &eq->child_list, list) { 9434 if (childq->queue_id > phba->sli4_hba.cq_max) 9435 continue; 9436 if (childq->subtype == LPFC_IO) 9437 phba->sli4_hba.cq_lookup[childq->queue_id] = 9438 childq; 9439 } 9440 } 9441 } 9442 9443 /** 9444 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9445 * @phba: pointer to lpfc hba data structure. 9446 * 9447 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9448 * operation. 9449 * 9450 * Return codes 9451 * 0 - successful 9452 * -ENOMEM - No available memory 9453 * -EIO - The mailbox failed to complete successfully. 
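 *
 * Setup proceeds in the same order the queues were allocated: the EQs are
 * created first (one per IRQ vector), then the per-hardware-queue IO CQ/WQ
 * pairs, then the slow-path mailbox, ELS and NVME LS CQ/WQ pairs (and the
 * NVMET CQ sets/MRQs when configured), and finally the receive queues; any
 * failure unwinds through lpfc_sli4_queue_unset().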
9454 **/ 9455 int 9456 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9457 { 9458 uint32_t shdr_status, shdr_add_status; 9459 union lpfc_sli4_cfg_shdr *shdr; 9460 struct lpfc_vector_map_info *cpup; 9461 struct lpfc_sli4_hdw_queue *qp; 9462 LPFC_MBOXQ_t *mboxq; 9463 int qidx, cpu; 9464 uint32_t length, usdelay; 9465 int rc = -ENOMEM; 9466 9467 /* Check for dual-ULP support */ 9468 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9469 if (!mboxq) { 9470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9471 "3249 Unable to allocate memory for " 9472 "QUERY_FW_CFG mailbox command\n"); 9473 return -ENOMEM; 9474 } 9475 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9476 sizeof(struct lpfc_sli4_cfg_mhdr)); 9477 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9478 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9479 length, LPFC_SLI4_MBX_EMBED); 9480 9481 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9482 9483 shdr = (union lpfc_sli4_cfg_shdr *) 9484 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9485 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9486 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9487 if (shdr_status || shdr_add_status || rc) { 9488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9489 "3250 QUERY_FW_CFG mailbox failed with status " 9490 "x%x add_status x%x, mbx status x%x\n", 9491 shdr_status, shdr_add_status, rc); 9492 if (rc != MBX_TIMEOUT) 9493 mempool_free(mboxq, phba->mbox_mem_pool); 9494 rc = -ENXIO; 9495 goto out_error; 9496 } 9497 9498 phba->sli4_hba.fw_func_mode = 9499 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9500 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9501 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9502 phba->sli4_hba.physical_port = 9503 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9504 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9505 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9506 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9507 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9508 9509 if (rc != MBX_TIMEOUT) 9510 mempool_free(mboxq, phba->mbox_mem_pool); 9511 9512 /* 9513 * Set up HBA Event Queues (EQs) 9514 */ 9515 qp = phba->sli4_hba.hdwq; 9516 9517 /* Set up HBA event queue */ 9518 if (!qp) { 9519 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9520 "3147 Fast-path EQs not allocated\n"); 9521 rc = -ENOMEM; 9522 goto out_error; 9523 } 9524 9525 /* Loop thru all IRQ vectors */ 9526 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9527 /* Create HBA Event Queues (EQs) in order */ 9528 for_each_present_cpu(cpu) { 9529 cpup = &phba->sli4_hba.cpu_map[cpu]; 9530 9531 /* Look for the CPU thats using that vector with 9532 * LPFC_CPU_FIRST_IRQ set. 
9533 */ 9534 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9535 continue; 9536 if (qidx != cpup->eq) 9537 continue; 9538 9539 /* Create an EQ for that vector */ 9540 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 9541 phba->cfg_fcp_imax); 9542 if (rc) { 9543 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9544 "0523 Failed setup of fast-path" 9545 " EQ (%d), rc = 0x%x\n", 9546 cpup->eq, (uint32_t)rc); 9547 goto out_destroy; 9548 } 9549 9550 /* Save the EQ for that vector in the hba_eq_hdl */ 9551 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 9552 qp[cpup->hdwq].hba_eq; 9553 9554 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9555 "2584 HBA EQ setup: queue[%d]-id=%d\n", 9556 cpup->eq, 9557 qp[cpup->hdwq].hba_eq->queue_id); 9558 } 9559 } 9560 9561 /* Loop thru all Hardware Queues */ 9562 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9563 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 9564 cpup = &phba->sli4_hba.cpu_map[cpu]; 9565 9566 /* Create the CQ/WQ corresponding to the Hardware Queue */ 9567 rc = lpfc_create_wq_cq(phba, 9568 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 9569 qp[qidx].io_cq, 9570 qp[qidx].io_wq, 9571 &phba->sli4_hba.hdwq[qidx].io_cq_map, 9572 qidx, 9573 LPFC_IO); 9574 if (rc) { 9575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9576 "0535 Failed to setup fastpath " 9577 "IO WQ/CQ (%d), rc = 0x%x\n", 9578 qidx, (uint32_t)rc); 9579 goto out_destroy; 9580 } 9581 } 9582 9583 /* 9584 * Set up Slow Path Complete Queues (CQs) 9585 */ 9586 9587 /* Set up slow-path MBOX CQ/MQ */ 9588 9589 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9591 "0528 %s not allocated\n", 9592 phba->sli4_hba.mbx_cq ? 9593 "Mailbox WQ" : "Mailbox CQ"); 9594 rc = -ENOMEM; 9595 goto out_destroy; 9596 } 9597 9598 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9599 phba->sli4_hba.mbx_cq, 9600 phba->sli4_hba.mbx_wq, 9601 NULL, 0, LPFC_MBOX); 9602 if (rc) { 9603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9604 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9605 (uint32_t)rc); 9606 goto out_destroy; 9607 } 9608 if (phba->nvmet_support) { 9609 if (!phba->sli4_hba.nvmet_cqset) { 9610 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9611 "3165 Fast-path NVME CQ Set " 9612 "array not allocated\n"); 9613 rc = -ENOMEM; 9614 goto out_destroy; 9615 } 9616 if (phba->cfg_nvmet_mrq > 1) { 9617 rc = lpfc_cq_create_set(phba, 9618 phba->sli4_hba.nvmet_cqset, 9619 qp, 9620 LPFC_WCQ, LPFC_NVMET); 9621 if (rc) { 9622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9623 "3164 Failed setup of NVME CQ " 9624 "Set, rc = 0x%x\n", 9625 (uint32_t)rc); 9626 goto out_destroy; 9627 } 9628 } else { 9629 /* Set up NVMET Receive Complete Queue */ 9630 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9631 qp[0].hba_eq, 9632 LPFC_WCQ, LPFC_NVMET); 9633 if (rc) { 9634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9635 "6089 Failed setup NVMET CQ: " 9636 "rc = 0x%x\n", (uint32_t)rc); 9637 goto out_destroy; 9638 } 9639 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9640 9641 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9642 "6090 NVMET CQ setup: cq-id=%d, " 9643 "parent eq-id=%d\n", 9644 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9645 qp[0].hba_eq->queue_id); 9646 } 9647 } 9648 9649 /* Set up slow-path ELS WQ/CQ */ 9650 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9651 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9652 "0530 ELS %s not allocated\n", 9653 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 9654 rc = -ENOMEM; 9655 goto out_destroy; 9656 } 9657 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9658 phba->sli4_hba.els_cq, 9659 phba->sli4_hba.els_wq, 9660 NULL, 0, LPFC_ELS); 9661 if (rc) { 9662 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9663 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9664 (uint32_t)rc); 9665 goto out_destroy; 9666 } 9667 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9668 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9669 phba->sli4_hba.els_wq->queue_id, 9670 phba->sli4_hba.els_cq->queue_id); 9671 9672 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9673 /* Set up NVME LS Complete Queue */ 9674 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9676 "6091 LS %s not allocated\n", 9677 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 9678 rc = -ENOMEM; 9679 goto out_destroy; 9680 } 9681 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9682 phba->sli4_hba.nvmels_cq, 9683 phba->sli4_hba.nvmels_wq, 9684 NULL, 0, LPFC_NVME_LS); 9685 if (rc) { 9686 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9687 "0526 Failed setup of NVVME LS WQ/CQ: " 9688 "rc = 0x%x\n", (uint32_t)rc); 9689 goto out_destroy; 9690 } 9691 9692 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9693 "6096 ELS WQ setup: wq-id=%d, " 9694 "parent cq-id=%d\n", 9695 phba->sli4_hba.nvmels_wq->queue_id, 9696 phba->sli4_hba.nvmels_cq->queue_id); 9697 } 9698 9699 /* 9700 * Create NVMET Receive Queue (RQ) 9701 */ 9702 if (phba->nvmet_support) { 9703 if ((!phba->sli4_hba.nvmet_cqset) || 9704 (!phba->sli4_hba.nvmet_mrq_hdr) || 9705 (!phba->sli4_hba.nvmet_mrq_data)) { 9706 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9707 "6130 MRQ CQ Queues not " 9708 "allocated\n"); 9709 rc = -ENOMEM; 9710 goto out_destroy; 9711 } 9712 if (phba->cfg_nvmet_mrq > 1) { 9713 rc = lpfc_mrq_create(phba, 9714 phba->sli4_hba.nvmet_mrq_hdr, 9715 phba->sli4_hba.nvmet_mrq_data, 9716 phba->sli4_hba.nvmet_cqset, 9717 LPFC_NVMET); 9718 if (rc) { 9719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9720 "6098 Failed setup of NVMET " 9721 "MRQ: rc = 0x%x\n", 9722 (uint32_t)rc); 9723 goto out_destroy; 9724 } 9725 9726 } else { 9727 rc = lpfc_rq_create(phba, 9728 phba->sli4_hba.nvmet_mrq_hdr[0], 9729 phba->sli4_hba.nvmet_mrq_data[0], 9730 phba->sli4_hba.nvmet_cqset[0], 9731 LPFC_NVMET); 9732 if (rc) { 9733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9734 "6057 Failed setup of NVMET " 9735 "Receive Queue: rc = 0x%x\n", 9736 (uint32_t)rc); 9737 goto out_destroy; 9738 } 9739 9740 lpfc_printf_log( 9741 phba, KERN_INFO, LOG_INIT, 9742 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9743 "dat-rq-id=%d parent cq-id=%d\n", 9744 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9745 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9746 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9747 9748 } 9749 } 9750 9751 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9753 "0540 Receive Queue not allocated\n"); 9754 rc = -ENOMEM; 9755 goto out_destroy; 9756 } 9757 9758 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9759 phba->sli4_hba.els_cq, LPFC_USOL); 9760 if (rc) { 9761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9762 "0541 Failed setup of Receive Queue: " 9763 "rc = 0x%x\n", (uint32_t)rc); 9764 goto out_destroy; 9765 } 9766 9767 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9768 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9769 "parent cq-id=%d\n", 9770 phba->sli4_hba.hdr_rq->queue_id, 9771 phba->sli4_hba.dat_rq->queue_id, 9772 phba->sli4_hba.els_cq->queue_id); 9773 9774 if 
(phba->cfg_fcp_imax) 9775 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 9776 else 9777 usdelay = 0; 9778 9779 for (qidx = 0; qidx < phba->cfg_irq_chann; 9780 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9781 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9782 usdelay); 9783 9784 if (phba->sli4_hba.cq_max) { 9785 kfree(phba->sli4_hba.cq_lookup); 9786 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9787 sizeof(struct lpfc_queue *), GFP_KERNEL); 9788 if (!phba->sli4_hba.cq_lookup) { 9789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9790 "0549 Failed setup of CQ Lookup table: " 9791 "size 0x%x\n", phba->sli4_hba.cq_max); 9792 rc = -ENOMEM; 9793 goto out_destroy; 9794 } 9795 lpfc_setup_cq_lookup(phba); 9796 } 9797 return 0; 9798 9799 out_destroy: 9800 lpfc_sli4_queue_unset(phba); 9801 out_error: 9802 return rc; 9803 } 9804 9805 /** 9806 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9807 * @phba: pointer to lpfc hba data structure. 9808 * 9809 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 9810 * operation. 9811 * 9812 * Return codes 9813 * 0 - successful 9814 * -ENOMEM - No available memory 9815 * -EIO - The mailbox failed to complete successfully. 9816 **/ 9817 void 9818 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9819 { 9820 struct lpfc_sli4_hdw_queue *qp; 9821 struct lpfc_queue *eq; 9822 int qidx; 9823 9824 /* Unset mailbox command work queue */ 9825 if (phba->sli4_hba.mbx_wq) 9826 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9827 9828 /* Unset NVME LS work queue */ 9829 if (phba->sli4_hba.nvmels_wq) 9830 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9831 9832 /* Unset ELS work queue */ 9833 if (phba->sli4_hba.els_wq) 9834 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9835 9836 /* Unset unsolicited receive queue */ 9837 if (phba->sli4_hba.hdr_rq) 9838 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9839 phba->sli4_hba.dat_rq); 9840 9841 /* Unset mailbox command complete queue */ 9842 if (phba->sli4_hba.mbx_cq) 9843 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9844 9845 /* Unset ELS complete queue */ 9846 if (phba->sli4_hba.els_cq) 9847 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9848 9849 /* Unset NVME LS complete queue */ 9850 if (phba->sli4_hba.nvmels_cq) 9851 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9852 9853 if (phba->nvmet_support) { 9854 /* Unset NVMET MRQ queue */ 9855 if (phba->sli4_hba.nvmet_mrq_hdr) { 9856 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9857 lpfc_rq_destroy( 9858 phba, 9859 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9860 phba->sli4_hba.nvmet_mrq_data[qidx]); 9861 } 9862 9863 /* Unset NVMET CQ Set complete queue */ 9864 if (phba->sli4_hba.nvmet_cqset) { 9865 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9866 lpfc_cq_destroy( 9867 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9868 } 9869 } 9870 9871 /* Unset fast-path SLI4 queues */ 9872 if (phba->sli4_hba.hdwq) { 9873 /* Loop thru all Hardware Queues */ 9874 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9875 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 9876 qp = &phba->sli4_hba.hdwq[qidx]; 9877 lpfc_wq_destroy(phba, qp->io_wq); 9878 lpfc_cq_destroy(phba, qp->io_cq); 9879 } 9880 /* Loop thru all IRQ vectors */ 9881 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9882 /* Destroy the EQ corresponding to the IRQ vector */ 9883 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9884 lpfc_eq_destroy(phba, eq); 9885 } 9886 } 9887 9888 kfree(phba->sli4_hba.cq_lookup); 9889 phba->sli4_hba.cq_lookup = NULL; 9890 phba->sli4_hba.cq_max = 0; 9891 } 
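/*
 * Sketch of how the queue routines above are typically paired (hypothetical
 * caller; the actual call sites live elsewhere in the driver):
 *
 *	rc = lpfc_sli4_queue_create(phba);	allocate queue memory
 *	if (!rc)
 *		rc = lpfc_sli4_queue_setup(phba);	post the queues to the port
 *	...
 *	on teardown or error:
 *	lpfc_sli4_queue_unset(phba);		destroy the port-side queues
 *	lpfc_sli4_queue_destroy(phba);		free the queue memory
 */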
9892 9893 /** 9894 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9895 * @phba: pointer to lpfc hba data structure. 9896 * 9897 * This routine is invoked to allocate and set up a pool of completion queue 9898 * events. The body of the completion queue event is a completion queue entry 9899 * CQE. For now, this pool is used for the interrupt service routine to queue 9900 * the following HBA completion queue events for the worker thread to process: 9901 * - Mailbox asynchronous events 9902 * - Receive queue completion unsolicited events 9903 * Later, this can be used for all the slow-path events. 9904 * 9905 * Return codes 9906 * 0 - successful 9907 * -ENOMEM - No available memory 9908 **/ 9909 static int 9910 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9911 { 9912 struct lpfc_cq_event *cq_event; 9913 int i; 9914 9915 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9916 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9917 if (!cq_event) 9918 goto out_pool_create_fail; 9919 list_add_tail(&cq_event->list, 9920 &phba->sli4_hba.sp_cqe_event_pool); 9921 } 9922 return 0; 9923 9924 out_pool_create_fail: 9925 lpfc_sli4_cq_event_pool_destroy(phba); 9926 return -ENOMEM; 9927 } 9928 9929 /** 9930 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9931 * @phba: pointer to lpfc hba data structure. 9932 * 9933 * This routine is invoked to free the pool of completion queue events at 9934 * driver unload time. Note that, it is the responsibility of the driver 9935 * cleanup routine to free all the outstanding completion-queue events 9936 * allocated from this pool back into the pool before invoking this routine 9937 * to destroy the pool. 9938 **/ 9939 static void 9940 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9941 { 9942 struct lpfc_cq_event *cq_event, *next_cq_event; 9943 9944 list_for_each_entry_safe(cq_event, next_cq_event, 9945 &phba->sli4_hba.sp_cqe_event_pool, list) { 9946 list_del(&cq_event->list); 9947 kfree(cq_event); 9948 } 9949 } 9950 9951 /** 9952 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9953 * @phba: pointer to lpfc hba data structure. 9954 * 9955 * This routine is the lock free version of the API invoked to allocate a 9956 * completion-queue event from the free pool. 9957 * 9958 * Return: Pointer to the newly allocated completion-queue event if successful 9959 * NULL otherwise. 9960 **/ 9961 struct lpfc_cq_event * 9962 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9963 { 9964 struct lpfc_cq_event *cq_event = NULL; 9965 9966 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 9967 struct lpfc_cq_event, list); 9968 return cq_event; 9969 } 9970 9971 /** 9972 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9973 * @phba: pointer to lpfc hba data structure. 9974 * 9975 * This routine is the lock version of the API invoked to allocate a 9976 * completion-queue event from the free pool. 9977 * 9978 * Return: Pointer to the newly allocated completion-queue event if successful 9979 * NULL otherwise. 
9980 **/ 9981 struct lpfc_cq_event * 9982 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9983 { 9984 struct lpfc_cq_event *cq_event; 9985 unsigned long iflags; 9986 9987 spin_lock_irqsave(&phba->hbalock, iflags); 9988 cq_event = __lpfc_sli4_cq_event_alloc(phba); 9989 spin_unlock_irqrestore(&phba->hbalock, iflags); 9990 return cq_event; 9991 } 9992 9993 /** 9994 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 9995 * @phba: pointer to lpfc hba data structure. 9996 * @cq_event: pointer to the completion queue event to be freed. 9997 * 9998 * This routine is the lock free version of the API invoked to release a 9999 * completion-queue event back into the free pool. 10000 **/ 10001 void 10002 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 10003 struct lpfc_cq_event *cq_event) 10004 { 10005 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 10006 } 10007 10008 /** 10009 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 10010 * @phba: pointer to lpfc hba data structure. 10011 * @cq_event: pointer to the completion queue event to be freed. 10012 * 10013 * This routine is the lock version of the API invoked to release a 10014 * completion-queue event back into the free pool. 10015 **/ 10016 void 10017 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 10018 struct lpfc_cq_event *cq_event) 10019 { 10020 unsigned long iflags; 10021 spin_lock_irqsave(&phba->hbalock, iflags); 10022 __lpfc_sli4_cq_event_release(phba, cq_event); 10023 spin_unlock_irqrestore(&phba->hbalock, iflags); 10024 } 10025 10026 /** 10027 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 10028 * @phba: pointer to lpfc hba data structure. 10029 * 10030 * This routine frees all the pending completion-queue events back 10031 * into the free pool for device reset. 10032 **/ 10033 static void 10034 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 10035 { 10036 LIST_HEAD(cqelist); 10037 struct lpfc_cq_event *cqe; 10038 unsigned long iflags; 10039 10040 /* Retrieve all the pending WCQEs from pending WCQE lists */ 10041 spin_lock_irqsave(&phba->hbalock, iflags); 10042 /* Pending FCP XRI abort events */ 10043 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 10044 &cqelist); 10045 /* Pending ELS XRI abort events */ 10046 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 10047 &cqelist); 10048 /* Pending async events */ 10049 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 10050 &cqelist); 10051 spin_unlock_irqrestore(&phba->hbalock, iflags); 10052 10053 while (!list_empty(&cqelist)) { 10054 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 10055 lpfc_sli4_cq_event_release(phba, cqe); 10056 } 10057 } 10058 10059 /** 10060 * lpfc_pci_function_reset - Reset pci function. 10061 * @phba: pointer to lpfc hba data structure. 10062 * 10063 * This routine is invoked to request a PCI function reset. It will destroy 10064 * all resources assigned to the PCI function which originates this request. 10065 * 10066 * Return codes 10067 * 0 - successful 10068 * -ENOMEM - No available memory 10069 * -EIO - The mailbox failed to complete successfully.
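 *
 * For if_type 0 the reset is requested with a FUNCTION_RESET mailbox
 * command; for if_type 2 and 6 the routine instead writes INIT_PORT to the
 * control register and polls the port status register for RDY for up to
 * 30 seconds before treating the port as failed.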
10070 **/ 10071 int 10072 lpfc_pci_function_reset(struct lpfc_hba *phba) 10073 { 10074 LPFC_MBOXQ_t *mboxq; 10075 uint32_t rc = 0, if_type; 10076 uint32_t shdr_status, shdr_add_status; 10077 uint32_t rdy_chk; 10078 uint32_t port_reset = 0; 10079 union lpfc_sli4_cfg_shdr *shdr; 10080 struct lpfc_register reg_data; 10081 uint16_t devid; 10082 10083 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10084 switch (if_type) { 10085 case LPFC_SLI_INTF_IF_TYPE_0: 10086 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10087 GFP_KERNEL); 10088 if (!mboxq) { 10089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10090 "0494 Unable to allocate memory for " 10091 "issuing SLI_FUNCTION_RESET mailbox " 10092 "command\n"); 10093 return -ENOMEM; 10094 } 10095 10096 /* Setup PCI function reset mailbox-ioctl command */ 10097 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10098 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 10099 LPFC_SLI4_MBX_EMBED); 10100 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10101 shdr = (union lpfc_sli4_cfg_shdr *) 10102 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10103 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10104 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 10105 &shdr->response); 10106 if (rc != MBX_TIMEOUT) 10107 mempool_free(mboxq, phba->mbox_mem_pool); 10108 if (shdr_status || shdr_add_status || rc) { 10109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10110 "0495 SLI_FUNCTION_RESET mailbox " 10111 "failed with status x%x add_status x%x," 10112 " mbx status x%x\n", 10113 shdr_status, shdr_add_status, rc); 10114 rc = -ENXIO; 10115 } 10116 break; 10117 case LPFC_SLI_INTF_IF_TYPE_2: 10118 case LPFC_SLI_INTF_IF_TYPE_6: 10119 wait: 10120 /* 10121 * Poll the Port Status Register and wait for RDY for 10122 * up to 30 seconds. If the port doesn't respond, treat 10123 * it as an error. 10124 */ 10125 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 10126 if (lpfc_readl(phba->sli4_hba.u.if_type2. 10127 STATUSregaddr, ®_data.word0)) { 10128 rc = -ENODEV; 10129 goto out; 10130 } 10131 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 10132 break; 10133 msleep(20); 10134 } 10135 10136 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 10137 phba->work_status[0] = readl( 10138 phba->sli4_hba.u.if_type2.ERR1regaddr); 10139 phba->work_status[1] = readl( 10140 phba->sli4_hba.u.if_type2.ERR2regaddr); 10141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10142 "2890 Port not ready, port status reg " 10143 "0x%x error 1=0x%x, error 2=0x%x\n", 10144 reg_data.word0, 10145 phba->work_status[0], 10146 phba->work_status[1]); 10147 rc = -ENODEV; 10148 goto out; 10149 } 10150 10151 if (!port_reset) { 10152 /* 10153 * Reset the port now 10154 */ 10155 reg_data.word0 = 0; 10156 bf_set(lpfc_sliport_ctrl_end, ®_data, 10157 LPFC_SLIPORT_LITTLE_ENDIAN); 10158 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10159 LPFC_SLIPORT_INIT_PORT); 10160 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10161 CTRLregaddr); 10162 /* flush */ 10163 pci_read_config_word(phba->pcidev, 10164 PCI_DEVICE_ID, &devid); 10165 10166 port_reset = 1; 10167 msleep(20); 10168 goto wait; 10169 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10170 rc = -ENODEV; 10171 goto out; 10172 } 10173 break; 10174 10175 case LPFC_SLI_INTF_IF_TYPE_1: 10176 default: 10177 break; 10178 } 10179 10180 out: 10181 /* Catch the not-ready port failure after a port reset. 
*/ 10182 if (rc) { 10183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10184 "3317 HBA not functional: IP Reset Failed " 10185 "try: echo fw_reset > board_mode\n"); 10186 rc = -ENODEV; 10187 } 10188 10189 return rc; 10190 } 10191 10192 /** 10193 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10194 * @phba: pointer to lpfc hba data structure. 10195 * 10196 * This routine is invoked to set up the PCI device memory space for device 10197 * with SLI-4 interface spec. 10198 * 10199 * Return codes 10200 * 0 - successful 10201 * other values - error 10202 **/ 10203 static int 10204 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10205 { 10206 struct pci_dev *pdev = phba->pcidev; 10207 unsigned long bar0map_len, bar1map_len, bar2map_len; 10208 int error; 10209 uint32_t if_type; 10210 10211 if (!pdev) 10212 return -ENODEV; 10213 10214 /* Set the device DMA mask size */ 10215 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10216 if (error) 10217 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10218 if (error) 10219 return error; 10220 10221 /* 10222 * The BARs and register set definitions and offset locations are 10223 * dependent on the if_type. 10224 */ 10225 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10226 &phba->sli4_hba.sli_intf.word0)) { 10227 return -ENODEV; 10228 } 10229 10230 /* There is no SLI3 failback for SLI4 devices. */ 10231 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10232 LPFC_SLI_INTF_VALID) { 10233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10234 "2894 SLI_INTF reg contents invalid " 10235 "sli_intf reg 0x%x\n", 10236 phba->sli4_hba.sli_intf.word0); 10237 return -ENODEV; 10238 } 10239 10240 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10241 /* 10242 * Get the bus address of SLI4 device Bar regions and the 10243 * number of bytes required by each mapping. The mapping of the 10244 * particular PCI BARs regions is dependent on the type of 10245 * SLI4 device. 
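 * As handled below, PCI_64BIT_BAR0 maps the SLI4 config registers; for
 * if_type 0, PCI_64BIT_BAR2 maps the control registers and PCI_64BIT_BAR4
 * the doorbell registers, while for if_type 6, PCI_64BIT_BAR2 maps the
 * doorbell registers and PCI_64BIT_BAR4 the DPP registers.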
10246 */ 10247 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10248 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10249 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10250 10251 /* 10252 * Map SLI4 PCI Config Space Register base to a kernel virtual 10253 * addr 10254 */ 10255 phba->sli4_hba.conf_regs_memmap_p = 10256 ioremap(phba->pci_bar0_map, bar0map_len); 10257 if (!phba->sli4_hba.conf_regs_memmap_p) { 10258 dev_printk(KERN_ERR, &pdev->dev, 10259 "ioremap failed for SLI4 PCI config " 10260 "registers.\n"); 10261 return -ENODEV; 10262 } 10263 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10264 /* Set up BAR0 PCI config space register memory map */ 10265 lpfc_sli4_bar0_register_memmap(phba, if_type); 10266 } else { 10267 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10268 bar0map_len = pci_resource_len(pdev, 1); 10269 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10270 dev_printk(KERN_ERR, &pdev->dev, 10271 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10272 return -ENODEV; 10273 } 10274 phba->sli4_hba.conf_regs_memmap_p = 10275 ioremap(phba->pci_bar0_map, bar0map_len); 10276 if (!phba->sli4_hba.conf_regs_memmap_p) { 10277 dev_printk(KERN_ERR, &pdev->dev, 10278 "ioremap failed for SLI4 PCI config " 10279 "registers.\n"); 10280 return -ENODEV; 10281 } 10282 lpfc_sli4_bar0_register_memmap(phba, if_type); 10283 } 10284 10285 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10286 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10287 /* 10288 * Map SLI4 if type 0 HBA Control Register base to a 10289 * kernel virtual address and setup the registers. 10290 */ 10291 phba->pci_bar1_map = pci_resource_start(pdev, 10292 PCI_64BIT_BAR2); 10293 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10294 phba->sli4_hba.ctrl_regs_memmap_p = 10295 ioremap(phba->pci_bar1_map, 10296 bar1map_len); 10297 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10298 dev_err(&pdev->dev, 10299 "ioremap failed for SLI4 HBA " 10300 "control registers.\n"); 10301 error = -ENOMEM; 10302 goto out_iounmap_conf; 10303 } 10304 phba->pci_bar2_memmap_p = 10305 phba->sli4_hba.ctrl_regs_memmap_p; 10306 lpfc_sli4_bar1_register_memmap(phba, if_type); 10307 } else { 10308 error = -ENOMEM; 10309 goto out_iounmap_conf; 10310 } 10311 } 10312 10313 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10314 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10315 /* 10316 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10317 * virtual address and setup the registers. 10318 */ 10319 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10320 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10321 phba->sli4_hba.drbl_regs_memmap_p = 10322 ioremap(phba->pci_bar1_map, bar1map_len); 10323 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10324 dev_err(&pdev->dev, 10325 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10326 error = -ENOMEM; 10327 goto out_iounmap_conf; 10328 } 10329 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10330 lpfc_sli4_bar1_register_memmap(phba, if_type); 10331 } 10332 10333 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10334 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10335 /* 10336 * Map SLI4 if type 0 HBA Doorbell Register base to 10337 * a kernel virtual address and setup the registers. 
10338 */ 10339 phba->pci_bar2_map = pci_resource_start(pdev, 10340 PCI_64BIT_BAR4); 10341 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10342 phba->sli4_hba.drbl_regs_memmap_p = 10343 ioremap(phba->pci_bar2_map, 10344 bar2map_len); 10345 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10346 dev_err(&pdev->dev, 10347 "ioremap failed for SLI4 HBA" 10348 " doorbell registers.\n"); 10349 error = -ENOMEM; 10350 goto out_iounmap_ctrl; 10351 } 10352 phba->pci_bar4_memmap_p = 10353 phba->sli4_hba.drbl_regs_memmap_p; 10354 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 10355 if (error) 10356 goto out_iounmap_all; 10357 } else { 10358 error = -ENOMEM; 10359 goto out_iounmap_all; 10360 } 10361 } 10362 10363 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 10364 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10365 /* 10366 * Map SLI4 if type 6 HBA DPP Register base to a kernel 10367 * virtual address and setup the registers. 10368 */ 10369 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 10370 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10371 phba->sli4_hba.dpp_regs_memmap_p = 10372 ioremap(phba->pci_bar2_map, bar2map_len); 10373 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10374 dev_err(&pdev->dev, 10375 "ioremap failed for SLI4 HBA dpp registers.\n"); 10376 error = -ENOMEM; 10377 goto out_iounmap_ctrl; 10378 } 10379 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10380 } 10381 10382 /* Set up the EQ/CQ register handeling functions now */ 10383 switch (if_type) { 10384 case LPFC_SLI_INTF_IF_TYPE_0: 10385 case LPFC_SLI_INTF_IF_TYPE_2: 10386 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 10387 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 10388 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 10389 break; 10390 case LPFC_SLI_INTF_IF_TYPE_6: 10391 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 10392 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 10393 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 10394 break; 10395 default: 10396 break; 10397 } 10398 10399 return 0; 10400 10401 out_iounmap_all: 10402 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10403 out_iounmap_ctrl: 10404 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10405 out_iounmap_conf: 10406 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10407 10408 return error; 10409 } 10410 10411 /** 10412 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 10413 * @phba: pointer to lpfc hba data structure. 10414 * 10415 * This routine is invoked to unset the PCI device memory space for device 10416 * with SLI-4 interface spec. 
10417 **/ 10418 static void 10419 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10420 { 10421 uint32_t if_type; 10422 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10423 10424 switch (if_type) { 10425 case LPFC_SLI_INTF_IF_TYPE_0: 10426 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10427 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10428 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10429 break; 10430 case LPFC_SLI_INTF_IF_TYPE_2: 10431 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10432 break; 10433 case LPFC_SLI_INTF_IF_TYPE_6: 10434 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10435 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10436 if (phba->sli4_hba.dpp_regs_memmap_p) 10437 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 10438 break; 10439 case LPFC_SLI_INTF_IF_TYPE_1: 10440 default: 10441 dev_printk(KERN_ERR, &phba->pcidev->dev, 10442 "FATAL - unsupported SLI4 interface type - %d\n", 10443 if_type); 10444 break; 10445 } 10446 } 10447 10448 /** 10449 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10450 * @phba: pointer to lpfc hba data structure. 10451 * 10452 * This routine is invoked to enable the MSI-X interrupt vectors to device 10453 * with SLI-3 interface specs. 10454 * 10455 * Return codes 10456 * 0 - successful 10457 * other values - error 10458 **/ 10459 static int 10460 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10461 { 10462 int rc; 10463 LPFC_MBOXQ_t *pmb; 10464 10465 /* Set up MSI-X multi-message vectors */ 10466 rc = pci_alloc_irq_vectors(phba->pcidev, 10467 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10468 if (rc < 0) { 10469 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10470 "0420 PCI enable MSI-X failed (%d)\n", rc); 10471 goto vec_fail_out; 10472 } 10473 10474 /* 10475 * Assign MSI-X vectors to interrupt handlers 10476 */ 10477 10478 /* vector-0 is associated to slow-path handler */ 10479 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10480 &lpfc_sli_sp_intr_handler, 0, 10481 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10482 if (rc) { 10483 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10484 "0421 MSI-X slow-path request_irq failed " 10485 "(%d)\n", rc); 10486 goto msi_fail_out; 10487 } 10488 10489 /* vector-1 is associated to fast-path handler */ 10490 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10491 &lpfc_sli_fp_intr_handler, 0, 10492 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10493 10494 if (rc) { 10495 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10496 "0429 MSI-X fast-path request_irq failed " 10497 "(%d)\n", rc); 10498 goto irq_fail_out; 10499 } 10500 10501 /* 10502 * Configure HBA MSI-X attention conditions to messages 10503 */ 10504 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10505 10506 if (!pmb) { 10507 rc = -ENOMEM; 10508 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10509 "0474 Unable to allocate memory for issuing " 10510 "MBOX_CONFIG_MSI command\n"); 10511 goto mem_fail_out; 10512 } 10513 rc = lpfc_config_msi(phba, pmb); 10514 if (rc) 10515 goto mbx_fail_out; 10516 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10517 if (rc != MBX_SUCCESS) { 10518 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10519 "0351 Config MSI mailbox command failed, " 10520 "mbxCmd x%x, mbxStatus x%x\n", 10521 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10522 goto mbx_fail_out; 10523 } 10524 10525 /* Free memory allocated for mailbox command */ 10526 mempool_free(pmb, phba->mbox_mem_pool); 10527 return rc; 10528 10529 mbx_fail_out: 10530 /* Free memory allocated for mailbox command */ 10531 mempool_free(pmb, 
phba->mbox_mem_pool); 10532 10533 mem_fail_out: 10534 /* free the irq already requested */ 10535 free_irq(pci_irq_vector(phba->pcidev, 1), phba); 10536 10537 irq_fail_out: 10538 /* free the irq already requested */ 10539 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 10540 10541 msi_fail_out: 10542 /* Unconfigure MSI-X capability structure */ 10543 pci_free_irq_vectors(phba->pcidev); 10544 10545 vec_fail_out: 10546 return rc; 10547 } 10548 10549 /** 10550 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 10551 * @phba: pointer to lpfc hba data structure. 10552 * 10553 * This routine is invoked to enable the MSI interrupt mode to device with 10554 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 10555 * enable the MSI vector. The device driver is responsible for calling 10556 * request_irq() to register the MSI vector with an interrupt handler, which 10557 * is done in this function. 10558 * 10559 * Return codes 10560 * 0 - successful 10561 * other values - error 10562 */ 10563 static int 10564 lpfc_sli_enable_msi(struct lpfc_hba *phba) 10565 { 10566 int rc; 10567 10568 rc = pci_enable_msi(phba->pcidev); 10569 if (!rc) 10570 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10571 "0462 PCI enable MSI mode success.\n"); 10572 else { 10573 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10574 "0471 PCI enable MSI mode failed (%d)\n", rc); 10575 return rc; 10576 } 10577 10578 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10579 0, LPFC_DRIVER_NAME, phba); 10580 if (rc) { 10581 pci_disable_msi(phba->pcidev); 10582 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10583 "0478 MSI request_irq failed (%d)\n", rc); 10584 } 10585 return rc; 10586 } 10587 10588 /** 10589 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 10590 * @phba: pointer to lpfc hba data structure. 10591 * 10592 * This routine is invoked to enable device interrupt and associate driver's 10593 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 10594 * spec. Depending on the interrupt mode configured for the driver, the driver 10595 * will try to fall back from the configured interrupt mode to an interrupt 10596 * mode which is supported by the platform, kernel, and device in the order 10597 * of: 10598 * MSI-X -> MSI -> IRQ.
10599 * 10600 * Return codes 10601 * 0, 1, 2 - the interrupt mode that was enabled (INTx, MSI or MSI-X) 10602 * LPFC_INTR_ERROR - no interrupt mode could be enabled 10603 **/ 10604 static uint32_t 10605 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10606 { 10607 uint32_t intr_mode = LPFC_INTR_ERROR; 10608 int retval; 10609 10610 if (cfg_mode == 2) { 10611 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 10612 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 10613 if (!retval) { 10614 /* Now, try to enable MSI-X interrupt mode */ 10615 retval = lpfc_sli_enable_msix(phba); 10616 if (!retval) { 10617 /* Indicate initialization to MSI-X mode */ 10618 phba->intr_type = MSIX; 10619 intr_mode = 2; 10620 } 10621 } 10622 } 10623 10624 /* Fallback to MSI if MSI-X initialization failed */ 10625 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10626 retval = lpfc_sli_enable_msi(phba); 10627 if (!retval) { 10628 /* Indicate initialization to MSI mode */ 10629 phba->intr_type = MSI; 10630 intr_mode = 1; 10631 } 10632 } 10633 10634 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 10635 if (phba->intr_type == NONE) { 10636 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10637 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10638 if (!retval) { 10639 /* Indicate initialization to INTx mode */ 10640 phba->intr_type = INTx; 10641 intr_mode = 0; 10642 } 10643 } 10644 return intr_mode; 10645 } 10646 10647 /** 10648 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 10649 * @phba: pointer to lpfc hba data structure. 10650 * 10651 * This routine is invoked to disable device interrupt and disassociate the 10652 * driver's interrupt handler(s) from interrupt vector(s) to device with 10653 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 10654 * release the interrupt vector(s) for the message signaled interrupt. 10655 **/ 10656 static void 10657 lpfc_sli_disable_intr(struct lpfc_hba *phba) 10658 { 10659 int nr_irqs, i; 10660 10661 if (phba->intr_type == MSIX) 10662 nr_irqs = LPFC_MSIX_VECTORS; 10663 else 10664 nr_irqs = 1; 10665 10666 for (i = 0; i < nr_irqs; i++) 10667 free_irq(pci_irq_vector(phba->pcidev, i), phba); 10668 pci_free_irq_vectors(phba->pcidev); 10669 10670 /* Reset interrupt management states */ 10671 phba->intr_type = NONE; 10672 phba->sli.slistat.sli_intr = 0; 10673 } 10674 10675 /** 10676 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 10677 * @phba: pointer to lpfc hba data structure. 10678 * @id: EQ vector index or Hardware Queue index 10679 * @match: LPFC_FIND_BY_EQ = match by EQ 10680 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 10681 * Return the CPU that matches the selection criteria 10682 */ 10683 static uint16_t 10684 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 10685 { 10686 struct lpfc_vector_map_info *cpup; 10687 int cpu; 10688 10689 /* Loop through all CPUs */ 10690 for_each_present_cpu(cpu) { 10691 cpup = &phba->sli4_hba.cpu_map[cpu]; 10692 10693 /* If we are matching by EQ, there may be multiple CPUs 10694 * using the same vector, so select the one with 10695 * LPFC_CPU_FIRST_IRQ set.
10696 */ 10697 if ((match == LPFC_FIND_BY_EQ) && 10698 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 10699 (cpup->eq == id)) 10700 return cpu; 10701 10702 /* If matching by HDWQ, select the first CPU that matches */ 10703 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10704 return cpu; 10705 } 10706 return 0; 10707 } 10708 10709 #ifdef CONFIG_X86 10710 /** 10711 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10712 * @phba: pointer to lpfc hba data structure. 10713 * @cpu: CPU map index 10714 * @phys_id: CPU package physical id 10715 * @core_id: CPU core id 10716 */ 10717 static int 10718 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10719 uint16_t phys_id, uint16_t core_id) 10720 { 10721 struct lpfc_vector_map_info *cpup; 10722 int idx; 10723 10724 for_each_present_cpu(idx) { 10725 cpup = &phba->sli4_hba.cpu_map[idx]; 10726 /* Does the cpup match the one we are looking for */ 10727 if ((cpup->phys_id == phys_id) && 10728 (cpup->core_id == core_id) && 10729 (cpu != idx)) 10730 return 1; 10731 } 10732 return 0; 10733 } 10734 #endif 10735 10736 /* 10737 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 10738 * @phba: pointer to lpfc hba data structure. 10739 * @eqidx: index for eq and irq vector 10740 * @flag: flags to set for vector_map structure 10741 * @cpu: cpu used to index vector_map structure 10742 * 10743 * The routine assigns eq info into vector_map structure 10744 */ 10745 static inline void 10746 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 10747 unsigned int cpu) 10748 { 10749 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 10750 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 10751 10752 cpup->eq = eqidx; 10753 cpup->flag |= flag; 10754 10755 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10756 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 10757 cpu, eqhdl->irq, cpup->eq, cpup->flag); 10758 } 10759 10760 /** 10761 * lpfc_cpu_map_array_init - Initialize cpu_map structure 10762 * @phba: pointer to lpfc hba data structure. 10763 * 10764 * The routine initializes the cpu_map array structure 10765 */ 10766 static void 10767 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 10768 { 10769 struct lpfc_vector_map_info *cpup; 10770 struct lpfc_eq_intr_info *eqi; 10771 int cpu; 10772 10773 for_each_possible_cpu(cpu) { 10774 cpup = &phba->sli4_hba.cpu_map[cpu]; 10775 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 10776 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 10777 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 10778 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 10779 cpup->flag = 0; 10780 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 10781 INIT_LIST_HEAD(&eqi->list); 10782 eqi->icnt = 0; 10783 } 10784 } 10785 10786 /** 10787 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 10788 * @phba: pointer to lpfc hba data structure. 10789 * 10790 * The routine initializes the hba_eq_hdl array structure 10791 */ 10792 static void 10793 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 10794 { 10795 struct lpfc_hba_eq_hdl *eqhdl; 10796 int i; 10797 10798 for (i = 0; i < phba->cfg_irq_chann; i++) { 10799 eqhdl = lpfc_get_eq_hdl(i); 10800 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 10801 eqhdl->phba = phba; 10802 } 10803 } 10804 10805 /** 10806 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10807 * @phba: pointer to lpfc hba data structure. 10808 * @vectors: number of msix vectors allocated. 10809 * 10810 * The routine will figure out the CPU affinity assignment for every 10811 * MSI-X vector allocated for the HBA. 
10812 * In addition, the CPU to IO channel mapping will be calculated
10813 * and the phba->sli4_hba.cpu_map array will reflect this.
10814 */
10815 static void
10816 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10817 {
10818 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10819 int max_phys_id, min_phys_id;
10820 int max_core_id, min_core_id;
10821 struct lpfc_vector_map_info *cpup;
10822 struct lpfc_vector_map_info *new_cpup;
10823 #ifdef CONFIG_X86
10824 struct cpuinfo_x86 *cpuinfo;
10825 #endif
10826
10827 max_phys_id = 0;
10828 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10829 max_core_id = 0;
10830 min_core_id = LPFC_VECTOR_MAP_EMPTY;
10831
10832 /* Update CPU map with physical id and core id of each CPU */
10833 for_each_present_cpu(cpu) {
10834 cpup = &phba->sli4_hba.cpu_map[cpu];
10835 #ifdef CONFIG_X86
10836 cpuinfo = &cpu_data(cpu);
10837 cpup->phys_id = cpuinfo->phys_proc_id;
10838 cpup->core_id = cpuinfo->cpu_core_id;
10839 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10840 cpup->flag |= LPFC_CPU_MAP_HYPER;
10841 #else
10842 /* No distinction between CPUs for other platforms */
10843 cpup->phys_id = 0;
10844 cpup->core_id = cpu;
10845 #endif
10846
10847 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10848 "3328 CPU %d physid %d coreid %d flag x%x\n",
10849 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10850
10851 if (cpup->phys_id > max_phys_id)
10852 max_phys_id = cpup->phys_id;
10853 if (cpup->phys_id < min_phys_id)
10854 min_phys_id = cpup->phys_id;
10855
10856 if (cpup->core_id > max_core_id)
10857 max_core_id = cpup->core_id;
10858 if (cpup->core_id < min_core_id)
10859 min_core_id = cpup->core_id;
10860 }
10861
10862 /* After looking at each irq vector assigned to this pcidev, it's
10863 * possible to see that not ALL CPUs have been accounted for.
10864 * Next we will set any unassigned (unaffinitized) cpu map
10865 * entries to an IRQ on the same phys_id.
10866 */
10867 first_cpu = cpumask_first(cpu_present_mask);
10868 start_cpu = first_cpu;
10869
10870 for_each_present_cpu(cpu) {
10871 cpup = &phba->sli4_hba.cpu_map[cpu];
10872
10873 /* Is this CPU entry unassigned? */
10874 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10875 /* Mark CPU as IRQ not assigned by the kernel */
10876 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10877
10878 /* If so, find a new_cpup that's on the SAME
10879 * phys_id as cpup. start_cpu will start where we
10880 * left off so all unassigned entries don't get assigned
10881 * the IRQ of the first entry.
10882 */
10883 new_cpu = start_cpu;
10884 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10885 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10886 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10887 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
10888 (new_cpup->phys_id == cpup->phys_id))
10889 goto found_same;
10890 new_cpu = cpumask_next(
10891 new_cpu, cpu_present_mask);
10892 if (new_cpu == nr_cpumask_bits)
10893 new_cpu = first_cpu;
10894 }
10895 /* At this point, we leave the CPU as unassigned */
10896 continue;
10897 found_same:
10898 /* We found a matching phys_id, so copy the IRQ info */
10899 cpup->eq = new_cpup->eq;
10900
10901 /* Bump start_cpu to the next slot to minimize the
10902 * chance of having multiple unassigned CPU entries
10903 * selecting the same IRQ.
10904 */ 10905 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10906 if (start_cpu == nr_cpumask_bits) 10907 start_cpu = first_cpu; 10908 10909 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10910 "3337 Set Affinity: CPU %d " 10911 "eq %d from peer cpu %d same " 10912 "phys_id (%d)\n", 10913 cpu, cpup->eq, new_cpu, 10914 cpup->phys_id); 10915 } 10916 } 10917 10918 /* Set any unassigned cpu map entries to a IRQ on any phys_id */ 10919 start_cpu = first_cpu; 10920 10921 for_each_present_cpu(cpu) { 10922 cpup = &phba->sli4_hba.cpu_map[cpu]; 10923 10924 /* Is this entry unassigned */ 10925 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 10926 /* Mark it as IRQ not assigned by the kernel */ 10927 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 10928 10929 /* If so, find a new_cpup thats on ANY phys_id 10930 * as the cpup. start_cpu will start where we 10931 * left off so all unassigned entries don't get 10932 * assigned the IRQ of the first entry. 10933 */ 10934 new_cpu = start_cpu; 10935 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10936 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10937 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 10938 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) 10939 goto found_any; 10940 new_cpu = cpumask_next( 10941 new_cpu, cpu_present_mask); 10942 if (new_cpu == nr_cpumask_bits) 10943 new_cpu = first_cpu; 10944 } 10945 /* We should never leave an entry unassigned */ 10946 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10947 "3339 Set Affinity: CPU %d " 10948 "eq %d UNASSIGNED\n", 10949 cpup->hdwq, cpup->eq); 10950 continue; 10951 found_any: 10952 /* We found an available entry, copy the IRQ info */ 10953 cpup->eq = new_cpup->eq; 10954 10955 /* Bump start_cpu to the next slot to minmize the 10956 * chance of having multiple unassigned CPU entries 10957 * selecting the same IRQ. 10958 */ 10959 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10960 if (start_cpu == nr_cpumask_bits) 10961 start_cpu = first_cpu; 10962 10963 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10964 "3338 Set Affinity: CPU %d " 10965 "eq %d from peer cpu %d (%d/%d)\n", 10966 cpu, cpup->eq, new_cpu, 10967 new_cpup->phys_id, new_cpup->core_id); 10968 } 10969 } 10970 10971 /* Assign hdwq indices that are unique across all cpus in the map 10972 * that are also FIRST_CPUs. 10973 */ 10974 idx = 0; 10975 for_each_present_cpu(cpu) { 10976 cpup = &phba->sli4_hba.cpu_map[cpu]; 10977 10978 /* Only FIRST IRQs get a hdwq index assignment. */ 10979 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10980 continue; 10981 10982 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ 10983 cpup->hdwq = idx; 10984 idx++; 10985 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10986 "3333 Set Affinity: CPU %d (phys %d core %d): " 10987 "hdwq %d eq %d flg x%x\n", 10988 cpu, cpup->phys_id, cpup->core_id, 10989 cpup->hdwq, cpup->eq, cpup->flag); 10990 } 10991 /* Associate a hdwq with each cpu_map entry 10992 * This will be 1 to 1 - hdwq to cpu, unless there are less 10993 * hardware queues then CPUs. For that case we will just round-robin 10994 * the available hardware queues as they get assigned to CPUs. 10995 * The next_idx is the idx from the FIRST_CPU loop above to account 10996 * for irq_chann < hdwq. The idx is used for round-robin assignments 10997 * and needs to start at 0. 10998 */ 10999 next_idx = idx; 11000 start_cpu = 0; 11001 idx = 0; 11002 for_each_present_cpu(cpu) { 11003 cpup = &phba->sli4_hba.cpu_map[cpu]; 11004 11005 /* FIRST cpus are already mapped. 
*/ 11006 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 11007 continue; 11008 11009 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 11010 * of the unassigned cpus to the next idx so that all 11011 * hdw queues are fully utilized. 11012 */ 11013 if (next_idx < phba->cfg_hdw_queue) { 11014 cpup->hdwq = next_idx; 11015 next_idx++; 11016 continue; 11017 } 11018 11019 /* Not a First CPU and all hdw_queues are used. Reuse a 11020 * Hardware Queue for another CPU, so be smart about it 11021 * and pick one that has its IRQ/EQ mapped to the same phys_id 11022 * (CPU package) and core_id. 11023 */ 11024 new_cpu = start_cpu; 11025 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11026 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11027 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11028 new_cpup->phys_id == cpup->phys_id && 11029 new_cpup->core_id == cpup->core_id) { 11030 goto found_hdwq; 11031 } 11032 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11033 if (new_cpu == nr_cpumask_bits) 11034 new_cpu = first_cpu; 11035 } 11036 11037 /* If we can't match both phys_id and core_id, 11038 * settle for just a phys_id match. 11039 */ 11040 new_cpu = start_cpu; 11041 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11042 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11043 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11044 new_cpup->phys_id == cpup->phys_id) 11045 goto found_hdwq; 11046 11047 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11048 if (new_cpu == nr_cpumask_bits) 11049 new_cpu = first_cpu; 11050 } 11051 11052 /* Otherwise just round robin on cfg_hdw_queue */ 11053 cpup->hdwq = idx % phba->cfg_hdw_queue; 11054 idx++; 11055 goto logit; 11056 found_hdwq: 11057 /* We found an available entry, copy the IRQ info */ 11058 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 11059 if (start_cpu == nr_cpumask_bits) 11060 start_cpu = first_cpu; 11061 cpup->hdwq = new_cpup->hdwq; 11062 logit: 11063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11064 "3335 Set Affinity: CPU %d (phys %d core %d): " 11065 "hdwq %d eq %d flg x%x\n", 11066 cpu, cpup->phys_id, cpup->core_id, 11067 cpup->hdwq, cpup->eq, cpup->flag); 11068 } 11069 11070 /* 11071 * Initialize the cpu_map slots for not-present cpus in case 11072 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 11073 */ 11074 idx = 0; 11075 for_each_possible_cpu(cpu) { 11076 cpup = &phba->sli4_hba.cpu_map[cpu]; 11077 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 11078 continue; 11079 11080 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 11081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11082 "3340 Set Affinity: not present " 11083 "CPU %d hdwq %d\n", 11084 cpu, cpup->hdwq); 11085 } 11086 11087 /* The cpu_map array will be used later during initialization 11088 * when EQ / CQ / WQs are allocated and configured. 11089 */ 11090 return; 11091 } 11092 11093 /** 11094 * lpfc_cpuhp_get_eq 11095 * 11096 * @phba: pointer to lpfc hba data structure. 
11097 * @cpu: cpu going offline
11098 * @eqlist: list to collect the EQs that must be polled while @cpu is offline
11099 */
11100 static int
11101 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11102 struct list_head *eqlist)
11103 {
11104 const struct cpumask *maskp;
11105 struct lpfc_queue *eq;
11106 struct cpumask *tmp;
11107 u16 idx;
11108
11109 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11110 if (!tmp)
11111 return -ENOMEM;
11112
11113 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11114 maskp = pci_irq_get_affinity(phba->pcidev, idx);
11115 if (!maskp)
11116 continue;
11117 /*
11118 * if irq is not affinitized to the cpu going
11119 * offline then we don't need to poll the eq attached
11120 * to it.
11121 */
11122 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11123 continue;
11124 /* get the cpus that are online and are affini-
11125 * tized to this irq vector. If the count is
11126 * more than 1 then cpuhp is not going to shut-
11127 * down this vector. Since this cpu has not
11128 * gone offline yet, we need >1.
11129 */
11130 cpumask_and(tmp, maskp, cpu_online_mask);
11131 if (cpumask_weight(tmp) > 1)
11132 continue;
11133
11134 /* Now that we have an irq to shutdown, get the eq
11135 * mapped to this irq. Note: multiple hdwq's in
11136 * the software can share an eq, but eventually
11137 * only one eq will be mapped to this vector
11138 */
11139 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11140 list_add(&eq->_poll_list, eqlist);
11141 }
11142 kfree(tmp);
11143 return 0;
11144 }
11145
11146 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11147 {
11148 if (phba->sli_rev != LPFC_SLI_REV4)
11149 return;
11150
11151 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11152 &phba->cpuhp);
11153 /*
11154 * unregistering the instance doesn't stop the polling
11155 * timer. Wait for the poll timer to retire.
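 * (poll_list is walked under rcu_read_lock() elsewhere, e.g. in
 * lpfc_cpuhp_add(), so the synchronize_rcu() below lets any such reader
 * finish before the timer is deleted.)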
11156 */ 11157 synchronize_rcu(); 11158 del_timer_sync(&phba->cpuhp_poll_timer); 11159 } 11160 11161 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 11162 { 11163 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 11164 return; 11165 11166 __lpfc_cpuhp_remove(phba); 11167 } 11168 11169 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 11170 { 11171 if (phba->sli_rev != LPFC_SLI_REV4) 11172 return; 11173 11174 rcu_read_lock(); 11175 11176 if (!list_empty(&phba->poll_list)) { 11177 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 11178 mod_timer(&phba->cpuhp_poll_timer, 11179 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 11180 } 11181 11182 rcu_read_unlock(); 11183 11184 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 11185 &phba->cpuhp); 11186 } 11187 11188 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 11189 { 11190 if (phba->pport->load_flag & FC_UNLOADING) { 11191 *retval = -EAGAIN; 11192 return true; 11193 } 11194 11195 if (phba->sli_rev != LPFC_SLI_REV4) { 11196 *retval = 0; 11197 return true; 11198 } 11199 11200 /* proceed with the hotplug */ 11201 return false; 11202 } 11203 11204 /** 11205 * lpfc_irq_set_aff - set IRQ affinity 11206 * @eqhdl: EQ handle 11207 * @cpu: cpu to set affinity 11208 * 11209 **/ 11210 static inline void 11211 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 11212 { 11213 cpumask_clear(&eqhdl->aff_mask); 11214 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 11215 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11216 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 11217 } 11218 11219 /** 11220 * lpfc_irq_clear_aff - clear IRQ affinity 11221 * @eqhdl: EQ handle 11222 * 11223 **/ 11224 static inline void 11225 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 11226 { 11227 cpumask_clear(&eqhdl->aff_mask); 11228 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11229 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 11230 } 11231 11232 /** 11233 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 11234 * @phba: pointer to HBA context object. 11235 * @cpu: cpu going offline/online 11236 * @offline: true, cpu is going offline. false, cpu is coming online. 11237 * 11238 * If cpu is going offline, we'll try our best effort to find the next 11239 * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities. 11240 * 11241 * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu. 11242 * 11243 * Note: Call only if cfg_irq_numa is enabled, otherwise rely on 11244 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
11245 * 11246 **/ 11247 static void 11248 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 11249 { 11250 struct lpfc_vector_map_info *cpup; 11251 struct cpumask *aff_mask; 11252 unsigned int cpu_select, cpu_next, idx; 11253 const struct cpumask *numa_mask; 11254 11255 if (!phba->cfg_irq_numa) 11256 return; 11257 11258 numa_mask = &phba->sli4_hba.numa_mask; 11259 11260 if (!cpumask_test_cpu(cpu, numa_mask)) 11261 return; 11262 11263 cpup = &phba->sli4_hba.cpu_map[cpu]; 11264 11265 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11266 return; 11267 11268 if (offline) { 11269 /* Find next online CPU on NUMA node */ 11270 cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true); 11271 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next); 11272 11273 /* Found a valid CPU */ 11274 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 11275 /* Go through each eqhdl and ensure offlining 11276 * cpu aff_mask is migrated 11277 */ 11278 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11279 aff_mask = lpfc_get_aff_mask(idx); 11280 11281 /* Migrate affinity */ 11282 if (cpumask_test_cpu(cpu, aff_mask)) 11283 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 11284 cpu_select); 11285 } 11286 } else { 11287 /* Rely on irqbalance if no online CPUs left on NUMA */ 11288 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 11289 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 11290 } 11291 } else { 11292 /* Migrate affinity back to this CPU */ 11293 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 11294 } 11295 } 11296 11297 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 11298 { 11299 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11300 struct lpfc_queue *eq, *next; 11301 LIST_HEAD(eqlist); 11302 int retval; 11303 11304 if (!phba) { 11305 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11306 return 0; 11307 } 11308 11309 if (__lpfc_cpuhp_checks(phba, &retval)) 11310 return retval; 11311 11312 lpfc_irq_rebalance(phba, cpu, true); 11313 11314 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 11315 if (retval) 11316 return retval; 11317 11318 /* start polling on these eq's */ 11319 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 11320 list_del_init(&eq->_poll_list); 11321 lpfc_sli4_start_polling(eq); 11322 } 11323 11324 return 0; 11325 } 11326 11327 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 11328 { 11329 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11330 struct lpfc_queue *eq, *next; 11331 unsigned int n; 11332 int retval; 11333 11334 if (!phba) { 11335 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11336 return 0; 11337 } 11338 11339 if (__lpfc_cpuhp_checks(phba, &retval)) 11340 return retval; 11341 11342 lpfc_irq_rebalance(phba, cpu, false); 11343 11344 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 11345 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 11346 if (n == cpu) 11347 lpfc_sli4_stop_polling(eq); 11348 } 11349 11350 return 0; 11351 } 11352 11353 /** 11354 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 11355 * @phba: pointer to lpfc hba data structure. 11356 * 11357 * This routine is invoked to enable the MSI-X interrupt vectors to device 11358 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 11359 * to cpus on the system. 
11360 * 11361 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 11362 * the number of cpus on the same numa node as this adapter. The vectors are 11363 * allocated without requesting OS affinity mapping. A vector will be 11364 * allocated and assigned to each online and offline cpu. If the cpu is 11365 * online, then affinity will be set to that cpu. If the cpu is offline, then 11366 * affinity will be set to the nearest peer cpu within the numa node that is 11367 * online. If there are no online cpus within the numa node, affinity is not 11368 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 11369 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 11370 * configured. 11371 * 11372 * If numa mode is not enabled and there is more than 1 vector allocated, then 11373 * the driver relies on the managed irq interface where the OS assigns vector to 11374 * cpu affinity. The driver will then use that affinity mapping to setup its 11375 * cpu mapping table. 11376 * 11377 * Return codes 11378 * 0 - successful 11379 * other values - error 11380 **/ 11381 static int 11382 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 11383 { 11384 int vectors, rc, index; 11385 char *name; 11386 const struct cpumask *numa_mask = NULL; 11387 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 11388 struct lpfc_hba_eq_hdl *eqhdl; 11389 const struct cpumask *maskp; 11390 bool first; 11391 unsigned int flags = PCI_IRQ_MSIX; 11392 11393 /* Set up MSI-X multi-message vectors */ 11394 vectors = phba->cfg_irq_chann; 11395 11396 if (phba->cfg_irq_numa) { 11397 numa_mask = &phba->sli4_hba.numa_mask; 11398 cpu_cnt = cpumask_weight(numa_mask); 11399 vectors = min(phba->cfg_irq_chann, cpu_cnt); 11400 11401 /* cpu: iterates over numa_mask including offline or online 11402 * cpu_select: iterates over online numa_mask to set affinity 11403 */ 11404 cpu = cpumask_first(numa_mask); 11405 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu); 11406 } else { 11407 flags |= PCI_IRQ_AFFINITY; 11408 } 11409 11410 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 11411 if (rc < 0) { 11412 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11413 "0484 PCI enable MSI-X failed (%d)\n", rc); 11414 goto vec_fail_out; 11415 } 11416 vectors = rc; 11417 11418 /* Assign MSI-X vectors to interrupt handlers */ 11419 for (index = 0; index < vectors; index++) { 11420 eqhdl = lpfc_get_eq_hdl(index); 11421 name = eqhdl->handler_name; 11422 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 11423 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 11424 LPFC_DRIVER_HANDLER_NAME"%d", index); 11425 11426 eqhdl->idx = index; 11427 rc = request_irq(pci_irq_vector(phba->pcidev, index), 11428 &lpfc_sli4_hba_intr_handler, 0, 11429 name, eqhdl); 11430 if (rc) { 11431 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11432 "0486 MSI-X fast-path (%d) " 11433 "request_irq failed (%d)\n", index, rc); 11434 goto cfg_fail_out; 11435 } 11436 11437 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 11438 11439 if (phba->cfg_irq_numa) { 11440 /* If found a neighboring online cpu, set affinity */ 11441 if (cpu_select < nr_cpu_ids) 11442 lpfc_irq_set_aff(eqhdl, cpu_select); 11443 11444 /* Assign EQ to cpu_map */ 11445 lpfc_assign_eq_map_info(phba, index, 11446 LPFC_CPU_FIRST_IRQ, 11447 cpu); 11448 11449 /* Iterate to next offline or online cpu in numa_mask */ 11450 cpu = cpumask_next(cpu, numa_mask); 11451 11452 /* Find next online cpu in numa_mask to set affinity */ 11453 cpu_select = 
lpfc_next_online_numa_cpu(numa_mask, cpu); 11454 } else if (vectors == 1) { 11455 cpu = cpumask_first(cpu_present_mask); 11456 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 11457 cpu); 11458 } else { 11459 maskp = pci_irq_get_affinity(phba->pcidev, index); 11460 11461 first = true; 11462 /* Loop through all CPUs associated with vector index */ 11463 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 11464 /* If this is the first CPU thats assigned to 11465 * this vector, set LPFC_CPU_FIRST_IRQ. 11466 */ 11467 lpfc_assign_eq_map_info(phba, index, 11468 first ? 11469 LPFC_CPU_FIRST_IRQ : 0, 11470 cpu); 11471 if (first) 11472 first = false; 11473 } 11474 } 11475 } 11476 11477 if (vectors != phba->cfg_irq_chann) { 11478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11479 "3238 Reducing IO channels to match number of " 11480 "MSI-X vectors, requested %d got %d\n", 11481 phba->cfg_irq_chann, vectors); 11482 if (phba->cfg_irq_chann > vectors) 11483 phba->cfg_irq_chann = vectors; 11484 } 11485 11486 return rc; 11487 11488 cfg_fail_out: 11489 /* free the irq already requested */ 11490 for (--index; index >= 0; index--) { 11491 eqhdl = lpfc_get_eq_hdl(index); 11492 lpfc_irq_clear_aff(eqhdl); 11493 irq_set_affinity_hint(eqhdl->irq, NULL); 11494 free_irq(eqhdl->irq, eqhdl); 11495 } 11496 11497 /* Unconfigure MSI-X capability structure */ 11498 pci_free_irq_vectors(phba->pcidev); 11499 11500 vec_fail_out: 11501 return rc; 11502 } 11503 11504 /** 11505 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 11506 * @phba: pointer to lpfc hba data structure. 11507 * 11508 * This routine is invoked to enable the MSI interrupt mode to device with 11509 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 11510 * called to enable the MSI vector. The device driver is responsible for 11511 * calling the request_irq() to register MSI vector with a interrupt the 11512 * handler, which is done in this function. 11513 * 11514 * Return codes 11515 * 0 - successful 11516 * other values - error 11517 **/ 11518 static int 11519 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 11520 { 11521 int rc, index; 11522 unsigned int cpu; 11523 struct lpfc_hba_eq_hdl *eqhdl; 11524 11525 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 11526 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 11527 if (rc > 0) 11528 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11529 "0487 PCI enable MSI mode success.\n"); 11530 else { 11531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11532 "0488 PCI enable MSI mode failed (%d)\n", rc); 11533 return rc ? rc : -1; 11534 } 11535 11536 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11537 0, LPFC_DRIVER_NAME, phba); 11538 if (rc) { 11539 pci_free_irq_vectors(phba->pcidev); 11540 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11541 "0490 MSI request_irq failed (%d)\n", rc); 11542 return rc; 11543 } 11544 11545 eqhdl = lpfc_get_eq_hdl(0); 11546 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 11547 11548 cpu = cpumask_first(cpu_present_mask); 11549 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 11550 11551 for (index = 0; index < phba->cfg_irq_chann; index++) { 11552 eqhdl = lpfc_get_eq_hdl(index); 11553 eqhdl->idx = index; 11554 } 11555 11556 return 0; 11557 } 11558 11559 /** 11560 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 11561 * @phba: pointer to lpfc hba data structure. 
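 * @cfg_mode: configured interrupt mode to start with (2 - MSI-X, 1 - MSI,
 *            0 - INTx); lower modes are tried as fallbacks.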
11562 * 11563 * This routine is invoked to enable device interrupt and associate driver's 11564 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 11565 * interface spec. Depends on the interrupt mode configured to the driver, 11566 * the driver will try to fallback from the configured interrupt mode to an 11567 * interrupt mode which is supported by the platform, kernel, and device in 11568 * the order of: 11569 * MSI-X -> MSI -> IRQ. 11570 * 11571 * Return codes 11572 * 0 - successful 11573 * other values - error 11574 **/ 11575 static uint32_t 11576 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 11577 { 11578 uint32_t intr_mode = LPFC_INTR_ERROR; 11579 int retval, idx; 11580 11581 if (cfg_mode == 2) { 11582 /* Preparation before conf_msi mbox cmd */ 11583 retval = 0; 11584 if (!retval) { 11585 /* Now, try to enable MSI-X interrupt mode */ 11586 retval = lpfc_sli4_enable_msix(phba); 11587 if (!retval) { 11588 /* Indicate initialization to MSI-X mode */ 11589 phba->intr_type = MSIX; 11590 intr_mode = 2; 11591 } 11592 } 11593 } 11594 11595 /* Fallback to MSI if MSI-X initialization failed */ 11596 if (cfg_mode >= 1 && phba->intr_type == NONE) { 11597 retval = lpfc_sli4_enable_msi(phba); 11598 if (!retval) { 11599 /* Indicate initialization to MSI mode */ 11600 phba->intr_type = MSI; 11601 intr_mode = 1; 11602 } 11603 } 11604 11605 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 11606 if (phba->intr_type == NONE) { 11607 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11608 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 11609 if (!retval) { 11610 struct lpfc_hba_eq_hdl *eqhdl; 11611 unsigned int cpu; 11612 11613 /* Indicate initialization to INTx mode */ 11614 phba->intr_type = INTx; 11615 intr_mode = 0; 11616 11617 eqhdl = lpfc_get_eq_hdl(0); 11618 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 11619 11620 cpu = cpumask_first(cpu_present_mask); 11621 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 11622 cpu); 11623 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11624 eqhdl = lpfc_get_eq_hdl(idx); 11625 eqhdl->idx = idx; 11626 } 11627 } 11628 } 11629 return intr_mode; 11630 } 11631 11632 /** 11633 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 11634 * @phba: pointer to lpfc hba data structure. 11635 * 11636 * This routine is invoked to disable device interrupt and disassociate 11637 * the driver's interrupt handler(s) from interrupt vector(s) to device 11638 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 11639 * will release the interrupt vector(s) for the message signaled interrupt. 11640 **/ 11641 static void 11642 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 11643 { 11644 /* Disable the currently initialized interrupt mode */ 11645 if (phba->intr_type == MSIX) { 11646 int index; 11647 struct lpfc_hba_eq_hdl *eqhdl; 11648 11649 /* Free up MSI-X multi-message vectors */ 11650 for (index = 0; index < phba->cfg_irq_chann; index++) { 11651 eqhdl = lpfc_get_eq_hdl(index); 11652 lpfc_irq_clear_aff(eqhdl); 11653 irq_set_affinity_hint(eqhdl->irq, NULL); 11654 free_irq(eqhdl->irq, eqhdl); 11655 } 11656 } else { 11657 free_irq(phba->pcidev->irq, phba); 11658 } 11659 11660 pci_free_irq_vectors(phba->pcidev); 11661 11662 /* Reset interrupt management states */ 11663 phba->intr_type = NONE; 11664 phba->sli.slistat.sli_intr = 0; 11665 } 11666 11667 /** 11668 * lpfc_unset_hba - Unset SLI3 hba device initialization 11669 * @phba: pointer to lpfc hba data structure. 
11670 * 11671 * This routine is invoked to unset the HBA device initialization steps to 11672 * a device with SLI-3 interface spec. 11673 **/ 11674 static void 11675 lpfc_unset_hba(struct lpfc_hba *phba) 11676 { 11677 struct lpfc_vport *vport = phba->pport; 11678 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11679 11680 spin_lock_irq(shost->host_lock); 11681 vport->load_flag |= FC_UNLOADING; 11682 spin_unlock_irq(shost->host_lock); 11683 11684 kfree(phba->vpi_bmask); 11685 kfree(phba->vpi_ids); 11686 11687 lpfc_stop_hba_timers(phba); 11688 11689 phba->pport->work_port_events = 0; 11690 11691 lpfc_sli_hba_down(phba); 11692 11693 lpfc_sli_brdrestart(phba); 11694 11695 lpfc_sli_disable_intr(phba); 11696 11697 return; 11698 } 11699 11700 /** 11701 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 11702 * @phba: Pointer to HBA context object. 11703 * 11704 * This function is called in the SLI4 code path to wait for completion 11705 * of device's XRIs exchange busy. It will check the XRI exchange busy 11706 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 11707 * that, it will check the XRI exchange busy on outstanding FCP and ELS 11708 * I/Os every 30 seconds, log error message, and wait forever. Only when 11709 * all XRI exchange busy complete, the driver unload shall proceed with 11710 * invoking the function reset ioctl mailbox command to the CNA and the 11711 * the rest of the driver unload resource release. 11712 **/ 11713 static void 11714 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 11715 { 11716 struct lpfc_sli4_hdw_queue *qp; 11717 int idx, ccnt; 11718 int wait_time = 0; 11719 int io_xri_cmpl = 1; 11720 int nvmet_xri_cmpl = 1; 11721 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11722 11723 /* Driver just aborted IOs during the hba_unset process. Pause 11724 * here to give the HBA time to complete the IO and get entries 11725 * into the abts lists. 11726 */ 11727 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 11728 11729 /* Wait for NVME pending IO to flush back to transport. 
*/ 11730 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11731 lpfc_nvme_wait_for_io_drain(phba); 11732 11733 ccnt = 0; 11734 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11735 qp = &phba->sli4_hba.hdwq[idx]; 11736 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 11737 if (!io_xri_cmpl) /* if list is NOT empty */ 11738 ccnt++; 11739 } 11740 if (ccnt) 11741 io_xri_cmpl = 0; 11742 11743 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11744 nvmet_xri_cmpl = 11745 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11746 } 11747 11748 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 11749 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11750 if (!nvmet_xri_cmpl) 11751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11752 "6424 NVMET XRI exchange busy " 11753 "wait time: %d seconds.\n", 11754 wait_time/1000); 11755 if (!io_xri_cmpl) 11756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11757 "6100 IO XRI exchange busy " 11758 "wait time: %d seconds.\n", 11759 wait_time/1000); 11760 if (!els_xri_cmpl) 11761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11762 "2878 ELS XRI exchange busy " 11763 "wait time: %d seconds.\n", 11764 wait_time/1000); 11765 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 11766 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 11767 } else { 11768 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 11769 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 11770 } 11771 11772 ccnt = 0; 11773 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11774 qp = &phba->sli4_hba.hdwq[idx]; 11775 io_xri_cmpl = list_empty( 11776 &qp->lpfc_abts_io_buf_list); 11777 if (!io_xri_cmpl) /* if list is NOT empty */ 11778 ccnt++; 11779 } 11780 if (ccnt) 11781 io_xri_cmpl = 0; 11782 11783 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11784 nvmet_xri_cmpl = list_empty( 11785 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11786 } 11787 els_xri_cmpl = 11788 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11789 11790 } 11791 } 11792 11793 /** 11794 * lpfc_sli4_hba_unset - Unset the fcoe hba 11795 * @phba: Pointer to HBA context object. 11796 * 11797 * This function is called in the SLI4 code path to reset the HBA's FCoE 11798 * function. The caller is not required to hold any lock. This routine 11799 * issues PCI function reset mailbox command to reset the FCoE function. 11800 * At the end of the function, it calls lpfc_hba_down_post function to 11801 * free any pending commands. 11802 **/ 11803 static void 11804 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 11805 { 11806 int wait_cnt = 0; 11807 LPFC_MBOXQ_t *mboxq; 11808 struct pci_dev *pdev = phba->pcidev; 11809 11810 lpfc_stop_hba_timers(phba); 11811 if (phba->pport) 11812 phba->sli4_hba.intr_enable = 0; 11813 11814 /* 11815 * Gracefully wait out the potential current outstanding asynchronous 11816 * mailbox command. 
11817 */ 11818 11819 /* First, block any pending async mailbox command from posted */ 11820 spin_lock_irq(&phba->hbalock); 11821 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11822 spin_unlock_irq(&phba->hbalock); 11823 /* Now, trying to wait it out if we can */ 11824 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11825 msleep(10); 11826 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 11827 break; 11828 } 11829 /* Forcefully release the outstanding mailbox command if timed out */ 11830 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11831 spin_lock_irq(&phba->hbalock); 11832 mboxq = phba->sli.mbox_active; 11833 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 11834 __lpfc_mbox_cmpl_put(phba, mboxq); 11835 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11836 phba->sli.mbox_active = NULL; 11837 spin_unlock_irq(&phba->hbalock); 11838 } 11839 11840 /* Abort all iocbs associated with the hba */ 11841 lpfc_sli_hba_iocb_abort(phba); 11842 11843 /* Wait for completion of device XRI exchange busy */ 11844 lpfc_sli4_xri_exchange_busy_wait(phba); 11845 11846 /* per-phba callback de-registration for hotplug event */ 11847 lpfc_cpuhp_remove(phba); 11848 11849 /* Disable PCI subsystem interrupt */ 11850 lpfc_sli4_disable_intr(phba); 11851 11852 /* Disable SR-IOV if enabled */ 11853 if (phba->cfg_sriov_nr_virtfn) 11854 pci_disable_sriov(pdev); 11855 11856 /* Stop kthread signal shall trigger work_done one more time */ 11857 kthread_stop(phba->worker_thread); 11858 11859 /* Disable FW logging to host memory */ 11860 lpfc_ras_stop_fwlog(phba); 11861 11862 /* Unset the queues shared with the hardware then release all 11863 * allocated resources. 11864 */ 11865 lpfc_sli4_queue_unset(phba); 11866 lpfc_sli4_queue_destroy(phba); 11867 11868 /* Reset SLI4 HBA FCoE function */ 11869 lpfc_pci_function_reset(phba); 11870 11871 /* Free RAS DMA memory */ 11872 if (phba->ras_fwlog.ras_enabled) 11873 lpfc_sli4_ras_dma_free(phba); 11874 11875 /* Stop the SLI4 device port */ 11876 if (phba->pport) 11877 phba->pport->work_port_events = 0; 11878 } 11879 11880 /** 11881 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 11882 * @phba: Pointer to HBA context object. 11883 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11884 * 11885 * This function is called in the SLI4 code path to read the port's 11886 * sli4 capabilities. 11887 * 11888 * This function may be be called from any context that can block-wait 11889 * for the completion. The expectation is that this routine is called 11890 * typically from probe_one or from the online routine. 
11891 **/ 11892 int 11893 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11894 { 11895 int rc; 11896 struct lpfc_mqe *mqe; 11897 struct lpfc_pc_sli4_params *sli4_params; 11898 uint32_t mbox_tmo; 11899 11900 rc = 0; 11901 mqe = &mboxq->u.mqe; 11902 11903 /* Read the port's SLI4 Parameters port capabilities */ 11904 lpfc_pc_sli4_params(mboxq); 11905 if (!phba->sli4_hba.intr_enable) 11906 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11907 else { 11908 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11909 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11910 } 11911 11912 if (unlikely(rc)) 11913 return 1; 11914 11915 sli4_params = &phba->sli4_hba.pc_sli4_params; 11916 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 11917 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 11918 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 11919 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 11920 &mqe->un.sli4_params); 11921 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 11922 &mqe->un.sli4_params); 11923 sli4_params->proto_types = mqe->un.sli4_params.word3; 11924 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 11925 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 11926 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 11927 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 11928 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 11929 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 11930 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 11931 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 11932 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 11933 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 11934 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 11935 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 11936 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 11937 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 11938 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 11939 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 11940 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 11941 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 11942 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 11943 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 11944 11945 /* Make sure that sge_supp_len can be handled by the driver */ 11946 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11947 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11948 11949 return rc; 11950 } 11951 11952 /** 11953 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 11954 * @phba: Pointer to HBA context object. 11955 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11956 * 11957 * This function is called in the SLI4 code path to read the port's 11958 * sli4 capabilities. 11959 * 11960 * This function may be be called from any context that can block-wait 11961 * for the completion. The expectation is that this routine is called 11962 * typically from probe_one or from the online routine. 
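 *
 * Return codes
 * 0 - successful
 * -ENODEV - no enabled FC4 type is supported by the port
 * other values - the GET_SLI4_PARAMETERS mailbox command failed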
11963 **/ 11964 int 11965 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11966 { 11967 int rc; 11968 struct lpfc_mqe *mqe = &mboxq->u.mqe; 11969 struct lpfc_pc_sli4_params *sli4_params; 11970 uint32_t mbox_tmo; 11971 int length; 11972 bool exp_wqcq_pages = true; 11973 struct lpfc_sli4_parameters *mbx_sli4_parameters; 11974 11975 /* 11976 * By default, the driver assumes the SLI4 port requires RPI 11977 * header postings. The SLI4_PARAM response will correct this 11978 * assumption. 11979 */ 11980 phba->sli4_hba.rpi_hdrs_in_use = 1; 11981 11982 /* Read the port's SLI4 Config Parameters */ 11983 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 11984 sizeof(struct lpfc_sli4_cfg_mhdr)); 11985 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11986 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 11987 length, LPFC_SLI4_MBX_EMBED); 11988 if (!phba->sli4_hba.intr_enable) 11989 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11990 else { 11991 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11992 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11993 } 11994 if (unlikely(rc)) 11995 return rc; 11996 sli4_params = &phba->sli4_hba.pc_sli4_params; 11997 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 11998 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 11999 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 12000 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 12001 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 12002 mbx_sli4_parameters); 12003 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 12004 mbx_sli4_parameters); 12005 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 12006 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 12007 else 12008 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 12009 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 12010 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 12011 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 12012 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 12013 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 12014 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 12015 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 12016 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 12017 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 12018 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 12019 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 12020 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 12021 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 12022 mbx_sli4_parameters); 12023 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 12024 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 12025 mbx_sli4_parameters); 12026 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 12027 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 12028 12029 /* Check for Extended Pre-Registered SGL support */ 12030 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 12031 12032 /* Check for firmware nvme support */ 12033 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 12034 bf_get(cfg_xib, mbx_sli4_parameters)); 12035 12036 if (rc) { 12037 /* Save this to indicate the Firmware supports NVME */ 12038 sli4_params->nvme = 1; 12039 12040 /* Firmware NVME support, check driver FC4 NVME support */ 12041 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 12042 lpfc_printf_log(phba, 
KERN_INFO, LOG_INIT | LOG_NVME, 12043 "6133 Disabling NVME support: " 12044 "FC4 type not supported: x%x\n", 12045 phba->cfg_enable_fc4_type); 12046 goto fcponly; 12047 } 12048 } else { 12049 /* No firmware NVME support, check driver FC4 NVME support */ 12050 sli4_params->nvme = 0; 12051 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 12053 "6101 Disabling NVME support: Not " 12054 "supported by firmware (%d %d) x%x\n", 12055 bf_get(cfg_nvme, mbx_sli4_parameters), 12056 bf_get(cfg_xib, mbx_sli4_parameters), 12057 phba->cfg_enable_fc4_type); 12058 fcponly: 12059 phba->nvme_support = 0; 12060 phba->nvmet_support = 0; 12061 phba->cfg_nvmet_mrq = 0; 12062 phba->cfg_nvme_seg_cnt = 0; 12063 12064 /* If no FC4 type support, move to just SCSI support */ 12065 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 12066 return -ENODEV; 12067 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 12068 } 12069 } 12070 12071 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 12072 * accommodate 512K and 1M IOs in a single nvme buf. 12073 */ 12074 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12075 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 12076 12077 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ 12078 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 12079 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) 12080 phba->cfg_enable_pbde = 0; 12081 12082 /* 12083 * To support Suppress Response feature we must satisfy 3 conditions. 12084 * lpfc_suppress_rsp module parameter must be set (default). 12085 * In SLI4-Parameters Descriptor: 12086 * Extended Inline Buffers (XIB) must be supported. 12087 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 12088 * (double negative). 12089 */ 12090 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 12091 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 12092 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 12093 else 12094 phba->cfg_suppress_rsp = 0; 12095 12096 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 12097 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 12098 12099 /* Make sure that sge_supp_len can be handled by the driver */ 12100 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 12101 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 12102 12103 /* 12104 * Check whether the adapter supports an embedded copy of the 12105 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 12106 * to use this option, 128-byte WQEs must be used. 
12107 */ 12108 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 12109 phba->fcp_embed_io = 1; 12110 else 12111 phba->fcp_embed_io = 0; 12112 12113 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 12114 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 12115 bf_get(cfg_xib, mbx_sli4_parameters), 12116 phba->cfg_enable_pbde, 12117 phba->fcp_embed_io, phba->nvme_support, 12118 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 12119 12120 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 12121 LPFC_SLI_INTF_IF_TYPE_2) && 12122 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 12123 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 12124 exp_wqcq_pages = false; 12125 12126 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 12127 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 12128 exp_wqcq_pages && 12129 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 12130 phba->enab_exp_wqcq_pages = 1; 12131 else 12132 phba->enab_exp_wqcq_pages = 0; 12133 /* 12134 * Check if the SLI port supports MDS Diagnostics 12135 */ 12136 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 12137 phba->mds_diags_support = 1; 12138 else 12139 phba->mds_diags_support = 0; 12140 12141 /* 12142 * Check if the SLI port supports NSLER 12143 */ 12144 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 12145 phba->nsler = 1; 12146 else 12147 phba->nsler = 0; 12148 12149 return 0; 12150 } 12151 12152 /** 12153 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 12154 * @pdev: pointer to PCI device 12155 * @pid: pointer to PCI device identifier 12156 * 12157 * This routine is to be called to attach a device with SLI-3 interface spec 12158 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 12159 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 12160 * information of the device and driver to see if the driver state that it can 12161 * support this kind of device. If the match is successful, the driver core 12162 * invokes this routine. If this routine determines it can claim the HBA, it 12163 * does all the initialization that it needs to do to handle the HBA properly. 
12164 * 12165 * Return code 12166 * 0 - driver can claim the device 12167 * negative value - driver can not claim the device 12168 **/ 12169 static int 12170 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 12171 { 12172 struct lpfc_hba *phba; 12173 struct lpfc_vport *vport = NULL; 12174 struct Scsi_Host *shost = NULL; 12175 int error; 12176 uint32_t cfg_mode, intr_mode; 12177 12178 /* Allocate memory for HBA structure */ 12179 phba = lpfc_hba_alloc(pdev); 12180 if (!phba) 12181 return -ENOMEM; 12182 12183 /* Perform generic PCI device enabling operation */ 12184 error = lpfc_enable_pci_dev(phba); 12185 if (error) 12186 goto out_free_phba; 12187 12188 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 12189 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 12190 if (error) 12191 goto out_disable_pci_dev; 12192 12193 /* Set up SLI-3 specific device PCI memory space */ 12194 error = lpfc_sli_pci_mem_setup(phba); 12195 if (error) { 12196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12197 "1402 Failed to set up pci memory space.\n"); 12198 goto out_disable_pci_dev; 12199 } 12200 12201 /* Set up SLI-3 specific device driver resources */ 12202 error = lpfc_sli_driver_resource_setup(phba); 12203 if (error) { 12204 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12205 "1404 Failed to set up driver resource.\n"); 12206 goto out_unset_pci_mem_s3; 12207 } 12208 12209 /* Initialize and populate the iocb list per host */ 12210 12211 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 12212 if (error) { 12213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12214 "1405 Failed to initialize iocb list.\n"); 12215 goto out_unset_driver_resource_s3; 12216 } 12217 12218 /* Set up common device driver resources */ 12219 error = lpfc_setup_driver_resource_phase2(phba); 12220 if (error) { 12221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12222 "1406 Failed to set up driver resource.\n"); 12223 goto out_free_iocb_list; 12224 } 12225 12226 /* Get the default values for Model Name and Description */ 12227 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12228 12229 /* Create SCSI host to the physical port */ 12230 error = lpfc_create_shost(phba); 12231 if (error) { 12232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12233 "1407 Failed to create scsi host.\n"); 12234 goto out_unset_driver_resource; 12235 } 12236 12237 /* Configure sysfs attributes */ 12238 vport = phba->pport; 12239 error = lpfc_alloc_sysfs_attr(vport); 12240 if (error) { 12241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12242 "1476 Failed to allocate sysfs attr\n"); 12243 goto out_destroy_shost; 12244 } 12245 12246 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12247 /* Now, trying to enable interrupt and bring up the device */ 12248 cfg_mode = phba->cfg_use_msi; 12249 while (true) { 12250 /* Put device to a known state before enabling interrupt */ 12251 lpfc_stop_port(phba); 12252 /* Configure and enable interrupt */ 12253 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 12254 if (intr_mode == LPFC_INTR_ERROR) { 12255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12256 "0431 Failed to enable interrupt.\n"); 12257 error = -ENODEV; 12258 goto out_free_sysfs_attr; 12259 } 12260 /* SLI-3 HBA setup */ 12261 if (lpfc_sli_hba_setup(phba)) { 12262 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12263 "1477 Failed to set up hba\n"); 12264 error = -ENODEV; 12265 goto out_remove_device; 12266 } 12267 12268 /* Wait 50ms for the interrupts of previous mailbox commands */ 12269 msleep(50); 12270 /* 
Check active interrupts on message signaled interrupts */ 12271 if (intr_mode == 0 || 12272 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 12273 /* Log the current active interrupt mode */ 12274 phba->intr_mode = intr_mode; 12275 lpfc_log_intr_mode(phba, intr_mode); 12276 break; 12277 } else { 12278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12279 "0447 Configure interrupt mode (%d) " 12280 "failed active interrupt test.\n", 12281 intr_mode); 12282 /* Disable the current interrupt mode */ 12283 lpfc_sli_disable_intr(phba); 12284 /* Try next level of interrupt mode */ 12285 cfg_mode = --intr_mode; 12286 } 12287 } 12288 12289 /* Perform post initialization setup */ 12290 lpfc_post_init_setup(phba); 12291 12292 /* Check if there are static vports to be created. */ 12293 lpfc_create_static_vport(phba); 12294 12295 return 0; 12296 12297 out_remove_device: 12298 lpfc_unset_hba(phba); 12299 out_free_sysfs_attr: 12300 lpfc_free_sysfs_attr(vport); 12301 out_destroy_shost: 12302 lpfc_destroy_shost(phba); 12303 out_unset_driver_resource: 12304 lpfc_unset_driver_resource_phase2(phba); 12305 out_free_iocb_list: 12306 lpfc_free_iocb_list(phba); 12307 out_unset_driver_resource_s3: 12308 lpfc_sli_driver_resource_unset(phba); 12309 out_unset_pci_mem_s3: 12310 lpfc_sli_pci_mem_unset(phba); 12311 out_disable_pci_dev: 12312 lpfc_disable_pci_dev(phba); 12313 if (shost) 12314 scsi_host_put(shost); 12315 out_free_phba: 12316 lpfc_hba_free(phba); 12317 return error; 12318 } 12319 12320 /** 12321 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 12322 * @pdev: pointer to PCI device 12323 * 12324 * This routine is to be called to disattach a device with SLI-3 interface 12325 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 12326 * removed from PCI bus, it performs all the necessary cleanup for the HBA 12327 * device to be removed from the PCI subsystem properly. 12328 **/ 12329 static void 12330 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 12331 { 12332 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12333 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 12334 struct lpfc_vport **vports; 12335 struct lpfc_hba *phba = vport->phba; 12336 int i; 12337 12338 spin_lock_irq(&phba->hbalock); 12339 vport->load_flag |= FC_UNLOADING; 12340 spin_unlock_irq(&phba->hbalock); 12341 12342 lpfc_free_sysfs_attr(vport); 12343 12344 /* Release all the vports against this physical port */ 12345 vports = lpfc_create_vport_work_array(phba); 12346 if (vports != NULL) 12347 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 12348 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 12349 continue; 12350 fc_vport_terminate(vports[i]->fc_vport); 12351 } 12352 lpfc_destroy_vport_work_array(phba, vports); 12353 12354 /* Remove FC host and then SCSI host with the physical port */ 12355 fc_remove_host(shost); 12356 scsi_remove_host(shost); 12357 12358 lpfc_cleanup(vport); 12359 12360 /* 12361 * Bring down the SLI Layer. This step disable all interrupts, 12362 * clears the rings, discards all mailbox commands, and resets 12363 * the HBA. 
12364 */ 12365 12366 /* HBA interrupt will be disabled after this call */ 12367 lpfc_sli_hba_down(phba); 12368 /* Stop kthread signal shall trigger work_done one more time */ 12369 kthread_stop(phba->worker_thread); 12370 /* Final cleanup of txcmplq and reset the HBA */ 12371 lpfc_sli_brdrestart(phba); 12372 12373 kfree(phba->vpi_bmask); 12374 kfree(phba->vpi_ids); 12375 12376 lpfc_stop_hba_timers(phba); 12377 spin_lock_irq(&phba->port_list_lock); 12378 list_del_init(&vport->listentry); 12379 spin_unlock_irq(&phba->port_list_lock); 12380 12381 lpfc_debugfs_terminate(vport); 12382 12383 /* Disable SR-IOV if enabled */ 12384 if (phba->cfg_sriov_nr_virtfn) 12385 pci_disable_sriov(pdev); 12386 12387 /* Disable interrupt */ 12388 lpfc_sli_disable_intr(phba); 12389 12390 scsi_host_put(shost); 12391 12392 /* 12393 * Call scsi_free before mem_free since scsi bufs are released to their 12394 * corresponding pools here. 12395 */ 12396 lpfc_scsi_free(phba); 12397 lpfc_free_iocb_list(phba); 12398 12399 lpfc_mem_free_all(phba); 12400 12401 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 12402 phba->hbqslimp.virt, phba->hbqslimp.phys); 12403 12404 /* Free resources associated with SLI2 interface */ 12405 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 12406 phba->slim2p.virt, phba->slim2p.phys); 12407 12408 /* unmap adapter SLIM and Control Registers */ 12409 iounmap(phba->ctrl_regs_memmap_p); 12410 iounmap(phba->slim_memmap_p); 12411 12412 lpfc_hba_free(phba); 12413 12414 pci_release_mem_regions(pdev); 12415 pci_disable_device(pdev); 12416 } 12417 12418 /** 12419 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 12420 * @pdev: pointer to PCI device 12421 * @msg: power management message 12422 * 12423 * This routine is to be called from the kernel's PCI subsystem to support 12424 * system Power Management (PM) to device with SLI-3 interface spec. When 12425 * PM invokes this method, it quiesces the device by stopping the driver's 12426 * worker thread for the device, turning off device's interrupt and DMA, 12427 * and bring the device offline. Note that as the driver implements the 12428 * minimum PM requirements to a power-aware driver's PM support for the 12429 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 12430 * to the suspend() method call will be treated as SUSPEND and the driver will 12431 * fully reinitialize its device during resume() method call, the driver will 12432 * set device to PCI_D3hot state in PCI config space instead of setting it 12433 * according to the @msg provided by the PM. 
12434 * 12435 * Return code 12436 * 0 - driver suspended the device 12437 * Error otherwise 12438 **/ 12439 static int 12440 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 12441 { 12442 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12443 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12444 12445 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12446 "0473 PCI device Power Management suspend.\n"); 12447 12448 /* Bring down the device */ 12449 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12450 lpfc_offline(phba); 12451 kthread_stop(phba->worker_thread); 12452 12453 /* Disable interrupt from device */ 12454 lpfc_sli_disable_intr(phba); 12455 12456 /* Save device state to PCI config space */ 12457 pci_save_state(pdev); 12458 pci_set_power_state(pdev, PCI_D3hot); 12459 12460 return 0; 12461 } 12462 12463 /** 12464 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 12465 * @pdev: pointer to PCI device 12466 * 12467 * This routine is to be called from the kernel's PCI subsystem to support 12468 * system Power Management (PM) to device with SLI-3 interface spec. When PM 12469 * invokes this method, it restores the device's PCI config space state and 12470 * fully reinitializes the device and brings it online. Note that as the 12471 * driver implements the minimum PM requirements to a power-aware driver's 12472 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 12473 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 12474 * driver will fully reinitialize its device during resume() method call, 12475 * the device will be set to PCI_D0 directly in PCI config space before 12476 * restoring the state. 12477 * 12478 * Return code 12479 * 0 - driver suspended the device 12480 * Error otherwise 12481 **/ 12482 static int 12483 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 12484 { 12485 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12486 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12487 uint32_t intr_mode; 12488 int error; 12489 12490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12491 "0452 PCI device Power Management resume.\n"); 12492 12493 /* Restore device state from PCI config space */ 12494 pci_set_power_state(pdev, PCI_D0); 12495 pci_restore_state(pdev); 12496 12497 /* 12498 * As the new kernel behavior of pci_restore_state() API call clears 12499 * device saved_state flag, need to save the restored state again. 12500 */ 12501 pci_save_state(pdev); 12502 12503 if (pdev->is_busmaster) 12504 pci_set_master(pdev); 12505 12506 /* Startup the kernel thread for this host adapter. 
*/ 12507 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12508 "lpfc_worker_%d", phba->brd_no); 12509 if (IS_ERR(phba->worker_thread)) { 12510 error = PTR_ERR(phba->worker_thread); 12511 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12512 "0434 PM resume failed to start worker " 12513 "thread: error=x%x.\n", error); 12514 return error; 12515 } 12516 12517 /* Configure and enable interrupt */ 12518 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12519 if (intr_mode == LPFC_INTR_ERROR) { 12520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12521 "0430 PM resume Failed to enable interrupt\n"); 12522 return -EIO; 12523 } else 12524 phba->intr_mode = intr_mode; 12525 12526 /* Restart HBA and bring it online */ 12527 lpfc_sli_brdrestart(phba); 12528 lpfc_online(phba); 12529 12530 /* Log the current active interrupt mode */ 12531 lpfc_log_intr_mode(phba, phba->intr_mode); 12532 12533 return 0; 12534 } 12535 12536 /** 12537 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 12538 * @phba: pointer to lpfc hba data structure. 12539 * 12540 * This routine is called to prepare the SLI3 device for PCI slot recover. It 12541 * aborts all the outstanding SCSI I/Os to the pci device. 12542 **/ 12543 static void 12544 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 12545 { 12546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12547 "2723 PCI channel I/O abort preparing for recovery\n"); 12548 12549 /* 12550 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12551 * and let the SCSI mid-layer to retry them to recover. 12552 */ 12553 lpfc_sli_abort_fcp_rings(phba); 12554 } 12555 12556 /** 12557 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 12558 * @phba: pointer to lpfc hba data structure. 12559 * 12560 * This routine is called to prepare the SLI3 device for PCI slot reset. It 12561 * disables the device interrupt and pci device, and aborts the internal FCP 12562 * pending I/Os. 12563 **/ 12564 static void 12565 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 12566 { 12567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12568 "2710 PCI channel disable preparing for reset\n"); 12569 12570 /* Block any management I/Os to the device */ 12571 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 12572 12573 /* Block all SCSI devices' I/Os on the host */ 12574 lpfc_scsi_dev_block(phba); 12575 12576 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12577 lpfc_sli_flush_io_rings(phba); 12578 12579 /* stop all timers */ 12580 lpfc_stop_hba_timers(phba); 12581 12582 /* Disable interrupt and pci device */ 12583 lpfc_sli_disable_intr(phba); 12584 pci_disable_device(phba->pcidev); 12585 } 12586 12587 /** 12588 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 12589 * @phba: pointer to lpfc hba data structure. 12590 * 12591 * This routine is called to prepare the SLI3 device for PCI slot permanently 12592 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12593 * pending I/Os. 
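 *
 * Illustrative note (summarizing the caller below): this helper backs the
 * pci_channel_io_perm_failure case in lpfc_io_error_detected_s3(); after it
 * runs, the error handler returns PCI_ERS_RESULT_DISCONNECT and the device
 * is not recovered.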
12594 **/ 12595 static void 12596 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12597 { 12598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12599 "2711 PCI channel permanent disable for failure\n"); 12600 /* Block all SCSI devices' I/Os on the host */ 12601 lpfc_scsi_dev_block(phba); 12602 12603 /* stop all timers */ 12604 lpfc_stop_hba_timers(phba); 12605 12606 /* Clean up all driver's outstanding SCSI I/Os */ 12607 lpfc_sli_flush_io_rings(phba); 12608 } 12609 12610 /** 12611 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 12612 * @pdev: pointer to PCI device. 12613 * @state: the current PCI connection state. 12614 * 12615 * This routine is called from the PCI subsystem for I/O error handling to 12616 * device with SLI-3 interface spec. This function is called by the PCI 12617 * subsystem after a PCI bus error affecting this device has been detected. 12618 * When this function is invoked, it will need to stop all the I/Os and 12619 * interrupt(s) to the device. Once that is done, it will return 12620 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 12621 * as desired. 12622 * 12623 * Return codes 12624 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 12625 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12626 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12627 **/ 12628 static pci_ers_result_t 12629 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 12630 { 12631 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12632 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12633 12634 switch (state) { 12635 case pci_channel_io_normal: 12636 /* Non-fatal error, prepare for recovery */ 12637 lpfc_sli_prep_dev_for_recover(phba); 12638 return PCI_ERS_RESULT_CAN_RECOVER; 12639 case pci_channel_io_frozen: 12640 /* Fatal error, prepare for slot reset */ 12641 lpfc_sli_prep_dev_for_reset(phba); 12642 return PCI_ERS_RESULT_NEED_RESET; 12643 case pci_channel_io_perm_failure: 12644 /* Permanent failure, prepare for device down */ 12645 lpfc_sli_prep_dev_for_perm_failure(phba); 12646 return PCI_ERS_RESULT_DISCONNECT; 12647 default: 12648 /* Unknown state, prepare and request slot reset */ 12649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12650 "0472 Unknown PCI error state: x%x\n", state); 12651 lpfc_sli_prep_dev_for_reset(phba); 12652 return PCI_ERS_RESULT_NEED_RESET; 12653 } 12654 } 12655 12656 /** 12657 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 12658 * @pdev: pointer to PCI device. 12659 * 12660 * This routine is called from the PCI subsystem for error handling to 12661 * device with SLI-3 interface spec. This is called after PCI bus has been 12662 * reset to restart the PCI card from scratch, as if from a cold-boot. 12663 * During the PCI subsystem error recovery, after driver returns 12664 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12665 * recovery and then call this routine before calling the .resume method 12666 * to recover the device. This function will initialize the HBA device, 12667 * enable the interrupt, but it will just put the HBA to offline state 12668 * without passing any I/O traffic. 
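 *
 * Note (added for clarity): no I/O traffic is restarted here. In the usual
 * PCI error-recovery sequence,
 *   error_detected() -> PCI_ERS_RESULT_NEED_RESET -> slot_reset() ->
 *   PCI_ERS_RESULT_RECOVERED -> resume(),
 * traffic resumes only when the PCI core later invokes lpfc_io_resume_s3(),
 * which brings the HBA back online.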
12669 * 12670 * Return codes 12671 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12672 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12673 */ 12674 static pci_ers_result_t 12675 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 12676 { 12677 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12678 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12679 struct lpfc_sli *psli = &phba->sli; 12680 uint32_t intr_mode; 12681 12682 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12683 if (pci_enable_device_mem(pdev)) { 12684 printk(KERN_ERR "lpfc: Cannot re-enable " 12685 "PCI device after reset.\n"); 12686 return PCI_ERS_RESULT_DISCONNECT; 12687 } 12688 12689 pci_restore_state(pdev); 12690 12691 /* 12692 * As the new kernel behavior of pci_restore_state() API call clears 12693 * device saved_state flag, need to save the restored state again. 12694 */ 12695 pci_save_state(pdev); 12696 12697 if (pdev->is_busmaster) 12698 pci_set_master(pdev); 12699 12700 spin_lock_irq(&phba->hbalock); 12701 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12702 spin_unlock_irq(&phba->hbalock); 12703 12704 /* Configure and enable interrupt */ 12705 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12706 if (intr_mode == LPFC_INTR_ERROR) { 12707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12708 "0427 Cannot re-enable interrupt after " 12709 "slot reset.\n"); 12710 return PCI_ERS_RESULT_DISCONNECT; 12711 } else 12712 phba->intr_mode = intr_mode; 12713 12714 /* Take device offline, it will perform cleanup */ 12715 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12716 lpfc_offline(phba); 12717 lpfc_sli_brdrestart(phba); 12718 12719 /* Log the current active interrupt mode */ 12720 lpfc_log_intr_mode(phba, phba->intr_mode); 12721 12722 return PCI_ERS_RESULT_RECOVERED; 12723 } 12724 12725 /** 12726 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 12727 * @pdev: pointer to PCI device 12728 * 12729 * This routine is called from the PCI subsystem for error handling to device 12730 * with SLI-3 interface spec. It is called when kernel error recovery tells 12731 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12732 * error recovery. After this call, traffic can start to flow from this device 12733 * again. 12734 */ 12735 static void 12736 lpfc_io_resume_s3(struct pci_dev *pdev) 12737 { 12738 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12739 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12740 12741 /* Bring device online, it will be no-op for non-fatal error resume */ 12742 lpfc_online(phba); 12743 } 12744 12745 /** 12746 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 12747 * @phba: pointer to lpfc hba data structure. 12748 * 12749 * returns the number of ELS/CT IOCBs to reserve 12750 **/ 12751 int 12752 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 12753 { 12754 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 12755 12756 if (phba->sli_rev == LPFC_SLI_REV4) { 12757 if (max_xri <= 100) 12758 return 10; 12759 else if (max_xri <= 256) 12760 return 25; 12761 else if (max_xri <= 512) 12762 return 50; 12763 else if (max_xri <= 1024) 12764 return 100; 12765 else if (max_xri <= 1536) 12766 return 150; 12767 else if (max_xri <= 2048) 12768 return 200; 12769 else 12770 return 250; 12771 } else 12772 return 0; 12773 } 12774 12775 /** 12776 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12777 * @phba: pointer to lpfc hba data structure. 
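 *
 * Worked example (illustrative only): with max_cfg_param.max_xri = 1024,
 * lpfc_sli4_get_els_iocb_cnt() above reserves 100 ELS/CT IOCBs; when
 * phba->nvmet_support is set, this routine adds LPFC_NVMET_BUF_POST (254)
 * for a total of 354.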
12778 * 12779 * returns the number of ELS/CT + NVMET IOCBs to reserve 12780 **/ 12781 int 12782 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 12783 { 12784 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 12785 12786 if (phba->nvmet_support) 12787 max_xri += LPFC_NVMET_BUF_POST; 12788 return max_xri; 12789 } 12790 12791 12792 static int 12793 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 12794 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 12795 const struct firmware *fw) 12796 { 12797 int rc; 12798 12799 /* Three cases: (1) FW was not supported on the detected adapter. 12800 * (2) FW update has been locked out administratively. 12801 * (3) Some other error during FW update. 12802 * In each case, an unmaskable message is written to the console 12803 * for admin diagnosis. 12804 */ 12805 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 12806 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && 12807 magic_number != MAGIC_NUMBER_G6) || 12808 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && 12809 magic_number != MAGIC_NUMBER_G7)) { 12810 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12811 "3030 This firmware version is not supported on" 12812 " this HBA model. Device:%x Magic:%x Type:%x " 12813 "ID:%x Size %d %zd\n", 12814 phba->pcidev->device, magic_number, ftype, fid, 12815 fsize, fw->size); 12816 rc = -EINVAL; 12817 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 12818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12819 "3021 Firmware downloads have been prohibited " 12820 "by a system configuration setting on " 12821 "Device:%x Magic:%x Type:%x ID:%x Size %d " 12822 "%zd\n", 12823 phba->pcidev->device, magic_number, ftype, fid, 12824 fsize, fw->size); 12825 rc = -EACCES; 12826 } else { 12827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12828 "3022 FW Download failed. Add Status x%x " 12829 "Device:%x Magic:%x Type:%x ID:%x Size %d " 12830 "%zd\n", 12831 offset, phba->pcidev->device, magic_number, 12832 ftype, fid, fsize, fw->size); 12833 rc = -EIO; 12834 } 12835 return rc; 12836 } 12837 12838 /** 12839 * lpfc_write_firmware - attempt to write a firmware image to the port 12840 * @fw: pointer to firmware image returned from request_firmware. 12841 * @context: pointer to the lpfc hba data structure, cast to void *, as 12842 * passed to request_firmware_nowait().
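 *
 * Sketch of the copy loop below (descriptive note, not additional driver
 * code): the image is streamed through up to LPFC_MBX_WR_CONFIG_MAX_BDE DMA
 * buffers of SLI4_PAGE_SIZE bytes each, so every lpfc_wr_object() call
 * pushes at most LPFC_MBX_WR_CONFIG_MAX_BDE * SLI4_PAGE_SIZE bytes and
 * advances "offset" until it reaches fw->size.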
12843 * 12844 **/ 12845 static void 12846 lpfc_write_firmware(const struct firmware *fw, void *context) 12847 { 12848 struct lpfc_hba *phba = (struct lpfc_hba *)context; 12849 char fwrev[FW_REV_STR_SIZE]; 12850 struct lpfc_grp_hdr *image; 12851 struct list_head dma_buffer_list; 12852 int i, rc = 0; 12853 struct lpfc_dmabuf *dmabuf, *next; 12854 uint32_t offset = 0, temp_offset = 0; 12855 uint32_t magic_number, ftype, fid, fsize; 12856 12857 /* It can be null in no-wait mode, sanity check */ 12858 if (!fw) { 12859 rc = -ENXIO; 12860 goto out; 12861 } 12862 image = (struct lpfc_grp_hdr *)fw->data; 12863 12864 magic_number = be32_to_cpu(image->magic_number); 12865 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 12866 fid = bf_get_be32(lpfc_grp_hdr_id, image); 12867 fsize = be32_to_cpu(image->size); 12868 12869 INIT_LIST_HEAD(&dma_buffer_list); 12870 lpfc_decode_firmware_rev(phba, fwrev, 1); 12871 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 12872 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12873 "3023 Updating Firmware, Current Version:%s " 12874 "New Version:%s\n", 12875 fwrev, image->revision); 12876 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 12877 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 12878 GFP_KERNEL); 12879 if (!dmabuf) { 12880 rc = -ENOMEM; 12881 goto release_out; 12882 } 12883 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12884 SLI4_PAGE_SIZE, 12885 &dmabuf->phys, 12886 GFP_KERNEL); 12887 if (!dmabuf->virt) { 12888 kfree(dmabuf); 12889 rc = -ENOMEM; 12890 goto release_out; 12891 } 12892 list_add_tail(&dmabuf->list, &dma_buffer_list); 12893 } 12894 while (offset < fw->size) { 12895 temp_offset = offset; 12896 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 12897 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 12898 memcpy(dmabuf->virt, 12899 fw->data + temp_offset, 12900 fw->size - temp_offset); 12901 temp_offset = fw->size; 12902 break; 12903 } 12904 memcpy(dmabuf->virt, fw->data + temp_offset, 12905 SLI4_PAGE_SIZE); 12906 temp_offset += SLI4_PAGE_SIZE; 12907 } 12908 rc = lpfc_wr_object(phba, &dma_buffer_list, 12909 (fw->size - offset), &offset); 12910 if (rc) { 12911 rc = lpfc_log_write_firmware_error(phba, offset, 12912 magic_number, 12913 ftype, 12914 fid, 12915 fsize, 12916 fw); 12917 goto release_out; 12918 } 12919 } 12920 rc = offset; 12921 } else 12922 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12923 "3029 Skipped Firmware update, Current " 12924 "Version:%s New Version:%s\n", 12925 fwrev, image->revision); 12926 12927 release_out: 12928 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 12929 list_del(&dmabuf->list); 12930 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 12931 dmabuf->virt, dmabuf->phys); 12932 kfree(dmabuf); 12933 } 12934 release_firmware(fw); 12935 out: 12936 if (rc < 0) 12937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12938 "3062 Firmware update error, status %d.\n", rc); 12939 else 12940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12941 "3024 Firmware update success: size %d.\n", rc); 12942 } 12943 12944 /** 12945 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 12946 * @phba: pointer to lpfc hba data structure. 12947 * 12948 * This routine is called to perform Linux generic firmware upgrade on device 12949 * that supports such feature. 
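 *
 * Usage sketch (illustrative; assumes a caller holding a valid phba, with
 * rc a local int):
 *
 *	rc = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
 *
 * requests the "<ModelName>.grp" image asynchronously, with
 * lpfc_write_firmware() invoked as the request_firmware_nowait() callback;
 * RUN_FW_UPGRADE instead fetches the image synchronously via
 * request_firmware() and writes it immediately.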
12950 **/ 12951 int 12952 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 12953 { 12954 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 12955 int ret; 12956 const struct firmware *fw; 12957 12958 /* Only supported on SLI4 interface type 2 for now */ 12959 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 12960 LPFC_SLI_INTF_IF_TYPE_2) 12961 return -EPERM; 12962 12963 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 12964 12965 if (fw_upgrade == INT_FW_UPGRADE) { 12966 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 12967 file_name, &phba->pcidev->dev, 12968 GFP_KERNEL, (void *)phba, 12969 lpfc_write_firmware); 12970 } else if (fw_upgrade == RUN_FW_UPGRADE) { 12971 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 12972 if (!ret) 12973 lpfc_write_firmware(fw, (void *)phba); 12974 } else { 12975 ret = -EINVAL; 12976 } 12977 12978 return ret; 12979 } 12980 12981 /** 12982 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 12983 * @pdev: pointer to PCI device 12984 * @pid: pointer to PCI device identifier 12985 * 12986 * This routine is called from the kernel's PCI subsystem to device with 12987 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 12988 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 12989 * information of the device and driver to see if the driver state that it 12990 * can support this kind of device. If the match is successful, the driver 12991 * core invokes this routine. If this routine determines it can claim the HBA, 12992 * it does all the initialization that it needs to do to handle the HBA 12993 * properly. 12994 * 12995 * Return code 12996 * 0 - driver can claim the device 12997 * negative value - driver can not claim the device 12998 **/ 12999 static int 13000 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 13001 { 13002 struct lpfc_hba *phba; 13003 struct lpfc_vport *vport = NULL; 13004 struct Scsi_Host *shost = NULL; 13005 int error; 13006 uint32_t cfg_mode, intr_mode; 13007 13008 /* Allocate memory for HBA structure */ 13009 phba = lpfc_hba_alloc(pdev); 13010 if (!phba) 13011 return -ENOMEM; 13012 13013 /* Perform generic PCI device enabling operation */ 13014 error = lpfc_enable_pci_dev(phba); 13015 if (error) 13016 goto out_free_phba; 13017 13018 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 13019 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 13020 if (error) 13021 goto out_disable_pci_dev; 13022 13023 /* Set up SLI-4 specific device PCI memory space */ 13024 error = lpfc_sli4_pci_mem_setup(phba); 13025 if (error) { 13026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13027 "1410 Failed to set up pci memory space.\n"); 13028 goto out_disable_pci_dev; 13029 } 13030 13031 /* Set up SLI-4 Specific device driver resources */ 13032 error = lpfc_sli4_driver_resource_setup(phba); 13033 if (error) { 13034 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13035 "1412 Failed to set up driver resource.\n"); 13036 goto out_unset_pci_mem_s4; 13037 } 13038 13039 INIT_LIST_HEAD(&phba->active_rrq_list); 13040 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 13041 13042 /* Set up common device driver resources */ 13043 error = lpfc_setup_driver_resource_phase2(phba); 13044 if (error) { 13045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13046 "1414 Failed to set up driver resource.\n"); 13047 goto out_unset_driver_resource_s4; 13048 } 13049 13050 /* Get the default values for Model Name and 
Description */ 13051 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13052 13053 /* Now, trying to enable interrupt and bring up the device */ 13054 cfg_mode = phba->cfg_use_msi; 13055 13056 /* Put device to a known state before enabling interrupt */ 13057 phba->pport = NULL; 13058 lpfc_stop_port(phba); 13059 13060 /* Init cpu_map array */ 13061 lpfc_cpu_map_array_init(phba); 13062 13063 /* Init hba_eq_hdl array */ 13064 lpfc_hba_eq_hdl_array_init(phba); 13065 13066 /* Configure and enable interrupt */ 13067 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 13068 if (intr_mode == LPFC_INTR_ERROR) { 13069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13070 "0426 Failed to enable interrupt.\n"); 13071 error = -ENODEV; 13072 goto out_unset_driver_resource; 13073 } 13074 /* Default to single EQ for non-MSI-X */ 13075 if (phba->intr_type != MSIX) { 13076 phba->cfg_irq_chann = 1; 13077 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13078 if (phba->nvmet_support) 13079 phba->cfg_nvmet_mrq = 1; 13080 } 13081 } 13082 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 13083 13084 /* Create SCSI host to the physical port */ 13085 error = lpfc_create_shost(phba); 13086 if (error) { 13087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13088 "1415 Failed to create scsi host.\n"); 13089 goto out_disable_intr; 13090 } 13091 vport = phba->pport; 13092 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13093 13094 /* Configure sysfs attributes */ 13095 error = lpfc_alloc_sysfs_attr(vport); 13096 if (error) { 13097 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13098 "1416 Failed to allocate sysfs attr\n"); 13099 goto out_destroy_shost; 13100 } 13101 13102 /* Set up SLI-4 HBA */ 13103 if (lpfc_sli4_hba_setup(phba)) { 13104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13105 "1421 Failed to set up hba\n"); 13106 error = -ENODEV; 13107 goto out_free_sysfs_attr; 13108 } 13109 13110 /* Log the current active interrupt mode */ 13111 phba->intr_mode = intr_mode; 13112 lpfc_log_intr_mode(phba, intr_mode); 13113 13114 /* Perform post initialization setup */ 13115 lpfc_post_init_setup(phba); 13116 13117 /* NVME support in FW earlier in the driver load corrects the 13118 * FC4 type making a check for nvme_support unnecessary. 13119 */ 13120 if (phba->nvmet_support == 0) { 13121 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13122 /* Create NVME binding with nvme_fc_transport. This 13123 * ensures the vport is initialized. If the localport 13124 * create fails, it should not unload the driver to 13125 * support field issues. 13126 */ 13127 error = lpfc_nvme_create_localport(vport); 13128 if (error) { 13129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13130 "6004 NVME registration " 13131 "failed, error x%x\n", 13132 error); 13133 } 13134 } 13135 } 13136 13137 /* check for firmware upgrade or downgrade */ 13138 if (phba->cfg_request_firmware_upgrade) 13139 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 13140 13141 /* Check if there are static vports to be created. 
*/ 13142 lpfc_create_static_vport(phba); 13143 13144 /* Enable RAS FW log support */ 13145 lpfc_sli4_ras_setup(phba); 13146 13147 INIT_LIST_HEAD(&phba->poll_list); 13148 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 13149 13150 return 0; 13151 13152 out_free_sysfs_attr: 13153 lpfc_free_sysfs_attr(vport); 13154 out_destroy_shost: 13155 lpfc_destroy_shost(phba); 13156 out_disable_intr: 13157 lpfc_sli4_disable_intr(phba); 13158 out_unset_driver_resource: 13159 lpfc_unset_driver_resource_phase2(phba); 13160 out_unset_driver_resource_s4: 13161 lpfc_sli4_driver_resource_unset(phba); 13162 out_unset_pci_mem_s4: 13163 lpfc_sli4_pci_mem_unset(phba); 13164 out_disable_pci_dev: 13165 lpfc_disable_pci_dev(phba); 13166 if (shost) 13167 scsi_host_put(shost); 13168 out_free_phba: 13169 lpfc_hba_free(phba); 13170 return error; 13171 } 13172 13173 /** 13174 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 13175 * @pdev: pointer to PCI device 13176 * 13177 * This routine is called from the kernel's PCI subsystem to device with 13178 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 13179 * removed from PCI bus, it performs all the necessary cleanup for the HBA 13180 * device to be removed from the PCI subsystem properly. 13181 **/ 13182 static void 13183 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 13184 { 13185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13186 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 13187 struct lpfc_vport **vports; 13188 struct lpfc_hba *phba = vport->phba; 13189 int i; 13190 13191 /* Mark the device unloading flag */ 13192 spin_lock_irq(&phba->hbalock); 13193 vport->load_flag |= FC_UNLOADING; 13194 spin_unlock_irq(&phba->hbalock); 13195 13196 /* Free the HBA sysfs attributes */ 13197 lpfc_free_sysfs_attr(vport); 13198 13199 /* Release all the vports against this physical port */ 13200 vports = lpfc_create_vport_work_array(phba); 13201 if (vports != NULL) 13202 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 13203 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 13204 continue; 13205 fc_vport_terminate(vports[i]->fc_vport); 13206 } 13207 lpfc_destroy_vport_work_array(phba, vports); 13208 13209 /* Remove FC host and then SCSI host with the physical port */ 13210 fc_remove_host(shost); 13211 scsi_remove_host(shost); 13212 13213 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 13214 * localports are destroyed after to cleanup all transport memory. 13215 */ 13216 lpfc_cleanup(vport); 13217 lpfc_nvmet_destroy_targetport(phba); 13218 lpfc_nvme_destroy_localport(vport); 13219 13220 /* De-allocate multi-XRI pools */ 13221 if (phba->cfg_xri_rebalancing) 13222 lpfc_destroy_multixri_pools(phba); 13223 13224 /* 13225 * Bring down the SLI Layer. This step disables all interrupts, 13226 * clears the rings, discards all mailbox commands, and resets 13227 * the HBA FCoE function. 13228 */ 13229 lpfc_debugfs_terminate(vport); 13230 13231 lpfc_stop_hba_timers(phba); 13232 spin_lock_irq(&phba->port_list_lock); 13233 list_del_init(&vport->listentry); 13234 spin_unlock_irq(&phba->port_list_lock); 13235 13236 /* Perform scsi free before driver resource_unset since scsi 13237 * buffers are released to their corresponding pools here. 
13238 */ 13239 lpfc_io_free(phba); 13240 lpfc_free_iocb_list(phba); 13241 lpfc_sli4_hba_unset(phba); 13242 13243 lpfc_unset_driver_resource_phase2(phba); 13244 lpfc_sli4_driver_resource_unset(phba); 13245 13246 /* Unmap adapter Control and Doorbell registers */ 13247 lpfc_sli4_pci_mem_unset(phba); 13248 13249 /* Release PCI resources and disable device's PCI function */ 13250 scsi_host_put(shost); 13251 lpfc_disable_pci_dev(phba); 13252 13253 /* Finally, free the driver's device data structure */ 13254 lpfc_hba_free(phba); 13255 13256 return; 13257 } 13258 13259 /** 13260 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 13261 * @pdev: pointer to PCI device 13262 * @msg: power management message 13263 * 13264 * This routine is called from the kernel's PCI subsystem to support system 13265 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes 13266 * this method, it quiesces the device by stopping the driver's worker 13267 * thread for the device, turning off the device's interrupts and DMA, and 13268 * bringing the device offline. Note that the driver implements only the 13269 * minimum PM requirements of a power-aware driver: all 13270 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() 13271 * method are treated as SUSPEND, and the driver fully 13272 * reinitializes its device during the resume() method call. The driver 13273 * therefore sets the device to PCI_D3hot state in PCI config space instead 13274 * of setting it according to the @msg provided by the PM. 13275 * 13276 * Return code 13277 * 0 - driver suspended the device 13278 * Error otherwise 13279 **/ 13280 static int 13281 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 13282 { 13283 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13284 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13285 13286 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13287 "2843 PCI device Power Management suspend.\n"); 13288 13289 /* Bring down the device */ 13290 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13291 lpfc_offline(phba); 13292 kthread_stop(phba->worker_thread); 13293 13294 /* Disable interrupt from device */ 13295 lpfc_sli4_disable_intr(phba); 13296 lpfc_sli4_queue_destroy(phba); 13297 13298 /* Save device state to PCI config space */ 13299 pci_save_state(pdev); 13300 pci_set_power_state(pdev, PCI_D3hot); 13301 13302 return 0; 13303 } 13304 13305 /** 13306 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 13307 * @pdev: pointer to PCI device 13308 * 13309 * This routine is called from the kernel's PCI subsystem to support system 13310 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes 13311 * this method, it restores the device's PCI config space state and fully 13312 * reinitializes the device and brings it online. Note that the driver 13313 * implements only the minimum PM requirements of a power-aware driver: all 13314 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed 13315 * to the suspend() method are treated as SUSPEND, and the driver 13316 * fully reinitializes its device during the resume() method call. The device 13317 * is therefore set to PCI_D0 directly in PCI config space before its 13318 * state is restored.
13319 * 13320 * Return code 13321 * 0 - driver resumed the device 13322 * Error otherwise 13323 **/ 13324 static int 13325 lpfc_pci_resume_one_s4(struct pci_dev *pdev) 13326 { 13327 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13328 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13329 uint32_t intr_mode; 13330 int error; 13331 13332 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13333 "0292 PCI device Power Management resume.\n"); 13334 13335 /* Restore device state from PCI config space */ 13336 pci_set_power_state(pdev, PCI_D0); 13337 pci_restore_state(pdev); 13338 13339 /* 13340 * As the new kernel behavior of pci_restore_state() API call clears 13341 * device saved_state flag, need to save the restored state again. 13342 */ 13343 pci_save_state(pdev); 13344 13345 if (pdev->is_busmaster) 13346 pci_set_master(pdev); 13347 13348 /* Startup the kernel thread for this host adapter. */ 13349 phba->worker_thread = kthread_run(lpfc_do_work, phba, 13350 "lpfc_worker_%d", phba->brd_no); 13351 if (IS_ERR(phba->worker_thread)) { 13352 error = PTR_ERR(phba->worker_thread); 13353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13354 "0293 PM resume failed to start worker " 13355 "thread: error=x%x.\n", error); 13356 return error; 13357 } 13358 13359 /* Configure and enable interrupt */ 13360 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13361 if (intr_mode == LPFC_INTR_ERROR) { 13362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13363 "0294 PM resume Failed to enable interrupt\n"); 13364 return -EIO; 13365 } else 13366 phba->intr_mode = intr_mode; 13367 13368 /* Restart HBA and bring it online */ 13369 lpfc_sli_brdrestart(phba); 13370 lpfc_online(phba); 13371 13372 /* Log the current active interrupt mode */ 13373 lpfc_log_intr_mode(phba, phba->intr_mode); 13374 13375 return 0; 13376 } 13377 13378 /** 13379 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 13380 * @phba: pointer to lpfc hba data structure. 13381 * 13382 * This routine is called to prepare the SLI4 device for PCI slot recover. It 13383 * aborts all the outstanding SCSI I/Os to the pci device. 13384 **/ 13385 static void 13386 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 13387 { 13388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13389 "2828 PCI channel I/O abort preparing for recovery\n"); 13390 /* 13391 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 13392 * and let the SCSI mid-layer to retry them to recover. 13393 */ 13394 lpfc_sli_abort_fcp_rings(phba); 13395 } 13396 13397 /** 13398 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 13399 * @phba: pointer to lpfc hba data structure. 13400 * 13401 * This routine is called to prepare the SLI4 device for PCI slot reset. It 13402 * disables the device interrupt and pci device, and aborts the internal FCP 13403 * pending I/Os.
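 *
 * Note (added for clarity): compared with the SLI-3 variant
 * lpfc_sli_prep_dev_for_reset(), this path also tears down the SLI-4 queues
 * via lpfc_sli4_queue_destroy() and blocks management I/O without waiting
 * (LPFC_MBX_NO_WAIT).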
13404 **/ 13405 static void 13406 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 13407 { 13408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13409 "2826 PCI channel disable preparing for reset\n"); 13410 13411 /* Block any management I/Os to the device */ 13412 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 13413 13414 /* Block all SCSI devices' I/Os on the host */ 13415 lpfc_scsi_dev_block(phba); 13416 13417 /* Flush all driver's outstanding I/Os as we are to reset */ 13418 lpfc_sli_flush_io_rings(phba); 13419 13420 /* stop all timers */ 13421 lpfc_stop_hba_timers(phba); 13422 13423 /* Disable interrupt and pci device */ 13424 lpfc_sli4_disable_intr(phba); 13425 lpfc_sli4_queue_destroy(phba); 13426 pci_disable_device(phba->pcidev); 13427 } 13428 13429 /** 13430 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 13431 * @phba: pointer to lpfc hba data structure. 13432 * 13433 * This routine is called to prepare the SLI4 device for PCI slot permanently 13434 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 13435 * pending I/Os. 13436 **/ 13437 static void 13438 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 13439 { 13440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13441 "2827 PCI channel permanent disable for failure\n"); 13442 13443 /* Block all SCSI devices' I/Os on the host */ 13444 lpfc_scsi_dev_block(phba); 13445 13446 /* stop all timers */ 13447 lpfc_stop_hba_timers(phba); 13448 13449 /* Clean up all driver's outstanding I/Os */ 13450 lpfc_sli_flush_io_rings(phba); 13451 } 13452 13453 /** 13454 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 13455 * @pdev: pointer to PCI device. 13456 * @state: the current PCI connection state. 13457 * 13458 * This routine is called from the PCI subsystem for error handling to device 13459 * with SLI-4 interface spec. This function is called by the PCI subsystem 13460 * after a PCI bus error affecting this device has been detected. When this 13461 * function is invoked, it will need to stop all the I/Os and interrupt(s) 13462 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 13463 * for the PCI subsystem to perform proper recovery as desired. 
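 *
 * The mapping implemented below is (illustrative summary):
 *   pci_channel_io_normal       -> lpfc_sli4_prep_dev_for_recover()
 *   pci_channel_io_frozen       -> lpfc_sli4_prep_dev_for_reset()
 *   pci_channel_io_perm_failure -> lpfc_sli4_prep_dev_for_perm_failure()
 * with unknown states treated like a frozen channel.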
13464 * 13465 * Return codes 13466 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 13467 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13468 **/ 13469 static pci_ers_result_t 13470 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 13471 { 13472 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13473 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13474 13475 switch (state) { 13476 case pci_channel_io_normal: 13477 /* Non-fatal error, prepare for recovery */ 13478 lpfc_sli4_prep_dev_for_recover(phba); 13479 return PCI_ERS_RESULT_CAN_RECOVER; 13480 case pci_channel_io_frozen: 13481 /* Fatal error, prepare for slot reset */ 13482 lpfc_sli4_prep_dev_for_reset(phba); 13483 return PCI_ERS_RESULT_NEED_RESET; 13484 case pci_channel_io_perm_failure: 13485 /* Permanent failure, prepare for device down */ 13486 lpfc_sli4_prep_dev_for_perm_failure(phba); 13487 return PCI_ERS_RESULT_DISCONNECT; 13488 default: 13489 /* Unknown state, prepare and request slot reset */ 13490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13491 "2825 Unknown PCI error state: x%x\n", state); 13492 lpfc_sli4_prep_dev_for_reset(phba); 13493 return PCI_ERS_RESULT_NEED_RESET; 13494 } 13495 } 13496 13497 /** 13498 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 13499 * @pdev: pointer to PCI device. 13500 * 13501 * This routine is called from the PCI subsystem for error handling to device 13502 * with SLI-4 interface spec. It is called after PCI bus has been reset to 13503 * restart the PCI card from scratch, as if from a cold-boot. During the 13504 * PCI subsystem error recovery, after the driver returns 13505 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 13506 * recovery and then call this routine before calling the .resume method to 13507 * recover the device. This function will initialize the HBA device, enable 13508 * the interrupt, but it will just put the HBA to offline state without 13509 * passing any I/O traffic. 13510 * 13511 * Return codes 13512 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13513 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13514 */ 13515 static pci_ers_result_t 13516 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 13517 { 13518 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13519 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13520 struct lpfc_sli *psli = &phba->sli; 13521 uint32_t intr_mode; 13522 13523 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 13524 if (pci_enable_device_mem(pdev)) { 13525 printk(KERN_ERR "lpfc: Cannot re-enable " 13526 "PCI device after reset.\n"); 13527 return PCI_ERS_RESULT_DISCONNECT; 13528 } 13529 13530 pci_restore_state(pdev); 13531 13532 /* 13533 * As the new kernel behavior of pci_restore_state() API call clears 13534 * device saved_state flag, need to save the restored state again. 
13535 */ 13536 pci_save_state(pdev); 13537 13538 if (pdev->is_busmaster) 13539 pci_set_master(pdev); 13540 13541 spin_lock_irq(&phba->hbalock); 13542 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 13543 spin_unlock_irq(&phba->hbalock); 13544 13545 /* Configure and enable interrupt */ 13546 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13547 if (intr_mode == LPFC_INTR_ERROR) { 13548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13549 "2824 Cannot re-enable interrupt after " 13550 "slot reset.\n"); 13551 return PCI_ERS_RESULT_DISCONNECT; 13552 } else 13553 phba->intr_mode = intr_mode; 13554 13555 /* Log the current active interrupt mode */ 13556 lpfc_log_intr_mode(phba, phba->intr_mode); 13557 13558 return PCI_ERS_RESULT_RECOVERED; 13559 } 13560 13561 /** 13562 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 13563 * @pdev: pointer to PCI device 13564 * 13565 * This routine is called from the PCI subsystem for error handling to device 13566 * with SLI-4 interface spec. It is called when kernel error recovery tells 13567 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 13568 * error recovery. After this call, traffic can start to flow from this device 13569 * again. 13570 **/ 13571 static void 13572 lpfc_io_resume_s4(struct pci_dev *pdev) 13573 { 13574 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13575 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13576 13577 /* 13578 * In case of slot reset, as function reset is performed through 13579 * mailbox command which needs DMA to be enabled, this operation 13580 * has to be moved to the io resume phase. Taking device offline 13581 * will perform the necessary cleanup. 13582 */ 13583 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 13584 /* Perform device reset */ 13585 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13586 lpfc_offline(phba); 13587 lpfc_sli_brdrestart(phba); 13588 /* Bring the device back online */ 13589 lpfc_online(phba); 13590 } 13591 } 13592 13593 /** 13594 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 13595 * @pdev: pointer to PCI device 13596 * @pid: pointer to PCI device identifier 13597 * 13598 * This routine is to be registered to the kernel's PCI subsystem. When an 13599 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 13600 * at PCI device-specific information of the device and driver to see if the 13601 * driver state that it can support this kind of device. If the match is 13602 * successful, the driver core invokes this routine. This routine dispatches 13603 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 13604 * do all the initialization that it needs to do to handle the HBA device 13605 * properly. 
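 *
 * Dispatch sketch (summarizing the code below): the SLI_INTF register is
 * read from PCI config space; if it is valid and reports an SLI-4 revision,
 * the probe is routed to lpfc_pci_probe_one_s4(), otherwise to
 * lpfc_pci_probe_one_s3().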
13606 * 13607 * Return code 13608 * 0 - driver can claim the device 13609 * negative value - driver can not claim the device 13610 **/ 13611 static int 13612 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 13613 { 13614 int rc; 13615 struct lpfc_sli_intf intf; 13616 13617 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 13618 return -ENODEV; 13619 13620 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 13621 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 13622 rc = lpfc_pci_probe_one_s4(pdev, pid); 13623 else 13624 rc = lpfc_pci_probe_one_s3(pdev, pid); 13625 13626 return rc; 13627 } 13628 13629 /** 13630 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 13631 * @pdev: pointer to PCI device 13632 * 13633 * This routine is to be registered to the kernel's PCI subsystem. When an 13634 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 13635 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 13636 * remove routine, which will perform all the necessary cleanup for the 13637 * device to be removed from the PCI subsystem properly. 13638 **/ 13639 static void 13640 lpfc_pci_remove_one(struct pci_dev *pdev) 13641 { 13642 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13643 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13644 13645 switch (phba->pci_dev_grp) { 13646 case LPFC_PCI_DEV_LP: 13647 lpfc_pci_remove_one_s3(pdev); 13648 break; 13649 case LPFC_PCI_DEV_OC: 13650 lpfc_pci_remove_one_s4(pdev); 13651 break; 13652 default: 13653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13654 "1424 Invalid PCI device group: 0x%x\n", 13655 phba->pci_dev_grp); 13656 break; 13657 } 13658 return; 13659 } 13660 13661 /** 13662 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 13663 * @pdev: pointer to PCI device 13664 * @msg: power management message 13665 * 13666 * This routine is to be registered to the kernel's PCI subsystem to support 13667 * system Power Management (PM). When PM invokes this method, it dispatches 13668 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 13669 * suspend the device. 13670 * 13671 * Return code 13672 * 0 - driver suspended the device 13673 * Error otherwise 13674 **/ 13675 static int 13676 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 13677 { 13678 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13679 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13680 int rc = -ENODEV; 13681 13682 switch (phba->pci_dev_grp) { 13683 case LPFC_PCI_DEV_LP: 13684 rc = lpfc_pci_suspend_one_s3(pdev, msg); 13685 break; 13686 case LPFC_PCI_DEV_OC: 13687 rc = lpfc_pci_suspend_one_s4(pdev, msg); 13688 break; 13689 default: 13690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13691 "1425 Invalid PCI device group: 0x%x\n", 13692 phba->pci_dev_grp); 13693 break; 13694 } 13695 return rc; 13696 } 13697 13698 /** 13699 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 13700 * @pdev: pointer to PCI device 13701 * 13702 * This routine is to be registered to the kernel's PCI subsystem to support 13703 * system Power Management (PM). When PM invokes this method, it dispatches 13704 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 13705 * resume the device. 
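 *
 * Note (added for clarity): unlike probe, which inspects the SLI_INTF
 * register, this and the other post-probe dispatchers key off
 * phba->pci_dev_grp (LPFC_PCI_DEV_LP for SLI-3, LPFC_PCI_DEV_OC for SLI-4)
 * as set up during probe.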
13706 * 13707 * Return code 13708 * 0 - driver resumed the device 13709 * Error otherwise 13710 **/ 13711 static int 13712 lpfc_pci_resume_one(struct pci_dev *pdev) 13713 { 13714 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13715 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13716 int rc = -ENODEV; 13717 13718 switch (phba->pci_dev_grp) { 13719 case LPFC_PCI_DEV_LP: 13720 rc = lpfc_pci_resume_one_s3(pdev); 13721 break; 13722 case LPFC_PCI_DEV_OC: 13723 rc = lpfc_pci_resume_one_s4(pdev); 13724 break; 13725 default: 13726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13727 "1426 Invalid PCI device group: 0x%x\n", 13728 phba->pci_dev_grp); 13729 break; 13730 } 13731 return rc; 13732 } 13733 13734 /** 13735 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 13736 * @pdev: pointer to PCI device. 13737 * @state: the current PCI connection state. 13738 * 13739 * This routine is registered to the PCI subsystem for error handling. This 13740 * function is called by the PCI subsystem after a PCI bus error affecting 13741 * this device has been detected. When this routine is invoked, it dispatches 13742 * the action to the proper SLI-3 or SLI-4 device error detected handling 13743 * routine, which will perform the proper error detected operation. 13744 * 13745 * Return codes 13746 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 13747 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13748 **/ 13749 static pci_ers_result_t 13750 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 13751 { 13752 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13753 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13754 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13755 13756 switch (phba->pci_dev_grp) { 13757 case LPFC_PCI_DEV_LP: 13758 rc = lpfc_io_error_detected_s3(pdev, state); 13759 break; 13760 case LPFC_PCI_DEV_OC: 13761 rc = lpfc_io_error_detected_s4(pdev, state); 13762 break; 13763 default: 13764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13765 "1427 Invalid PCI device group: 0x%x\n", 13766 phba->pci_dev_grp); 13767 break; 13768 } 13769 return rc; 13770 } 13771 13772 /** 13773 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch 13774 * @pdev: pointer to PCI device. 13775 * 13776 * This routine is registered to the PCI subsystem for error handling. This 13777 * function is called after PCI bus has been reset to restart the PCI card 13778 * from scratch, as if from a cold-boot. When this routine is invoked, it 13779 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 13780 * routine, which will perform the proper device reset.
13781 * 13782 * Return codes 13783 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13784 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13785 **/ 13786 static pci_ers_result_t 13787 lpfc_io_slot_reset(struct pci_dev *pdev) 13788 { 13789 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13790 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13791 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13792 13793 switch (phba->pci_dev_grp) { 13794 case LPFC_PCI_DEV_LP: 13795 rc = lpfc_io_slot_reset_s3(pdev); 13796 break; 13797 case LPFC_PCI_DEV_OC: 13798 rc = lpfc_io_slot_reset_s4(pdev); 13799 break; 13800 default: 13801 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13802 "1428 Invalid PCI device group: 0x%x\n", 13803 phba->pci_dev_grp); 13804 break; 13805 } 13806 return rc; 13807 } 13808 13809 /** 13810 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 13811 * @pdev: pointer to PCI device 13812 * 13813 * This routine is registered to the PCI subsystem for error handling. It 13814 * is called when kernel error recovery tells the lpfc driver that it is 13815 * OK to resume normal PCI operation after PCI bus error recovery. When 13816 * this routine is invoked, it dispatches the action to the proper SLI-3 13817 * or SLI-4 device io_resume routine, which will resume the device operation. 13818 **/ 13819 static void 13820 lpfc_io_resume(struct pci_dev *pdev) 13821 { 13822 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13823 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13824 13825 switch (phba->pci_dev_grp) { 13826 case LPFC_PCI_DEV_LP: 13827 lpfc_io_resume_s3(pdev); 13828 break; 13829 case LPFC_PCI_DEV_OC: 13830 lpfc_io_resume_s4(pdev); 13831 break; 13832 default: 13833 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13834 "1429 Invalid PCI device group: 0x%x\n", 13835 phba->pci_dev_grp); 13836 break; 13837 } 13838 return; 13839 } 13840 13841 /** 13842 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 13843 * @phba: pointer to lpfc hba data structure. 13844 * 13845 * This routine checks to see if OAS is supported for this adapter. If 13846 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 13847 * the enable oas flag is cleared and the pool created for OAS device data 13848 * is destroyed. 13849 * 13850 **/ 13851 static void 13852 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 13853 { 13854 13855 if (!phba->cfg_EnableXLane) 13856 return; 13857 13858 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 13859 phba->cfg_fof = 1; 13860 } else { 13861 phba->cfg_fof = 0; 13862 mempool_destroy(phba->device_data_mem_pool); 13863 phba->device_data_mem_pool = NULL; 13864 } 13865 13866 return; 13867 } 13868 13869 /** 13870 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 13871 * @phba: pointer to lpfc hba data structure. 13872 * 13873 * This routine checks to see if RAS is supported by the adapter. Check the 13874 * function through which RAS support enablement is to be done. 
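 *
 * In short (per the checks below): RAS firmware logging is hardware-supported
 * only on Lancer G6/G7 devices, and is enabled only when cfg_ras_fwlog_func
 * matches this PCI function and a non-zero cfg_ras_fwlog_buffsize has been
 * configured.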
13875 **/ 13876 void 13877 lpfc_sli4_ras_init(struct lpfc_hba *phba) 13878 { 13879 switch (phba->pcidev->device) { 13880 case PCI_DEVICE_ID_LANCER_G6_FC: 13881 case PCI_DEVICE_ID_LANCER_G7_FC: 13882 phba->ras_fwlog.ras_hwsupport = true; 13883 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 13884 phba->cfg_ras_fwlog_buffsize) 13885 phba->ras_fwlog.ras_enabled = true; 13886 else 13887 phba->ras_fwlog.ras_enabled = false; 13888 break; 13889 default: 13890 phba->ras_fwlog.ras_hwsupport = false; 13891 } 13892 } 13893 13894 13895 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 13896 13897 static const struct pci_error_handlers lpfc_err_handler = { 13898 .error_detected = lpfc_io_error_detected, 13899 .slot_reset = lpfc_io_slot_reset, 13900 .resume = lpfc_io_resume, 13901 }; 13902 13903 static struct pci_driver lpfc_driver = { 13904 .name = LPFC_DRIVER_NAME, 13905 .id_table = lpfc_id_table, 13906 .probe = lpfc_pci_probe_one, 13907 .remove = lpfc_pci_remove_one, 13908 .shutdown = lpfc_pci_remove_one, 13909 .suspend = lpfc_pci_suspend_one, 13910 .resume = lpfc_pci_resume_one, 13911 .err_handler = &lpfc_err_handler, 13912 }; 13913 13914 static const struct file_operations lpfc_mgmt_fop = { 13915 .owner = THIS_MODULE, 13916 }; 13917 13918 static struct miscdevice lpfc_mgmt_dev = { 13919 .minor = MISC_DYNAMIC_MINOR, 13920 .name = "lpfcmgmt", 13921 .fops = &lpfc_mgmt_fop, 13922 }; 13923 13924 /** 13925 * lpfc_init - lpfc module initialization routine 13926 * 13927 * This routine is to be invoked when the lpfc module is loaded into the 13928 * kernel. The special kernel macro module_init() is used to indicate the 13929 * role of this routine to the kernel as lpfc module entry point. 13930 * 13931 * Return codes 13932 * 0 - successful 13933 * -ENOMEM - FC attach transport failed 13934 * all others - failed 13935 */ 13936 static int __init 13937 lpfc_init(void) 13938 { 13939 int error = 0; 13940 13941 printk(LPFC_MODULE_DESC "\n"); 13942 printk(LPFC_COPYRIGHT "\n"); 13943 13944 error = misc_register(&lpfc_mgmt_dev); 13945 if (error) 13946 printk(KERN_ERR "Could not register lpfcmgmt device, " 13947 "misc_register returned with status %d", error); 13948 13949 lpfc_transport_functions.vport_create = lpfc_vport_create; 13950 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 13951 lpfc_transport_template = 13952 fc_attach_transport(&lpfc_transport_functions); 13953 if (lpfc_transport_template == NULL) 13954 return -ENOMEM; 13955 lpfc_vport_transport_template = 13956 fc_attach_transport(&lpfc_vport_transport_functions); 13957 if (lpfc_vport_transport_template == NULL) { 13958 fc_release_transport(lpfc_transport_template); 13959 return -ENOMEM; 13960 } 13961 lpfc_nvme_cmd_template(); 13962 lpfc_nvmet_cmd_template(); 13963 13964 /* Initialize in case vector mapping is needed */ 13965 lpfc_present_cpu = num_present_cpus(); 13966 13967 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 13968 "lpfc/sli4:online", 13969 lpfc_cpu_online, lpfc_cpu_offline); 13970 if (error < 0) 13971 goto cpuhp_failure; 13972 lpfc_cpuhp_state = error; 13973 13974 error = pci_register_driver(&lpfc_driver); 13975 if (error) 13976 goto unwind; 13977 13978 return error; 13979 13980 unwind: 13981 cpuhp_remove_multi_state(lpfc_cpuhp_state); 13982 cpuhp_failure: 13983 fc_release_transport(lpfc_transport_template); 13984 fc_release_transport(lpfc_vport_transport_template); 13985 13986 return error; 13987 } 13988 13989 /** 13990 * lpfc_exit - lpfc module removal routine 13991 * 13992 * This routine is invoked when the lpfc 
module is removed from the kernel. 13993 * The special kernel macro module_exit() is used to indicate the role of 13994 * this routine to the kernel as lpfc module exit point. 13995 */ 13996 static void __exit 13997 lpfc_exit(void) 13998 { 13999 misc_deregister(&lpfc_mgmt_dev); 14000 pci_unregister_driver(&lpfc_driver); 14001 cpuhp_remove_multi_state(lpfc_cpuhp_state); 14002 fc_release_transport(lpfc_transport_template); 14003 fc_release_transport(lpfc_vport_transport_template); 14004 idr_destroy(&lpfc_hba_index); 14005 } 14006 14007 module_init(lpfc_init); 14008 module_exit(lpfc_exit); 14009 MODULE_LICENSE("GPL"); 14010 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 14011 MODULE_AUTHOR("Broadcom"); 14012 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 14013