1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/ctype.h> 34 #include <linux/aer.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/msi.h> 40 #include <linux/irq.h> 41 #include <linux/bitops.h> 42 #include <linux/crash_dump.h> 43 #include <linux/cpu.h> 44 #include <linux/cpuhotplug.h> 45 46 #include <scsi/scsi.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_host.h> 49 #include <scsi/scsi_transport_fc.h> 50 #include <scsi/scsi_tcq.h> 51 #include <scsi/fc/fc_fs.h> 52 53 #include <linux/nvme-fc-driver.h> 54 55 #include "lpfc_hw4.h" 56 #include "lpfc_hw.h" 57 #include "lpfc_sli.h" 58 #include "lpfc_sli4.h" 59 #include "lpfc_nl.h" 60 #include "lpfc_disc.h" 61 #include "lpfc.h" 62 #include "lpfc_scsi.h" 63 #include "lpfc_nvme.h" 64 #include "lpfc_nvmet.h" 65 #include "lpfc_logmsg.h" 66 #include "lpfc_crtn.h" 67 #include "lpfc_vport.h" 68 #include "lpfc_version.h" 69 #include "lpfc_ids.h" 70 71 static enum cpuhp_state lpfc_cpuhp_state; 72 /* Used when mapping IRQ vectors in a driver centric manner */ 73 static uint32_t lpfc_present_cpu; 74 75 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 76 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 77 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 78 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 79 static int lpfc_post_rcv_buf(struct lpfc_hba *); 80 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 81 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 82 static int lpfc_setup_endian_order(struct lpfc_hba *); 83 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 84 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 85 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 86 static void lpfc_init_sgl_list(struct lpfc_hba *); 87 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 88 static void lpfc_free_active_sgl(struct lpfc_hba *); 89 static int lpfc_hba_down_post_s3(struct 
lpfc_hba *phba); 90 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 91 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 92 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 93 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 94 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 95 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 96 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 97 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 98 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 99 100 static struct scsi_transport_template *lpfc_transport_template = NULL; 101 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 102 static DEFINE_IDR(lpfc_hba_index); 103 #define LPFC_NVMET_BUF_POST 254 104 105 /** 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 107 * @phba: pointer to lpfc hba data structure. 108 * 109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 110 * mailbox command. It retrieves the revision information from the HBA and 111 * collects the Vital Product Data (VPD) about the HBA for preparing the 112 * configuration of the HBA. 113 * 114 * Return codes: 115 * 0 - success. 116 * -ERESTART - requests the SLI layer to reset the HBA and try again. 117 * Any other value - indicates an error. 118 **/ 119 int 120 lpfc_config_port_prep(struct lpfc_hba *phba) 121 { 122 lpfc_vpd_t *vp = &phba->vpd; 123 int i = 0, rc; 124 LPFC_MBOXQ_t *pmb; 125 MAILBOX_t *mb; 126 char *lpfc_vpd_data = NULL; 127 uint16_t offset = 0; 128 static char licensed[56] = 129 "key unlock for use with gnu public licensed code only\0"; 130 static int init_key = 1; 131 132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 133 if (!pmb) { 134 phba->link_state = LPFC_HBA_ERROR; 135 return -ENOMEM; 136 } 137 138 mb = &pmb->u.mb; 139 phba->link_state = LPFC_INIT_MBX_CMDS; 140 141 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 142 if (init_key) { 143 uint32_t *ptext = (uint32_t *) licensed; 144 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 146 *ptext = cpu_to_be32(*ptext); 147 init_key = 0; 148 } 149 150 lpfc_read_nv(phba, pmb); 151 memset((char*)mb->un.varRDnvp.rsvd3, 0, 152 sizeof (mb->un.varRDnvp.rsvd3)); 153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 154 sizeof (licensed)); 155 156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 157 158 if (rc != MBX_SUCCESS) { 159 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 160 "0324 Config Port initialization " 161 "error, mbxCmd x%x READ_NVPARM, " 162 "mbxStatus x%x\n", 163 mb->mbxCommand, mb->mbxStatus); 164 mempool_free(pmb, phba->mbox_mem_pool); 165 return -ERESTART; 166 } 167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 168 sizeof(phba->wwnn)); 169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 170 sizeof(phba->wwpn)); 171 } 172 173 /* 174 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 175 * which was already set in lpfc_get_cfgparam() 176 */ 177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 178 179 /* Setup and issue mailbox READ REV command */ 180 lpfc_read_rev(phba, pmb); 181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 182 if (rc != MBX_SUCCESS) { 183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 184 "0439 Adapter failed to init, mbxCmd x%x " 185 "READ_REV, mbxStatus x%x\n", 186 mb->mbxCommand, mb->mbxStatus); 187 mempool_free( pmb, phba->mbox_mem_pool); 188 return -ERESTART; 189 } 190 191 192 /* 193 * The value of rr must be 1 since the 
driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
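 *
 * In short, this routine reconciles the vport's fc_nodename/fc_portname with
 * the names carried in the service parameters: a configured soft WWNN/WWPN
 * takes precedence, then a factory/fabric-assigned WWPN when the vendor
 * version level carries the FAWWPN key; otherwise the existing fc name is
 * kept and copied back into the service parameters.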
347 **/ 348 void 349 lpfc_update_vport_wwn(struct lpfc_vport *vport) 350 { 351 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level; 352 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0]; 353 354 /* If the soft name exists then update it using the service params */ 355 if (vport->phba->cfg_soft_wwnn) 356 u64_to_wwn(vport->phba->cfg_soft_wwnn, 357 vport->fc_sparam.nodeName.u.wwn); 358 if (vport->phba->cfg_soft_wwpn) 359 u64_to_wwn(vport->phba->cfg_soft_wwpn, 360 vport->fc_sparam.portName.u.wwn); 361 362 /* 363 * If the name is empty or there exists a soft name 364 * then copy the service params name, otherwise use the fc name 365 */ 366 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 367 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 368 sizeof(struct lpfc_name)); 369 else 370 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 371 sizeof(struct lpfc_name)); 372 373 /* 374 * If the port name has changed, then set the Param changes flag 375 * to unreg the login 376 */ 377 if (vport->fc_portname.u.wwn[0] != 0 && 378 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 379 sizeof(struct lpfc_name))) 380 vport->vport_flag |= FAWWPN_PARAM_CHG; 381 382 if (vport->fc_portname.u.wwn[0] == 0 || 383 vport->phba->cfg_soft_wwpn || 384 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) || 385 vport->vport_flag & FAWWPN_SET) { 386 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 387 sizeof(struct lpfc_name)); 388 vport->vport_flag &= ~FAWWPN_SET; 389 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) 390 vport->vport_flag |= FAWWPN_SET; 391 } 392 else 393 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 394 sizeof(struct lpfc_name)); 395 } 396 397 /** 398 * lpfc_config_port_post - Perform lpfc initialization after config port 399 * @phba: pointer to lpfc hba data structure. 400 * 401 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 402 * command call. It performs all internal resource and state setups on the 403 * port: post IOCB buffers, enable appropriate host interrupt attentions, 404 * ELS ring timers, etc. 405 * 406 * Return codes 407 * 0 - success. 408 * Any other value - error. 409 **/ 410 int 411 lpfc_config_port_post(struct lpfc_hba *phba) 412 { 413 struct lpfc_vport *vport = phba->pport; 414 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 415 LPFC_MBOXQ_t *pmb; 416 MAILBOX_t *mb; 417 struct lpfc_dmabuf *mp; 418 struct lpfc_sli *psli = &phba->sli; 419 uint32_t status, timeout; 420 int i, j; 421 int rc; 422 423 spin_lock_irq(&phba->hbalock); 424 /* 425 * If the Config port completed correctly the HBA is not 426 * over heated any more. 427 */ 428 if (phba->over_temp_state == HBA_OVER_TEMP) 429 phba->over_temp_state = HBA_NORMAL_TEMP; 430 spin_unlock_irq(&phba->hbalock); 431 432 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 433 if (!pmb) { 434 phba->link_state = LPFC_HBA_ERROR; 435 return -ENOMEM; 436 } 437 mb = &pmb->u.mb; 438 439 /* Get login parameters for NID. 
*/ 440 rc = lpfc_read_sparam(phba, pmb, 0); 441 if (rc) { 442 mempool_free(pmb, phba->mbox_mem_pool); 443 return -ENOMEM; 444 } 445 446 pmb->vport = vport; 447 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 449 "0448 Adapter failed init, mbxCmd x%x " 450 "READ_SPARM mbxStatus x%x\n", 451 mb->mbxCommand, mb->mbxStatus); 452 phba->link_state = LPFC_HBA_ERROR; 453 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 454 mempool_free(pmb, phba->mbox_mem_pool); 455 lpfc_mbuf_free(phba, mp->virt, mp->phys); 456 kfree(mp); 457 return -EIO; 458 } 459 460 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 461 462 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 463 lpfc_mbuf_free(phba, mp->virt, mp->phys); 464 kfree(mp); 465 pmb->ctx_buf = NULL; 466 lpfc_update_vport_wwn(vport); 467 468 /* Update the fc_host data structures with new wwn. */ 469 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 470 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 471 fc_host_max_npiv_vports(shost) = phba->max_vpi; 472 473 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 474 /* This should be consolidated into parse_vpd ? - mr */ 475 if (phba->SerialNumber[0] == 0) { 476 uint8_t *outptr; 477 478 outptr = &vport->fc_nodename.u.s.IEEE[0]; 479 for (i = 0; i < 12; i++) { 480 status = *outptr++; 481 j = ((status & 0xf0) >> 4); 482 if (j <= 9) 483 phba->SerialNumber[i] = 484 (char)((uint8_t) 0x30 + (uint8_t) j); 485 else 486 phba->SerialNumber[i] = 487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 488 i++; 489 j = (status & 0xf); 490 if (j <= 9) 491 phba->SerialNumber[i] = 492 (char)((uint8_t) 0x30 + (uint8_t) j); 493 else 494 phba->SerialNumber[i] = 495 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 496 } 497 } 498 499 lpfc_read_config(phba, pmb); 500 pmb->vport = vport; 501 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 503 "0453 Adapter failed to init, mbxCmd x%x " 504 "READ_CONFIG, mbxStatus x%x\n", 505 mb->mbxCommand, mb->mbxStatus); 506 phba->link_state = LPFC_HBA_ERROR; 507 mempool_free( pmb, phba->mbox_mem_pool); 508 return -EIO; 509 } 510 511 /* Check if the port is disabled */ 512 lpfc_sli_read_link_ste(phba); 513 514 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 515 i = (mb->un.varRdConfig.max_xri + 1); 516 if (phba->cfg_hba_queue_depth > i) { 517 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 518 "3359 HBA queue depth changed from %d to %d\n", 519 phba->cfg_hba_queue_depth, i); 520 phba->cfg_hba_queue_depth = i; 521 } 522 523 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 524 i = (mb->un.varRdConfig.max_xri >> 3); 525 if (phba->pport->cfg_lun_queue_depth > i) { 526 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 527 "3360 LUN queue depth changed from %d to %d\n", 528 phba->pport->cfg_lun_queue_depth, i); 529 phba->pport->cfg_lun_queue_depth = i; 530 } 531 532 phba->lmt = mb->un.varRdConfig.lmt; 533 534 /* Get the default values for Model Name and Description */ 535 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 536 537 phba->link_state = LPFC_LINK_DOWN; 538 539 /* Only process IOCBs on ELS ring till hba_state is READY */ 540 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 541 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 542 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 543 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 544 545 /* Post receive buffers for desired rings */ 546 if 
(phba->sli_rev != 3) 547 lpfc_post_rcv_buf(phba); 548 549 /* 550 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 551 */ 552 if (phba->intr_type == MSIX) { 553 rc = lpfc_config_msi(phba, pmb); 554 if (rc) { 555 mempool_free(pmb, phba->mbox_mem_pool); 556 return -EIO; 557 } 558 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 559 if (rc != MBX_SUCCESS) { 560 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 561 "0352 Config MSI mailbox command " 562 "failed, mbxCmd x%x, mbxStatus x%x\n", 563 pmb->u.mb.mbxCommand, 564 pmb->u.mb.mbxStatus); 565 mempool_free(pmb, phba->mbox_mem_pool); 566 return -EIO; 567 } 568 } 569 570 spin_lock_irq(&phba->hbalock); 571 /* Initialize ERATT handling flag */ 572 phba->hba_flag &= ~HBA_ERATT_HANDLED; 573 574 /* Enable appropriate host interrupts */ 575 if (lpfc_readl(phba->HCregaddr, &status)) { 576 spin_unlock_irq(&phba->hbalock); 577 return -EIO; 578 } 579 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 580 if (psli->num_rings > 0) 581 status |= HC_R0INT_ENA; 582 if (psli->num_rings > 1) 583 status |= HC_R1INT_ENA; 584 if (psli->num_rings > 2) 585 status |= HC_R2INT_ENA; 586 if (psli->num_rings > 3) 587 status |= HC_R3INT_ENA; 588 589 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 590 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 591 status &= ~(HC_R0INT_ENA); 592 593 writel(status, phba->HCregaddr); 594 readl(phba->HCregaddr); /* flush */ 595 spin_unlock_irq(&phba->hbalock); 596 597 /* Set up ring-0 (ELS) timer */ 598 timeout = phba->fc_ratov * 2; 599 mod_timer(&vport->els_tmofunc, 600 jiffies + msecs_to_jiffies(1000 * timeout)); 601 /* Set up heart beat (HB) timer */ 602 mod_timer(&phba->hb_tmofunc, 603 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 604 phba->hb_outstanding = 0; 605 phba->last_completion_time = jiffies; 606 /* Set up error attention (ERATT) polling timer */ 607 mod_timer(&phba->eratt_poll, 608 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 609 610 if (phba->hba_flag & LINK_DISABLED) { 611 lpfc_printf_log(phba, 612 KERN_ERR, LOG_INIT, 613 "2598 Adapter Link is disabled.\n"); 614 lpfc_down_link(phba, pmb); 615 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 616 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 617 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 618 lpfc_printf_log(phba, 619 KERN_ERR, LOG_INIT, 620 "2599 Adapter failed to issue DOWN_LINK" 621 " mbox command rc 0x%x\n", rc); 622 623 mempool_free(pmb, phba->mbox_mem_pool); 624 return -EIO; 625 } 626 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 627 mempool_free(pmb, phba->mbox_mem_pool); 628 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 629 if (rc) 630 return rc; 631 } 632 /* MBOX buffer will be freed in mbox compl */ 633 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 634 if (!pmb) { 635 phba->link_state = LPFC_HBA_ERROR; 636 return -ENOMEM; 637 } 638 639 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 640 pmb->mbox_cmpl = lpfc_config_async_cmpl; 641 pmb->vport = phba->pport; 642 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 643 644 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 645 lpfc_printf_log(phba, 646 KERN_ERR, 647 LOG_INIT, 648 "0456 Adapter failed to issue " 649 "ASYNCEVT_ENABLE mbox status x%x\n", 650 rc); 651 mempool_free(pmb, phba->mbox_mem_pool); 652 } 653 654 /* Get Option rom version */ 655 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 656 if (!pmb) { 657 phba->link_state = LPFC_HBA_ERROR; 658 return -ENOMEM; 659 } 660 661 lpfc_dump_wakeup_param(phba, pmb); 662 pmb->mbox_cmpl = 
lpfc_dump_wakeup_param_cmpl; 663 pmb->vport = phba->pport; 664 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 665 666 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 668 "to get Option ROM version status x%x\n", rc); 669 mempool_free(pmb, phba->mbox_mem_pool); 670 } 671 672 return 0; 673 } 674 675 /** 676 * lpfc_hba_init_link - Initialize the FC link 677 * @phba: pointer to lpfc hba data structure. 678 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 679 * 680 * This routine will issue the INIT_LINK mailbox command call. 681 * It is available to other drivers through the lpfc_hba data 682 * structure for use as a delayed link up mechanism with the 683 * module parameter lpfc_suppress_link_up. 684 * 685 * Return code 686 * 0 - success 687 * Any other value - error 688 **/ 689 static int 690 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 691 { 692 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 693 } 694 695 /** 696 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 697 * @phba: pointer to lpfc hba data structure. 698 * @fc_topology: desired fc topology. 699 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 700 * 701 * This routine will issue the INIT_LINK mailbox command call. 702 * It is available to other drivers through the lpfc_hba data 703 * structure for use as a delayed link up mechanism with the 704 * module parameter lpfc_suppress_link_up. 705 * 706 * Return code 707 * 0 - success 708 * Any other value - error 709 **/ 710 int 711 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 712 uint32_t flag) 713 { 714 struct lpfc_vport *vport = phba->pport; 715 LPFC_MBOXQ_t *pmb; 716 MAILBOX_t *mb; 717 int rc; 718 719 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 720 if (!pmb) { 721 phba->link_state = LPFC_HBA_ERROR; 722 return -ENOMEM; 723 } 724 mb = &pmb->u.mb; 725 pmb->vport = vport; 726 727 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 728 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 729 !(phba->lmt & LMT_1Gb)) || 730 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 731 !(phba->lmt & LMT_2Gb)) || 732 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 733 !(phba->lmt & LMT_4Gb)) || 734 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 735 !(phba->lmt & LMT_8Gb)) || 736 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 737 !(phba->lmt & LMT_10Gb)) || 738 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 739 !(phba->lmt & LMT_16Gb)) || 740 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 741 !(phba->lmt & LMT_32Gb)) || 742 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 743 !(phba->lmt & LMT_64Gb))) { 744 /* Reset link speed to auto */ 745 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 746 "1302 Invalid speed for this board:%d " 747 "Reset link speed to auto.\n", 748 phba->cfg_link_speed); 749 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 750 } 751 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 752 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 753 if (phba->sli_rev < LPFC_SLI_REV4) 754 lpfc_set_loopback_flag(phba); 755 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 756 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 758 "0498 Adapter failed to init, mbxCmd x%x " 759 "INIT_LINK, mbxStatus x%x\n", 760 mb->mbxCommand, mb->mbxStatus); 761 if (phba->sli_rev <= LPFC_SLI_REV3) { 762 /* Clear 
all interrupt enable conditions */ 763 writel(0, phba->HCregaddr); 764 readl(phba->HCregaddr); /* flush */ 765 /* Clear all pending interrupts */ 766 writel(0xffffffff, phba->HAregaddr); 767 readl(phba->HAregaddr); /* flush */ 768 } 769 phba->link_state = LPFC_HBA_ERROR; 770 if (rc != MBX_BUSY || flag == MBX_POLL) 771 mempool_free(pmb, phba->mbox_mem_pool); 772 return -EIO; 773 } 774 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 775 if (flag == MBX_POLL) 776 mempool_free(pmb, phba->mbox_mem_pool); 777 778 return 0; 779 } 780 781 /** 782 * lpfc_hba_down_link - this routine downs the FC link 783 * @phba: pointer to lpfc hba data structure. 784 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 785 * 786 * This routine will issue the DOWN_LINK mailbox command call. 787 * It is available to other drivers through the lpfc_hba data 788 * structure for use to stop the link. 789 * 790 * Return code 791 * 0 - success 792 * Any other value - error 793 **/ 794 static int 795 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 796 { 797 LPFC_MBOXQ_t *pmb; 798 int rc; 799 800 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 801 if (!pmb) { 802 phba->link_state = LPFC_HBA_ERROR; 803 return -ENOMEM; 804 } 805 806 lpfc_printf_log(phba, 807 KERN_ERR, LOG_INIT, 808 "0491 Adapter Link is disabled.\n"); 809 lpfc_down_link(phba, pmb); 810 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 811 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 812 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 813 lpfc_printf_log(phba, 814 KERN_ERR, LOG_INIT, 815 "2522 Adapter failed to issue DOWN_LINK" 816 " mbox command rc 0x%x\n", rc); 817 818 mempool_free(pmb, phba->mbox_mem_pool); 819 return -EIO; 820 } 821 if (flag == MBX_POLL) 822 mempool_free(pmb, phba->mbox_mem_pool); 823 824 return 0; 825 } 826 827 /** 828 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 829 * @phba: pointer to lpfc HBA data structure. 830 * 831 * This routine will do LPFC uninitialization before the HBA is reset when 832 * bringing down the SLI Layer. 833 * 834 * Return codes 835 * 0 - success. 836 * Any other value - error. 837 **/ 838 int 839 lpfc_hba_down_prep(struct lpfc_hba *phba) 840 { 841 struct lpfc_vport **vports; 842 int i; 843 844 if (phba->sli_rev <= LPFC_SLI_REV3) { 845 /* Disable interrupts */ 846 writel(0, phba->HCregaddr); 847 readl(phba->HCregaddr); /* flush */ 848 } 849 850 if (phba->pport->load_flag & FC_UNLOADING) 851 lpfc_cleanup_discovery_resources(phba->pport); 852 else { 853 vports = lpfc_create_vport_work_array(phba); 854 if (vports != NULL) 855 for (i = 0; i <= phba->max_vports && 856 vports[i] != NULL; i++) 857 lpfc_cleanup_discovery_resources(vports[i]); 858 lpfc_destroy_vport_work_array(phba, vports); 859 } 860 return 0; 861 } 862 863 /** 864 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 865 * rspiocb which got deferred 866 * 867 * @phba: pointer to lpfc HBA data structure. 868 * 869 * This routine will cleanup completed slow path events after HBA is reset 870 * when bringing down the SLI Layer. 871 * 872 * 873 * Return codes 874 * void. 
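 *
 * Deferred completion WQEs are released back to the iocb pool with
 * lpfc_sli_release_iocbq() and deferred receive buffers are returned with
 * lpfc_in_buf_free().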
875 **/ 876 static void 877 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 878 { 879 struct lpfc_iocbq *rspiocbq; 880 struct hbq_dmabuf *dmabuf; 881 struct lpfc_cq_event *cq_event; 882 883 spin_lock_irq(&phba->hbalock); 884 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 885 spin_unlock_irq(&phba->hbalock); 886 887 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 888 /* Get the response iocb from the head of work queue */ 889 spin_lock_irq(&phba->hbalock); 890 list_remove_head(&phba->sli4_hba.sp_queue_event, 891 cq_event, struct lpfc_cq_event, list); 892 spin_unlock_irq(&phba->hbalock); 893 894 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 895 case CQE_CODE_COMPL_WQE: 896 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 897 cq_event); 898 lpfc_sli_release_iocbq(phba, rspiocbq); 899 break; 900 case CQE_CODE_RECEIVE: 901 case CQE_CODE_RECEIVE_V1: 902 dmabuf = container_of(cq_event, struct hbq_dmabuf, 903 cq_event); 904 lpfc_in_buf_free(phba, &dmabuf->dbuf); 905 } 906 } 907 } 908 909 /** 910 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 911 * @phba: pointer to lpfc HBA data structure. 912 * 913 * This routine will cleanup posted ELS buffers after the HBA is reset 914 * when bringing down the SLI Layer. 915 * 916 * 917 * Return codes 918 * void. 919 **/ 920 static void 921 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 922 { 923 struct lpfc_sli *psli = &phba->sli; 924 struct lpfc_sli_ring *pring; 925 struct lpfc_dmabuf *mp, *next_mp; 926 LIST_HEAD(buflist); 927 int count; 928 929 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 930 lpfc_sli_hbqbuf_free_all(phba); 931 else { 932 /* Cleanup preposted buffers on the ELS ring */ 933 pring = &psli->sli3_ring[LPFC_ELS_RING]; 934 spin_lock_irq(&phba->hbalock); 935 list_splice_init(&pring->postbufq, &buflist); 936 spin_unlock_irq(&phba->hbalock); 937 938 count = 0; 939 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 940 list_del(&mp->list); 941 count++; 942 lpfc_mbuf_free(phba, mp->virt, mp->phys); 943 kfree(mp); 944 } 945 946 spin_lock_irq(&phba->hbalock); 947 pring->postbufq_cnt -= count; 948 spin_unlock_irq(&phba->hbalock); 949 } 950 } 951 952 /** 953 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 954 * @phba: pointer to lpfc HBA data structure. 955 * 956 * This routine will cleanup the txcmplq after the HBA is reset when bringing 957 * down the SLI Layer. 958 * 959 * Return codes 960 * void 961 **/ 962 static void 963 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 964 { 965 struct lpfc_sli *psli = &phba->sli; 966 struct lpfc_queue *qp = NULL; 967 struct lpfc_sli_ring *pring; 968 LIST_HEAD(completions); 969 int i; 970 struct lpfc_iocbq *piocb, *next_iocb; 971 972 if (phba->sli_rev != LPFC_SLI_REV4) { 973 for (i = 0; i < psli->num_rings; i++) { 974 pring = &psli->sli3_ring[i]; 975 spin_lock_irq(&phba->hbalock); 976 /* At this point in time the HBA is either reset or DOA 977 * Nothing should be on txcmplq as it will 978 * NEVER complete. 
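			 * The iocbs are simply moved to a local completions
			 * list under hbalock and cancelled below with
			 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.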
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_els_sgl_list */
					/* and scsi_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
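	 * Lock order is hbalock (irqs disabled), then the per-hdwq
	 * abts_io_buf_list_lock, then io_buf_list_put_lock while the aborted
	 * bufs are cleared and moved back to the put list.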
1080 */ 1081 cnt = 0; 1082 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1083 qp = &phba->sli4_hba.hdwq[idx]; 1084 1085 spin_lock(&qp->abts_io_buf_list_lock); 1086 list_splice_init(&qp->lpfc_abts_io_buf_list, 1087 &aborts); 1088 1089 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1090 psb->pCmd = NULL; 1091 psb->status = IOSTAT_SUCCESS; 1092 cnt++; 1093 } 1094 spin_lock(&qp->io_buf_list_put_lock); 1095 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1096 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1097 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1098 qp->abts_scsi_io_bufs = 0; 1099 qp->abts_nvme_io_bufs = 0; 1100 spin_unlock(&qp->io_buf_list_put_lock); 1101 spin_unlock(&qp->abts_io_buf_list_lock); 1102 } 1103 spin_unlock_irq(&phba->hbalock); 1104 1105 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1106 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1107 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1108 &nvmet_aborts); 1109 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1110 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1111 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1112 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1113 } 1114 } 1115 1116 lpfc_sli4_free_sp_events(phba); 1117 return cnt; 1118 } 1119 1120 /** 1121 * lpfc_hba_down_post - Wrapper func for hba down post routine 1122 * @phba: pointer to lpfc HBA data structure. 1123 * 1124 * This routine wraps the actual SLI3 or SLI4 routine for performing 1125 * uninitialization after the HBA is reset when bring down the SLI Layer. 1126 * 1127 * Return codes 1128 * 0 - success. 1129 * Any other value - error. 1130 **/ 1131 int 1132 lpfc_hba_down_post(struct lpfc_hba *phba) 1133 { 1134 return (*phba->lpfc_hba_down_post)(phba); 1135 } 1136 1137 /** 1138 * lpfc_hb_timeout - The HBA-timer timeout handler 1139 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1140 * 1141 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1142 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1143 * work-port-events bitmap and the worker thread is notified. This timeout 1144 * event will be used by the worker thread to invoke the actual timeout 1145 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1146 * be performed in the timeout handler and the HBA timeout event bit shall 1147 * be cleared by the worker thread after it has taken the event bitmap out. 1148 **/ 1149 static void 1150 lpfc_hb_timeout(struct timer_list *t) 1151 { 1152 struct lpfc_hba *phba; 1153 uint32_t tmo_posted; 1154 unsigned long iflag; 1155 1156 phba = from_timer(phba, t, hb_tmofunc); 1157 1158 /* Check for heart beat timeout conditions */ 1159 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1160 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1161 if (!tmo_posted) 1162 phba->pport->work_port_events |= WORKER_HB_TMO; 1163 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1164 1165 /* Tell the worker thread there is work to do */ 1166 if (!tmo_posted) 1167 lpfc_worker_wake_up(phba); 1168 return; 1169 } 1170 1171 /** 1172 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1173 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1174 * 1175 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1176 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1177 * work-port-events bitmap and the worker thread is notified. 
This timeout 1178 * event will be used by the worker thread to invoke the actual timeout 1179 * handler routine, lpfc_rrq_handler. Any periodical operations will 1180 * be performed in the timeout handler and the RRQ timeout event bit shall 1181 * be cleared by the worker thread after it has taken the event bitmap out. 1182 **/ 1183 static void 1184 lpfc_rrq_timeout(struct timer_list *t) 1185 { 1186 struct lpfc_hba *phba; 1187 unsigned long iflag; 1188 1189 phba = from_timer(phba, t, rrq_tmr); 1190 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1191 if (!(phba->pport->load_flag & FC_UNLOADING)) 1192 phba->hba_flag |= HBA_RRQ_ACTIVE; 1193 else 1194 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1195 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1196 1197 if (!(phba->pport->load_flag & FC_UNLOADING)) 1198 lpfc_worker_wake_up(phba); 1199 } 1200 1201 /** 1202 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1203 * @phba: pointer to lpfc hba data structure. 1204 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1205 * 1206 * This is the callback function to the lpfc heart-beat mailbox command. 1207 * If configured, the lpfc driver issues the heart-beat mailbox command to 1208 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1209 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1210 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1211 * heart-beat outstanding state. Once the mailbox command comes back and 1212 * no error conditions detected, the heart-beat mailbox command timer is 1213 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1214 * state is cleared for the next heart-beat. If the timer expired with the 1215 * heart-beat outstanding state set, the driver will put the HBA offline. 
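 *
 * The mailbox buffer is freed here; the heart-beat timer is only rearmed
 * when the port is online, not in the error state, and not unloading.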
1216 **/ 1217 static void 1218 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1219 { 1220 unsigned long drvr_flag; 1221 1222 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1223 phba->hb_outstanding = 0; 1224 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1225 1226 /* Check and reset heart-beat timer is necessary */ 1227 mempool_free(pmboxq, phba->mbox_mem_pool); 1228 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1229 !(phba->link_state == LPFC_HBA_ERROR) && 1230 !(phba->pport->load_flag & FC_UNLOADING)) 1231 mod_timer(&phba->hb_tmofunc, 1232 jiffies + 1233 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1234 return; 1235 } 1236 1237 static void 1238 lpfc_hb_eq_delay_work(struct work_struct *work) 1239 { 1240 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1241 struct lpfc_hba, eq_delay_work); 1242 struct lpfc_eq_intr_info *eqi, *eqi_new; 1243 struct lpfc_queue *eq, *eq_next; 1244 unsigned char *ena_delay = NULL; 1245 uint32_t usdelay; 1246 int i; 1247 1248 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1249 return; 1250 1251 if (phba->link_state == LPFC_HBA_ERROR || 1252 phba->pport->fc_flag & FC_OFFLINE_MODE) 1253 goto requeue; 1254 1255 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1256 GFP_KERNEL); 1257 if (!ena_delay) 1258 goto requeue; 1259 1260 for (i = 0; i < phba->cfg_irq_chann; i++) { 1261 /* Get the EQ corresponding to the IRQ vector */ 1262 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1263 if (!eq) 1264 continue; 1265 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1266 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1267 ena_delay[eq->last_cpu] = 1; 1268 } 1269 } 1270 1271 for_each_present_cpu(i) { 1272 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1273 if (ena_delay[i]) { 1274 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1275 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1276 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1277 } else { 1278 usdelay = 0; 1279 } 1280 1281 eqi->icnt = 0; 1282 1283 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1284 if (unlikely(eq->last_cpu != i)) { 1285 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1286 eq->last_cpu); 1287 list_move_tail(&eq->cpu_list, &eqi_new->list); 1288 continue; 1289 } 1290 if (usdelay != eq->q_mode) 1291 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1292 usdelay); 1293 } 1294 } 1295 1296 kfree(ena_delay); 1297 1298 requeue: 1299 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1300 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1301 } 1302 1303 /** 1304 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1305 * @phba: pointer to lpfc hba data structure. 1306 * 1307 * For each heartbeat, this routine does some heuristic methods to adjust 1308 * XRI distribution. The goal is to fully utilize free XRIs. 1309 **/ 1310 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1311 { 1312 u32 i; 1313 u32 hwq_count; 1314 1315 hwq_count = phba->cfg_hdw_queue; 1316 for (i = 0; i < hwq_count; i++) { 1317 /* Adjust XRIs in private pool */ 1318 lpfc_adjust_pvt_pool_count(phba, i); 1319 1320 /* Adjust high watermark */ 1321 lpfc_adjust_high_watermark(phba, i); 1322 1323 #ifdef LPFC_MXP_STAT 1324 /* Snapshot pbl, pvt and busy count */ 1325 lpfc_snapshot_mxp(phba, i); 1326 #endif 1327 } 1328 } 1329 1330 /** 1331 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1332 * @phba: pointer to lpfc hba data structure. 
1333 * 1334 * This is the actual HBA-timer timeout handler to be invoked by the worker 1335 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1336 * handler performs any periodic operations needed for the device. If such 1337 * periodic event has already been attended to either in the interrupt handler 1338 * or by processing slow-ring or fast-ring events within the HBA-timer 1339 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1340 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1341 * is configured and there is no heart-beat mailbox command outstanding, a 1342 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1343 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1344 * to offline. 1345 **/ 1346 void 1347 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1348 { 1349 struct lpfc_vport **vports; 1350 LPFC_MBOXQ_t *pmboxq; 1351 struct lpfc_dmabuf *buf_ptr; 1352 int retval, i; 1353 struct lpfc_sli *psli = &phba->sli; 1354 LIST_HEAD(completions); 1355 1356 if (phba->cfg_xri_rebalancing) { 1357 /* Multi-XRI pools handler */ 1358 lpfc_hb_mxp_handler(phba); 1359 } 1360 1361 vports = lpfc_create_vport_work_array(phba); 1362 if (vports != NULL) 1363 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1364 lpfc_rcv_seq_check_edtov(vports[i]); 1365 lpfc_fdmi_num_disc_check(vports[i]); 1366 } 1367 lpfc_destroy_vport_work_array(phba, vports); 1368 1369 if ((phba->link_state == LPFC_HBA_ERROR) || 1370 (phba->pport->load_flag & FC_UNLOADING) || 1371 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1372 return; 1373 1374 spin_lock_irq(&phba->pport->work_port_lock); 1375 1376 if (time_after(phba->last_completion_time + 1377 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1378 jiffies)) { 1379 spin_unlock_irq(&phba->pport->work_port_lock); 1380 if (!phba->hb_outstanding) 1381 mod_timer(&phba->hb_tmofunc, 1382 jiffies + 1383 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1384 else 1385 mod_timer(&phba->hb_tmofunc, 1386 jiffies + 1387 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1388 return; 1389 } 1390 spin_unlock_irq(&phba->pport->work_port_lock); 1391 1392 if (phba->elsbuf_cnt && 1393 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1394 spin_lock_irq(&phba->hbalock); 1395 list_splice_init(&phba->elsbuf, &completions); 1396 phba->elsbuf_cnt = 0; 1397 phba->elsbuf_prev_cnt = 0; 1398 spin_unlock_irq(&phba->hbalock); 1399 1400 while (!list_empty(&completions)) { 1401 list_remove_head(&completions, buf_ptr, 1402 struct lpfc_dmabuf, list); 1403 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1404 kfree(buf_ptr); 1405 } 1406 } 1407 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1408 1409 /* If there is no heart beat outstanding, issue a heartbeat command */ 1410 if (phba->cfg_enable_hba_heartbeat) { 1411 if (!phba->hb_outstanding) { 1412 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1413 (list_empty(&psli->mboxq))) { 1414 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1415 GFP_KERNEL); 1416 if (!pmboxq) { 1417 mod_timer(&phba->hb_tmofunc, 1418 jiffies + 1419 msecs_to_jiffies(1000 * 1420 LPFC_HB_MBOX_INTERVAL)); 1421 return; 1422 } 1423 1424 lpfc_heart_beat(phba, pmboxq); 1425 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1426 pmboxq->vport = phba->pport; 1427 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1428 MBX_NOWAIT); 1429 1430 if (retval != MBX_BUSY && 1431 retval != MBX_SUCCESS) { 1432 mempool_free(pmboxq, 1433 phba->mbox_mem_pool); 1434 mod_timer(&phba->hb_tmofunc, 1435 jiffies + 1436 
msecs_to_jiffies(1000 * 1437 LPFC_HB_MBOX_INTERVAL)); 1438 return; 1439 } 1440 phba->skipped_hb = 0; 1441 phba->hb_outstanding = 1; 1442 } else if (time_before_eq(phba->last_completion_time, 1443 phba->skipped_hb)) { 1444 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1445 "2857 Last completion time not " 1446 " updated in %d ms\n", 1447 jiffies_to_msecs(jiffies 1448 - phba->last_completion_time)); 1449 } else 1450 phba->skipped_hb = jiffies; 1451 1452 mod_timer(&phba->hb_tmofunc, 1453 jiffies + 1454 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1455 return; 1456 } else { 1457 /* 1458 * If heart beat timeout called with hb_outstanding set 1459 * we need to give the hb mailbox cmd a chance to 1460 * complete or TMO. 1461 */ 1462 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1463 "0459 Adapter heartbeat still out" 1464 "standing:last compl time was %d ms.\n", 1465 jiffies_to_msecs(jiffies 1466 - phba->last_completion_time)); 1467 mod_timer(&phba->hb_tmofunc, 1468 jiffies + 1469 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1470 } 1471 } else { 1472 mod_timer(&phba->hb_tmofunc, 1473 jiffies + 1474 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1475 } 1476 } 1477 1478 /** 1479 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1480 * @phba: pointer to lpfc hba data structure. 1481 * 1482 * This routine is called to bring the HBA offline when HBA hardware error 1483 * other than Port Error 6 has been detected. 1484 **/ 1485 static void 1486 lpfc_offline_eratt(struct lpfc_hba *phba) 1487 { 1488 struct lpfc_sli *psli = &phba->sli; 1489 1490 spin_lock_irq(&phba->hbalock); 1491 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1492 spin_unlock_irq(&phba->hbalock); 1493 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1494 1495 lpfc_offline(phba); 1496 lpfc_reset_barrier(phba); 1497 spin_lock_irq(&phba->hbalock); 1498 lpfc_sli_brdreset(phba); 1499 spin_unlock_irq(&phba->hbalock); 1500 lpfc_hba_down_post(phba); 1501 lpfc_sli_brdready(phba, HS_MBRDY); 1502 lpfc_unblock_mgmt_io(phba); 1503 phba->link_state = LPFC_HBA_ERROR; 1504 return; 1505 } 1506 1507 /** 1508 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1509 * @phba: pointer to lpfc hba data structure. 1510 * 1511 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1512 * other than Port Error 6 has been detected. 1513 **/ 1514 void 1515 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1516 { 1517 spin_lock_irq(&phba->hbalock); 1518 phba->link_state = LPFC_HBA_ERROR; 1519 spin_unlock_irq(&phba->hbalock); 1520 1521 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1522 lpfc_sli_flush_io_rings(phba); 1523 lpfc_offline(phba); 1524 lpfc_hba_down_post(phba); 1525 lpfc_unblock_mgmt_io(phba); 1526 } 1527 1528 /** 1529 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1530 * @phba: pointer to lpfc hba data structure. 1531 * 1532 * This routine is invoked to handle the deferred HBA hardware error 1533 * conditions. This type of error is indicated by HBA by setting ER1 1534 * and another ER bit in the host status register. The driver will 1535 * wait until the ER1 bit clears before handling the error condition. 1536 **/ 1537 static void 1538 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1539 { 1540 uint32_t old_host_status = phba->work_hs; 1541 struct lpfc_sli *psli = &phba->sli; 1542 1543 /* If the pci channel is offline, ignore possible errors, 1544 * since we cannot communicate with the pci card anyway. 
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers an erratt. That could cause I/Os
	 * to be dropped by the firmware. Error out the iocbs (I/O) on the
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * the link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers an erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/O) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
1731 */ 1732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1733 "0457 Adapter Hardware Error " 1734 "Data: x%x x%x x%x\n", 1735 phba->work_hs, 1736 phba->work_status[0], phba->work_status[1]); 1737 1738 event_data = FC_REG_DUMP_EVENT; 1739 shost = lpfc_shost_from_vport(vport); 1740 fc_host_post_vendor_event(shost, fc_get_event_number(), 1741 sizeof(event_data), (char *) &event_data, 1742 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1743 1744 lpfc_offline_eratt(phba); 1745 } 1746 return; 1747 } 1748 1749 /** 1750 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1751 * @phba: pointer to lpfc hba data structure. 1752 * @mbx_action: flag for mailbox shutdown action. 1753 * 1754 * This routine is invoked to perform an SLI4 port PCI function reset in 1755 * response to port status register polling attention. It waits for port 1756 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1757 * During this process, interrupt vectors are freed and later requested 1758 * for handling possible port resource change. 1759 **/ 1760 static int 1761 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1762 bool en_rn_msg) 1763 { 1764 int rc; 1765 uint32_t intr_mode; 1766 1767 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1768 LPFC_SLI_INTF_IF_TYPE_2) { 1769 /* 1770 * On error status condition, driver need to wait for port 1771 * ready before performing reset. 1772 */ 1773 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1774 if (rc) 1775 return rc; 1776 } 1777 1778 /* need reset: attempt for port recovery */ 1779 if (en_rn_msg) 1780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1781 "2887 Reset Needed: Attempting Port " 1782 "Recovery...\n"); 1783 lpfc_offline_prep(phba, mbx_action); 1784 lpfc_sli_flush_io_rings(phba); 1785 lpfc_offline(phba); 1786 /* release interrupt for possible resource change */ 1787 lpfc_sli4_disable_intr(phba); 1788 rc = lpfc_sli_brdrestart(phba); 1789 if (rc) { 1790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1791 "6309 Failed to restart board\n"); 1792 return rc; 1793 } 1794 /* request and enable interrupt */ 1795 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1796 if (intr_mode == LPFC_INTR_ERROR) { 1797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1798 "3175 Failed to enable interrupt\n"); 1799 return -EIO; 1800 } 1801 phba->intr_mode = intr_mode; 1802 rc = lpfc_online(phba); 1803 if (rc == 0) 1804 lpfc_unblock_mgmt_io(phba); 1805 1806 return rc; 1807 } 1808 1809 /** 1810 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1811 * @phba: pointer to lpfc hba data structure. 1812 * 1813 * This routine is invoked to handle the SLI4 HBA hardware error attention 1814 * conditions. 1815 **/ 1816 static void 1817 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1818 { 1819 struct lpfc_vport *vport = phba->pport; 1820 uint32_t event_data; 1821 struct Scsi_Host *shost; 1822 uint32_t if_type; 1823 struct lpfc_register portstat_reg = {0}; 1824 uint32_t reg_err1, reg_err2; 1825 uint32_t uerrlo_reg, uemasklo_reg; 1826 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1827 bool en_rn_msg = true; 1828 struct temp_event temp_event_data; 1829 struct lpfc_register portsmphr_reg; 1830 int rc, i; 1831 1832 /* If the pci channel is offline, ignore possible errors, since 1833 * we cannot communicate with the pci card anyway. 
1834 */ 1835 if (pci_channel_offline(phba->pcidev)) { 1836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1837 "3166 pci channel is offline\n"); 1838 lpfc_sli4_offline_eratt(phba); 1839 return; 1840 } 1841 1842 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1843 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1844 switch (if_type) { 1845 case LPFC_SLI_INTF_IF_TYPE_0: 1846 pci_rd_rc1 = lpfc_readl( 1847 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1848 &uerrlo_reg); 1849 pci_rd_rc2 = lpfc_readl( 1850 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1851 &uemasklo_reg); 1852 /* consider PCI bus read error as pci_channel_offline */ 1853 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1854 return; 1855 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1856 lpfc_sli4_offline_eratt(phba); 1857 return; 1858 } 1859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1860 "7623 Checking UE recoverable"); 1861 1862 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1863 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1864 &portsmphr_reg.word0)) 1865 continue; 1866 1867 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1868 &portsmphr_reg); 1869 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1870 LPFC_PORT_SEM_UE_RECOVERABLE) 1871 break; 1872 /*Sleep for 1Sec, before checking SEMAPHORE */ 1873 msleep(1000); 1874 } 1875 1876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1877 "4827 smphr_port_status x%x : Waited %dSec", 1878 smphr_port_status, i); 1879 1880 /* Recoverable UE, reset the HBA device */ 1881 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1882 LPFC_PORT_SEM_UE_RECOVERABLE) { 1883 for (i = 0; i < 20; i++) { 1884 msleep(1000); 1885 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1886 &portsmphr_reg.word0) && 1887 (LPFC_POST_STAGE_PORT_READY == 1888 bf_get(lpfc_port_smphr_port_status, 1889 &portsmphr_reg))) { 1890 rc = lpfc_sli4_port_sta_fn_reset(phba, 1891 LPFC_MBX_NO_WAIT, en_rn_msg); 1892 if (rc == 0) 1893 return; 1894 lpfc_printf_log(phba, 1895 KERN_ERR, LOG_INIT, 1896 "4215 Failed to recover UE"); 1897 break; 1898 } 1899 } 1900 } 1901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1902 "7624 Firmware not ready: Failing UE recovery," 1903 " waited %dSec", i); 1904 phba->link_state = LPFC_HBA_ERROR; 1905 break; 1906 1907 case LPFC_SLI_INTF_IF_TYPE_2: 1908 case LPFC_SLI_INTF_IF_TYPE_6: 1909 pci_rd_rc1 = lpfc_readl( 1910 phba->sli4_hba.u.if_type2.STATUSregaddr, 1911 &portstat_reg.word0); 1912 /* consider PCI bus read error as pci_channel_offline */ 1913 if (pci_rd_rc1 == -EIO) { 1914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1915 "3151 PCI bus read access failure: x%x\n", 1916 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1917 lpfc_sli4_offline_eratt(phba); 1918 return; 1919 } 1920 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1921 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1922 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1924 "2889 Port Overtemperature event, " 1925 "taking port offline Data: x%x x%x\n", 1926 reg_err1, reg_err2); 1927 1928 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1929 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1930 temp_event_data.event_code = LPFC_CRIT_TEMP; 1931 temp_event_data.data = 0xFFFFFFFF; 1932 1933 shost = lpfc_shost_from_vport(phba->pport); 1934 fc_host_post_vendor_event(shost, fc_get_event_number(), 1935 sizeof(temp_event_data), 1936 (char *)&temp_event_data, 1937 SCSI_NL_VID_TYPE_PCI 1938 | PCI_VENDOR_ID_EMULEX); 1939 1940 spin_lock_irq(&phba->hbalock); 1941 
phba->over_temp_state = HBA_OVER_TEMP; 1942 spin_unlock_irq(&phba->hbalock); 1943 lpfc_sli4_offline_eratt(phba); 1944 return; 1945 } 1946 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1947 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1948 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1949 "3143 Port Down: Firmware Update " 1950 "Detected\n"); 1951 en_rn_msg = false; 1952 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1953 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1955 "3144 Port Down: Debug Dump\n"); 1956 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1957 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1959 "3145 Port Down: Provisioning\n"); 1960 1961 /* If resets are disabled then leave the HBA alone and return */ 1962 if (!phba->cfg_enable_hba_reset) 1963 return; 1964 1965 /* Check port status register for function reset */ 1966 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1967 en_rn_msg); 1968 if (rc == 0) { 1969 /* don't report event on forced debug dump */ 1970 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1971 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1972 return; 1973 else 1974 break; 1975 } 1976 /* fall through for not able to recover */ 1977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1978 "3152 Unrecoverable error\n"); 1979 phba->link_state = LPFC_HBA_ERROR; 1980 break; 1981 case LPFC_SLI_INTF_IF_TYPE_1: 1982 default: 1983 break; 1984 } 1985 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1986 "3123 Report dump event to upper layer\n"); 1987 /* Send an internal error event to mgmt application */ 1988 lpfc_board_errevt_to_mgmt(phba); 1989 1990 event_data = FC_REG_DUMP_EVENT; 1991 shost = lpfc_shost_from_vport(vport); 1992 fc_host_post_vendor_event(shost, fc_get_event_number(), 1993 sizeof(event_data), (char *) &event_data, 1994 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1995 } 1996 1997 /** 1998 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1999 * @phba: pointer to lpfc HBA data structure. 2000 * 2001 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2002 * routine from the API jump table function pointer from the lpfc_hba struct. 2003 * 2004 * Return codes 2005 * 0 - success. 2006 * Any other value - error. 2007 **/ 2008 void 2009 lpfc_handle_eratt(struct lpfc_hba *phba) 2010 { 2011 (*phba->lpfc_handle_eratt)(phba); 2012 } 2013 2014 /** 2015 * lpfc_handle_latt - The HBA link event handler 2016 * @phba: pointer to lpfc hba data structure. 2017 * 2018 * This routine is invoked from the worker thread to handle a HBA host 2019 * attention link event. SLI3 only. 
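 *
 * It flushes outstanding ELS commands, issues a READ_TOPOLOGY (READ_LA)
 * mailbox command completed by lpfc_mbx_cmpl_read_topology(), and clears the
 * latched HA_LATT bit. On any failure the error path re-enables link
 * attention interrupts (HC_LAINT_ENA), clears HA_LATT, calls lpfc_linkdown()
 * and marks the HBA in error state.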
2020 **/ 2021 void 2022 lpfc_handle_latt(struct lpfc_hba *phba) 2023 { 2024 struct lpfc_vport *vport = phba->pport; 2025 struct lpfc_sli *psli = &phba->sli; 2026 LPFC_MBOXQ_t *pmb; 2027 volatile uint32_t control; 2028 struct lpfc_dmabuf *mp; 2029 int rc = 0; 2030 2031 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2032 if (!pmb) { 2033 rc = 1; 2034 goto lpfc_handle_latt_err_exit; 2035 } 2036 2037 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2038 if (!mp) { 2039 rc = 2; 2040 goto lpfc_handle_latt_free_pmb; 2041 } 2042 2043 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2044 if (!mp->virt) { 2045 rc = 3; 2046 goto lpfc_handle_latt_free_mp; 2047 } 2048 2049 /* Cleanup any outstanding ELS commands */ 2050 lpfc_els_flush_all_cmd(phba); 2051 2052 psli->slistat.link_event++; 2053 lpfc_read_topology(phba, pmb, mp); 2054 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2055 pmb->vport = vport; 2056 /* Block ELS IOCBs until we have processed this mbox command */ 2057 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2058 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2059 if (rc == MBX_NOT_FINISHED) { 2060 rc = 4; 2061 goto lpfc_handle_latt_free_mbuf; 2062 } 2063 2064 /* Clear Link Attention in HA REG */ 2065 spin_lock_irq(&phba->hbalock); 2066 writel(HA_LATT, phba->HAregaddr); 2067 readl(phba->HAregaddr); /* flush */ 2068 spin_unlock_irq(&phba->hbalock); 2069 2070 return; 2071 2072 lpfc_handle_latt_free_mbuf: 2073 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2074 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2075 lpfc_handle_latt_free_mp: 2076 kfree(mp); 2077 lpfc_handle_latt_free_pmb: 2078 mempool_free(pmb, phba->mbox_mem_pool); 2079 lpfc_handle_latt_err_exit: 2080 /* Enable Link attention interrupts */ 2081 spin_lock_irq(&phba->hbalock); 2082 psli->sli_flag |= LPFC_PROCESS_LA; 2083 control = readl(phba->HCregaddr); 2084 control |= HC_LAINT_ENA; 2085 writel(control, phba->HCregaddr); 2086 readl(phba->HCregaddr); /* flush */ 2087 2088 /* Clear Link Attention in HA REG */ 2089 writel(HA_LATT, phba->HAregaddr); 2090 readl(phba->HAregaddr); /* flush */ 2091 spin_unlock_irq(&phba->hbalock); 2092 lpfc_linkdown(phba); 2093 phba->link_state = LPFC_HBA_ERROR; 2094 2095 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2096 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2097 2098 return; 2099 } 2100 2101 /** 2102 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2103 * @phba: pointer to lpfc hba data structure. 2104 * @vpd: pointer to the vital product data. 2105 * @len: length of the vital product data in bytes. 2106 * 2107 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2108 * an array of characters. In this routine, the ModelName, ProgramType, and 2109 * ModelDesc, etc. fields of the phba data structure will be populated. 
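 *
 * Within the 0x90 (VPD-R) descriptor the keywords handled are SN (serial
 * number) and the vendor-specific V1 (model description), V2 (model name),
 * V3 (program type) and V4 (port) fields; the 0x82/0x91 descriptors are
 * skipped and parsing stops at the 0x78 end tag.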
2110 * 2111 * Return codes 2112 * 0 - pointer to the VPD passed in is NULL 2113 * 1 - success 2114 **/ 2115 int 2116 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2117 { 2118 uint8_t lenlo, lenhi; 2119 int Length; 2120 int i, j; 2121 int finished = 0; 2122 int index = 0; 2123 2124 if (!vpd) 2125 return 0; 2126 2127 /* Vital Product */ 2128 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2129 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2130 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2131 (uint32_t) vpd[3]); 2132 while (!finished && (index < (len - 4))) { 2133 switch (vpd[index]) { 2134 case 0x82: 2135 case 0x91: 2136 index += 1; 2137 lenlo = vpd[index]; 2138 index += 1; 2139 lenhi = vpd[index]; 2140 index += 1; 2141 i = ((((unsigned short)lenhi) << 8) + lenlo); 2142 index += i; 2143 break; 2144 case 0x90: 2145 index += 1; 2146 lenlo = vpd[index]; 2147 index += 1; 2148 lenhi = vpd[index]; 2149 index += 1; 2150 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2151 if (Length > len - index) 2152 Length = len - index; 2153 while (Length > 0) { 2154 /* Look for Serial Number */ 2155 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2156 index += 2; 2157 i = vpd[index]; 2158 index += 1; 2159 j = 0; 2160 Length -= (3+i); 2161 while(i--) { 2162 phba->SerialNumber[j++] = vpd[index++]; 2163 if (j == 31) 2164 break; 2165 } 2166 phba->SerialNumber[j] = 0; 2167 continue; 2168 } 2169 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2170 phba->vpd_flag |= VPD_MODEL_DESC; 2171 index += 2; 2172 i = vpd[index]; 2173 index += 1; 2174 j = 0; 2175 Length -= (3+i); 2176 while(i--) { 2177 phba->ModelDesc[j++] = vpd[index++]; 2178 if (j == 255) 2179 break; 2180 } 2181 phba->ModelDesc[j] = 0; 2182 continue; 2183 } 2184 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2185 phba->vpd_flag |= VPD_MODEL_NAME; 2186 index += 2; 2187 i = vpd[index]; 2188 index += 1; 2189 j = 0; 2190 Length -= (3+i); 2191 while(i--) { 2192 phba->ModelName[j++] = vpd[index++]; 2193 if (j == 79) 2194 break; 2195 } 2196 phba->ModelName[j] = 0; 2197 continue; 2198 } 2199 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2200 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2201 index += 2; 2202 i = vpd[index]; 2203 index += 1; 2204 j = 0; 2205 Length -= (3+i); 2206 while(i--) { 2207 phba->ProgramType[j++] = vpd[index++]; 2208 if (j == 255) 2209 break; 2210 } 2211 phba->ProgramType[j] = 0; 2212 continue; 2213 } 2214 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2215 phba->vpd_flag |= VPD_PORT; 2216 index += 2; 2217 i = vpd[index]; 2218 index += 1; 2219 j = 0; 2220 Length -= (3+i); 2221 while(i--) { 2222 if ((phba->sli_rev == LPFC_SLI_REV4) && 2223 (phba->sli4_hba.pport_name_sta == 2224 LPFC_SLI4_PPNAME_GET)) { 2225 j++; 2226 index++; 2227 } else 2228 phba->Port[j++] = vpd[index++]; 2229 if (j == 19) 2230 break; 2231 } 2232 if ((phba->sli_rev != LPFC_SLI_REV4) || 2233 (phba->sli4_hba.pport_name_sta == 2234 LPFC_SLI4_PPNAME_NON)) 2235 phba->Port[j] = 0; 2236 continue; 2237 } 2238 else { 2239 index += 2; 2240 i = vpd[index]; 2241 index += 1; 2242 index += i; 2243 Length -= (3 + i); 2244 } 2245 } 2246 finished = 0; 2247 break; 2248 case 0x78: 2249 finished = 1; 2250 break; 2251 default: 2252 index ++; 2253 break; 2254 } 2255 } 2256 2257 return(1); 2258 } 2259 2260 /** 2261 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2262 * @phba: pointer to lpfc hba data structure. 2263 * @mdp: pointer to the data structure to hold the derived model name. 
2264 * @descp: pointer to the data structure to hold the derived description. 2265 * 2266 * This routine retrieves HBA's description based on its registered PCI device 2267 * ID. The @descp passed into this function points to an array of 256 chars. It 2268 * shall be returned with the model name, maximum speed, and the host bus type. 2269 * The @mdp passed into this function points to an array of 80 chars. When the 2270 * function returns, the @mdp will be filled with the model name. 2271 **/ 2272 static void 2273 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2274 { 2275 lpfc_vpd_t *vp; 2276 uint16_t dev_id = phba->pcidev->device; 2277 int max_speed; 2278 int GE = 0; 2279 int oneConnect = 0; /* default is not a oneConnect */ 2280 struct { 2281 char *name; 2282 char *bus; 2283 char *function; 2284 } m = {"<Unknown>", "", ""}; 2285 2286 if (mdp && mdp[0] != '\0' 2287 && descp && descp[0] != '\0') 2288 return; 2289 2290 if (phba->lmt & LMT_64Gb) 2291 max_speed = 64; 2292 else if (phba->lmt & LMT_32Gb) 2293 max_speed = 32; 2294 else if (phba->lmt & LMT_16Gb) 2295 max_speed = 16; 2296 else if (phba->lmt & LMT_10Gb) 2297 max_speed = 10; 2298 else if (phba->lmt & LMT_8Gb) 2299 max_speed = 8; 2300 else if (phba->lmt & LMT_4Gb) 2301 max_speed = 4; 2302 else if (phba->lmt & LMT_2Gb) 2303 max_speed = 2; 2304 else if (phba->lmt & LMT_1Gb) 2305 max_speed = 1; 2306 else 2307 max_speed = 0; 2308 2309 vp = &phba->vpd; 2310 2311 switch (dev_id) { 2312 case PCI_DEVICE_ID_FIREFLY: 2313 m = (typeof(m)){"LP6000", "PCI", 2314 "Obsolete, Unsupported Fibre Channel Adapter"}; 2315 break; 2316 case PCI_DEVICE_ID_SUPERFLY: 2317 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2318 m = (typeof(m)){"LP7000", "PCI", ""}; 2319 else 2320 m = (typeof(m)){"LP7000E", "PCI", ""}; 2321 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2322 break; 2323 case PCI_DEVICE_ID_DRAGONFLY: 2324 m = (typeof(m)){"LP8000", "PCI", 2325 "Obsolete, Unsupported Fibre Channel Adapter"}; 2326 break; 2327 case PCI_DEVICE_ID_CENTAUR: 2328 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2329 m = (typeof(m)){"LP9002", "PCI", ""}; 2330 else 2331 m = (typeof(m)){"LP9000", "PCI", ""}; 2332 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2333 break; 2334 case PCI_DEVICE_ID_RFLY: 2335 m = (typeof(m)){"LP952", "PCI", 2336 "Obsolete, Unsupported Fibre Channel Adapter"}; 2337 break; 2338 case PCI_DEVICE_ID_PEGASUS: 2339 m = (typeof(m)){"LP9802", "PCI-X", 2340 "Obsolete, Unsupported Fibre Channel Adapter"}; 2341 break; 2342 case PCI_DEVICE_ID_THOR: 2343 m = (typeof(m)){"LP10000", "PCI-X", 2344 "Obsolete, Unsupported Fibre Channel Adapter"}; 2345 break; 2346 case PCI_DEVICE_ID_VIPER: 2347 m = (typeof(m)){"LPX1000", "PCI-X", 2348 "Obsolete, Unsupported Fibre Channel Adapter"}; 2349 break; 2350 case PCI_DEVICE_ID_PFLY: 2351 m = (typeof(m)){"LP982", "PCI-X", 2352 "Obsolete, Unsupported Fibre Channel Adapter"}; 2353 break; 2354 case PCI_DEVICE_ID_TFLY: 2355 m = (typeof(m)){"LP1050", "PCI-X", 2356 "Obsolete, Unsupported Fibre Channel Adapter"}; 2357 break; 2358 case PCI_DEVICE_ID_HELIOS: 2359 m = (typeof(m)){"LP11000", "PCI-X2", 2360 "Obsolete, Unsupported Fibre Channel Adapter"}; 2361 break; 2362 case PCI_DEVICE_ID_HELIOS_SCSP: 2363 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2364 "Obsolete, Unsupported Fibre Channel Adapter"}; 2365 break; 2366 case PCI_DEVICE_ID_HELIOS_DCSP: 2367 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2368 "Obsolete, Unsupported Fibre Channel Adapter"}; 2369 break; 2370 case 
PCI_DEVICE_ID_NEPTUNE: 2371 m = (typeof(m)){"LPe1000", "PCIe", 2372 "Obsolete, Unsupported Fibre Channel Adapter"}; 2373 break; 2374 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2375 m = (typeof(m)){"LPe1000-SP", "PCIe", 2376 "Obsolete, Unsupported Fibre Channel Adapter"}; 2377 break; 2378 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2379 m = (typeof(m)){"LPe1002-SP", "PCIe", 2380 "Obsolete, Unsupported Fibre Channel Adapter"}; 2381 break; 2382 case PCI_DEVICE_ID_BMID: 2383 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2384 break; 2385 case PCI_DEVICE_ID_BSMB: 2386 m = (typeof(m)){"LP111", "PCI-X2", 2387 "Obsolete, Unsupported Fibre Channel Adapter"}; 2388 break; 2389 case PCI_DEVICE_ID_ZEPHYR: 2390 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2391 break; 2392 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2393 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2394 break; 2395 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2396 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2397 GE = 1; 2398 break; 2399 case PCI_DEVICE_ID_ZMID: 2400 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2401 break; 2402 case PCI_DEVICE_ID_ZSMB: 2403 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2404 break; 2405 case PCI_DEVICE_ID_LP101: 2406 m = (typeof(m)){"LP101", "PCI-X", 2407 "Obsolete, Unsupported Fibre Channel Adapter"}; 2408 break; 2409 case PCI_DEVICE_ID_LP10000S: 2410 m = (typeof(m)){"LP10000-S", "PCI", 2411 "Obsolete, Unsupported Fibre Channel Adapter"}; 2412 break; 2413 case PCI_DEVICE_ID_LP11000S: 2414 m = (typeof(m)){"LP11000-S", "PCI-X2", 2415 "Obsolete, Unsupported Fibre Channel Adapter"}; 2416 break; 2417 case PCI_DEVICE_ID_LPE11000S: 2418 m = (typeof(m)){"LPe11000-S", "PCIe", 2419 "Obsolete, Unsupported Fibre Channel Adapter"}; 2420 break; 2421 case PCI_DEVICE_ID_SAT: 2422 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2423 break; 2424 case PCI_DEVICE_ID_SAT_MID: 2425 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2426 break; 2427 case PCI_DEVICE_ID_SAT_SMB: 2428 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2429 break; 2430 case PCI_DEVICE_ID_SAT_DCSP: 2431 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2432 break; 2433 case PCI_DEVICE_ID_SAT_SCSP: 2434 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2435 break; 2436 case PCI_DEVICE_ID_SAT_S: 2437 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2438 break; 2439 case PCI_DEVICE_ID_HORNET: 2440 m = (typeof(m)){"LP21000", "PCIe", 2441 "Obsolete, Unsupported FCoE Adapter"}; 2442 GE = 1; 2443 break; 2444 case PCI_DEVICE_ID_PROTEUS_VF: 2445 m = (typeof(m)){"LPev12000", "PCIe IOV", 2446 "Obsolete, Unsupported Fibre Channel Adapter"}; 2447 break; 2448 case PCI_DEVICE_ID_PROTEUS_PF: 2449 m = (typeof(m)){"LPev12000", "PCIe IOV", 2450 "Obsolete, Unsupported Fibre Channel Adapter"}; 2451 break; 2452 case PCI_DEVICE_ID_PROTEUS_S: 2453 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2454 "Obsolete, Unsupported Fibre Channel Adapter"}; 2455 break; 2456 case PCI_DEVICE_ID_TIGERSHARK: 2457 oneConnect = 1; 2458 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2459 break; 2460 case PCI_DEVICE_ID_TOMCAT: 2461 oneConnect = 1; 2462 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2463 break; 2464 case PCI_DEVICE_ID_FALCON: 2465 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2466 "EmulexSecure Fibre"}; 2467 break; 2468 case PCI_DEVICE_ID_BALIUS: 2469 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2470 "Obsolete, Unsupported Fibre Channel Adapter"}; 2471 break; 2472 
	case PCI_DEVICE_ID_LANCER_FC:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe",
				"Obsolete, Unsupported FCoE"};
		break;
	case PCI_DEVICE_ID_LANCER_G6_FC:
		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_G7_FC:
		m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/*
	 * oneConnect HBAs require special processing: they are all
	 * initiators, and the port number is appended to the end of the
	 * description.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 * The number of IOCBs NOT able to be posted to the IOCB ring.
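 *
 * Each CMD_QUE_RING_BUF64_CN iocb posted carries up to two receive buffers,
 * so roughly cnt/2 iocbs are issued for cnt buffers. Typical use, as in
 * lpfc_post_rcv_buf() below:
 *
 *	lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING],
 *			 LPFC_BUF_RING0);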
2539 **/ 2540 int 2541 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2542 { 2543 IOCB_t *icmd; 2544 struct lpfc_iocbq *iocb; 2545 struct lpfc_dmabuf *mp1, *mp2; 2546 2547 cnt += pring->missbufcnt; 2548 2549 /* While there are buffers to post */ 2550 while (cnt > 0) { 2551 /* Allocate buffer for command iocb */ 2552 iocb = lpfc_sli_get_iocbq(phba); 2553 if (iocb == NULL) { 2554 pring->missbufcnt = cnt; 2555 return cnt; 2556 } 2557 icmd = &iocb->iocb; 2558 2559 /* 2 buffers can be posted per command */ 2560 /* Allocate buffer to post */ 2561 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2562 if (mp1) 2563 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2564 if (!mp1 || !mp1->virt) { 2565 kfree(mp1); 2566 lpfc_sli_release_iocbq(phba, iocb); 2567 pring->missbufcnt = cnt; 2568 return cnt; 2569 } 2570 2571 INIT_LIST_HEAD(&mp1->list); 2572 /* Allocate buffer to post */ 2573 if (cnt > 1) { 2574 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2575 if (mp2) 2576 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2577 &mp2->phys); 2578 if (!mp2 || !mp2->virt) { 2579 kfree(mp2); 2580 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2581 kfree(mp1); 2582 lpfc_sli_release_iocbq(phba, iocb); 2583 pring->missbufcnt = cnt; 2584 return cnt; 2585 } 2586 2587 INIT_LIST_HEAD(&mp2->list); 2588 } else { 2589 mp2 = NULL; 2590 } 2591 2592 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2593 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2594 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2595 icmd->ulpBdeCount = 1; 2596 cnt--; 2597 if (mp2) { 2598 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2599 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2600 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2601 cnt--; 2602 icmd->ulpBdeCount = 2; 2603 } 2604 2605 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2606 icmd->ulpLe = 1; 2607 2608 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2609 IOCB_ERROR) { 2610 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2611 kfree(mp1); 2612 cnt++; 2613 if (mp2) { 2614 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2615 kfree(mp2); 2616 cnt++; 2617 } 2618 lpfc_sli_release_iocbq(phba, iocb); 2619 pring->missbufcnt = cnt; 2620 return cnt; 2621 } 2622 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2623 if (mp2) 2624 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2625 } 2626 pring->missbufcnt = 0; 2627 return 0; 2628 } 2629 2630 /** 2631 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2632 * @phba: pointer to lpfc hba data structure. 2633 * 2634 * This routine posts initial receive IOCB buffers to the ELS ring. The 2635 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2636 * set to 64 IOCBs. SLI3 only. 2637 * 2638 * Return codes 2639 * 0 - success (currently always success) 2640 **/ 2641 static int 2642 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2643 { 2644 struct lpfc_sli *psli = &phba->sli; 2645 2646 /* Ring 0, ELS / CT buffers */ 2647 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2648 /* Ring 2 - FCP no buffers needed */ 2649 2650 return 0; 2651 } 2652 2653 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2654 2655 /** 2656 * lpfc_sha_init - Set up initial array of hash table entries 2657 * @HashResultPointer: pointer to an array as hash table. 2658 * 2659 * This routine sets up the initial values to the array of hash table entries 2660 * for the LC HBAs. 
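 * The five constants below are the standard SHA-1 initial hash values
 * (H0-H4); lpfc_sha_iterate() then applies the usual 80-round SHA-1
 * compression function to the working buffer.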
 **/
static void
lpfc_sha_init(uint32_t *HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table and returned through @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
			S(1, HashWorkingPointer[t - 3] ^
			     HashWorkingPointer[t - 8] ^
			     HashWorkingPointer[t - 14] ^
			     HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred to by @HashWorking
 * from the challenge random numbers associated with the host, referred to by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
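 * The 80-word working array is seeded with the adapter WWNN (words 0/78 and
 * 1/79), mixed with the host RandomData challenge words through
 * lpfc_challenge_key(), and the resulting SHA-1 digest is returned through
 * @hbainit.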
2751 **/ 2752 void 2753 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2754 { 2755 int t; 2756 uint32_t *HashWorking; 2757 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2758 2759 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2760 if (!HashWorking) 2761 return; 2762 2763 HashWorking[0] = HashWorking[78] = *pwwnn++; 2764 HashWorking[1] = HashWorking[79] = *pwwnn; 2765 2766 for (t = 0; t < 7; t++) 2767 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2768 2769 lpfc_sha_init(hbainit); 2770 lpfc_sha_iterate(hbainit, HashWorking); 2771 kfree(HashWorking); 2772 } 2773 2774 /** 2775 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2776 * @vport: pointer to a virtual N_Port data structure. 2777 * 2778 * This routine performs the necessary cleanups before deleting the @vport. 2779 * It invokes the discovery state machine to perform necessary state 2780 * transitions and to release the ndlps associated with the @vport. Note, 2781 * the physical port is treated as @vport 0. 2782 **/ 2783 void 2784 lpfc_cleanup(struct lpfc_vport *vport) 2785 { 2786 struct lpfc_hba *phba = vport->phba; 2787 struct lpfc_nodelist *ndlp, *next_ndlp; 2788 int i = 0; 2789 2790 if (phba->link_state > LPFC_LINK_DOWN) 2791 lpfc_port_link_failure(vport); 2792 2793 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2794 if (!NLP_CHK_NODE_ACT(ndlp)) { 2795 ndlp = lpfc_enable_node(vport, ndlp, 2796 NLP_STE_UNUSED_NODE); 2797 if (!ndlp) 2798 continue; 2799 spin_lock_irq(&phba->ndlp_lock); 2800 NLP_SET_FREE_REQ(ndlp); 2801 spin_unlock_irq(&phba->ndlp_lock); 2802 /* Trigger the release of the ndlp memory */ 2803 lpfc_nlp_put(ndlp); 2804 continue; 2805 } 2806 spin_lock_irq(&phba->ndlp_lock); 2807 if (NLP_CHK_FREE_REQ(ndlp)) { 2808 /* The ndlp should not be in memory free mode already */ 2809 spin_unlock_irq(&phba->ndlp_lock); 2810 continue; 2811 } else 2812 /* Indicate request for freeing ndlp memory */ 2813 NLP_SET_FREE_REQ(ndlp); 2814 spin_unlock_irq(&phba->ndlp_lock); 2815 2816 if (vport->port_type != LPFC_PHYSICAL_PORT && 2817 ndlp->nlp_DID == Fabric_DID) { 2818 /* Just free up ndlp with Fabric_DID for vports */ 2819 lpfc_nlp_put(ndlp); 2820 continue; 2821 } 2822 2823 /* take care of nodes in unused state before the state 2824 * machine taking action. 2825 */ 2826 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2827 lpfc_nlp_put(ndlp); 2828 continue; 2829 } 2830 2831 if (ndlp->nlp_type & NLP_FABRIC) 2832 lpfc_disc_state_machine(vport, ndlp, NULL, 2833 NLP_EVT_DEVICE_RECOVERY); 2834 2835 lpfc_disc_state_machine(vport, ndlp, NULL, 2836 NLP_EVT_DEVICE_RM); 2837 } 2838 2839 /* At this point, ALL ndlp's should be gone 2840 * because of the previous NLP_EVT_DEVICE_RM. 2841 * Lets wait for this to happen, if needed. 2842 */ 2843 while (!list_empty(&vport->fc_nodes)) { 2844 if (i++ > 3000) { 2845 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2846 "0233 Nodelist not empty\n"); 2847 list_for_each_entry_safe(ndlp, next_ndlp, 2848 &vport->fc_nodes, nlp_listp) { 2849 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2850 LOG_NODE, 2851 "0282 did:x%x ndlp:x%px " 2852 "usgmap:x%x refcnt:%d\n", 2853 ndlp->nlp_DID, (void *)ndlp, 2854 ndlp->nlp_usg_map, 2855 kref_read(&ndlp->kref)); 2856 } 2857 break; 2858 } 2859 2860 /* Wait for any activity on ndlps to settle */ 2861 msleep(10); 2862 } 2863 lpfc_cleanup_vports_rrqs(vport, NULL); 2864 } 2865 2866 /** 2867 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2868 * @vport: pointer to a virtual N_Port data structure. 
2869 * 2870 * This routine stops all the timers associated with a @vport. This function 2871 * is invoked before disabling or deleting a @vport. Note that the physical 2872 * port is treated as @vport 0. 2873 **/ 2874 void 2875 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2876 { 2877 del_timer_sync(&vport->els_tmofunc); 2878 del_timer_sync(&vport->delayed_disc_tmo); 2879 lpfc_can_disctmo(vport); 2880 return; 2881 } 2882 2883 /** 2884 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2885 * @phba: pointer to lpfc hba data structure. 2886 * 2887 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2888 * caller of this routine should already hold the host lock. 2889 **/ 2890 void 2891 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2892 { 2893 /* Clear pending FCF rediscovery wait flag */ 2894 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2895 2896 /* Now, try to stop the timer */ 2897 del_timer(&phba->fcf.redisc_wait); 2898 } 2899 2900 /** 2901 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2902 * @phba: pointer to lpfc hba data structure. 2903 * 2904 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2905 * checks whether the FCF rediscovery wait timer is pending with the host 2906 * lock held before proceeding with disabling the timer and clearing the 2907 * wait timer pendig flag. 2908 **/ 2909 void 2910 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2911 { 2912 spin_lock_irq(&phba->hbalock); 2913 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2914 /* FCF rediscovery timer already fired or stopped */ 2915 spin_unlock_irq(&phba->hbalock); 2916 return; 2917 } 2918 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2919 /* Clear failover in progress flags */ 2920 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2921 spin_unlock_irq(&phba->hbalock); 2922 } 2923 2924 /** 2925 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2926 * @phba: pointer to lpfc hba data structure. 2927 * 2928 * This routine stops all the timers associated with a HBA. This function is 2929 * invoked before either putting a HBA offline or unloading the driver. 2930 **/ 2931 void 2932 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2933 { 2934 if (phba->pport) 2935 lpfc_stop_vport_timers(phba->pport); 2936 cancel_delayed_work_sync(&phba->eq_delay_work); 2937 del_timer_sync(&phba->sli.mbox_tmo); 2938 del_timer_sync(&phba->fabric_block_timer); 2939 del_timer_sync(&phba->eratt_poll); 2940 del_timer_sync(&phba->hb_tmofunc); 2941 if (phba->sli_rev == LPFC_SLI_REV4) { 2942 del_timer_sync(&phba->rrq_tmr); 2943 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2944 } 2945 phba->hb_outstanding = 0; 2946 2947 switch (phba->pci_dev_grp) { 2948 case LPFC_PCI_DEV_LP: 2949 /* Stop any LightPulse device specific driver timers */ 2950 del_timer_sync(&phba->fcp_poll_timer); 2951 break; 2952 case LPFC_PCI_DEV_OC: 2953 /* Stop any OneConnect device specific driver timers */ 2954 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2955 break; 2956 default: 2957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2958 "0297 Invalid device group (x%x)\n", 2959 phba->pci_dev_grp); 2960 break; 2961 } 2962 return; 2963 } 2964 2965 /** 2966 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2967 * @phba: pointer to lpfc hba data structure. 2968 * 2969 * This routine marks a HBA's management interface as blocked. 
Once the HBA's 2970 * management interface is marked as blocked, all the user space access to 2971 * the HBA, whether they are from sysfs interface or libdfc interface will 2972 * all be blocked. The HBA is set to block the management interface when the 2973 * driver prepares the HBA interface for online or offline. 2974 **/ 2975 static void 2976 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2977 { 2978 unsigned long iflag; 2979 uint8_t actcmd = MBX_HEARTBEAT; 2980 unsigned long timeout; 2981 2982 spin_lock_irqsave(&phba->hbalock, iflag); 2983 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2984 spin_unlock_irqrestore(&phba->hbalock, iflag); 2985 if (mbx_action == LPFC_MBX_NO_WAIT) 2986 return; 2987 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2988 spin_lock_irqsave(&phba->hbalock, iflag); 2989 if (phba->sli.mbox_active) { 2990 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2991 /* Determine how long we might wait for the active mailbox 2992 * command to be gracefully completed by firmware. 2993 */ 2994 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2995 phba->sli.mbox_active) * 1000) + jiffies; 2996 } 2997 spin_unlock_irqrestore(&phba->hbalock, iflag); 2998 2999 /* Wait for the outstnading mailbox command to complete */ 3000 while (phba->sli.mbox_active) { 3001 /* Check active mailbox complete status every 2ms */ 3002 msleep(2); 3003 if (time_after(jiffies, timeout)) { 3004 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3005 "2813 Mgmt IO is Blocked %x " 3006 "- mbox cmd %x still active\n", 3007 phba->sli.sli_flag, actcmd); 3008 break; 3009 } 3010 } 3011 } 3012 3013 /** 3014 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3015 * @phba: pointer to lpfc hba data structure. 3016 * 3017 * Allocate RPIs for all active remote nodes. This is needed whenever 3018 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3019 * is to fixup the temporary rpi assignments. 3020 **/ 3021 void 3022 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3023 { 3024 struct lpfc_nodelist *ndlp, *next_ndlp; 3025 struct lpfc_vport **vports; 3026 int i, rpi; 3027 unsigned long flags; 3028 3029 if (phba->sli_rev != LPFC_SLI_REV4) 3030 return; 3031 3032 vports = lpfc_create_vport_work_array(phba); 3033 if (vports == NULL) 3034 return; 3035 3036 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3037 if (vports[i]->load_flag & FC_UNLOADING) 3038 continue; 3039 3040 list_for_each_entry_safe(ndlp, next_ndlp, 3041 &vports[i]->fc_nodes, 3042 nlp_listp) { 3043 if (!NLP_CHK_NODE_ACT(ndlp)) 3044 continue; 3045 rpi = lpfc_sli4_alloc_rpi(phba); 3046 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3047 spin_lock_irqsave(&phba->ndlp_lock, flags); 3048 NLP_CLR_NODE_ACT(ndlp); 3049 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3050 continue; 3051 } 3052 ndlp->nlp_rpi = rpi; 3053 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3054 LOG_NODE | LOG_DISCOVERY, 3055 "0009 Assign RPI x%x to ndlp x%px " 3056 "DID:x%06x flg:x%x map:x%x\n", 3057 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, 3058 ndlp->nlp_flag, ndlp->nlp_usg_map); 3059 } 3060 } 3061 lpfc_destroy_vport_work_array(phba, vports); 3062 } 3063 3064 /** 3065 * lpfc_create_expedite_pool - create expedite pool 3066 * @phba: pointer to lpfc hba data structure. 3067 * 3068 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3069 * to expedite pool. Mark them as expedite. 
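 * At most XRI_BATCH buffers are moved; each buffer moved decrements the
 * HWQ put count and increments epd_pool->count.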
 **/
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_init(&epd_pool->lock);
	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	INIT_LIST_HEAD(&epd_pool->list);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
 * of HWQ 0. Clear the mark.
 **/
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	struct lpfc_epd_pool *epd_pool;
	unsigned long iflag;

	epd_pool = &phba->epd_pool;
	qp = &phba->sli4_hba.hdwq[0];

	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
	spin_lock(&epd_pool->lock);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &epd_pool->list, list) {
		list_move_tail(&lpfc_ncmd->list,
			       &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->expedite = false;
		qp->put_io_bufs++;
		epd_pool->count--;
	}
	spin_unlock(&epd_pool->lock);
	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}

/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public and private XRI pools for each HWQ,
 * then moves XRIs from lpfc_io_buf_list_put to the public pools. High and
 * low watermarks are also initialized.
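 * Each hardware queue is given xri_limit = io_xri_cnt / hwq_count XRIs; the
 * private pool high watermark starts at xri_limit / 2 and the low watermark
 * at XRI_BATCH. If any per-HWQ allocation fails, the pools already created
 * are freed again and cfg_xri_rebalancing is cleared.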
3138 **/ 3139 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3140 { 3141 u32 i, j; 3142 u32 hwq_count; 3143 u32 count_per_hwq; 3144 struct lpfc_io_buf *lpfc_ncmd; 3145 struct lpfc_io_buf *lpfc_ncmd_next; 3146 unsigned long iflag; 3147 struct lpfc_sli4_hdw_queue *qp; 3148 struct lpfc_multixri_pool *multixri_pool; 3149 struct lpfc_pbl_pool *pbl_pool; 3150 struct lpfc_pvt_pool *pvt_pool; 3151 3152 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3153 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3154 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3155 phba->sli4_hba.io_xri_cnt); 3156 3157 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3158 lpfc_create_expedite_pool(phba); 3159 3160 hwq_count = phba->cfg_hdw_queue; 3161 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3162 3163 for (i = 0; i < hwq_count; i++) { 3164 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3165 3166 if (!multixri_pool) { 3167 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3168 "1238 Failed to allocate memory for " 3169 "multixri_pool\n"); 3170 3171 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3172 lpfc_destroy_expedite_pool(phba); 3173 3174 j = 0; 3175 while (j < i) { 3176 qp = &phba->sli4_hba.hdwq[j]; 3177 kfree(qp->p_multixri_pool); 3178 j++; 3179 } 3180 phba->cfg_xri_rebalancing = 0; 3181 return; 3182 } 3183 3184 qp = &phba->sli4_hba.hdwq[i]; 3185 qp->p_multixri_pool = multixri_pool; 3186 3187 multixri_pool->xri_limit = count_per_hwq; 3188 multixri_pool->rrb_next_hwqid = i; 3189 3190 /* Deal with public free xri pool */ 3191 pbl_pool = &multixri_pool->pbl_pool; 3192 spin_lock_init(&pbl_pool->lock); 3193 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3194 spin_lock(&pbl_pool->lock); 3195 INIT_LIST_HEAD(&pbl_pool->list); 3196 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3197 &qp->lpfc_io_buf_list_put, list) { 3198 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3199 qp->put_io_bufs--; 3200 pbl_pool->count++; 3201 } 3202 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3203 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3204 pbl_pool->count, i); 3205 spin_unlock(&pbl_pool->lock); 3206 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3207 3208 /* Deal with private free xri pool */ 3209 pvt_pool = &multixri_pool->pvt_pool; 3210 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3211 pvt_pool->low_watermark = XRI_BATCH; 3212 spin_lock_init(&pvt_pool->lock); 3213 spin_lock_irqsave(&pvt_pool->lock, iflag); 3214 INIT_LIST_HEAD(&pvt_pool->list); 3215 pvt_pool->count = 0; 3216 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3217 } 3218 } 3219 3220 /** 3221 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3222 * @phba: pointer to lpfc hba data structure. 3223 * 3224 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
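 * Unless the driver is unloading, lpfc_sli_flush_io_rings() is called first
 * so that outstanding I/Os are flushed back to the buffer lists before the
 * pools are drained.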
3225 **/ 3226 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3227 { 3228 u32 i; 3229 u32 hwq_count; 3230 struct lpfc_io_buf *lpfc_ncmd; 3231 struct lpfc_io_buf *lpfc_ncmd_next; 3232 unsigned long iflag; 3233 struct lpfc_sli4_hdw_queue *qp; 3234 struct lpfc_multixri_pool *multixri_pool; 3235 struct lpfc_pbl_pool *pbl_pool; 3236 struct lpfc_pvt_pool *pvt_pool; 3237 3238 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3239 lpfc_destroy_expedite_pool(phba); 3240 3241 if (!(phba->pport->load_flag & FC_UNLOADING)) 3242 lpfc_sli_flush_io_rings(phba); 3243 3244 hwq_count = phba->cfg_hdw_queue; 3245 3246 for (i = 0; i < hwq_count; i++) { 3247 qp = &phba->sli4_hba.hdwq[i]; 3248 multixri_pool = qp->p_multixri_pool; 3249 if (!multixri_pool) 3250 continue; 3251 3252 qp->p_multixri_pool = NULL; 3253 3254 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3255 3256 /* Deal with public free xri pool */ 3257 pbl_pool = &multixri_pool->pbl_pool; 3258 spin_lock(&pbl_pool->lock); 3259 3260 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3261 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3262 pbl_pool->count, i); 3263 3264 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3265 &pbl_pool->list, list) { 3266 list_move_tail(&lpfc_ncmd->list, 3267 &qp->lpfc_io_buf_list_put); 3268 qp->put_io_bufs++; 3269 pbl_pool->count--; 3270 } 3271 3272 INIT_LIST_HEAD(&pbl_pool->list); 3273 pbl_pool->count = 0; 3274 3275 spin_unlock(&pbl_pool->lock); 3276 3277 /* Deal with private free xri pool */ 3278 pvt_pool = &multixri_pool->pvt_pool; 3279 spin_lock(&pvt_pool->lock); 3280 3281 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3282 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3283 pvt_pool->count, i); 3284 3285 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3286 &pvt_pool->list, list) { 3287 list_move_tail(&lpfc_ncmd->list, 3288 &qp->lpfc_io_buf_list_put); 3289 qp->put_io_bufs++; 3290 pvt_pool->count--; 3291 } 3292 3293 INIT_LIST_HEAD(&pvt_pool->list); 3294 pvt_pool->count = 0; 3295 3296 spin_unlock(&pvt_pool->lock); 3297 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3298 3299 kfree(multixri_pool); 3300 } 3301 } 3302 3303 /** 3304 * lpfc_online - Initialize and bring a HBA online 3305 * @phba: pointer to lpfc hba data structure. 3306 * 3307 * This routine initializes the HBA and brings a HBA online. During this 3308 * process, the management interface is blocked to prevent user space access 3309 * to the HBA interfering with the driver initialization. 3310 * 3311 * Return codes 3312 * 0 - successful 3313 * 1 - failed 3314 **/ 3315 int 3316 lpfc_online(struct lpfc_hba *phba) 3317 { 3318 struct lpfc_vport *vport; 3319 struct lpfc_vport **vports; 3320 int i, error = 0; 3321 bool vpis_cleared = false; 3322 3323 if (!phba) 3324 return 0; 3325 vport = phba->pport; 3326 3327 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3328 return 0; 3329 3330 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3331 "0458 Bring Adapter online\n"); 3332 3333 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3334 3335 if (phba->sli_rev == LPFC_SLI_REV4) { 3336 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3337 lpfc_unblock_mgmt_io(phba); 3338 return 1; 3339 } 3340 spin_lock_irq(&phba->hbalock); 3341 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3342 vpis_cleared = true; 3343 spin_unlock_irq(&phba->hbalock); 3344 3345 /* Reestablish the local initiator port. 3346 * The offline process destroyed the previous lport. 
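		 * (lpfc_offline() tears the lport down through
		 * lpfc_nvme_destroy_localport(); see below.)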
3347 */ 3348 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3349 !phba->nvmet_support) { 3350 error = lpfc_nvme_create_localport(phba->pport); 3351 if (error) 3352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3353 "6132 NVME restore reg failed " 3354 "on nvmei error x%x\n", error); 3355 } 3356 } else { 3357 lpfc_sli_queue_init(phba); 3358 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3359 lpfc_unblock_mgmt_io(phba); 3360 return 1; 3361 } 3362 } 3363 3364 vports = lpfc_create_vport_work_array(phba); 3365 if (vports != NULL) { 3366 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3367 struct Scsi_Host *shost; 3368 shost = lpfc_shost_from_vport(vports[i]); 3369 spin_lock_irq(shost->host_lock); 3370 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3371 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3372 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3373 if (phba->sli_rev == LPFC_SLI_REV4) { 3374 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3375 if ((vpis_cleared) && 3376 (vports[i]->port_type != 3377 LPFC_PHYSICAL_PORT)) 3378 vports[i]->vpi = 0; 3379 } 3380 spin_unlock_irq(shost->host_lock); 3381 } 3382 } 3383 lpfc_destroy_vport_work_array(phba, vports); 3384 3385 if (phba->cfg_xri_rebalancing) 3386 lpfc_create_multixri_pools(phba); 3387 3388 lpfc_cpuhp_add(phba); 3389 3390 lpfc_unblock_mgmt_io(phba); 3391 return 0; 3392 } 3393 3394 /** 3395 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3396 * @phba: pointer to lpfc hba data structure. 3397 * 3398 * This routine marks a HBA's management interface as not blocked. Once the 3399 * HBA's management interface is marked as not blocked, all the user space 3400 * access to the HBA, whether they are from sysfs interface or libdfc 3401 * interface will be allowed. The HBA is set to block the management interface 3402 * when the driver prepares the HBA interface for online or offline and then 3403 * set to unblock the management interface afterwards. 3404 **/ 3405 void 3406 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3407 { 3408 unsigned long iflag; 3409 3410 spin_lock_irqsave(&phba->hbalock, iflag); 3411 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3412 spin_unlock_irqrestore(&phba->hbalock, iflag); 3413 } 3414 3415 /** 3416 * lpfc_offline_prep - Prepare a HBA to be brought offline 3417 * @phba: pointer to lpfc hba data structure. 3418 * 3419 * This routine is invoked to prepare a HBA to be brought offline. It performs 3420 * unregistration login to all the nodes on all vports and flushes the mailbox 3421 * queue to make it ready to be brought offline. 
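 *
 * The mbx_action argument (LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT) is passed on
 * to lpfc_block_mgmt_io() and lpfc_sli_mbox_sys_shutdown(); with
 * LPFC_MBX_NO_WAIT the active mailbox command is not waited on (see
 * lpfc_block_mgmt_io() above). Typical caller sequence, as in
 * lpfc_handle_deferred_eratt():
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *	lpfc_offline(phba);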
3422 **/ 3423 void 3424 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3425 { 3426 struct lpfc_vport *vport = phba->pport; 3427 struct lpfc_nodelist *ndlp, *next_ndlp; 3428 struct lpfc_vport **vports; 3429 struct Scsi_Host *shost; 3430 int i; 3431 3432 if (vport->fc_flag & FC_OFFLINE_MODE) 3433 return; 3434 3435 lpfc_block_mgmt_io(phba, mbx_action); 3436 3437 lpfc_linkdown(phba); 3438 3439 /* Issue an unreg_login to all nodes on all vports */ 3440 vports = lpfc_create_vport_work_array(phba); 3441 if (vports != NULL) { 3442 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3443 if (vports[i]->load_flag & FC_UNLOADING) 3444 continue; 3445 shost = lpfc_shost_from_vport(vports[i]); 3446 spin_lock_irq(shost->host_lock); 3447 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3448 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3449 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3450 spin_unlock_irq(shost->host_lock); 3451 3452 shost = lpfc_shost_from_vport(vports[i]); 3453 list_for_each_entry_safe(ndlp, next_ndlp, 3454 &vports[i]->fc_nodes, 3455 nlp_listp) { 3456 if ((!NLP_CHK_NODE_ACT(ndlp)) || 3457 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 3458 /* Driver must assume RPI is invalid for 3459 * any unused or inactive node. 3460 */ 3461 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3462 continue; 3463 } 3464 3465 if (ndlp->nlp_type & NLP_FABRIC) { 3466 lpfc_disc_state_machine(vports[i], ndlp, 3467 NULL, NLP_EVT_DEVICE_RECOVERY); 3468 lpfc_disc_state_machine(vports[i], ndlp, 3469 NULL, NLP_EVT_DEVICE_RM); 3470 } 3471 spin_lock_irq(shost->host_lock); 3472 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3473 spin_unlock_irq(shost->host_lock); 3474 /* 3475 * Whenever an SLI4 port goes offline, free the 3476 * RPI. Get a new RPI when the adapter port 3477 * comes back online. 3478 */ 3479 if (phba->sli_rev == LPFC_SLI_REV4) { 3480 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3481 LOG_NODE | LOG_DISCOVERY, 3482 "0011 Free RPI x%x on " 3483 "ndlp:x%px did x%x " 3484 "usgmap:x%x\n", 3485 ndlp->nlp_rpi, ndlp, 3486 ndlp->nlp_DID, 3487 ndlp->nlp_usg_map); 3488 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3489 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3490 } 3491 lpfc_unreg_rpi(vports[i], ndlp); 3492 } 3493 } 3494 } 3495 lpfc_destroy_vport_work_array(phba, vports); 3496 3497 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3498 3499 if (phba->wq) 3500 flush_workqueue(phba->wq); 3501 } 3502 3503 /** 3504 * lpfc_offline - Bring a HBA offline 3505 * @phba: pointer to lpfc hba data structure. 3506 * 3507 * This routine actually brings a HBA offline. It stops all the timers 3508 * associated with the HBA, brings down the SLI layer, and eventually 3509 * marks the HBA as in offline state for the upper layer protocol. 3510 **/ 3511 void 3512 lpfc_offline(struct lpfc_hba *phba) 3513 { 3514 struct Scsi_Host *shost; 3515 struct lpfc_vport **vports; 3516 int i; 3517 3518 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3519 return; 3520 3521 /* stop port and all timers associated with this hba */ 3522 lpfc_stop_port(phba); 3523 3524 /* Tear down the local and target port registrations. The 3525 * nvme transports need to cleanup. 
3526 */ 3527 lpfc_nvmet_destroy_targetport(phba); 3528 lpfc_nvme_destroy_localport(phba->pport); 3529 3530 vports = lpfc_create_vport_work_array(phba); 3531 if (vports != NULL) 3532 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3533 lpfc_stop_vport_timers(vports[i]); 3534 lpfc_destroy_vport_work_array(phba, vports); 3535 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3536 "0460 Bring Adapter offline\n"); 3537 /* Bring down the SLI Layer and cleanup. The HBA is offline 3538 now. */ 3539 lpfc_sli_hba_down(phba); 3540 spin_lock_irq(&phba->hbalock); 3541 phba->work_ha = 0; 3542 spin_unlock_irq(&phba->hbalock); 3543 vports = lpfc_create_vport_work_array(phba); 3544 if (vports != NULL) 3545 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3546 shost = lpfc_shost_from_vport(vports[i]); 3547 spin_lock_irq(shost->host_lock); 3548 vports[i]->work_port_events = 0; 3549 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3550 spin_unlock_irq(shost->host_lock); 3551 } 3552 lpfc_destroy_vport_work_array(phba, vports); 3553 __lpfc_cpuhp_remove(phba); 3554 3555 if (phba->cfg_xri_rebalancing) 3556 lpfc_destroy_multixri_pools(phba); 3557 } 3558 3559 /** 3560 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3561 * @phba: pointer to lpfc hba data structure. 3562 * 3563 * This routine is to free all the SCSI buffers and IOCBs from the driver 3564 * list back to kernel. It is called from lpfc_pci_remove_one to free 3565 * the internal resources before the device is removed from the system. 3566 **/ 3567 static void 3568 lpfc_scsi_free(struct lpfc_hba *phba) 3569 { 3570 struct lpfc_io_buf *sb, *sb_next; 3571 3572 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3573 return; 3574 3575 spin_lock_irq(&phba->hbalock); 3576 3577 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3578 3579 spin_lock(&phba->scsi_buf_list_put_lock); 3580 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3581 list) { 3582 list_del(&sb->list); 3583 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3584 sb->dma_handle); 3585 kfree(sb); 3586 phba->total_scsi_bufs--; 3587 } 3588 spin_unlock(&phba->scsi_buf_list_put_lock); 3589 3590 spin_lock(&phba->scsi_buf_list_get_lock); 3591 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3592 list) { 3593 list_del(&sb->list); 3594 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3595 sb->dma_handle); 3596 kfree(sb); 3597 phba->total_scsi_bufs--; 3598 } 3599 spin_unlock(&phba->scsi_buf_list_get_lock); 3600 spin_unlock_irq(&phba->hbalock); 3601 } 3602 3603 /** 3604 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3605 * @phba: pointer to lpfc hba data structure. 3606 * 3607 * This routine is to free all the IO buffers and IOCBs from the driver 3608 * list back to kernel. It is called from lpfc_pci_remove_one to free 3609 * the internal resources before the device is removed from the system. 3610 **/ 3611 void 3612 lpfc_io_free(struct lpfc_hba *phba) 3613 { 3614 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3615 struct lpfc_sli4_hdw_queue *qp; 3616 int idx; 3617 3618 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3619 qp = &phba->sli4_hba.hdwq[idx]; 3620 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3621 spin_lock(&qp->io_buf_list_put_lock); 3622 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3623 &qp->lpfc_io_buf_list_put, 3624 list) { 3625 list_del(&lpfc_ncmd->list); 3626 qp->put_io_bufs--; 3627 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3628 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3629 if (phba->cfg_xpsgl && !phba->nvmet_support) 3630 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3631 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3632 kfree(lpfc_ncmd); 3633 qp->total_io_bufs--; 3634 } 3635 spin_unlock(&qp->io_buf_list_put_lock); 3636 3637 spin_lock(&qp->io_buf_list_get_lock); 3638 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3639 &qp->lpfc_io_buf_list_get, 3640 list) { 3641 list_del(&lpfc_ncmd->list); 3642 qp->get_io_bufs--; 3643 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3644 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3645 if (phba->cfg_xpsgl && !phba->nvmet_support) 3646 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 3647 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 3648 kfree(lpfc_ncmd); 3649 qp->total_io_bufs--; 3650 } 3651 spin_unlock(&qp->io_buf_list_get_lock); 3652 } 3653 } 3654 3655 /** 3656 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3657 * @phba: pointer to lpfc hba data structure. 3658 * 3659 * This routine first calculates the sizes of the current els and allocated 3660 * scsi sgl lists, and then goes through all sgls to updates the physical 3661 * XRIs assigned due to port function reset. During port initialization, the 3662 * current els and allocated scsi sgl lists are 0s. 3663 * 3664 * Return codes 3665 * 0 - successful (for now, it always returns 0) 3666 **/ 3667 int 3668 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3669 { 3670 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3671 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3672 LIST_HEAD(els_sgl_list); 3673 int rc; 3674 3675 /* 3676 * update on pci function's els xri-sgl list 3677 */ 3678 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3679 3680 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3681 /* els xri-sgl expanded */ 3682 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3683 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3684 "3157 ELS xri-sgl count increased from " 3685 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3686 els_xri_cnt); 3687 /* allocate the additional els sgls */ 3688 for (i = 0; i < xri_cnt; i++) { 3689 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3690 GFP_KERNEL); 3691 if (sglq_entry == NULL) { 3692 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3693 "2562 Failure to allocate an " 3694 "ELS sgl entry:%d\n", i); 3695 rc = -ENOMEM; 3696 goto out_free_mem; 3697 } 3698 sglq_entry->buff_type = GEN_BUFF_TYPE; 3699 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3700 &sglq_entry->phys); 3701 if (sglq_entry->virt == NULL) { 3702 kfree(sglq_entry); 3703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3704 "2563 Failure to allocate an " 3705 "ELS mbuf:%d\n", i); 3706 rc = -ENOMEM; 3707 goto out_free_mem; 3708 } 3709 sglq_entry->sgl = sglq_entry->virt; 3710 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3711 sglq_entry->state = SGL_FREED; 3712 list_add_tail(&sglq_entry->list, &els_sgl_list); 3713 } 3714 spin_lock_irq(&phba->hbalock); 3715 spin_lock(&phba->sli4_hba.sgl_list_lock); 3716 list_splice_init(&els_sgl_list, 3717 &phba->sli4_hba.lpfc_els_sgl_list); 3718 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3719 spin_unlock_irq(&phba->hbalock); 3720 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3721 /* els xri-sgl shrinked */ 3722 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 
3723 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3724 "3158 ELS xri-sgl count decreased from " 3725 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3726 els_xri_cnt); 3727 spin_lock_irq(&phba->hbalock); 3728 spin_lock(&phba->sli4_hba.sgl_list_lock); 3729 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 3730 &els_sgl_list); 3731 /* release extra els sgls from list */ 3732 for (i = 0; i < xri_cnt; i++) { 3733 list_remove_head(&els_sgl_list, 3734 sglq_entry, struct lpfc_sglq, list); 3735 if (sglq_entry) { 3736 __lpfc_mbuf_free(phba, sglq_entry->virt, 3737 sglq_entry->phys); 3738 kfree(sglq_entry); 3739 } 3740 } 3741 list_splice_init(&els_sgl_list, 3742 &phba->sli4_hba.lpfc_els_sgl_list); 3743 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3744 spin_unlock_irq(&phba->hbalock); 3745 } else 3746 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3747 "3163 ELS xri-sgl count unchanged: %d\n", 3748 els_xri_cnt); 3749 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3750 3751 /* update xris to els sgls on the list */ 3752 sglq_entry = NULL; 3753 sglq_entry_next = NULL; 3754 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3755 &phba->sli4_hba.lpfc_els_sgl_list, list) { 3756 lxri = lpfc_sli4_next_xritag(phba); 3757 if (lxri == NO_XRI) { 3758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3759 "2400 Failed to allocate xri for " 3760 "ELS sgl\n"); 3761 rc = -ENOMEM; 3762 goto out_free_mem; 3763 } 3764 sglq_entry->sli4_lxritag = lxri; 3765 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3766 } 3767 return 0; 3768 3769 out_free_mem: 3770 lpfc_free_els_sgl_list(phba); 3771 return rc; 3772 } 3773 3774 /** 3775 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping 3776 * @phba: pointer to lpfc hba data structure. 3777 * 3778 * This routine first calculates the sizes of the current ELS and allocated 3779 * NVMET sgl lists, and then goes through all sgls to update the physical 3780 * XRIs assigned due to port function reset. During port initialization, the 3781 * current ELS and allocated NVMET sgl counts are 0.
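 *
 * For reference, the NVMET XRI budget computed below is simply everything
 * left after the ELS reservation (a worked example; the numbers are
 * illustrative only, not taken from any particular adapter):
 *
 *   nvmet_xri_cnt = max_cfg_param.max_xri - els_xri_cnt;
 *   e.g. max_xri = 6144 and els_xri_cnt = 256 give nvmet_xri_cnt = 5888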
3782 * 3783 * Return codes 3784 * 0 - successful (for now, it always returns 0) 3785 **/ 3786 int 3787 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3788 { 3789 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3790 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3791 uint16_t nvmet_xri_cnt; 3792 LIST_HEAD(nvmet_sgl_list); 3793 int rc; 3794 3795 /* 3796 * update on pci function's nvmet xri-sgl list 3797 */ 3798 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3799 3800 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3801 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3802 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3803 /* els xri-sgl expanded */ 3804 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3805 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3806 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3807 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3808 /* allocate the additional nvmet sgls */ 3809 for (i = 0; i < xri_cnt; i++) { 3810 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3811 GFP_KERNEL); 3812 if (sglq_entry == NULL) { 3813 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3814 "6303 Failure to allocate an " 3815 "NVMET sgl entry:%d\n", i); 3816 rc = -ENOMEM; 3817 goto out_free_mem; 3818 } 3819 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3820 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3821 &sglq_entry->phys); 3822 if (sglq_entry->virt == NULL) { 3823 kfree(sglq_entry); 3824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3825 "6304 Failure to allocate an " 3826 "NVMET buf:%d\n", i); 3827 rc = -ENOMEM; 3828 goto out_free_mem; 3829 } 3830 sglq_entry->sgl = sglq_entry->virt; 3831 memset(sglq_entry->sgl, 0, 3832 phba->cfg_sg_dma_buf_size); 3833 sglq_entry->state = SGL_FREED; 3834 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3835 } 3836 spin_lock_irq(&phba->hbalock); 3837 spin_lock(&phba->sli4_hba.sgl_list_lock); 3838 list_splice_init(&nvmet_sgl_list, 3839 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3840 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3841 spin_unlock_irq(&phba->hbalock); 3842 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3843 /* nvmet xri-sgl shrunk */ 3844 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3845 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3846 "6305 NVMET xri-sgl count decreased from " 3847 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3848 nvmet_xri_cnt); 3849 spin_lock_irq(&phba->hbalock); 3850 spin_lock(&phba->sli4_hba.sgl_list_lock); 3851 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3852 &nvmet_sgl_list); 3853 /* release extra nvmet sgls from list */ 3854 for (i = 0; i < xri_cnt; i++) { 3855 list_remove_head(&nvmet_sgl_list, 3856 sglq_entry, struct lpfc_sglq, list); 3857 if (sglq_entry) { 3858 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3859 sglq_entry->phys); 3860 kfree(sglq_entry); 3861 } 3862 } 3863 list_splice_init(&nvmet_sgl_list, 3864 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3865 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3866 spin_unlock_irq(&phba->hbalock); 3867 } else 3868 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3869 "6306 NVMET xri-sgl count unchanged: %d\n", 3870 nvmet_xri_cnt); 3871 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3872 3873 /* update xris to nvmet sgls on the list */ 3874 sglq_entry = NULL; 3875 sglq_entry_next = NULL; 3876 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3877 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3878 lxri = lpfc_sli4_next_xritag(phba); 3879 if (lxri == NO_XRI) { 3880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3881 "6307 Failed 
to allocate xri for " 3882 "NVMET sgl\n"); 3883 rc = -ENOMEM; 3884 goto out_free_mem; 3885 } 3886 sglq_entry->sli4_lxritag = lxri; 3887 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3888 } 3889 return 0; 3890 3891 out_free_mem: 3892 lpfc_free_nvmet_sgl_list(phba); 3893 return rc; 3894 } 3895 3896 int 3897 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 3898 { 3899 LIST_HEAD(blist); 3900 struct lpfc_sli4_hdw_queue *qp; 3901 struct lpfc_io_buf *lpfc_cmd; 3902 struct lpfc_io_buf *iobufp, *prev_iobufp; 3903 int idx, cnt, xri, inserted; 3904 3905 cnt = 0; 3906 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3907 qp = &phba->sli4_hba.hdwq[idx]; 3908 spin_lock_irq(&qp->io_buf_list_get_lock); 3909 spin_lock(&qp->io_buf_list_put_lock); 3910 3911 /* Take everything off the get and put lists */ 3912 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 3913 list_splice(&qp->lpfc_io_buf_list_put, &blist); 3914 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 3915 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 3916 cnt += qp->get_io_bufs + qp->put_io_bufs; 3917 qp->get_io_bufs = 0; 3918 qp->put_io_bufs = 0; 3919 qp->total_io_bufs = 0; 3920 spin_unlock(&qp->io_buf_list_put_lock); 3921 spin_unlock_irq(&qp->io_buf_list_get_lock); 3922 } 3923 3924 /* 3925 * Take IO buffers off blist and put on cbuf sorted by XRI. 3926 * This is because POST_SGL takes a sequential range of XRIs 3927 * to post to the firmware. 3928 */ 3929 for (idx = 0; idx < cnt; idx++) { 3930 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 3931 if (!lpfc_cmd) 3932 return cnt; 3933 if (idx == 0) { 3934 list_add_tail(&lpfc_cmd->list, cbuf); 3935 continue; 3936 } 3937 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 3938 inserted = 0; 3939 prev_iobufp = NULL; 3940 list_for_each_entry(iobufp, cbuf, list) { 3941 if (xri < iobufp->cur_iocbq.sli4_xritag) { 3942 if (prev_iobufp) 3943 list_add(&lpfc_cmd->list, 3944 &prev_iobufp->list); 3945 else 3946 list_add(&lpfc_cmd->list, cbuf); 3947 inserted = 1; 3948 break; 3949 } 3950 prev_iobufp = iobufp; 3951 } 3952 if (!inserted) 3953 list_add_tail(&lpfc_cmd->list, cbuf); 3954 } 3955 return cnt; 3956 } 3957 3958 int 3959 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 3960 { 3961 struct lpfc_sli4_hdw_queue *qp; 3962 struct lpfc_io_buf *lpfc_cmd; 3963 int idx, cnt; 3964 3965 qp = phba->sli4_hba.hdwq; 3966 cnt = 0; 3967 while (!list_empty(cbuf)) { 3968 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3969 list_remove_head(cbuf, lpfc_cmd, 3970 struct lpfc_io_buf, list); 3971 if (!lpfc_cmd) 3972 return cnt; 3973 cnt++; 3974 qp = &phba->sli4_hba.hdwq[idx]; 3975 lpfc_cmd->hdwq_no = idx; 3976 lpfc_cmd->hdwq = qp; 3977 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 3978 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 3979 spin_lock(&qp->io_buf_list_put_lock); 3980 list_add_tail(&lpfc_cmd->list, 3981 &qp->lpfc_io_buf_list_put); 3982 qp->put_io_bufs++; 3983 qp->total_io_bufs++; 3984 spin_unlock(&qp->io_buf_list_put_lock); 3985 } 3986 } 3987 return cnt; 3988 } 3989 3990 /** 3991 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 3992 * @phba: pointer to lpfc hba data structure. 3993 * 3994 * This routine first calculates the sizes of the current els and allocated 3995 * scsi sgl lists, and then goes through all sgls to updates the physical 3996 * XRIs assigned due to port function reset. During port initialization, the 3997 * current els and allocated scsi sgl lists are 0s. 
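 *
 * A sketch of the flow implemented below (all names taken from this file;
 * error handling elided):
 *
 *   cnt = lpfc_io_buf_flush(phba, &io_sgl_list);   - drain every hdwq, sorted by XRI
 *   ... free buffers beyond io_xri_max, assign fresh XRIs to the rest ...
 *   lpfc_io_buf_replenish(phba, &io_sgl_list);     - redistribute round-robin to hdwqs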
3998 * 3999 * Return codes 4000 * 0 - successful (for now, it always returns 0) 4001 **/ 4002 int 4003 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) 4004 { 4005 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 4006 uint16_t i, lxri, els_xri_cnt; 4007 uint16_t io_xri_cnt, io_xri_max; 4008 LIST_HEAD(io_sgl_list); 4009 int rc, cnt; 4010 4011 /* 4012 * update on pci function's allocated nvme xri-sgl list 4013 */ 4014 4015 /* maximum number of xris available for nvme buffers */ 4016 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4017 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4018 phba->sli4_hba.io_xri_max = io_xri_max; 4019 4020 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4021 "6074 Current allocated XRI sgl count:%d, " 4022 "maximum XRI count:%d\n", 4023 phba->sli4_hba.io_xri_cnt, 4024 phba->sli4_hba.io_xri_max); 4025 4026 cnt = lpfc_io_buf_flush(phba, &io_sgl_list); 4027 4028 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { 4029 /* max nvme xri shrunk below the allocated nvme buffers */ 4030 io_xri_cnt = phba->sli4_hba.io_xri_cnt - 4031 phba->sli4_hba.io_xri_max; 4032 /* release the extra allocated nvme buffers */ 4033 for (i = 0; i < io_xri_cnt; i++) { 4034 list_remove_head(&io_sgl_list, lpfc_ncmd, 4035 struct lpfc_io_buf, list); 4036 if (lpfc_ncmd) { 4037 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4038 lpfc_ncmd->data, 4039 lpfc_ncmd->dma_handle); 4040 kfree(lpfc_ncmd); 4041 } 4042 } 4043 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; 4044 } 4045 4046 /* update xris associated with remaining allocated nvme buffers */ 4047 lpfc_ncmd = NULL; 4048 lpfc_ncmd_next = NULL; 4049 phba->sli4_hba.io_xri_cnt = cnt; 4050 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 4051 &io_sgl_list, list) { 4052 lxri = lpfc_sli4_next_xritag(phba); 4053 if (lxri == NO_XRI) { 4054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4055 "6075 Failed to allocate xri for " 4056 "nvme buffer\n"); 4057 rc = -ENOMEM; 4058 goto out_free_mem; 4059 } 4060 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 4061 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4062 } 4063 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); 4064 return 0; 4065 4066 out_free_mem: 4067 lpfc_io_free(phba); 4068 return rc; 4069 } 4070 4071 /** 4072 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec 4073 * @phba: pointer to lpfc hba data structure. 4074 * @num_to_alloc: The requested number of buffers to allocate. 4075 * 4076 * This routine allocates IO buffers for a device with the SLI-4 interface 4077 * spec; each buffer contains all the information needed to initiate an I/O. 4078 * After allocating up to @num_to_alloc IO buffers and putting 4079 * them on a list, it posts them to the port using an SGL block post. 4080 * 4081 * Return codes: 4082 * int - number of IO buffers that were allocated and posted. 4083 * 0 = failure, less than num_to_alloc is a partial failure. 4084 **/ 4085 int 4086 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4087 { 4088 struct lpfc_io_buf *lpfc_ncmd; 4089 struct lpfc_iocbq *pwqeq; 4090 uint16_t iotag, lxri = 0; 4091 int bcnt, num_posted; 4092 LIST_HEAD(prep_nblist); 4093 LIST_HEAD(post_nblist); 4094 LIST_HEAD(nvme_nblist); 4095 4096 phba->sli4_hba.io_xri_cnt = 0; 4097 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4098 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); 4099 if (!lpfc_ncmd) 4100 break; 4101 /* 4102 * Get memory from the pci pool to map the virt space to 4103 * pci bus space for an I/O.
The DMA buffer includes the 4104 * number of SGE's necessary to support the sg_tablesize. 4105 */ 4106 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4107 GFP_KERNEL, 4108 &lpfc_ncmd->dma_handle); 4109 if (!lpfc_ncmd->data) { 4110 kfree(lpfc_ncmd); 4111 break; 4112 } 4113 4114 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4115 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4116 } else { 4117 /* 4118 * 4K Page alignment is CRITICAL to BlockGuard, double 4119 * check to be sure. 4120 */ 4121 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4122 (((unsigned long)(lpfc_ncmd->data) & 4123 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4124 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4125 "3369 Memory alignment err: " 4126 "addr=%lx\n", 4127 (unsigned long)lpfc_ncmd->data); 4128 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4129 lpfc_ncmd->data, 4130 lpfc_ncmd->dma_handle); 4131 kfree(lpfc_ncmd); 4132 break; 4133 } 4134 } 4135 4136 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4137 4138 lxri = lpfc_sli4_next_xritag(phba); 4139 if (lxri == NO_XRI) { 4140 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4141 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4142 kfree(lpfc_ncmd); 4143 break; 4144 } 4145 pwqeq = &lpfc_ncmd->cur_iocbq; 4146 4147 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4148 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4149 if (iotag == 0) { 4150 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4151 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4152 kfree(lpfc_ncmd); 4153 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 4154 "6121 Failed to allocate IOTAG for" 4155 " XRI:0x%x\n", lxri); 4156 lpfc_sli4_free_xri(phba, lxri); 4157 break; 4158 } 4159 pwqeq->sli4_lxritag = lxri; 4160 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4161 pwqeq->context1 = lpfc_ncmd; 4162 4163 /* Initialize local short-hand pointers. 
*/ 4164 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4165 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4166 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4167 spin_lock_init(&lpfc_ncmd->buf_lock); 4168 4169 /* add the nvme buffer to a post list */ 4170 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4171 phba->sli4_hba.io_xri_cnt++; 4172 } 4173 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4174 "6114 Allocate %d out of %d requested new NVME " 4175 "buffers\n", bcnt, num_to_alloc); 4176 4177 /* post the list of nvme buffer sgls to port if available */ 4178 if (!list_empty(&post_nblist)) 4179 num_posted = lpfc_sli4_post_io_sgl_list( 4180 phba, &post_nblist, bcnt); 4181 else 4182 num_posted = 0; 4183 4184 return num_posted; 4185 } 4186 4187 static uint64_t 4188 lpfc_get_wwpn(struct lpfc_hba *phba) 4189 { 4190 uint64_t wwn; 4191 int rc; 4192 LPFC_MBOXQ_t *mboxq; 4193 MAILBOX_t *mb; 4194 4195 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4196 GFP_KERNEL); 4197 if (!mboxq) 4198 return (uint64_t)-1; 4199 4200 /* First get WWN of HBA instance */ 4201 lpfc_read_nv(phba, mboxq); 4202 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4203 if (rc != MBX_SUCCESS) { 4204 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4205 "6019 Mailbox failed , mbxCmd x%x " 4206 "READ_NV, mbxStatus x%x\n", 4207 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4208 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4209 mempool_free(mboxq, phba->mbox_mem_pool); 4210 return (uint64_t) -1; 4211 } 4212 mb = &mboxq->u.mb; 4213 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4214 /* wwn is WWPN of HBA instance */ 4215 mempool_free(mboxq, phba->mbox_mem_pool); 4216 if (phba->sli_rev == LPFC_SLI_REV4) 4217 return be64_to_cpu(wwn); 4218 else 4219 return rol64(wwn, 32); 4220 } 4221 4222 /** 4223 * lpfc_create_port - Create an FC port 4224 * @phba: pointer to lpfc hba data structure. 4225 * @instance: a unique integer ID to this FC port. 4226 * @dev: pointer to the device data structure. 4227 * 4228 * This routine creates a FC port for the upper layer protocol. The FC port 4229 * can be created on top of either a physical port or a virtual port provided 4230 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4231 * and associates the FC port created before adding the shost into the SCSI 4232 * layer. 4233 * 4234 * Return codes 4235 * @vport - pointer to the virtual N_Port data structure. 4236 * NULL - port create failed. 
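 *
 * A hedged usage sketch (this mirrors how the physical port is created at
 * PCI probe time elsewhere in this driver; error handling is abbreviated and
 * the instance value is illustrative):
 *
 *   vport = lpfc_create_port(phba, lpfc_get_instance(), &phba->pcidev->dev);
 *   if (!vport)
 *       return -ENOMEM;
 *   phba->pport = vport;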
4237 **/ 4238 struct lpfc_vport * 4239 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4240 { 4241 struct lpfc_vport *vport; 4242 struct Scsi_Host *shost = NULL; 4243 int error = 0; 4244 int i; 4245 uint64_t wwn; 4246 bool use_no_reset_hba = false; 4247 int rc; 4248 4249 if (lpfc_no_hba_reset_cnt) { 4250 if (phba->sli_rev < LPFC_SLI_REV4 && 4251 dev == &phba->pcidev->dev) { 4252 /* Reset the port first */ 4253 lpfc_sli_brdrestart(phba); 4254 rc = lpfc_sli_chipset_init(phba); 4255 if (rc) 4256 return NULL; 4257 } 4258 wwn = lpfc_get_wwpn(phba); 4259 } 4260 4261 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4262 if (wwn == lpfc_no_hba_reset[i]) { 4263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4264 "6020 Setting use_no_reset port=%llx\n", 4265 wwn); 4266 use_no_reset_hba = true; 4267 break; 4268 } 4269 } 4270 4271 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4272 if (dev != &phba->pcidev->dev) { 4273 shost = scsi_host_alloc(&lpfc_vport_template, 4274 sizeof(struct lpfc_vport)); 4275 } else { 4276 if (!use_no_reset_hba) 4277 shost = scsi_host_alloc(&lpfc_template, 4278 sizeof(struct lpfc_vport)); 4279 else 4280 shost = scsi_host_alloc(&lpfc_template_no_hr, 4281 sizeof(struct lpfc_vport)); 4282 } 4283 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 4284 shost = scsi_host_alloc(&lpfc_template_nvme, 4285 sizeof(struct lpfc_vport)); 4286 } 4287 if (!shost) 4288 goto out; 4289 4290 vport = (struct lpfc_vport *) shost->hostdata; 4291 vport->phba = phba; 4292 vport->load_flag |= FC_LOADING; 4293 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4294 vport->fc_rscn_flush = 0; 4295 lpfc_get_vport_cfgparam(vport); 4296 4297 /* Adjust value in vport */ 4298 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4299 4300 shost->unique_id = instance; 4301 shost->max_id = LPFC_MAX_TARGET; 4302 shost->max_lun = vport->cfg_max_luns; 4303 shost->this_id = -1; 4304 shost->max_cmd_len = 16; 4305 4306 if (phba->sli_rev == LPFC_SLI_REV4) { 4307 if (!phba->cfg_fcp_mq_threshold || 4308 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4309 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4310 4311 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4312 phba->cfg_fcp_mq_threshold); 4313 4314 shost->dma_boundary = 4315 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4316 4317 if (phba->cfg_xpsgl && !phba->nvmet_support) 4318 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; 4319 else 4320 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4321 } else 4322 /* SLI-3 has a limited number of hardware queues (3), 4323 * thus there is only one for FCP processing. 4324 */ 4325 shost->nr_hw_queues = 1; 4326 4327 /* 4328 * Set initial can_queue value since 0 is no longer supported and 4329 * scsi_add_host will fail. This will be adjusted later based on the 4330 * max xri value determined in hba setup. 4331 */ 4332 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4333 if (dev != &phba->pcidev->dev) { 4334 shost->transportt = lpfc_vport_transport_template; 4335 vport->port_type = LPFC_NPIV_PORT; 4336 } else { 4337 shost->transportt = lpfc_transport_template; 4338 vport->port_type = LPFC_PHYSICAL_PORT; 4339 } 4340 4341 /* Initialize all internally managed lists. 
*/ 4342 INIT_LIST_HEAD(&vport->fc_nodes); 4343 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4344 spin_lock_init(&vport->work_port_lock); 4345 4346 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4347 4348 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4349 4350 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4351 4352 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4353 lpfc_setup_bg(phba, shost); 4354 4355 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4356 if (error) 4357 goto out_put_shost; 4358 4359 spin_lock_irq(&phba->port_list_lock); 4360 list_add_tail(&vport->listentry, &phba->port_list); 4361 spin_unlock_irq(&phba->port_list_lock); 4362 return vport; 4363 4364 out_put_shost: 4365 scsi_host_put(shost); 4366 out: 4367 return NULL; 4368 } 4369 4370 /** 4371 * destroy_port - destroy an FC port 4372 * @vport: pointer to an lpfc virtual N_Port data structure. 4373 * 4374 * This routine destroys a FC port from the upper layer protocol. All the 4375 * resources associated with the port are released. 4376 **/ 4377 void 4378 destroy_port(struct lpfc_vport *vport) 4379 { 4380 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4381 struct lpfc_hba *phba = vport->phba; 4382 4383 lpfc_debugfs_terminate(vport); 4384 fc_remove_host(shost); 4385 scsi_remove_host(shost); 4386 4387 spin_lock_irq(&phba->port_list_lock); 4388 list_del_init(&vport->listentry); 4389 spin_unlock_irq(&phba->port_list_lock); 4390 4391 lpfc_cleanup(vport); 4392 return; 4393 } 4394 4395 /** 4396 * lpfc_get_instance - Get a unique integer ID 4397 * 4398 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4399 * uses the kernel idr facility to perform the task. 4400 * 4401 * Return codes: 4402 * instance - a unique integer ID allocated as the new instance. 4403 * -1 - lpfc get instance failed. 4404 **/ 4405 int 4406 lpfc_get_instance(void) 4407 { 4408 int ret; 4409 4410 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4411 return ret < 0 ? -1 : ret; 4412 } 4413 4414 /** 4415 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4416 * @shost: pointer to SCSI host data structure. 4417 * @time: elapsed time of the scan in jiffies. 4418 * 4419 * This routine is called by the SCSI layer with a SCSI host to determine 4420 * whether the scan host is finished. 4421 * 4422 * Note: there is no scan_start function as adapter initialization will have 4423 * asynchronously kicked off the link initialization. 4424 * 4425 * Return codes 4426 * 0 - SCSI host scan is not over yet. 4427 * 1 - SCSI host scan is over. 4428 **/ 4429 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4430 { 4431 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4432 struct lpfc_hba *phba = vport->phba; 4433 int stat = 0; 4434 4435 spin_lock_irq(shost->host_lock); 4436 4437 if (vport->load_flag & FC_UNLOADING) { 4438 stat = 1; 4439 goto finished; 4440 } 4441 if (time >= msecs_to_jiffies(30 * 1000)) { 4442 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4443 "0461 Scanning longer than 30 " 4444 "seconds. Continuing initialization\n"); 4445 stat = 1; 4446 goto finished; 4447 } 4448 if (time >= msecs_to_jiffies(15 * 1000) && 4449 phba->link_state <= LPFC_LINK_DOWN) { 4450 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4451 "0465 Link down longer than 15 " 4452 "seconds. 
Continuing initialization\n"); 4453 stat = 1; 4454 goto finished; 4455 } 4456 4457 if (vport->port_state != LPFC_VPORT_READY) 4458 goto finished; 4459 if (vport->num_disc_nodes || vport->fc_prli_sent) 4460 goto finished; 4461 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4462 goto finished; 4463 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4464 goto finished; 4465 4466 stat = 1; 4467 4468 finished: 4469 spin_unlock_irq(shost->host_lock); 4470 return stat; 4471 } 4472 4473 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4474 { 4475 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4476 struct lpfc_hba *phba = vport->phba; 4477 4478 fc_host_supported_speeds(shost) = 0; 4479 if (phba->lmt & LMT_128Gb) 4480 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4481 if (phba->lmt & LMT_64Gb) 4482 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4483 if (phba->lmt & LMT_32Gb) 4484 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4485 if (phba->lmt & LMT_16Gb) 4486 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4487 if (phba->lmt & LMT_10Gb) 4488 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4489 if (phba->lmt & LMT_8Gb) 4490 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4491 if (phba->lmt & LMT_4Gb) 4492 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4493 if (phba->lmt & LMT_2Gb) 4494 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4495 if (phba->lmt & LMT_1Gb) 4496 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4497 } 4498 4499 /** 4500 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4501 * @shost: pointer to SCSI host data structure. 4502 * 4503 * This routine initializes a given SCSI host attributes on a FC port. The 4504 * SCSI host can be either on top of a physical port or a virtual port. 4505 **/ 4506 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4507 { 4508 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4509 struct lpfc_hba *phba = vport->phba; 4510 /* 4511 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4512 */ 4513 4514 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4515 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4516 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4517 4518 memset(fc_host_supported_fc4s(shost), 0, 4519 sizeof(fc_host_supported_fc4s(shost))); 4520 fc_host_supported_fc4s(shost)[2] = 1; 4521 fc_host_supported_fc4s(shost)[7] = 1; 4522 4523 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4524 sizeof fc_host_symbolic_name(shost)); 4525 4526 lpfc_host_supported_speeds_set(shost); 4527 4528 fc_host_maxframe_size(shost) = 4529 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4530 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4531 4532 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4533 4534 /* This value is also unchanging */ 4535 memset(fc_host_active_fc4s(shost), 0, 4536 sizeof(fc_host_active_fc4s(shost))); 4537 fc_host_active_fc4s(shost)[2] = 1; 4538 fc_host_active_fc4s(shost)[7] = 1; 4539 4540 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4541 spin_lock_irq(shost->host_lock); 4542 vport->load_flag &= ~FC_LOADING; 4543 spin_unlock_irq(shost->host_lock); 4544 } 4545 4546 /** 4547 * lpfc_stop_port_s3 - Stop SLI3 device port 4548 * @phba: pointer to lpfc hba data structure. 
4549 * 4550 * This routine is invoked to stop an SLI3 device port. It stops the device 4551 * from generating interrupts and stops the device driver's timers for the 4552 * device. 4553 **/ 4554 static void 4555 lpfc_stop_port_s3(struct lpfc_hba *phba) 4556 { 4557 /* Clear all interrupt enable conditions */ 4558 writel(0, phba->HCregaddr); 4559 readl(phba->HCregaddr); /* flush */ 4560 /* Clear all pending interrupts */ 4561 writel(0xffffffff, phba->HAregaddr); 4562 readl(phba->HAregaddr); /* flush */ 4563 4564 /* Reset some HBA SLI setup states */ 4565 lpfc_stop_hba_timers(phba); 4566 phba->pport->work_port_events = 0; 4567 } 4568 4569 /** 4570 * lpfc_stop_port_s4 - Stop SLI4 device port 4571 * @phba: pointer to lpfc hba data structure. 4572 * 4573 * This routine is invoked to stop an SLI4 device port. It stops the device 4574 * from generating interrupts and stops the device driver's timers for the 4575 * device. 4576 **/ 4577 static void 4578 lpfc_stop_port_s4(struct lpfc_hba *phba) 4579 { 4580 /* Reset some HBA SLI4 setup states */ 4581 lpfc_stop_hba_timers(phba); 4582 if (phba->pport) 4583 phba->pport->work_port_events = 0; 4584 phba->sli4_hba.intr_enable = 0; 4585 } 4586 4587 /** 4588 * lpfc_stop_port - Wrapper function for stopping hba port 4589 * @phba: Pointer to HBA context object. 4590 * 4591 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via 4592 * the API jump table function pointer in the lpfc_hba struct. 4593 **/ 4594 void 4595 lpfc_stop_port(struct lpfc_hba *phba) 4596 { 4597 phba->lpfc_stop_port(phba); 4598 4599 if (phba->wq) 4600 flush_workqueue(phba->wq); 4601 } 4602 4603 /** 4604 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4605 * @phba: Pointer to hba for which this call is being executed. 4606 * 4607 * This routine starts the timer waiting for the FCF rediscovery to complete. 4608 **/ 4609 void 4610 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4611 { 4612 unsigned long fcf_redisc_wait_tmo = 4613 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4614 /* Start fcf rediscovery wait period timer */ 4615 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4616 spin_lock_irq(&phba->hbalock); 4617 /* Allow action to new fcf asynchronous event */ 4618 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4619 /* Mark the FCF rediscovery pending state */ 4620 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4621 spin_unlock_irq(&phba->hbalock); 4622 } 4623 4624 /** 4625 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 4626 * @t: pointer to the FCF rediscovery wait timer embedded in the lpfc_hba structure. 4627 * 4628 * This routine is invoked when the wait for FCF table rediscovery has 4629 * timed out. If new FCF record(s) have been discovered during the 4630 * wait period, a new FCF event shall be added to the FCoE async event 4631 * list, and the worker thread shall be woken up for processing from the 4632 * worker thread context.
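 *
 * Pairing with lpfc_fcf_redisc_wait_start_timer() above, a sketch of the
 * life cycle (names from this file):
 *
 *   lpfc_fcf_redisc_wait_start_timer(phba);  - set FCF_REDISC_PEND, arm redisc_wait
 *   ... LPFC_FCF_REDISCOVER_WAIT_TMO elapses ...
 *   this handler clears FCF_REDISC_PEND, sets FCF_REDISC_EVT and wakes the
 *   worker thread via lpfc_worker_wake_up(phba)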
4633 **/ 4634 static void 4635 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4636 { 4637 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4638 4639 /* Don't send FCF rediscovery event if timer cancelled */ 4640 spin_lock_irq(&phba->hbalock); 4641 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4642 spin_unlock_irq(&phba->hbalock); 4643 return; 4644 } 4645 /* Clear FCF rediscovery timer pending flag */ 4646 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4647 /* FCF rediscovery event to worker thread */ 4648 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4649 spin_unlock_irq(&phba->hbalock); 4650 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4651 "2776 FCF rediscover quiescent timer expired\n"); 4652 /* wake up worker thread */ 4653 lpfc_worker_wake_up(phba); 4654 } 4655 4656 /** 4657 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4658 * @phba: pointer to lpfc hba data structure. 4659 * @acqe_link: pointer to the async link completion queue entry. 4660 * 4661 * This routine is to parse the SLI4 link-attention link fault code. 4662 **/ 4663 static void 4664 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4665 struct lpfc_acqe_link *acqe_link) 4666 { 4667 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4668 case LPFC_ASYNC_LINK_FAULT_NONE: 4669 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4670 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4671 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4672 break; 4673 default: 4674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4675 "0398 Unknown link fault code: x%x\n", 4676 bf_get(lpfc_acqe_link_fault, acqe_link)); 4677 break; 4678 } 4679 } 4680 4681 /** 4682 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4683 * @phba: pointer to lpfc hba data structure. 4684 * @acqe_link: pointer to the async link completion queue entry. 4685 * 4686 * This routine is to parse the SLI4 link attention type and translate it 4687 * into the base driver's link attention type coding. 4688 * 4689 * Return: Link attention type in terms of base driver's coding. 4690 **/ 4691 static uint8_t 4692 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4693 struct lpfc_acqe_link *acqe_link) 4694 { 4695 uint8_t att_type; 4696 4697 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4698 case LPFC_ASYNC_LINK_STATUS_DOWN: 4699 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4700 att_type = LPFC_ATT_LINK_DOWN; 4701 break; 4702 case LPFC_ASYNC_LINK_STATUS_UP: 4703 /* Ignore physical link up events - wait for logical link up */ 4704 att_type = LPFC_ATT_RESERVED; 4705 break; 4706 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4707 att_type = LPFC_ATT_LINK_UP; 4708 break; 4709 default: 4710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4711 "0399 Invalid link attention type: x%x\n", 4712 bf_get(lpfc_acqe_link_status, acqe_link)); 4713 att_type = LPFC_ATT_RESERVED; 4714 break; 4715 } 4716 return att_type; 4717 } 4718 4719 /** 4720 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4721 * @phba: pointer to lpfc hba data structure. 4722 * 4723 * This routine is to get an SLI3 FC port's link speed in Mbps. 4724 * 4725 * Return: link speed in terms of Mbps. 
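 *
 * For example, per the mapping in the switch below: LPFC_LINK_SPEED_8GHZ is
 * reported as 8000 Mbps and LPFC_LINK_SPEED_16GHZ as 16000 Mbps; an unknown
 * code, or a link that is not up, is reported as 0.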
4726 **/ 4727 uint32_t 4728 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4729 { 4730 uint32_t link_speed; 4731 4732 if (!lpfc_is_link_up(phba)) 4733 return 0; 4734 4735 if (phba->sli_rev <= LPFC_SLI_REV3) { 4736 switch (phba->fc_linkspeed) { 4737 case LPFC_LINK_SPEED_1GHZ: 4738 link_speed = 1000; 4739 break; 4740 case LPFC_LINK_SPEED_2GHZ: 4741 link_speed = 2000; 4742 break; 4743 case LPFC_LINK_SPEED_4GHZ: 4744 link_speed = 4000; 4745 break; 4746 case LPFC_LINK_SPEED_8GHZ: 4747 link_speed = 8000; 4748 break; 4749 case LPFC_LINK_SPEED_10GHZ: 4750 link_speed = 10000; 4751 break; 4752 case LPFC_LINK_SPEED_16GHZ: 4753 link_speed = 16000; 4754 break; 4755 default: 4756 link_speed = 0; 4757 } 4758 } else { 4759 if (phba->sli4_hba.link_state.logical_speed) 4760 link_speed = 4761 phba->sli4_hba.link_state.logical_speed; 4762 else 4763 link_speed = phba->sli4_hba.link_state.speed; 4764 } 4765 return link_speed; 4766 } 4767 4768 /** 4769 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4770 * @phba: pointer to lpfc hba data structure. 4771 * @evt_code: asynchronous event code. 4772 * @speed_code: asynchronous event link speed code. 4773 * 4774 * This routine is to parse the giving SLI4 async event link speed code into 4775 * value of Mbps for the link speed. 4776 * 4777 * Return: link speed in terms of Mbps. 4778 **/ 4779 static uint32_t 4780 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4781 uint8_t speed_code) 4782 { 4783 uint32_t port_speed; 4784 4785 switch (evt_code) { 4786 case LPFC_TRAILER_CODE_LINK: 4787 switch (speed_code) { 4788 case LPFC_ASYNC_LINK_SPEED_ZERO: 4789 port_speed = 0; 4790 break; 4791 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4792 port_speed = 10; 4793 break; 4794 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4795 port_speed = 100; 4796 break; 4797 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4798 port_speed = 1000; 4799 break; 4800 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4801 port_speed = 10000; 4802 break; 4803 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4804 port_speed = 20000; 4805 break; 4806 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4807 port_speed = 25000; 4808 break; 4809 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4810 port_speed = 40000; 4811 break; 4812 default: 4813 port_speed = 0; 4814 } 4815 break; 4816 case LPFC_TRAILER_CODE_FC: 4817 switch (speed_code) { 4818 case LPFC_FC_LA_SPEED_UNKNOWN: 4819 port_speed = 0; 4820 break; 4821 case LPFC_FC_LA_SPEED_1G: 4822 port_speed = 1000; 4823 break; 4824 case LPFC_FC_LA_SPEED_2G: 4825 port_speed = 2000; 4826 break; 4827 case LPFC_FC_LA_SPEED_4G: 4828 port_speed = 4000; 4829 break; 4830 case LPFC_FC_LA_SPEED_8G: 4831 port_speed = 8000; 4832 break; 4833 case LPFC_FC_LA_SPEED_10G: 4834 port_speed = 10000; 4835 break; 4836 case LPFC_FC_LA_SPEED_16G: 4837 port_speed = 16000; 4838 break; 4839 case LPFC_FC_LA_SPEED_32G: 4840 port_speed = 32000; 4841 break; 4842 case LPFC_FC_LA_SPEED_64G: 4843 port_speed = 64000; 4844 break; 4845 case LPFC_FC_LA_SPEED_128G: 4846 port_speed = 128000; 4847 break; 4848 default: 4849 port_speed = 0; 4850 } 4851 break; 4852 default: 4853 port_speed = 0; 4854 } 4855 return port_speed; 4856 } 4857 4858 /** 4859 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4860 * @phba: pointer to lpfc hba data structure. 4861 * @acqe_link: pointer to the async link completion queue entry. 4862 * 4863 * This routine is to handle the SLI4 asynchronous FCoE link event. 
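 *
 * In outline (matching the code below): save the link state carried in the
 * ACQE, block ELS IOCBs, and build a READ_TOPOLOGY mailbox command. On FC
 * ports the command is actually issued; on FCoE ports the mailbox completion
 * is faked and lpfc_mbx_cmpl_read_topology() is invoked directly.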
4864 **/ 4865 static void 4866 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4867 struct lpfc_acqe_link *acqe_link) 4868 { 4869 struct lpfc_dmabuf *mp; 4870 LPFC_MBOXQ_t *pmb; 4871 MAILBOX_t *mb; 4872 struct lpfc_mbx_read_top *la; 4873 uint8_t att_type; 4874 int rc; 4875 4876 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4877 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4878 return; 4879 phba->fcoe_eventtag = acqe_link->event_tag; 4880 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4881 if (!pmb) { 4882 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4883 "0395 The mboxq allocation failed\n"); 4884 return; 4885 } 4886 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4887 if (!mp) { 4888 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4889 "0396 The lpfc_dmabuf allocation failed\n"); 4890 goto out_free_pmb; 4891 } 4892 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4893 if (!mp->virt) { 4894 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4895 "0397 The mbuf allocation failed\n"); 4896 goto out_free_dmabuf; 4897 } 4898 4899 /* Cleanup any outstanding ELS commands */ 4900 lpfc_els_flush_all_cmd(phba); 4901 4902 /* Block ELS IOCBs until we have done process link event */ 4903 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4904 4905 /* Update link event statistics */ 4906 phba->sli.slistat.link_event++; 4907 4908 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4909 lpfc_read_topology(phba, pmb, mp); 4910 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4911 pmb->vport = phba->pport; 4912 4913 /* Keep the link status for extra SLI4 state machine reference */ 4914 phba->sli4_hba.link_state.speed = 4915 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4916 bf_get(lpfc_acqe_link_speed, acqe_link)); 4917 phba->sli4_hba.link_state.duplex = 4918 bf_get(lpfc_acqe_link_duplex, acqe_link); 4919 phba->sli4_hba.link_state.status = 4920 bf_get(lpfc_acqe_link_status, acqe_link); 4921 phba->sli4_hba.link_state.type = 4922 bf_get(lpfc_acqe_link_type, acqe_link); 4923 phba->sli4_hba.link_state.number = 4924 bf_get(lpfc_acqe_link_number, acqe_link); 4925 phba->sli4_hba.link_state.fault = 4926 bf_get(lpfc_acqe_link_fault, acqe_link); 4927 phba->sli4_hba.link_state.logical_speed = 4928 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4929 4930 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4931 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4932 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4933 "Logical speed:%dMbps Fault:%d\n", 4934 phba->sli4_hba.link_state.speed, 4935 phba->sli4_hba.link_state.topology, 4936 phba->sli4_hba.link_state.status, 4937 phba->sli4_hba.link_state.type, 4938 phba->sli4_hba.link_state.number, 4939 phba->sli4_hba.link_state.logical_speed, 4940 phba->sli4_hba.link_state.fault); 4941 /* 4942 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4943 * topology info. Note: Optional for non FC-AL ports. 4944 */ 4945 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4946 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4947 if (rc == MBX_NOT_FINISHED) 4948 goto out_free_dmabuf; 4949 return; 4950 } 4951 /* 4952 * For FCoE Mode: fill in all the topology information we need and call 4953 * the READ_TOPOLOGY completion routine to continue without actually 4954 * sending the READ_TOPOLOGY mailbox command to the port. 
4955 */ 4956 /* Initialize completion status */ 4957 mb = &pmb->u.mb; 4958 mb->mbxStatus = MBX_SUCCESS; 4959 4960 /* Parse port fault information field */ 4961 lpfc_sli4_parse_latt_fault(phba, acqe_link); 4962 4963 /* Parse and translate link attention fields */ 4964 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 4965 la->eventTag = acqe_link->event_tag; 4966 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 4967 bf_set(lpfc_mbx_read_top_link_spd, la, 4968 (bf_get(lpfc_acqe_link_speed, acqe_link))); 4969 4970 /* Fake the the following irrelvant fields */ 4971 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 4972 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 4973 bf_set(lpfc_mbx_read_top_il, la, 0); 4974 bf_set(lpfc_mbx_read_top_pb, la, 0); 4975 bf_set(lpfc_mbx_read_top_fa, la, 0); 4976 bf_set(lpfc_mbx_read_top_mm, la, 0); 4977 4978 /* Invoke the lpfc_handle_latt mailbox command callback function */ 4979 lpfc_mbx_cmpl_read_topology(phba, pmb); 4980 4981 return; 4982 4983 out_free_dmabuf: 4984 kfree(mp); 4985 out_free_pmb: 4986 mempool_free(pmb, phba->mbox_mem_pool); 4987 } 4988 4989 /** 4990 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 4991 * topology. 4992 * @phba: pointer to lpfc hba data structure. 4993 * @evt_code: asynchronous event code. 4994 * @speed_code: asynchronous event link speed code. 4995 * 4996 * This routine is to parse the giving SLI4 async event link speed code into 4997 * value of Read topology link speed. 4998 * 4999 * Return: link speed in terms of Read topology. 5000 **/ 5001 static uint8_t 5002 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5003 { 5004 uint8_t port_speed; 5005 5006 switch (speed_code) { 5007 case LPFC_FC_LA_SPEED_1G: 5008 port_speed = LPFC_LINK_SPEED_1GHZ; 5009 break; 5010 case LPFC_FC_LA_SPEED_2G: 5011 port_speed = LPFC_LINK_SPEED_2GHZ; 5012 break; 5013 case LPFC_FC_LA_SPEED_4G: 5014 port_speed = LPFC_LINK_SPEED_4GHZ; 5015 break; 5016 case LPFC_FC_LA_SPEED_8G: 5017 port_speed = LPFC_LINK_SPEED_8GHZ; 5018 break; 5019 case LPFC_FC_LA_SPEED_16G: 5020 port_speed = LPFC_LINK_SPEED_16GHZ; 5021 break; 5022 case LPFC_FC_LA_SPEED_32G: 5023 port_speed = LPFC_LINK_SPEED_32GHZ; 5024 break; 5025 case LPFC_FC_LA_SPEED_64G: 5026 port_speed = LPFC_LINK_SPEED_64GHZ; 5027 break; 5028 case LPFC_FC_LA_SPEED_128G: 5029 port_speed = LPFC_LINK_SPEED_128GHZ; 5030 break; 5031 case LPFC_FC_LA_SPEED_256G: 5032 port_speed = LPFC_LINK_SPEED_256GHZ; 5033 break; 5034 default: 5035 port_speed = 0; 5036 break; 5037 } 5038 5039 return port_speed; 5040 } 5041 5042 #define trunk_link_status(__idx)\ 5043 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 5044 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 5045 "Link up" : "Link down") : "NA" 5046 /* Did port __idx reported an error */ 5047 #define trunk_port_fault(__idx)\ 5048 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 5049 (port_fault & (1 << __idx) ? 
"YES" : "NO") : "NA" 5050 5051 static void 5052 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5053 struct lpfc_acqe_fc_la *acqe_fc) 5054 { 5055 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5056 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5057 5058 phba->sli4_hba.link_state.speed = 5059 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5060 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5061 5062 phba->sli4_hba.link_state.logical_speed = 5063 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5064 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5065 phba->fc_linkspeed = 5066 lpfc_async_link_speed_to_read_top( 5067 phba, 5068 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5069 5070 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5071 phba->trunk_link.link0.state = 5072 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5073 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5074 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5075 } 5076 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5077 phba->trunk_link.link1.state = 5078 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5079 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5080 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5081 } 5082 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5083 phba->trunk_link.link2.state = 5084 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5085 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5086 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5087 } 5088 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5089 phba->trunk_link.link3.state = 5090 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5091 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5092 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5093 } 5094 5095 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5096 "2910 Async FC Trunking Event - Speed:%d\n" 5097 "\tLogical speed:%d " 5098 "port0: %s port1: %s port2: %s port3: %s\n", 5099 phba->sli4_hba.link_state.speed, 5100 phba->sli4_hba.link_state.logical_speed, 5101 trunk_link_status(0), trunk_link_status(1), 5102 trunk_link_status(2), trunk_link_status(3)); 5103 5104 if (port_fault) 5105 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5106 "3202 trunk error:0x%x (%s) seen on port0:%s " 5107 /* 5108 * SLI-4: We have only 0xA error codes 5109 * defined as of now. print an appropriate 5110 * message in case driver needs to be updated. 5111 */ 5112 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5113 "UNDEFINED. update driver." : trunk_errmsg[err], 5114 trunk_port_fault(0), trunk_port_fault(1), 5115 trunk_port_fault(2), trunk_port_fault(3)); 5116 } 5117 5118 5119 /** 5120 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5121 * @phba: pointer to lpfc hba data structure. 5122 * @acqe_fc: pointer to the async fc completion queue entry. 5123 * 5124 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5125 * that the event was received and then issue a read_topology mailbox command so 5126 * that the rest of the driver will treat it the same as SLI3. 
5127 **/ 5128 static void 5129 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 5130 { 5131 struct lpfc_dmabuf *mp; 5132 LPFC_MBOXQ_t *pmb; 5133 MAILBOX_t *mb; 5134 struct lpfc_mbx_read_top *la; 5135 int rc; 5136 5137 if (bf_get(lpfc_trailer_type, acqe_fc) != 5138 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5139 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5140 "2895 Non FC link Event detected.(%d)\n", 5141 bf_get(lpfc_trailer_type, acqe_fc)); 5142 return; 5143 } 5144 5145 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5146 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 5147 lpfc_update_trunk_link_status(phba, acqe_fc); 5148 return; 5149 } 5150 5151 /* Keep the link status for extra SLI4 state machine reference */ 5152 phba->sli4_hba.link_state.speed = 5153 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5154 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5155 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 5156 phba->sli4_hba.link_state.topology = 5157 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 5158 phba->sli4_hba.link_state.status = 5159 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 5160 phba->sli4_hba.link_state.type = 5161 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 5162 phba->sli4_hba.link_state.number = 5163 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 5164 phba->sli4_hba.link_state.fault = 5165 bf_get(lpfc_acqe_link_fault, acqe_fc); 5166 5167 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5168 LPFC_FC_LA_TYPE_LINK_DOWN) 5169 phba->sli4_hba.link_state.logical_speed = 0; 5170 else if (!phba->sli4_hba.conf_trunk) 5171 phba->sli4_hba.link_state.logical_speed = 5172 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5173 5174 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5175 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 5176 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 5177 "%dMbps Fault:%d\n", 5178 phba->sli4_hba.link_state.speed, 5179 phba->sli4_hba.link_state.topology, 5180 phba->sli4_hba.link_state.status, 5181 phba->sli4_hba.link_state.type, 5182 phba->sli4_hba.link_state.number, 5183 phba->sli4_hba.link_state.logical_speed, 5184 phba->sli4_hba.link_state.fault); 5185 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5186 if (!pmb) { 5187 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5188 "2897 The mboxq allocation failed\n"); 5189 return; 5190 } 5191 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5192 if (!mp) { 5193 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5194 "2898 The lpfc_dmabuf allocation failed\n"); 5195 goto out_free_pmb; 5196 } 5197 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5198 if (!mp->virt) { 5199 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5200 "2899 The mbuf allocation failed\n"); 5201 goto out_free_dmabuf; 5202 } 5203 5204 /* Cleanup any outstanding ELS commands */ 5205 lpfc_els_flush_all_cmd(phba); 5206 5207 /* Block ELS IOCBs until we have done process link event */ 5208 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5209 5210 /* Update link event statistics */ 5211 phba->sli.slistat.link_event++; 5212 5213 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5214 lpfc_read_topology(phba, pmb, mp); 5215 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5216 pmb->vport = phba->pport; 5217 5218 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 5219 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 5220 5221 switch (phba->sli4_hba.link_state.status) { 5222 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 5223 phba->link_flag |= LS_MDS_LINK_DOWN; 5224 break; 5225 case 
LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5226 phba->link_flag |= LS_MDS_LOOPBACK; 5227 break; 5228 default: 5229 break; 5230 } 5231 5232 /* Initialize completion status */ 5233 mb = &pmb->u.mb; 5234 mb->mbxStatus = MBX_SUCCESS; 5235 5236 /* Parse port fault information field */ 5237 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5238 5239 /* Parse and translate link attention fields */ 5240 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5241 la->eventTag = acqe_fc->event_tag; 5242 5243 if (phba->sli4_hba.link_state.status == 5244 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5245 bf_set(lpfc_mbx_read_top_att_type, la, 5246 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5247 } else { 5248 bf_set(lpfc_mbx_read_top_att_type, la, 5249 LPFC_FC_LA_TYPE_LINK_DOWN); 5250 } 5251 /* Invoke the mailbox command callback function */ 5252 lpfc_mbx_cmpl_read_topology(phba, pmb); 5253 5254 return; 5255 } 5256 5257 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5258 if (rc == MBX_NOT_FINISHED) 5259 goto out_free_dmabuf; 5260 return; 5261 5262 out_free_dmabuf: 5263 kfree(mp); 5264 out_free_pmb: 5265 mempool_free(pmb, phba->mbox_mem_pool); 5266 } 5267 5268 /** 5269 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5270 * @phba: pointer to lpfc hba data structure. 5271 * @acqe_fc: pointer to the async SLI completion queue entry. 5272 * 5273 * This routine is to handle the SLI4 asynchronous SLI events. 5274 **/ 5275 static void 5276 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5277 { 5278 char port_name; 5279 char message[128]; 5280 uint8_t status; 5281 uint8_t evt_type; 5282 uint8_t operational = 0; 5283 struct temp_event temp_event_data; 5284 struct lpfc_acqe_misconfigured_event *misconfigured; 5285 struct Scsi_Host *shost; 5286 struct lpfc_vport **vports; 5287 int rc, i; 5288 5289 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5290 5291 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5292 "2901 Async SLI event - Type:%d, Event Data: x%08x " 5293 "x%08x x%08x x%08x\n", evt_type, 5294 acqe_sli->event_data1, acqe_sli->event_data2, 5295 acqe_sli->reserved, acqe_sli->trailer); 5296 5297 port_name = phba->Port[0]; 5298 if (port_name == 0x00) 5299 port_name = '?'; /* get port name is empty */ 5300 5301 switch (evt_type) { 5302 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5303 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5304 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5305 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5306 5307 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5308 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5309 acqe_sli->event_data1, port_name); 5310 5311 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5312 shost = lpfc_shost_from_vport(phba->pport); 5313 fc_host_post_vendor_event(shost, fc_get_event_number(), 5314 sizeof(temp_event_data), 5315 (char *)&temp_event_data, 5316 SCSI_NL_VID_TYPE_PCI 5317 | PCI_VENDOR_ID_EMULEX); 5318 break; 5319 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5320 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5321 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5322 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5323 5324 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5325 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5326 acqe_sli->event_data1, port_name); 5327 5328 shost = lpfc_shost_from_vport(phba->pport); 5329 fc_host_post_vendor_event(shost, fc_get_event_number(), 5330 sizeof(temp_event_data), 5331 (char *)&temp_event_data, 5332 SCSI_NL_VID_TYPE_PCI 5333 | PCI_VENDOR_ID_EMULEX); 5334 break; 5335 case 
LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5336 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5337 &acqe_sli->event_data1; 5338 5339 /* fetch the status for this port */ 5340 switch (phba->sli4_hba.lnk_info.lnk_no) { 5341 case LPFC_LINK_NUMBER_0: 5342 status = bf_get(lpfc_sli_misconfigured_port0_state, 5343 &misconfigured->theEvent); 5344 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5345 &misconfigured->theEvent); 5346 break; 5347 case LPFC_LINK_NUMBER_1: 5348 status = bf_get(lpfc_sli_misconfigured_port1_state, 5349 &misconfigured->theEvent); 5350 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5351 &misconfigured->theEvent); 5352 break; 5353 case LPFC_LINK_NUMBER_2: 5354 status = bf_get(lpfc_sli_misconfigured_port2_state, 5355 &misconfigured->theEvent); 5356 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5357 &misconfigured->theEvent); 5358 break; 5359 case LPFC_LINK_NUMBER_3: 5360 status = bf_get(lpfc_sli_misconfigured_port3_state, 5361 &misconfigured->theEvent); 5362 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5363 &misconfigured->theEvent); 5364 break; 5365 default: 5366 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5367 "3296 " 5368 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5369 "event: Invalid link %d", 5370 phba->sli4_hba.lnk_info.lnk_no); 5371 return; 5372 } 5373 5374 /* Skip if optic state unchanged */ 5375 if (phba->sli4_hba.lnk_info.optic_state == status) 5376 return; 5377 5378 switch (status) { 5379 case LPFC_SLI_EVENT_STATUS_VALID: 5380 sprintf(message, "Physical Link is functional"); 5381 break; 5382 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5383 sprintf(message, "Optics faulted/incorrectly " 5384 "installed/not installed - Reseat optics, " 5385 "if issue not resolved, replace."); 5386 break; 5387 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5388 sprintf(message, 5389 "Optics of two types installed - Remove one " 5390 "optic or install matching pair of optics."); 5391 break; 5392 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5393 sprintf(message, "Incompatible optics - Replace with " 5394 "compatible optics for card to function."); 5395 break; 5396 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5397 sprintf(message, "Unqualified optics - Replace with " 5398 "Avago optics for Warranty and Technical " 5399 "Support - Link is%s operational", 5400 (operational) ? " not" : ""); 5401 break; 5402 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5403 sprintf(message, "Uncertified optics - Replace with " 5404 "Avago-certified optics to enable link " 5405 "operation - Link is%s operational", 5406 (operational) ? 
" not" : ""); 5407 break; 5408 default: 5409 /* firmware is reporting a status we don't know about */ 5410 sprintf(message, "Unknown event status x%02x", status); 5411 break; 5412 } 5413 5414 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5415 rc = lpfc_sli4_read_config(phba); 5416 if (rc) { 5417 phba->lmt = 0; 5418 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5419 "3194 Unable to retrieve supported " 5420 "speeds, rc = 0x%x\n", rc); 5421 } 5422 vports = lpfc_create_vport_work_array(phba); 5423 if (vports != NULL) { 5424 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5425 i++) { 5426 shost = lpfc_shost_from_vport(vports[i]); 5427 lpfc_host_supported_speeds_set(shost); 5428 } 5429 } 5430 lpfc_destroy_vport_work_array(phba, vports); 5431 5432 phba->sli4_hba.lnk_info.optic_state = status; 5433 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5434 "3176 Port Name %c %s\n", port_name, message); 5435 break; 5436 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5437 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5438 "3192 Remote DPort Test Initiated - " 5439 "Event Data1:x%08x Event Data2: x%08x\n", 5440 acqe_sli->event_data1, acqe_sli->event_data2); 5441 break; 5442 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 5443 /* Misconfigured WWN. Reports that the SLI Port is configured 5444 * to use FA-WWN, but the attached device doesn’t support it. 5445 * No driver action is required. 5446 * Event Data1 - N.A, Event Data2 - N.A 5447 */ 5448 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, 5449 "2699 Misconfigured FA-WWN - Attached device does " 5450 "not support FA-WWN\n"); 5451 break; 5452 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 5453 /* EEPROM failure. No driver action is required */ 5454 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5455 "2518 EEPROM failure - " 5456 "Event Data1: x%08x Event Data2: x%08x\n", 5457 acqe_sli->event_data1, acqe_sli->event_data2); 5458 break; 5459 default: 5460 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5461 "3193 Unrecognized SLI event, type: 0x%x", 5462 evt_type); 5463 break; 5464 } 5465 } 5466 5467 /** 5468 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5469 * @vport: pointer to vport data structure. 5470 * 5471 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5472 * response to a CVL event. 5473 * 5474 * Return the pointer to the ndlp with the vport if successful, otherwise 5475 * return NULL. 
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
		&& (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to an FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
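 * Depending on the event type it starts or updates an FCF table scan
 * (new FCF / FCF parameter modified), logs an FCF-table-full condition,
 * begins fast FCF failover on an FCF dead event, or handles a Clear
 * Virtual Link on the affected vport.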
5550 **/ 5551 static void 5552 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5553 struct lpfc_acqe_fip *acqe_fip) 5554 { 5555 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5556 int rc; 5557 struct lpfc_vport *vport; 5558 struct lpfc_nodelist *ndlp; 5559 struct Scsi_Host *shost; 5560 int active_vlink_present; 5561 struct lpfc_vport **vports; 5562 int i; 5563 5564 phba->fc_eventTag = acqe_fip->event_tag; 5565 phba->fcoe_eventtag = acqe_fip->event_tag; 5566 switch (event_type) { 5567 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5568 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5569 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5570 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5571 LOG_DISCOVERY, 5572 "2546 New FCF event, evt_tag:x%x, " 5573 "index:x%x\n", 5574 acqe_fip->event_tag, 5575 acqe_fip->index); 5576 else 5577 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5578 LOG_DISCOVERY, 5579 "2788 FCF param modified event, " 5580 "evt_tag:x%x, index:x%x\n", 5581 acqe_fip->event_tag, 5582 acqe_fip->index); 5583 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5584 /* 5585 * During period of FCF discovery, read the FCF 5586 * table record indexed by the event to update 5587 * FCF roundrobin failover eligible FCF bmask. 5588 */ 5589 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5590 LOG_DISCOVERY, 5591 "2779 Read FCF (x%x) for updating " 5592 "roundrobin FCF failover bmask\n", 5593 acqe_fip->index); 5594 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5595 } 5596 5597 /* If the FCF discovery is in progress, do nothing. */ 5598 spin_lock_irq(&phba->hbalock); 5599 if (phba->hba_flag & FCF_TS_INPROG) { 5600 spin_unlock_irq(&phba->hbalock); 5601 break; 5602 } 5603 /* If fast FCF failover rescan event is pending, do nothing */ 5604 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5605 spin_unlock_irq(&phba->hbalock); 5606 break; 5607 } 5608 5609 /* If the FCF has been in discovered state, do nothing. */ 5610 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5611 spin_unlock_irq(&phba->hbalock); 5612 break; 5613 } 5614 spin_unlock_irq(&phba->hbalock); 5615 5616 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5617 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5618 "2770 Start FCF table scan per async FCF " 5619 "event, evt_tag:x%x, index:x%x\n", 5620 acqe_fip->event_tag, acqe_fip->index); 5621 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5622 LPFC_FCOE_FCF_GET_FIRST); 5623 if (rc) 5624 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5625 "2547 Issue FCF scan read FCF mailbox " 5626 "command failed (x%x)\n", rc); 5627 break; 5628 5629 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5630 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5631 "2548 FCF Table full count 0x%x tag 0x%x\n", 5632 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5633 acqe_fip->event_tag); 5634 break; 5635 5636 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5637 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5638 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5639 "2549 FCF (x%x) disconnected from network, " 5640 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5641 /* 5642 * If we are in the middle of FCF failover process, clear 5643 * the corresponding FCF bit in the roundrobin bitmap. 
5644 */ 5645 spin_lock_irq(&phba->hbalock); 5646 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5647 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5648 spin_unlock_irq(&phba->hbalock); 5649 /* Update FLOGI FCF failover eligible FCF bmask */ 5650 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5651 break; 5652 } 5653 spin_unlock_irq(&phba->hbalock); 5654 5655 /* If the event is not for currently used fcf do nothing */ 5656 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5657 break; 5658 5659 /* 5660 * Otherwise, request the port to rediscover the entire FCF 5661 * table for a fast recovery from case that the current FCF 5662 * is no longer valid as we are not in the middle of FCF 5663 * failover process already. 5664 */ 5665 spin_lock_irq(&phba->hbalock); 5666 /* Mark the fast failover process in progress */ 5667 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5668 spin_unlock_irq(&phba->hbalock); 5669 5670 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5671 "2771 Start FCF fast failover process due to " 5672 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5673 "\n", acqe_fip->event_tag, acqe_fip->index); 5674 rc = lpfc_sli4_redisc_fcf_table(phba); 5675 if (rc) { 5676 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5677 LOG_DISCOVERY, 5678 "2772 Issue FCF rediscover mailbox " 5679 "command failed, fail through to FCF " 5680 "dead event\n"); 5681 spin_lock_irq(&phba->hbalock); 5682 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5683 spin_unlock_irq(&phba->hbalock); 5684 /* 5685 * Last resort will fail over by treating this 5686 * as a link down to FCF registration. 5687 */ 5688 lpfc_sli4_fcf_dead_failthrough(phba); 5689 } else { 5690 /* Reset FCF roundrobin bmask for new discovery */ 5691 lpfc_sli4_clear_fcf_rr_bmask(phba); 5692 /* 5693 * Handling fast FCF failover to a DEAD FCF event is 5694 * considered equalivant to receiving CVL to all vports. 5695 */ 5696 lpfc_sli4_perform_all_vport_cvl(phba); 5697 } 5698 break; 5699 case LPFC_FIP_EVENT_TYPE_CVL: 5700 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5701 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5702 "2718 Clear Virtual Link Received for VPI 0x%x" 5703 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5704 5705 vport = lpfc_find_vport_by_vpid(phba, 5706 acqe_fip->index); 5707 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5708 if (!ndlp) 5709 break; 5710 active_vlink_present = 0; 5711 5712 vports = lpfc_create_vport_work_array(phba); 5713 if (vports) { 5714 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5715 i++) { 5716 if ((!(vports[i]->fc_flag & 5717 FC_VPORT_CVL_RCVD)) && 5718 (vports[i]->port_state > LPFC_FDISC)) { 5719 active_vlink_present = 1; 5720 break; 5721 } 5722 } 5723 lpfc_destroy_vport_work_array(phba, vports); 5724 } 5725 5726 /* 5727 * Don't re-instantiate if vport is marked for deletion. 5728 * If we are here first then vport_delete is going to wait 5729 * for discovery to complete. 5730 */ 5731 if (!(vport->load_flag & FC_UNLOADING) && 5732 active_vlink_present) { 5733 /* 5734 * If there are other active VLinks present, 5735 * re-instantiate the Vlink using FDISC. 
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request the port to rediscover
			 * the entire FCF table for a fast recovery
			 * from the possible case that the current FCF
			 * is no longer valid, if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * currently registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous DCBX event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
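 * (For example, a reported value of 1000 corresponds to a logical link
 * speed of 10000 Mbps, i.e. 10 Gbps.)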
5821 **/ 5822 static void 5823 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 5824 struct lpfc_acqe_grp5 *acqe_grp5) 5825 { 5826 uint16_t prev_ll_spd; 5827 5828 phba->fc_eventTag = acqe_grp5->event_tag; 5829 phba->fcoe_eventtag = acqe_grp5->event_tag; 5830 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 5831 phba->sli4_hba.link_state.logical_speed = 5832 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 5833 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5834 "2789 GRP5 Async Event: Updating logical link speed " 5835 "from %dMbps to %dMbps\n", prev_ll_spd, 5836 phba->sli4_hba.link_state.logical_speed); 5837 } 5838 5839 /** 5840 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 5841 * @phba: pointer to lpfc hba data structure. 5842 * 5843 * This routine is invoked by the worker thread to process all the pending 5844 * SLI4 asynchronous events. 5845 **/ 5846 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 5847 { 5848 struct lpfc_cq_event *cq_event; 5849 5850 /* First, declare the async event has been handled */ 5851 spin_lock_irq(&phba->hbalock); 5852 phba->hba_flag &= ~ASYNC_EVENT; 5853 spin_unlock_irq(&phba->hbalock); 5854 /* Now, handle all the async events */ 5855 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 5856 /* Get the first event from the head of the event queue */ 5857 spin_lock_irq(&phba->hbalock); 5858 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 5859 cq_event, struct lpfc_cq_event, list); 5860 spin_unlock_irq(&phba->hbalock); 5861 /* Process the asynchronous event */ 5862 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 5863 case LPFC_TRAILER_CODE_LINK: 5864 lpfc_sli4_async_link_evt(phba, 5865 &cq_event->cqe.acqe_link); 5866 break; 5867 case LPFC_TRAILER_CODE_FCOE: 5868 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 5869 break; 5870 case LPFC_TRAILER_CODE_DCBX: 5871 lpfc_sli4_async_dcbx_evt(phba, 5872 &cq_event->cqe.acqe_dcbx); 5873 break; 5874 case LPFC_TRAILER_CODE_GRP5: 5875 lpfc_sli4_async_grp5_evt(phba, 5876 &cq_event->cqe.acqe_grp5); 5877 break; 5878 case LPFC_TRAILER_CODE_FC: 5879 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 5880 break; 5881 case LPFC_TRAILER_CODE_SLI: 5882 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5883 break; 5884 default: 5885 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5886 "1804 Invalid asynchrous event code: " 5887 "x%x\n", bf_get(lpfc_trailer_code, 5888 &cq_event->cqe.mcqe_cmpl)); 5889 break; 5890 } 5891 /* Free the completion event processed to the free pool */ 5892 lpfc_sli4_cq_event_release(phba, cq_event); 5893 } 5894 } 5895 5896 /** 5897 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 5898 * @phba: pointer to lpfc hba data structure. 5899 * 5900 * This routine is invoked by the worker thread to process FCF table 5901 * rediscovery pending completion event. 
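 * It clears the FCF_REDISC_EVT flag, arms fast failover via
 * FCF_REDISC_FOV and restarts the FCF table scan from the first entry.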
5902 **/ 5903 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5904 { 5905 int rc; 5906 5907 spin_lock_irq(&phba->hbalock); 5908 /* Clear FCF rediscovery timeout event */ 5909 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5910 /* Clear driver fast failover FCF record flag */ 5911 phba->fcf.failover_rec.flag = 0; 5912 /* Set state for FCF fast failover */ 5913 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5914 spin_unlock_irq(&phba->hbalock); 5915 5916 /* Scan FCF table from the first entry to re-discover SAN */ 5917 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5918 "2777 Start post-quiescent FCF table scan\n"); 5919 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5920 if (rc) 5921 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5922 "2747 Issue FCF scan read FCF mailbox " 5923 "command failed 0x%x\n", rc); 5924 } 5925 5926 /** 5927 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5928 * @phba: pointer to lpfc hba data structure. 5929 * @dev_grp: The HBA PCI-Device group number. 5930 * 5931 * This routine is invoked to set up the per HBA PCI-Device group function 5932 * API jump table entries. 5933 * 5934 * Return: 0 if success, otherwise -ENODEV 5935 **/ 5936 int 5937 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5938 { 5939 int rc; 5940 5941 /* Set up lpfc PCI-device group */ 5942 phba->pci_dev_grp = dev_grp; 5943 5944 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5945 if (dev_grp == LPFC_PCI_DEV_OC) 5946 phba->sli_rev = LPFC_SLI_REV4; 5947 5948 /* Set up device INIT API function jump table */ 5949 rc = lpfc_init_api_table_setup(phba, dev_grp); 5950 if (rc) 5951 return -ENODEV; 5952 /* Set up SCSI API function jump table */ 5953 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5954 if (rc) 5955 return -ENODEV; 5956 /* Set up SLI API function jump table */ 5957 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5958 if (rc) 5959 return -ENODEV; 5960 /* Set up MBOX API function jump table */ 5961 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5962 if (rc) 5963 return -ENODEV; 5964 5965 return 0; 5966 } 5967 5968 /** 5969 * lpfc_log_intr_mode - Log the active interrupt mode 5970 * @phba: pointer to lpfc hba data structure. 5971 * @intr_mode: active interrupt mode adopted. 5972 * 5973 * This routine it invoked to log the currently used active interrupt mode 5974 * to the device. 5975 **/ 5976 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5977 { 5978 switch (intr_mode) { 5979 case 0: 5980 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5981 "0470 Enable INTx interrupt mode.\n"); 5982 break; 5983 case 1: 5984 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5985 "0481 Enabled MSI interrupt mode.\n"); 5986 break; 5987 case 2: 5988 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5989 "0480 Enabled MSI-X interrupt mode.\n"); 5990 break; 5991 default: 5992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5993 "0482 Illegal interrupt mode.\n"); 5994 break; 5995 } 5996 return; 5997 } 5998 5999 /** 6000 * lpfc_cpumask_of_node_init - initalizes cpumask of phba's NUMA node 6001 * @phba: Pointer to HBA context object. 
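 *
 * Builds sli4_hba.numa_mask from every possible CPU that belongs to the
 * NUMA node of the adapter's PCI device; the mask is left empty when the
 * device reports NUMA_NO_NODE.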
6002 * 6003 **/ 6004 static void 6005 lpfc_cpumask_of_node_init(struct lpfc_hba *phba) 6006 { 6007 unsigned int cpu, numa_node; 6008 struct cpumask *numa_mask = &phba->sli4_hba.numa_mask; 6009 6010 cpumask_clear(numa_mask); 6011 6012 /* Check if we're a NUMA architecture */ 6013 numa_node = dev_to_node(&phba->pcidev->dev); 6014 if (numa_node == NUMA_NO_NODE) 6015 return; 6016 6017 for_each_possible_cpu(cpu) 6018 if (cpu_to_node(cpu) == numa_node) 6019 cpumask_set_cpu(cpu, numa_mask); 6020 } 6021 6022 /** 6023 * lpfc_enable_pci_dev - Enable a generic PCI device. 6024 * @phba: pointer to lpfc hba data structure. 6025 * 6026 * This routine is invoked to enable the PCI device that is common to all 6027 * PCI devices. 6028 * 6029 * Return codes 6030 * 0 - successful 6031 * other values - error 6032 **/ 6033 static int 6034 lpfc_enable_pci_dev(struct lpfc_hba *phba) 6035 { 6036 struct pci_dev *pdev; 6037 6038 /* Obtain PCI device reference */ 6039 if (!phba->pcidev) 6040 goto out_error; 6041 else 6042 pdev = phba->pcidev; 6043 /* Enable PCI device */ 6044 if (pci_enable_device_mem(pdev)) 6045 goto out_error; 6046 /* Request PCI resource for the device */ 6047 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 6048 goto out_disable_device; 6049 /* Set up device as PCI master and save state for EEH */ 6050 pci_set_master(pdev); 6051 pci_try_set_mwi(pdev); 6052 pci_save_state(pdev); 6053 6054 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 6055 if (pci_is_pcie(pdev)) 6056 pdev->needs_freset = 1; 6057 6058 return 0; 6059 6060 out_disable_device: 6061 pci_disable_device(pdev); 6062 out_error: 6063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6064 "1401 Failed to enable pci device\n"); 6065 return -ENODEV; 6066 } 6067 6068 /** 6069 * lpfc_disable_pci_dev - Disable a generic PCI device. 6070 * @phba: pointer to lpfc hba data structure. 6071 * 6072 * This routine is invoked to disable the PCI device that is common to all 6073 * PCI devices. 6074 **/ 6075 static void 6076 lpfc_disable_pci_dev(struct lpfc_hba *phba) 6077 { 6078 struct pci_dev *pdev; 6079 6080 /* Obtain PCI device reference */ 6081 if (!phba->pcidev) 6082 return; 6083 else 6084 pdev = phba->pcidev; 6085 /* Release PCI resource and disable PCI device */ 6086 pci_release_mem_regions(pdev); 6087 pci_disable_device(pdev); 6088 6089 return; 6090 } 6091 6092 /** 6093 * lpfc_reset_hba - Reset a hba 6094 * @phba: pointer to lpfc hba data structure. 6095 * 6096 * This routine is invoked to reset a hba device. It brings the HBA 6097 * offline, performs a board restart, and then brings the board back 6098 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 6099 * on outstanding mailbox commands. 6100 **/ 6101 void 6102 lpfc_reset_hba(struct lpfc_hba *phba) 6103 { 6104 /* If resets are disabled then set error state and return. */ 6105 if (!phba->cfg_enable_hba_reset) { 6106 phba->link_state = LPFC_HBA_ERROR; 6107 return; 6108 } 6109 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 6110 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6111 else 6112 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 6113 lpfc_offline(phba); 6114 lpfc_sli_brdrestart(phba); 6115 lpfc_online(phba); 6116 lpfc_unblock_mgmt_io(phba); 6117 } 6118 6119 /** 6120 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 6121 * @phba: pointer to lpfc hba data structure. 6122 * 6123 * This function enables the PCI SR-IOV virtual functions to a physical 6124 * function. 
It invokes the PCI SR-IOV API with the @nr_vfn provided to
 * enable the number of virtual functions on the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV API with the @nr_vfn provided to
 * enable the number of virtual functions on the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ?
"NVMET" : " ")); 6225 6226 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6227 spin_lock_init(&phba->scsi_buf_list_get_lock); 6228 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6229 spin_lock_init(&phba->scsi_buf_list_put_lock); 6230 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6231 6232 /* Initialize the fabric iocb list */ 6233 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6234 6235 /* Initialize list to save ELS buffers */ 6236 INIT_LIST_HEAD(&phba->elsbuf); 6237 6238 /* Initialize FCF connection rec list */ 6239 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6240 6241 /* Initialize OAS configuration list */ 6242 spin_lock_init(&phba->devicelock); 6243 INIT_LIST_HEAD(&phba->luns); 6244 6245 /* MBOX heartbeat timer */ 6246 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6247 /* Fabric block timer */ 6248 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6249 /* EA polling mode timer */ 6250 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6251 /* Heartbeat timer */ 6252 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6253 6254 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6255 6256 return 0; 6257 } 6258 6259 /** 6260 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6261 * @phba: pointer to lpfc hba data structure. 6262 * 6263 * This routine is invoked to set up the driver internal resources specific to 6264 * support the SLI-3 HBA device it attached to. 6265 * 6266 * Return codes 6267 * 0 - successful 6268 * other values - error 6269 **/ 6270 static int 6271 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6272 { 6273 int rc, entry_sz; 6274 6275 /* 6276 * Initialize timers used by driver 6277 */ 6278 6279 /* FCP polling mode timer */ 6280 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6281 6282 /* Host attention work mask setup */ 6283 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6284 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6285 6286 /* Get all the module params for configuring this host */ 6287 lpfc_get_cfgparam(phba); 6288 /* Set up phase-1 common device driver resources */ 6289 6290 rc = lpfc_setup_driver_resource_phase1(phba); 6291 if (rc) 6292 return -ENODEV; 6293 6294 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6295 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6296 /* check for menlo minimum sg count */ 6297 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6298 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6299 } 6300 6301 if (!phba->sli.sli3_ring) 6302 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6303 sizeof(struct lpfc_sli_ring), 6304 GFP_KERNEL); 6305 if (!phba->sli.sli3_ring) 6306 return -ENOMEM; 6307 6308 /* 6309 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6310 * used to create the sg_dma_buf_pool must be dynamically calculated. 6311 */ 6312 6313 /* Initialize the host templates the configured values. */ 6314 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6315 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 6316 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6317 6318 if (phba->sli_rev == LPFC_SLI_REV4) 6319 entry_sz = sizeof(struct sli4_sge); 6320 else 6321 entry_sz = sizeof(struct ulp_bde64); 6322 6323 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6324 if (phba->cfg_enable_bg) { 6325 /* 6326 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6327 * the FCP rsp, and a BDE for each. 
Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (i.e. there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
		dma_pool_create("lpfc_cmd_rsp_buf_pool",
				&phba->pcidev->dev,
				sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp),
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
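 * All device driver memory allocated for the SLI-3 path is released
 * through lpfc_mem_free_all().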
6427 **/ 6428 static void 6429 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 6430 { 6431 /* Free device driver memory allocated */ 6432 lpfc_mem_free_all(phba); 6433 6434 return; 6435 } 6436 6437 /** 6438 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 6439 * @phba: pointer to lpfc hba data structure. 6440 * 6441 * This routine is invoked to set up the driver internal resources specific to 6442 * support the SLI-4 HBA device it attached to. 6443 * 6444 * Return codes 6445 * 0 - successful 6446 * other values - error 6447 **/ 6448 static int 6449 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6450 { 6451 LPFC_MBOXQ_t *mboxq; 6452 MAILBOX_t *mb; 6453 int rc, i, max_buf_size; 6454 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 6455 struct lpfc_mqe *mqe; 6456 int longs; 6457 int extra; 6458 uint64_t wwn; 6459 u32 if_type; 6460 u32 if_fam; 6461 6462 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6463 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 6464 phba->sli4_hba.curr_disp_cpu = 0; 6465 lpfc_cpumask_of_node_init(phba); 6466 6467 /* Get all the module params for configuring this host */ 6468 lpfc_get_cfgparam(phba); 6469 6470 /* Set up phase-1 common device driver resources */ 6471 rc = lpfc_setup_driver_resource_phase1(phba); 6472 if (rc) 6473 return -ENODEV; 6474 6475 /* Before proceed, wait for POST done and device ready */ 6476 rc = lpfc_sli4_post_status_check(phba); 6477 if (rc) 6478 return -ENODEV; 6479 6480 /* Allocate all driver workqueues here */ 6481 6482 /* The lpfc_wq workqueue for deferred irq use */ 6483 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6484 6485 /* 6486 * Initialize timers used by driver 6487 */ 6488 6489 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6490 6491 /* FCF rediscover timer */ 6492 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6493 6494 /* 6495 * Control structure for handling external multi-buffer mailbox 6496 * command pass-through. 6497 */ 6498 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6499 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6500 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6501 6502 phba->max_vpi = LPFC_MAX_VPI; 6503 6504 /* This will be set to correct value after the read_config mbox */ 6505 phba->max_vports = 0; 6506 6507 /* Program the default value of vlan_id and fc_map */ 6508 phba->valid_vlan = 0; 6509 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6510 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6511 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6512 6513 /* 6514 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6515 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6516 * The WQ create will allocate the ring. 6517 */ 6518 6519 /* Initialize buffer queue management fields */ 6520 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6521 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6522 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6523 6524 /* 6525 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
6526 */ 6527 /* Initialize the Abort buffer list used by driver */ 6528 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 6529 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 6530 6531 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6532 /* Initialize the Abort nvme buffer list used by driver */ 6533 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6534 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6535 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6536 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 6537 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 6538 } 6539 6540 /* This abort list used by worker thread */ 6541 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6542 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6543 6544 /* 6545 * Initialize driver internal slow-path work queues 6546 */ 6547 6548 /* Driver internel slow-path CQ Event pool */ 6549 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6550 /* Response IOCB work queue list */ 6551 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6552 /* Asynchronous event CQ Event work queue list */ 6553 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6554 /* Fast-path XRI aborted CQ Event work queue list */ 6555 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 6556 /* Slow-path XRI aborted CQ Event work queue list */ 6557 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6558 /* Receive queue CQ Event work queue list */ 6559 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6560 6561 /* Initialize extent block lists. */ 6562 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6563 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6564 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6565 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6566 6567 /* Initialize mboxq lists. If the early init routines fail 6568 * these lists need to be correctly initialized. 6569 */ 6570 INIT_LIST_HEAD(&phba->sli.mboxq); 6571 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6572 6573 /* initialize optic_state to 0xFF */ 6574 phba->sli4_hba.lnk_info.optic_state = 0xff; 6575 6576 /* Allocate device driver memory */ 6577 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6578 if (rc) 6579 return -ENOMEM; 6580 6581 /* IF Type 2 ports get initialized now. */ 6582 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6583 LPFC_SLI_INTF_IF_TYPE_2) { 6584 rc = lpfc_pci_function_reset(phba); 6585 if (unlikely(rc)) { 6586 rc = -ENODEV; 6587 goto out_free_mem; 6588 } 6589 phba->temp_sensor_support = 1; 6590 } 6591 6592 /* Create the bootstrap mailbox command */ 6593 rc = lpfc_create_bootstrap_mbox(phba); 6594 if (unlikely(rc)) 6595 goto out_free_mem; 6596 6597 /* Set up the host's endian order with the device. */ 6598 rc = lpfc_setup_endian_order(phba); 6599 if (unlikely(rc)) 6600 goto out_free_bsmbx; 6601 6602 /* Set up the hba's configuration parameters. */ 6603 rc = lpfc_sli4_read_config(phba); 6604 if (unlikely(rc)) 6605 goto out_free_bsmbx; 6606 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6607 if (unlikely(rc)) 6608 goto out_free_bsmbx; 6609 6610 /* IF Type 0 ports get initialized now. 
*/ 6611 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6612 LPFC_SLI_INTF_IF_TYPE_0) { 6613 rc = lpfc_pci_function_reset(phba); 6614 if (unlikely(rc)) 6615 goto out_free_bsmbx; 6616 } 6617 6618 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6619 GFP_KERNEL); 6620 if (!mboxq) { 6621 rc = -ENOMEM; 6622 goto out_free_bsmbx; 6623 } 6624 6625 /* Check for NVMET being configured */ 6626 phba->nvmet_support = 0; 6627 if (lpfc_enable_nvmet_cnt) { 6628 6629 /* First get WWN of HBA instance */ 6630 lpfc_read_nv(phba, mboxq); 6631 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6632 if (rc != MBX_SUCCESS) { 6633 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6634 "6016 Mailbox failed , mbxCmd x%x " 6635 "READ_NV, mbxStatus x%x\n", 6636 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6637 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6638 mempool_free(mboxq, phba->mbox_mem_pool); 6639 rc = -EIO; 6640 goto out_free_bsmbx; 6641 } 6642 mb = &mboxq->u.mb; 6643 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6644 sizeof(uint64_t)); 6645 wwn = cpu_to_be64(wwn); 6646 phba->sli4_hba.wwnn.u.name = wwn; 6647 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6648 sizeof(uint64_t)); 6649 /* wwn is WWPN of HBA instance */ 6650 wwn = cpu_to_be64(wwn); 6651 phba->sli4_hba.wwpn.u.name = wwn; 6652 6653 /* Check to see if it matches any module parameter */ 6654 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6655 if (wwn == lpfc_enable_nvmet[i]) { 6656 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6657 if (lpfc_nvmet_mem_alloc(phba)) 6658 break; 6659 6660 phba->nvmet_support = 1; /* a match */ 6661 6662 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6663 "6017 NVME Target %016llx\n", 6664 wwn); 6665 #else 6666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6667 "6021 Can't enable NVME Target." 6668 " NVME_TARGET_FC infrastructure" 6669 " is not in kernel\n"); 6670 #endif 6671 /* Not supported for NVMET */ 6672 phba->cfg_xri_rebalancing = 0; 6673 break; 6674 } 6675 } 6676 } 6677 6678 lpfc_nvme_mod_param_dep(phba); 6679 6680 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6681 lpfc_supported_pages(mboxq); 6682 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6683 if (!rc) { 6684 mqe = &mboxq->u.mqe; 6685 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6686 LPFC_MAX_SUPPORTED_PAGES); 6687 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6688 switch (pn_page[i]) { 6689 case LPFC_SLI4_PARAMETERS: 6690 phba->sli4_hba.pc_sli4_params.supported = 1; 6691 break; 6692 default: 6693 break; 6694 } 6695 } 6696 /* Read the port's SLI4 Parameters capabilities if supported. */ 6697 if (phba->sli4_hba.pc_sli4_params.supported) 6698 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6699 if (rc) { 6700 mempool_free(mboxq, phba->mbox_mem_pool); 6701 rc = -EIO; 6702 goto out_free_bsmbx; 6703 } 6704 } 6705 6706 /* 6707 * Get sli4 parameters that override parameters from Port capabilities. 6708 * If this call fails, it isn't critical unless the SLI4 parameters come 6709 * back in conflict. 
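	 * A failure here is tolerated only on SLI_INTF_IF_TYPE_0 / BE2
	 * family ports, and even there it is treated as fatal when resource
	 * extents and RPI headers are both in use.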
6710 */ 6711 rc = lpfc_get_sli4_parameters(phba, mboxq); 6712 if (rc) { 6713 if_type = bf_get(lpfc_sli_intf_if_type, 6714 &phba->sli4_hba.sli_intf); 6715 if_fam = bf_get(lpfc_sli_intf_sli_family, 6716 &phba->sli4_hba.sli_intf); 6717 if (phba->sli4_hba.extents_in_use && 6718 phba->sli4_hba.rpi_hdrs_in_use) { 6719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6720 "2999 Unsupported SLI4 Parameters " 6721 "Extents and RPI headers enabled.\n"); 6722 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6723 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6724 mempool_free(mboxq, phba->mbox_mem_pool); 6725 rc = -EIO; 6726 goto out_free_bsmbx; 6727 } 6728 } 6729 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6730 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6731 mempool_free(mboxq, phba->mbox_mem_pool); 6732 rc = -EIO; 6733 goto out_free_bsmbx; 6734 } 6735 } 6736 6737 /* 6738 * 1 for cmd, 1 for rsp, NVME adds an extra one 6739 * for boundary conditions in its max_sgl_segment template. 6740 */ 6741 extra = 2; 6742 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6743 extra++; 6744 6745 /* 6746 * It doesn't matter what family our adapter is in, we are 6747 * limited to 2 Pages, 512 SGEs, for our SGL. 6748 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6749 */ 6750 max_buf_size = (2 * SLI4_PAGE_SIZE); 6751 6752 /* 6753 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6754 * used to create the sg_dma_buf_pool must be calculated. 6755 */ 6756 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6757 /* Both cfg_enable_bg and cfg_external_dif code paths */ 6758 6759 /* 6760 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6761 * the FCP rsp, and a SGE. Sice we have no control 6762 * over how many protection segments the SCSI Layer 6763 * will hand us (ie: there could be one for every block 6764 * in the IO), just allocate enough SGEs to accomidate 6765 * our max amount and we need to limit lpfc_sg_seg_cnt 6766 * to minimize the risk of running out. 6767 */ 6768 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6769 sizeof(struct fcp_rsp) + max_buf_size; 6770 6771 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6772 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6773 6774 /* 6775 * If supporting DIF, reduce the seg count for scsi to 6776 * allow room for the DIF sges. 6777 */ 6778 if (phba->cfg_enable_bg && 6779 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6780 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6781 else 6782 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6783 6784 } else { 6785 /* 6786 * The scsi_buf for a regular I/O holds the FCP cmnd, 6787 * the FCP rsp, a SGE for each, and a SGE for up to 6788 * cfg_sg_seg_cnt data segments. 6789 */ 6790 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6791 sizeof(struct fcp_rsp) + 6792 ((phba->cfg_sg_seg_cnt + extra) * 6793 sizeof(struct sli4_sge)); 6794 6795 /* Total SGEs for scsi_sg_list */ 6796 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 6797 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6798 6799 /* 6800 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 6801 * need to post 1 page for the SGL. 
6802 */ 6803 } 6804 6805 if (phba->cfg_xpsgl && !phba->nvmet_support) 6806 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 6807 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 6808 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 6809 else 6810 phba->cfg_sg_dma_buf_size = 6811 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 6812 6813 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 6814 sizeof(struct sli4_sge); 6815 6816 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 6817 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6818 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 6819 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 6820 "6300 Reducing NVME sg segment " 6821 "cnt to %d\n", 6822 LPFC_MAX_NVME_SEG_CNT); 6823 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 6824 } else 6825 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6826 } 6827 6828 /* Initialize the host templates with the updated values. */ 6829 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6830 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6831 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; 6832 6833 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6834 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6835 "total:%d scsi:%d nvme:%d\n", 6836 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6837 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 6838 phba->cfg_nvme_seg_cnt); 6839 6840 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 6841 i = phba->cfg_sg_dma_buf_size; 6842 else 6843 i = SLI4_PAGE_SIZE; 6844 6845 phba->lpfc_sg_dma_buf_pool = 6846 dma_pool_create("lpfc_sg_dma_buf_pool", 6847 &phba->pcidev->dev, 6848 phba->cfg_sg_dma_buf_size, 6849 i, 0); 6850 if (!phba->lpfc_sg_dma_buf_pool) 6851 goto out_free_bsmbx; 6852 6853 phba->lpfc_cmd_rsp_buf_pool = 6854 dma_pool_create("lpfc_cmd_rsp_buf_pool", 6855 &phba->pcidev->dev, 6856 sizeof(struct fcp_cmnd) + 6857 sizeof(struct fcp_rsp), 6858 i, 0); 6859 if (!phba->lpfc_cmd_rsp_buf_pool) 6860 goto out_free_sg_dma_buf; 6861 6862 mempool_free(mboxq, phba->mbox_mem_pool); 6863 6864 /* Verify OAS is supported */ 6865 lpfc_sli4_oas_verify(phba); 6866 6867 /* Verify RAS support on adapter */ 6868 lpfc_sli4_ras_init(phba); 6869 6870 /* Verify all the SLI4 queues */ 6871 rc = lpfc_sli4_queue_verify(phba); 6872 if (rc) 6873 goto out_free_cmd_rsp_buf; 6874 6875 /* Create driver internal CQE event pool */ 6876 rc = lpfc_sli4_cq_event_pool_create(phba); 6877 if (rc) 6878 goto out_free_cmd_rsp_buf; 6879 6880 /* Initialize sgl lists per host */ 6881 lpfc_init_sgl_list(phba); 6882 6883 /* Allocate and initialize active sgl array */ 6884 rc = lpfc_init_active_sgl_array(phba); 6885 if (rc) { 6886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6887 "1430 Failed to initialize sgl list.\n"); 6888 goto out_destroy_cq_event_pool; 6889 } 6890 rc = lpfc_sli4_init_rpi_hdrs(phba); 6891 if (rc) { 6892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6893 "1432 Failed to initialize rpi headers.\n"); 6894 goto out_free_active_sgl; 6895 } 6896 6897 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6898 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6899 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6900 GFP_KERNEL); 6901 if (!phba->fcf.fcf_rr_bmask) { 6902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6903 "2759 Failed allocate memory for FCF round " 6904 "robin failover bmask\n"); 6905 rc = -ENOMEM; 6906 goto out_remove_rpi_hdrs; 6907 } 6908 6909 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 6910 
sizeof(struct lpfc_hba_eq_hdl), 6911 GFP_KERNEL); 6912 if (!phba->sli4_hba.hba_eq_hdl) { 6913 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6914 "2572 Failed allocate memory for " 6915 "fast-path per-EQ handle array\n"); 6916 rc = -ENOMEM; 6917 goto out_free_fcf_rr_bmask; 6918 } 6919 6920 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 6921 sizeof(struct lpfc_vector_map_info), 6922 GFP_KERNEL); 6923 if (!phba->sli4_hba.cpu_map) { 6924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6925 "3327 Failed allocate memory for msi-x " 6926 "interrupt vector mapping\n"); 6927 rc = -ENOMEM; 6928 goto out_free_hba_eq_hdl; 6929 } 6930 6931 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 6932 if (!phba->sli4_hba.eq_info) { 6933 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6934 "3321 Failed allocation for per_cpu stats\n"); 6935 rc = -ENOMEM; 6936 goto out_free_hba_cpu_map; 6937 } 6938 /* 6939 * Enable sr-iov virtual functions if supported and configured 6940 * through the module parameter. 6941 */ 6942 if (phba->cfg_sriov_nr_virtfn > 0) { 6943 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6944 phba->cfg_sriov_nr_virtfn); 6945 if (rc) { 6946 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6947 "3020 Requested number of SR-IOV " 6948 "virtual functions (%d) is not " 6949 "supported\n", 6950 phba->cfg_sriov_nr_virtfn); 6951 phba->cfg_sriov_nr_virtfn = 0; 6952 } 6953 } 6954 6955 return 0; 6956 6957 out_free_hba_cpu_map: 6958 kfree(phba->sli4_hba.cpu_map); 6959 out_free_hba_eq_hdl: 6960 kfree(phba->sli4_hba.hba_eq_hdl); 6961 out_free_fcf_rr_bmask: 6962 kfree(phba->fcf.fcf_rr_bmask); 6963 out_remove_rpi_hdrs: 6964 lpfc_sli4_remove_rpi_hdrs(phba); 6965 out_free_active_sgl: 6966 lpfc_free_active_sgl(phba); 6967 out_destroy_cq_event_pool: 6968 lpfc_sli4_cq_event_pool_destroy(phba); 6969 out_free_cmd_rsp_buf: 6970 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 6971 phba->lpfc_cmd_rsp_buf_pool = NULL; 6972 out_free_sg_dma_buf: 6973 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 6974 phba->lpfc_sg_dma_buf_pool = NULL; 6975 out_free_bsmbx: 6976 lpfc_destroy_bootstrap_mbox(phba); 6977 out_free_mem: 6978 lpfc_mem_free(phba); 6979 return rc; 6980 } 6981 6982 /** 6983 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6984 * @phba: pointer to lpfc hba data structure. 6985 * 6986 * This routine is invoked to unset the driver internal resources set up 6987 * specific for supporting the SLI-4 HBA device it attached to. 6988 **/ 6989 static void 6990 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6991 { 6992 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6993 6994 free_percpu(phba->sli4_hba.eq_info); 6995 6996 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6997 kfree(phba->sli4_hba.cpu_map); 6998 phba->sli4_hba.num_possible_cpu = 0; 6999 phba->sli4_hba.num_present_cpu = 0; 7000 phba->sli4_hba.curr_disp_cpu = 0; 7001 cpumask_clear(&phba->sli4_hba.numa_mask); 7002 7003 /* Free memory allocated for fast-path work queue handles */ 7004 kfree(phba->sli4_hba.hba_eq_hdl); 7005 7006 /* Free the allocated rpi headers. 
*/ 7007 lpfc_sli4_remove_rpi_hdrs(phba); 7008 lpfc_sli4_remove_rpis(phba); 7009 7010 /* Free eligible FCF index bmask */ 7011 kfree(phba->fcf.fcf_rr_bmask); 7012 7013 /* Free the ELS sgl list */ 7014 lpfc_free_active_sgl(phba); 7015 lpfc_free_els_sgl_list(phba); 7016 lpfc_free_nvmet_sgl_list(phba); 7017 7018 /* Free the completion queue EQ event pool */ 7019 lpfc_sli4_cq_event_release_all(phba); 7020 lpfc_sli4_cq_event_pool_destroy(phba); 7021 7022 /* Release resource identifiers. */ 7023 lpfc_sli4_dealloc_resource_identifiers(phba); 7024 7025 /* Free the bsmbx region. */ 7026 lpfc_destroy_bootstrap_mbox(phba); 7027 7028 /* Free the SLI Layer memory with SLI4 HBAs */ 7029 lpfc_mem_free_all(phba); 7030 7031 /* Free the current connect table */ 7032 list_for_each_entry_safe(conn_entry, next_conn_entry, 7033 &phba->fcf_conn_rec_list, list) { 7034 list_del_init(&conn_entry->list); 7035 kfree(conn_entry); 7036 } 7037 7038 return; 7039 } 7040 7041 /** 7042 * lpfc_init_api_table_setup - Set up init api function jump table 7043 * @phba: The hba struct for which this call is being executed. 7044 * @dev_grp: The HBA PCI-Device group number. 7045 * 7046 * This routine sets up the device INIT interface API function jump table 7047 * in @phba struct. 7048 * 7049 * Returns: 0 - success, -ENODEV - failure. 7050 **/ 7051 int 7052 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7053 { 7054 phba->lpfc_hba_init_link = lpfc_hba_init_link; 7055 phba->lpfc_hba_down_link = lpfc_hba_down_link; 7056 phba->lpfc_selective_reset = lpfc_selective_reset; 7057 switch (dev_grp) { 7058 case LPFC_PCI_DEV_LP: 7059 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 7060 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 7061 phba->lpfc_stop_port = lpfc_stop_port_s3; 7062 break; 7063 case LPFC_PCI_DEV_OC: 7064 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 7065 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 7066 phba->lpfc_stop_port = lpfc_stop_port_s4; 7067 break; 7068 default: 7069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7070 "1431 Invalid HBA PCI-device group: 0x%x\n", 7071 dev_grp); 7072 return -ENODEV; 7073 break; 7074 } 7075 return 0; 7076 } 7077 7078 /** 7079 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 7080 * @phba: pointer to lpfc hba data structure. 7081 * 7082 * This routine is invoked to set up the driver internal resources after the 7083 * device specific resource setup to support the HBA device it attached to. 7084 * 7085 * Return codes 7086 * 0 - successful 7087 * other values - error 7088 **/ 7089 static int 7090 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 7091 { 7092 int error; 7093 7094 /* Startup the kernel thread for this host adapter. */ 7095 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7096 "lpfc_worker_%d", phba->brd_no); 7097 if (IS_ERR(phba->worker_thread)) { 7098 error = PTR_ERR(phba->worker_thread); 7099 return error; 7100 } 7101 7102 return 0; 7103 } 7104 7105 /** 7106 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 7107 * @phba: pointer to lpfc hba data structure. 7108 * 7109 * This routine is invoked to unset the driver internal resources set up after 7110 * the device specific resource setup for supporting the HBA device it 7111 * attached to. 
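 * It flushes and destroys the driver workqueue and stops the worker
 * thread that lpfc_setup_driver_resource_phase2() started.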
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	if (phba->worker_thread)
		kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCB entries to allocate.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}

/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}

/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
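 * The sgls are spliced off lpfc_els_sgl_list while holding both the
 * hbalock and the sgl_list_lock, then freed outside the locks.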
7227 **/ 7228 static void 7229 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7230 { 7231 LIST_HEAD(sglq_list); 7232 7233 /* Retrieve all els sgls from driver list */ 7234 spin_lock_irq(&phba->hbalock); 7235 spin_lock(&phba->sli4_hba.sgl_list_lock); 7236 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7237 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7238 spin_unlock_irq(&phba->hbalock); 7239 7240 /* Now free the sgl list */ 7241 lpfc_free_sgl_list(phba, &sglq_list); 7242 } 7243 7244 /** 7245 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7246 * @phba: pointer to lpfc hba data structure. 7247 * 7248 * This routine is invoked to free the driver's nvmet sgl list and memory. 7249 **/ 7250 static void 7251 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7252 { 7253 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7254 LIST_HEAD(sglq_list); 7255 7256 /* Retrieve all nvmet sgls from driver list */ 7257 spin_lock_irq(&phba->hbalock); 7258 spin_lock(&phba->sli4_hba.sgl_list_lock); 7259 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7260 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7261 spin_unlock_irq(&phba->hbalock); 7262 7263 /* Now free the sgl list */ 7264 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7265 list_del(&sglq_entry->list); 7266 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7267 kfree(sglq_entry); 7268 } 7269 7270 /* Update the nvmet_xri_cnt to reflect no current sgls. 7271 * The next initialization cycle sets the count and allocates 7272 * the sgls over again. 7273 */ 7274 phba->sli4_hba.nvmet_xri_cnt = 0; 7275 } 7276 7277 /** 7278 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7279 * @phba: pointer to lpfc hba data structure. 7280 * 7281 * This routine is invoked to allocate the driver's active sgl memory. 7282 * This array will hold the sglq_entry's for active IOs. 7283 **/ 7284 static int 7285 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7286 { 7287 int size; 7288 size = sizeof(struct lpfc_sglq *); 7289 size *= phba->sli4_hba.max_cfg_param.max_xri; 7290 7291 phba->sli4_hba.lpfc_sglq_active_list = 7292 kzalloc(size, GFP_KERNEL); 7293 if (!phba->sli4_hba.lpfc_sglq_active_list) 7294 return -ENOMEM; 7295 return 0; 7296 } 7297 7298 /** 7299 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7300 * @phba: pointer to lpfc hba data structure. 7301 * 7302 * This routine is invoked to walk through the array of active sglq entries 7303 * and free all of the resources. 7304 * This is just a place holder for now. 7305 **/ 7306 static void 7307 lpfc_free_active_sgl(struct lpfc_hba *phba) 7308 { 7309 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7310 } 7311 7312 /** 7313 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7314 * @phba: pointer to lpfc hba data structure. 7315 * 7316 * This routine is invoked to allocate and initizlize the driver's sgl 7317 * list and set up the sgl xritag tag array accordingly. 7318 * 7319 **/ 7320 static void 7321 lpfc_init_sgl_list(struct lpfc_hba *phba) 7322 { 7323 /* Initialize and populate the sglq list per host/VF. 
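 * Only the list heads and the els/io xri counters are set up here; the
 * actual sgl allocations are performed elsewhere in the init path.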
*/ 7324 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7325 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7326 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7327 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7328 7329 /* els xri-sgl book keeping */ 7330 phba->sli4_hba.els_xri_cnt = 0; 7331 7332 /* nvme xri-buffer book keeping */ 7333 phba->sli4_hba.io_xri_cnt = 0; 7334 } 7335 7336 /** 7337 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7338 * @phba: pointer to lpfc hba data structure. 7339 * 7340 * This routine is invoked to post rpi header templates to the 7341 * port for those SLI4 ports that do not support extents. This routine 7342 * posts a PAGE_SIZE memory region to the port to hold up to 7343 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 7344 * and should be called only when interrupts are disabled. 7345 * 7346 * Return codes 7347 * 0 - successful 7348 * -ERROR - otherwise. 7349 **/ 7350 int 7351 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7352 { 7353 int rc = 0; 7354 struct lpfc_rpi_hdr *rpi_hdr; 7355 7356 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7357 if (!phba->sli4_hba.rpi_hdrs_in_use) 7358 return rc; 7359 if (phba->sli4_hba.extents_in_use) 7360 return -EIO; 7361 7362 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7363 if (!rpi_hdr) { 7364 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7365 "0391 Error during rpi post operation\n"); 7366 lpfc_sli4_remove_rpis(phba); 7367 rc = -ENODEV; 7368 } 7369 7370 return rc; 7371 } 7372 7373 /** 7374 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7375 * @phba: pointer to lpfc hba data structure. 7376 * 7377 * This routine is invoked to allocate a single 4KB memory region to 7378 * support rpis and stores them in the phba. This single region 7379 * provides support for up to 64 rpis. The region is used globally 7380 * by the device. 7381 * 7382 * Returns: 7383 * A valid rpi hdr on success. 7384 * A NULL pointer on any failure. 7385 **/ 7386 struct lpfc_rpi_hdr * 7387 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7388 { 7389 uint16_t rpi_limit, curr_rpi_range; 7390 struct lpfc_dmabuf *dmabuf; 7391 struct lpfc_rpi_hdr *rpi_hdr; 7392 7393 /* 7394 * If the SLI4 port supports extents, posting the rpi header isn't 7395 * required. Set the expected maximum count and let the actual value 7396 * get set when extents are fully allocated. 7397 */ 7398 if (!phba->sli4_hba.rpi_hdrs_in_use) 7399 return NULL; 7400 if (phba->sli4_hba.extents_in_use) 7401 return NULL; 7402 7403 /* The limit on the logical index is just the max_rpi count. */ 7404 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7405 7406 spin_lock_irq(&phba->hbalock); 7407 /* 7408 * Establish the starting RPI in this header block. The starting 7409 * rpi is normalized to a zero base because the physical rpi is 7410 * port based. 7411 */ 7412 curr_rpi_range = phba->sli4_hba.next_rpi; 7413 spin_unlock_irq(&phba->hbalock); 7414 7415 /* Reached full RPI range */ 7416 if (curr_rpi_range == rpi_limit) 7417 return NULL; 7418 7419 /* 7420 * First allocate the protocol header region for the port. The 7421 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
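 * dma_alloc_coherent() is expected to return memory that already satisfies
 * this alignment, but the physical address is still checked with IS_ALIGNED()
 * below and the region is released if the check fails.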
7422 */ 7423 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7424 if (!dmabuf) 7425 return NULL; 7426 7427 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7428 LPFC_HDR_TEMPLATE_SIZE, 7429 &dmabuf->phys, GFP_KERNEL); 7430 if (!dmabuf->virt) { 7431 rpi_hdr = NULL; 7432 goto err_free_dmabuf; 7433 } 7434 7435 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7436 rpi_hdr = NULL; 7437 goto err_free_coherent; 7438 } 7439 7440 /* Save the rpi header data for cleanup later. */ 7441 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7442 if (!rpi_hdr) 7443 goto err_free_coherent; 7444 7445 rpi_hdr->dmabuf = dmabuf; 7446 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7447 rpi_hdr->page_count = 1; 7448 spin_lock_irq(&phba->hbalock); 7449 7450 /* The rpi_hdr stores the logical index only. */ 7451 rpi_hdr->start_rpi = curr_rpi_range; 7452 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7453 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7454 7455 spin_unlock_irq(&phba->hbalock); 7456 return rpi_hdr; 7457 7458 err_free_coherent: 7459 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7460 dmabuf->virt, dmabuf->phys); 7461 err_free_dmabuf: 7462 kfree(dmabuf); 7463 return NULL; 7464 } 7465 7466 /** 7467 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7468 * @phba: pointer to lpfc hba data structure. 7469 * 7470 * This routine is invoked to remove all memory resources allocated 7471 * to support rpis for SLI4 ports not supporting extents. This routine 7472 * presumes the caller has released all rpis consumed by fabric or port 7473 * logins and is prepared to have the header pages removed. 7474 **/ 7475 void 7476 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7477 { 7478 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7479 7480 if (!phba->sli4_hba.rpi_hdrs_in_use) 7481 goto exit; 7482 7483 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7484 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7485 list_del(&rpi_hdr->list); 7486 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7487 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7488 kfree(rpi_hdr->dmabuf); 7489 kfree(rpi_hdr); 7490 } 7491 exit: 7492 /* There are no rpis available to the port now. */ 7493 phba->sli4_hba.next_rpi = 0; 7494 } 7495 7496 /** 7497 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7498 * @pdev: pointer to pci device data structure. 7499 * 7500 * This routine is invoked to allocate the driver hba data structure for an 7501 * HBA device. If the allocation is successful, the phba reference to the 7502 * PCI device data structure is set. 
7503 * 7504 * Return codes 7505 * pointer to @phba - successful 7506 * NULL - error 7507 **/ 7508 static struct lpfc_hba * 7509 lpfc_hba_alloc(struct pci_dev *pdev) 7510 { 7511 struct lpfc_hba *phba; 7512 7513 /* Allocate memory for HBA structure */ 7514 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7515 if (!phba) { 7516 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7517 return NULL; 7518 } 7519 7520 /* Set reference to PCI device in HBA structure */ 7521 phba->pcidev = pdev; 7522 7523 /* Assign an unused board number */ 7524 phba->brd_no = lpfc_get_instance(); 7525 if (phba->brd_no < 0) { 7526 kfree(phba); 7527 return NULL; 7528 } 7529 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7530 7531 spin_lock_init(&phba->ct_ev_lock); 7532 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7533 7534 return phba; 7535 } 7536 7537 /** 7538 * lpfc_hba_free - Free driver hba data structure with a device. 7539 * @phba: pointer to lpfc hba data structure. 7540 * 7541 * This routine is invoked to free the driver hba data structure with an 7542 * HBA device. 7543 **/ 7544 static void 7545 lpfc_hba_free(struct lpfc_hba *phba) 7546 { 7547 if (phba->sli_rev == LPFC_SLI_REV4) 7548 kfree(phba->sli4_hba.hdwq); 7549 7550 /* Release the driver assigned board number */ 7551 idr_remove(&lpfc_hba_index, phba->brd_no); 7552 7553 /* Free memory allocated with sli3 rings */ 7554 kfree(phba->sli.sli3_ring); 7555 phba->sli.sli3_ring = NULL; 7556 7557 kfree(phba); 7558 return; 7559 } 7560 7561 /** 7562 * lpfc_create_shost - Create hba physical port with associated scsi host. 7563 * @phba: pointer to lpfc hba data structure. 7564 * 7565 * This routine is invoked to create HBA physical port and associate a SCSI 7566 * host with it. 7567 * 7568 * Return codes 7569 * 0 - successful 7570 * other values - error 7571 **/ 7572 static int 7573 lpfc_create_shost(struct lpfc_hba *phba) 7574 { 7575 struct lpfc_vport *vport; 7576 struct Scsi_Host *shost; 7577 7578 /* Initialize HBA FC structure */ 7579 phba->fc_edtov = FF_DEF_EDTOV; 7580 phba->fc_ratov = FF_DEF_RATOV; 7581 phba->fc_altov = FF_DEF_ALTOV; 7582 phba->fc_arbtov = FF_DEF_ARBTOV; 7583 7584 atomic_set(&phba->sdev_cnt, 0); 7585 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7586 if (!vport) 7587 return -ENODEV; 7588 7589 shost = lpfc_shost_from_vport(vport); 7590 phba->pport = vport; 7591 7592 if (phba->nvmet_support) { 7593 /* Only 1 vport (pport) will support NVME target */ 7594 phba->targetport = NULL; 7595 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7596 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 7597 "6076 NVME Target Found\n"); 7598 } 7599 7600 lpfc_debugfs_initialize(vport); 7601 /* Put reference to SCSI host to driver's device private data */ 7602 pci_set_drvdata(phba->pcidev, shost); 7603 7604 /* 7605 * At this point we are fully registered with PSA. In addition, 7606 * any initial discovery should be completed. 7607 */ 7608 vport->load_flag |= FC_ALLOW_FDMI; 7609 if (phba->cfg_enable_SmartSAN || 7610 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7611 7612 /* Setup appropriate attribute masks */ 7613 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7614 if (phba->cfg_enable_SmartSAN) 7615 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7616 else 7617 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7618 } 7619 return 0; 7620 } 7621 7622 /** 7623 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7624 * @phba: pointer to lpfc hba data structure. 
7625 * 7626 * This routine is invoked to destroy HBA physical port and the associated 7627 * SCSI host. 7628 **/ 7629 static void 7630 lpfc_destroy_shost(struct lpfc_hba *phba) 7631 { 7632 struct lpfc_vport *vport = phba->pport; 7633 7634 /* Destroy physical port that associated with the SCSI host */ 7635 destroy_port(vport); 7636 7637 return; 7638 } 7639 7640 /** 7641 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7642 * @phba: pointer to lpfc hba data structure. 7643 * @shost: the shost to be used to detect Block guard settings. 7644 * 7645 * This routine sets up the local Block guard protocol settings for @shost. 7646 * This routine also allocates memory for debugging bg buffers. 7647 **/ 7648 static void 7649 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7650 { 7651 uint32_t old_mask; 7652 uint32_t old_guard; 7653 7654 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7655 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7656 "1478 Registering BlockGuard with the " 7657 "SCSI layer\n"); 7658 7659 old_mask = phba->cfg_prot_mask; 7660 old_guard = phba->cfg_prot_guard; 7661 7662 /* Only allow supported values */ 7663 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7664 SHOST_DIX_TYPE0_PROTECTION | 7665 SHOST_DIX_TYPE1_PROTECTION); 7666 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7667 SHOST_DIX_GUARD_CRC); 7668 7669 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7670 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7671 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7672 7673 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7674 if ((old_mask != phba->cfg_prot_mask) || 7675 (old_guard != phba->cfg_prot_guard)) 7676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7677 "1475 Registering BlockGuard with the " 7678 "SCSI layer: mask %d guard %d\n", 7679 phba->cfg_prot_mask, 7680 phba->cfg_prot_guard); 7681 7682 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7683 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7684 } else 7685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7686 "1479 Not Registering BlockGuard with the SCSI " 7687 "layer, Bad protection parameters: %d %d\n", 7688 old_mask, old_guard); 7689 } 7690 } 7691 7692 /** 7693 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7694 * @phba: pointer to lpfc hba data structure. 7695 * 7696 * This routine is invoked to perform all the necessary post initialization 7697 * setup for the device. 7698 **/ 7699 static void 7700 lpfc_post_init_setup(struct lpfc_hba *phba) 7701 { 7702 struct Scsi_Host *shost; 7703 struct lpfc_adapter_event_header adapter_event; 7704 7705 /* Get the default values for Model Name and Description */ 7706 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7707 7708 /* 7709 * hba setup may have changed the hba_queue_depth so we need to 7710 * adjust the value of can_queue. 
7711 */ 7712 shost = pci_get_drvdata(phba->pcidev); 7713 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7714 7715 lpfc_host_attrib_init(shost); 7716 7717 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7718 spin_lock_irq(shost->host_lock); 7719 lpfc_poll_start_timer(phba); 7720 spin_unlock_irq(shost->host_lock); 7721 } 7722 7723 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7724 "0428 Perform SCSI scan\n"); 7725 /* Send board arrival event to upper layer */ 7726 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7727 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7728 fc_host_post_vendor_event(shost, fc_get_event_number(), 7729 sizeof(adapter_event), 7730 (char *) &adapter_event, 7731 LPFC_NL_VENDOR_ID); 7732 return; 7733 } 7734 7735 /** 7736 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7737 * @phba: pointer to lpfc hba data structure. 7738 * 7739 * This routine is invoked to set up the PCI device memory space for device 7740 * with SLI-3 interface spec. 7741 * 7742 * Return codes 7743 * 0 - successful 7744 * other values - error 7745 **/ 7746 static int 7747 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7748 { 7749 struct pci_dev *pdev = phba->pcidev; 7750 unsigned long bar0map_len, bar2map_len; 7751 int i, hbq_count; 7752 void *ptr; 7753 int error; 7754 7755 if (!pdev) 7756 return -ENODEV; 7757 7758 /* Set the device DMA mask size */ 7759 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7760 if (error) 7761 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7762 if (error) 7763 return error; 7764 error = -ENODEV; 7765 7766 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7767 * required by each mapping. 7768 */ 7769 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7770 bar0map_len = pci_resource_len(pdev, 0); 7771 7772 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7773 bar2map_len = pci_resource_len(pdev, 2); 7774 7775 /* Map HBA SLIM to a kernel virtual address. */ 7776 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7777 if (!phba->slim_memmap_p) { 7778 dev_printk(KERN_ERR, &pdev->dev, 7779 "ioremap failed for SLIM memory.\n"); 7780 goto out; 7781 } 7782 7783 /* Map HBA Control Registers to a kernel virtual address. 
*/ 7784 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7785 if (!phba->ctrl_regs_memmap_p) { 7786 dev_printk(KERN_ERR, &pdev->dev, 7787 "ioremap failed for HBA control registers.\n"); 7788 goto out_iounmap_slim; 7789 } 7790 7791 /* Allocate memory for SLI-2 structures */ 7792 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7793 &phba->slim2p.phys, GFP_KERNEL); 7794 if (!phba->slim2p.virt) 7795 goto out_iounmap; 7796 7797 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7798 phba->mbox_ext = (phba->slim2p.virt + 7799 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7800 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7801 phba->IOCBs = (phba->slim2p.virt + 7802 offsetof(struct lpfc_sli2_slim, IOCBs)); 7803 7804 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7805 lpfc_sli_hbq_size(), 7806 &phba->hbqslimp.phys, 7807 GFP_KERNEL); 7808 if (!phba->hbqslimp.virt) 7809 goto out_free_slim; 7810 7811 hbq_count = lpfc_sli_hbq_count(); 7812 ptr = phba->hbqslimp.virt; 7813 for (i = 0; i < hbq_count; ++i) { 7814 phba->hbqs[i].hbq_virt = ptr; 7815 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7816 ptr += (lpfc_hbq_defs[i]->entry_count * 7817 sizeof(struct lpfc_hbq_entry)); 7818 } 7819 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7820 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7821 7822 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7823 7824 phba->MBslimaddr = phba->slim_memmap_p; 7825 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7826 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7827 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7828 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7829 7830 return 0; 7831 7832 out_free_slim: 7833 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7834 phba->slim2p.virt, phba->slim2p.phys); 7835 out_iounmap: 7836 iounmap(phba->ctrl_regs_memmap_p); 7837 out_iounmap_slim: 7838 iounmap(phba->slim_memmap_p); 7839 out: 7840 return error; 7841 } 7842 7843 /** 7844 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7845 * @phba: pointer to lpfc hba data structure. 7846 * 7847 * This routine is invoked to unset the PCI device memory space for device 7848 * with SLI-3 interface spec. 7849 **/ 7850 static void 7851 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7852 { 7853 struct pci_dev *pdev; 7854 7855 /* Obtain PCI device reference */ 7856 if (!phba->pcidev) 7857 return; 7858 else 7859 pdev = phba->pcidev; 7860 7861 /* Free coherent DMA memory allocated */ 7862 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7863 phba->hbqslimp.virt, phba->hbqslimp.phys); 7864 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7865 phba->slim2p.virt, phba->slim2p.phys); 7866 7867 /* I/O memory unmap */ 7868 iounmap(phba->ctrl_regs_memmap_p); 7869 iounmap(phba->slim_memmap_p); 7870 7871 return; 7872 } 7873 7874 /** 7875 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7876 * @phba: pointer to lpfc hba data structure. 7877 * 7878 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7879 * done and check status. 7880 * 7881 * Return 0 if successful, otherwise -ENODEV. 
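 * The POST wait is a polling loop of up to 3000 passes with a 10 ms sleep
 * per pass, which is where the roughly 30 second budget noted in the code
 * comes from.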
7882 **/ 7883 int 7884 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 7885 { 7886 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 7887 struct lpfc_register reg_data; 7888 int i, port_error = 0; 7889 uint32_t if_type; 7890 7891 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 7892 memset(®_data, 0, sizeof(reg_data)); 7893 if (!phba->sli4_hba.PSMPHRregaddr) 7894 return -ENODEV; 7895 7896 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 7897 for (i = 0; i < 3000; i++) { 7898 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 7899 &portsmphr_reg.word0) || 7900 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 7901 /* Port has a fatal POST error, break out */ 7902 port_error = -ENODEV; 7903 break; 7904 } 7905 if (LPFC_POST_STAGE_PORT_READY == 7906 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 7907 break; 7908 msleep(10); 7909 } 7910 7911 /* 7912 * If there was a port error during POST, then don't proceed with 7913 * other register reads as the data may not be valid. Just exit. 7914 */ 7915 if (port_error) { 7916 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7917 "1408 Port Failed POST - portsmphr=0x%x, " 7918 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 7919 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 7920 portsmphr_reg.word0, 7921 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 7922 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 7923 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 7924 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 7925 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 7926 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 7927 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 7928 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 7929 } else { 7930 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7931 "2534 Device Info: SLIFamily=0x%x, " 7932 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 7933 "SLIHint_2=0x%x, FT=0x%x\n", 7934 bf_get(lpfc_sli_intf_sli_family, 7935 &phba->sli4_hba.sli_intf), 7936 bf_get(lpfc_sli_intf_slirev, 7937 &phba->sli4_hba.sli_intf), 7938 bf_get(lpfc_sli_intf_if_type, 7939 &phba->sli4_hba.sli_intf), 7940 bf_get(lpfc_sli_intf_sli_hint1, 7941 &phba->sli4_hba.sli_intf), 7942 bf_get(lpfc_sli_intf_sli_hint2, 7943 &phba->sli4_hba.sli_intf), 7944 bf_get(lpfc_sli_intf_func_type, 7945 &phba->sli4_hba.sli_intf)); 7946 /* 7947 * Check for other Port errors during the initialization 7948 * process. Fail the load if the port did not come up 7949 * correctly. 7950 */ 7951 if_type = bf_get(lpfc_sli_intf_if_type, 7952 &phba->sli4_hba.sli_intf); 7953 switch (if_type) { 7954 case LPFC_SLI_INTF_IF_TYPE_0: 7955 phba->sli4_hba.ue_mask_lo = 7956 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 7957 phba->sli4_hba.ue_mask_hi = 7958 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 7959 uerrlo_reg.word0 = 7960 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 7961 uerrhi_reg.word0 = 7962 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 7963 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 7964 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 7965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7966 "1422 Unrecoverable Error " 7967 "Detected during POST " 7968 "uerr_lo_reg=0x%x, " 7969 "uerr_hi_reg=0x%x, " 7970 "ue_mask_lo_reg=0x%x, " 7971 "ue_mask_hi_reg=0x%x\n", 7972 uerrlo_reg.word0, 7973 uerrhi_reg.word0, 7974 phba->sli4_hba.ue_mask_lo, 7975 phba->sli4_hba.ue_mask_hi); 7976 port_error = -ENODEV; 7977 } 7978 break; 7979 case LPFC_SLI_INTF_IF_TYPE_2: 7980 case LPFC_SLI_INTF_IF_TYPE_6: 7981 /* Final checks. The port status should be clean. 
*/ 7982 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 7983 &reg_data.word0) || 7984 (bf_get(lpfc_sliport_status_err, &reg_data) && 7985 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 7986 phba->work_status[0] = 7987 readl(phba->sli4_hba.u.if_type2. 7988 ERR1regaddr); 7989 phba->work_status[1] = 7990 readl(phba->sli4_hba.u.if_type2. 7991 ERR2regaddr); 7992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7993 "2888 Unrecoverable port error " 7994 "following POST: port status reg " 7995 "0x%x, port_smphr reg 0x%x, " 7996 "error 1=0x%x, error 2=0x%x\n", 7997 reg_data.word0, 7998 portsmphr_reg.word0, 7999 phba->work_status[0], 8000 phba->work_status[1]); 8001 port_error = -ENODEV; 8002 } 8003 break; 8004 case LPFC_SLI_INTF_IF_TYPE_1: 8005 default: 8006 break; 8007 } 8008 } 8009 return port_error; 8010 } 8011 8012 /** 8013 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 8014 * @phba: pointer to lpfc hba data structure. 8015 * @if_type: The SLI4 interface type getting configured. 8016 * 8017 * This routine is invoked to set up SLI4 BAR0 PCI config space register 8018 * memory map. 8019 **/ 8020 static void 8021 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8022 { 8023 switch (if_type) { 8024 case LPFC_SLI_INTF_IF_TYPE_0: 8025 phba->sli4_hba.u.if_type0.UERRLOregaddr = 8026 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 8027 phba->sli4_hba.u.if_type0.UERRHIregaddr = 8028 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 8029 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 8030 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 8031 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 8032 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 8033 phba->sli4_hba.SLIINTFregaddr = 8034 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8035 break; 8036 case LPFC_SLI_INTF_IF_TYPE_2: 8037 phba->sli4_hba.u.if_type2.EQDregaddr = 8038 phba->sli4_hba.conf_regs_memmap_p + 8039 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8040 phba->sli4_hba.u.if_type2.ERR1regaddr = 8041 phba->sli4_hba.conf_regs_memmap_p + 8042 LPFC_CTL_PORT_ER1_OFFSET; 8043 phba->sli4_hba.u.if_type2.ERR2regaddr = 8044 phba->sli4_hba.conf_regs_memmap_p + 8045 LPFC_CTL_PORT_ER2_OFFSET; 8046 phba->sli4_hba.u.if_type2.CTRLregaddr = 8047 phba->sli4_hba.conf_regs_memmap_p + 8048 LPFC_CTL_PORT_CTL_OFFSET; 8049 phba->sli4_hba.u.if_type2.STATUSregaddr = 8050 phba->sli4_hba.conf_regs_memmap_p + 8051 LPFC_CTL_PORT_STA_OFFSET; 8052 phba->sli4_hba.SLIINTFregaddr = 8053 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8054 phba->sli4_hba.PSMPHRregaddr = 8055 phba->sli4_hba.conf_regs_memmap_p + 8056 LPFC_CTL_PORT_SEM_OFFSET; 8057 phba->sli4_hba.RQDBregaddr = 8058 phba->sli4_hba.conf_regs_memmap_p + 8059 LPFC_ULP0_RQ_DOORBELL; 8060 phba->sli4_hba.WQDBregaddr = 8061 phba->sli4_hba.conf_regs_memmap_p + 8062 LPFC_ULP0_WQ_DOORBELL; 8063 phba->sli4_hba.CQDBregaddr = 8064 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 8065 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8066 phba->sli4_hba.MQDBregaddr = 8067 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 8068 phba->sli4_hba.BMBXregaddr = 8069 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8070 break; 8071 case LPFC_SLI_INTF_IF_TYPE_6: 8072 phba->sli4_hba.u.if_type2.EQDregaddr = 8073 phba->sli4_hba.conf_regs_memmap_p + 8074 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8075 phba->sli4_hba.u.if_type2.ERR1regaddr = 8076 phba->sli4_hba.conf_regs_memmap_p + 8077 LPFC_CTL_PORT_ER1_OFFSET; 8078 phba->sli4_hba.u.if_type2.ERR2regaddr = 8079 phba->sli4_hba.conf_regs_memmap_p
+ 8080 LPFC_CTL_PORT_ER2_OFFSET; 8081 phba->sli4_hba.u.if_type2.CTRLregaddr = 8082 phba->sli4_hba.conf_regs_memmap_p + 8083 LPFC_CTL_PORT_CTL_OFFSET; 8084 phba->sli4_hba.u.if_type2.STATUSregaddr = 8085 phba->sli4_hba.conf_regs_memmap_p + 8086 LPFC_CTL_PORT_STA_OFFSET; 8087 phba->sli4_hba.PSMPHRregaddr = 8088 phba->sli4_hba.conf_regs_memmap_p + 8089 LPFC_CTL_PORT_SEM_OFFSET; 8090 phba->sli4_hba.BMBXregaddr = 8091 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8092 break; 8093 case LPFC_SLI_INTF_IF_TYPE_1: 8094 default: 8095 dev_printk(KERN_ERR, &phba->pcidev->dev, 8096 "FATAL - unsupported SLI4 interface type - %d\n", 8097 if_type); 8098 break; 8099 } 8100 } 8101 8102 /** 8103 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 8104 * @phba: pointer to lpfc hba data structure. 8105 * 8106 * This routine is invoked to set up SLI4 BAR1 register memory map. 8107 **/ 8108 static void 8109 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8110 { 8111 switch (if_type) { 8112 case LPFC_SLI_INTF_IF_TYPE_0: 8113 phba->sli4_hba.PSMPHRregaddr = 8114 phba->sli4_hba.ctrl_regs_memmap_p + 8115 LPFC_SLIPORT_IF0_SMPHR; 8116 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8117 LPFC_HST_ISR0; 8118 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8119 LPFC_HST_IMR0; 8120 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8121 LPFC_HST_ISCR0; 8122 break; 8123 case LPFC_SLI_INTF_IF_TYPE_6: 8124 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8125 LPFC_IF6_RQ_DOORBELL; 8126 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8127 LPFC_IF6_WQ_DOORBELL; 8128 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8129 LPFC_IF6_CQ_DOORBELL; 8130 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8131 LPFC_IF6_EQ_DOORBELL; 8132 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8133 LPFC_IF6_MQ_DOORBELL; 8134 break; 8135 case LPFC_SLI_INTF_IF_TYPE_2: 8136 case LPFC_SLI_INTF_IF_TYPE_1: 8137 default: 8138 dev_err(&phba->pcidev->dev, 8139 "FATAL - unsupported SLI4 interface type - %d\n", 8140 if_type); 8141 break; 8142 } 8143 } 8144 8145 /** 8146 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 8147 * @phba: pointer to lpfc hba data structure. 8148 * @vf: virtual function number 8149 * 8150 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 8151 * based on the given virtual function number, @vf. 8152 * 8153 * Return 0 if successful, otherwise -ENODEV.
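 * Each virtual function owns one doorbell page, so every address below is
 * offset by (vf * LPFC_VFR_PAGE_SIZE) from the start of the BAR2 mapping.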
8154 **/ 8155 static int 8156 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 8157 { 8158 if (vf > LPFC_VIR_FUNC_MAX) 8159 return -ENODEV; 8160 8161 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8162 vf * LPFC_VFR_PAGE_SIZE + 8163 LPFC_ULP0_RQ_DOORBELL); 8164 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8165 vf * LPFC_VFR_PAGE_SIZE + 8166 LPFC_ULP0_WQ_DOORBELL); 8167 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8168 vf * LPFC_VFR_PAGE_SIZE + 8169 LPFC_EQCQ_DOORBELL); 8170 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8171 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8172 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 8173 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8174 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 8175 return 0; 8176 } 8177 8178 /** 8179 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 8180 * @phba: pointer to lpfc hba data structure. 8181 * 8182 * This routine is invoked to create the bootstrap mailbox 8183 * region consistent with the SLI-4 interface spec. This 8184 * routine allocates all memory necessary to communicate 8185 * mailbox commands to the port and sets up all alignment 8186 * needs. No locks are expected to be held when calling 8187 * this routine. 8188 * 8189 * Return codes 8190 * 0 - successful 8191 * -ENOMEM - could not allocate memory. 8192 **/ 8193 static int 8194 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 8195 { 8196 uint32_t bmbx_size; 8197 struct lpfc_dmabuf *dmabuf; 8198 struct dma_address *dma_address; 8199 uint32_t pa_addr; 8200 uint64_t phys_addr; 8201 8202 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8203 if (!dmabuf) 8204 return -ENOMEM; 8205 8206 /* 8207 * The bootstrap mailbox region is comprised of 2 parts 8208 * plus an alignment restriction of 16 bytes. 8209 */ 8210 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 8211 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 8212 &dmabuf->phys, GFP_KERNEL); 8213 if (!dmabuf->virt) { 8214 kfree(dmabuf); 8215 return -ENOMEM; 8216 } 8217 8218 /* 8219 * Initialize the bootstrap mailbox pointers now so that the register 8220 * operations are simple later. The mailbox dma address is required 8221 * to be 16-byte aligned. Also align the virtual memory as each 8222 * mailbox is copied into the bmbx mailbox region before issuing the 8223 * command to the port. 8224 */ 8225 phba->sli4_hba.bmbx.dmabuf = dmabuf; 8226 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 8227 8228 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 8229 LPFC_ALIGN_16_BYTE); 8230 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 8231 LPFC_ALIGN_16_BYTE); 8232 8233 /* 8234 * Set the high and low physical addresses now. The SLI4 alignment 8235 * requirement is 16 bytes and the mailbox is posted to the port 8236 * as two 30-bit addresses. The other data is a bit marking whether 8237 * the 30-bit address is the high or low address. 8238 * Upcast bmbx aphys to 64bits so shift instruction compiles 8239 * clean on 32 bit machines.
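 * As an illustration of the split performed below:
 *   addr_hi = (((aphys >> 34) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_HI
 *   addr_lo = (((aphys >> 4) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_LO
 * i.e. physical address bits 63:34 travel in addr_hi and bits 33:4 in
 * addr_lo, with the marker in the low-order bits identifying which half a
 * given value carries.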
8240 */ 8241 dma_address = &phba->sli4_hba.bmbx.dma_address; 8242 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 8243 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 8244 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 8245 LPFC_BMBX_BIT1_ADDR_HI); 8246 8247 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 8248 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 8249 LPFC_BMBX_BIT1_ADDR_LO); 8250 return 0; 8251 } 8252 8253 /** 8254 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 8255 * @phba: pointer to lpfc hba data structure. 8256 * 8257 * This routine is invoked to teardown the bootstrap mailbox 8258 * region and release all host resources. This routine requires 8259 * the caller to ensure all mailbox commands recovered, no 8260 * additional mailbox comands are sent, and interrupts are disabled 8261 * before calling this routine. 8262 * 8263 **/ 8264 static void 8265 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 8266 { 8267 dma_free_coherent(&phba->pcidev->dev, 8268 phba->sli4_hba.bmbx.bmbx_size, 8269 phba->sli4_hba.bmbx.dmabuf->virt, 8270 phba->sli4_hba.bmbx.dmabuf->phys); 8271 8272 kfree(phba->sli4_hba.bmbx.dmabuf); 8273 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 8274 } 8275 8276 static const char * const lpfc_topo_to_str[] = { 8277 "Loop then P2P", 8278 "Loopback", 8279 "P2P Only", 8280 "Unsupported", 8281 "Loop Only", 8282 "Unsupported", 8283 "P2P then Loop", 8284 }; 8285 8286 /** 8287 * lpfc_map_topology - Map the topology read from READ_CONFIG 8288 * @phba: pointer to lpfc hba data structure. 8289 * @rdconf: pointer to read config data 8290 * 8291 * This routine is invoked to map the topology values as read 8292 * from the read config mailbox command. If the persistent 8293 * topology feature is supported, the firmware will provide the 8294 * saved topology information to be used in INIT_LINK 8295 * 8296 **/ 8297 #define LINK_FLAGS_DEF 0x0 8298 #define LINK_FLAGS_P2P 0x1 8299 #define LINK_FLAGS_LOOP 0x2 8300 static void 8301 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 8302 { 8303 u8 ptv, tf, pt; 8304 8305 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 8306 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 8307 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 8308 8309 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8310 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 8311 ptv, tf, pt); 8312 if (!ptv) { 8313 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8314 "2019 FW does not support persistent topology " 8315 "Using driver parameter defined value [%s]", 8316 lpfc_topo_to_str[phba->cfg_topology]); 8317 return; 8318 } 8319 /* FW supports persistent topology - override module parameter value */ 8320 phba->hba_flag |= HBA_PERSISTENT_TOPO; 8321 switch (phba->pcidev->device) { 8322 case PCI_DEVICE_ID_LANCER_G7_FC: 8323 if (tf || (pt == LINK_FLAGS_LOOP)) { 8324 /* Invalid values from FW - use driver params */ 8325 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 8326 } else { 8327 /* Prism only supports PT2PT topology */ 8328 phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT; 8329 } 8330 break; 8331 case PCI_DEVICE_ID_LANCER_G6_FC: 8332 if (!tf) { 8333 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 8334 ? FLAGS_TOPOLOGY_MODE_LOOP 8335 : FLAGS_TOPOLOGY_MODE_PT_PT); 8336 } else { 8337 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 8338 } 8339 break; 8340 default: /* G5 */ 8341 if (tf) { 8342 /* If topology failover set - pt is '0' or '1' */ 8343 phba->cfg_topology = (pt ? 
FLAGS_TOPOLOGY_MODE_PT_LOOP : 8344 FLAGS_TOPOLOGY_MODE_LOOP_PT); 8345 } else { 8346 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 8347 ? FLAGS_TOPOLOGY_MODE_PT_PT 8348 : FLAGS_TOPOLOGY_MODE_LOOP); 8349 } 8350 break; 8351 } 8352 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 8353 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8354 "2020 Using persistent topology value [%s]", 8355 lpfc_topo_to_str[phba->cfg_topology]); 8356 } else { 8357 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8358 "2021 Invalid topology values from FW " 8359 "Using driver parameter defined value [%s]", 8360 lpfc_topo_to_str[phba->cfg_topology]); 8361 } 8362 } 8363 8364 /** 8365 * lpfc_sli4_read_config - Get the config parameters. 8366 * @phba: pointer to lpfc hba data structure. 8367 * 8368 * This routine is invoked to read the configuration parameters from the HBA. 8369 * The configuration parameters are used to set the base and maximum values 8370 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 8371 * allocation for the port. 8372 * 8373 * Return codes 8374 * 0 - successful 8375 * -ENOMEM - No available memory 8376 * -EIO - The mailbox failed to complete successfully. 8377 **/ 8378 int 8379 lpfc_sli4_read_config(struct lpfc_hba *phba) 8380 { 8381 LPFC_MBOXQ_t *pmb; 8382 struct lpfc_mbx_read_config *rd_config; 8383 union lpfc_sli4_cfg_shdr *shdr; 8384 uint32_t shdr_status, shdr_add_status; 8385 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8386 struct lpfc_rsrc_desc_fcfcoe *desc; 8387 char *pdesc_0; 8388 uint16_t forced_link_speed; 8389 uint32_t if_type, qmin; 8390 int length, i, rc = 0, rc2; 8391 8392 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8393 if (!pmb) { 8394 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8395 "2011 Unable to allocate memory for issuing " 8396 "SLI_CONFIG_SPECIAL mailbox command\n"); 8397 return -ENOMEM; 8398 } 8399 8400 lpfc_read_config(phba, pmb); 8401 8402 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8403 if (rc != MBX_SUCCESS) { 8404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8405 "2012 Mailbox failed , mbxCmd x%x " 8406 "READ_CONFIG, mbxStatus x%x\n", 8407 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8408 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8409 rc = -EIO; 8410 } else { 8411 rd_config = &pmb->u.mqe.un.rd_config; 8412 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8413 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8414 phba->sli4_hba.lnk_info.lnk_tp = 8415 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8416 phba->sli4_hba.lnk_info.lnk_no = 8417 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8419 "3081 lnk_type:%d, lnk_numb:%d\n", 8420 phba->sli4_hba.lnk_info.lnk_tp, 8421 phba->sli4_hba.lnk_info.lnk_no); 8422 } else 8423 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8424 "3082 Mailbox (x%x) returned ldv:x0\n", 8425 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8426 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8427 phba->bbcredit_support = 1; 8428 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8429 } 8430 8431 phba->sli4_hba.conf_trunk = 8432 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8433 phba->sli4_hba.extents_in_use = 8434 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8435 phba->sli4_hba.max_cfg_param.max_xri = 8436 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8437 /* Reduce resource usage in kdump environment */ 8438 if (is_kdump_kernel() && 8439 phba->sli4_hba.max_cfg_param.max_xri > 512) 8440 phba->sli4_hba.max_cfg_param.max_xri = 512; 8441 phba->sli4_hba.max_cfg_param.xri_base = 
8442 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8443 phba->sli4_hba.max_cfg_param.max_vpi = 8444 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 8445 /* Limit the max we support */ 8446 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 8447 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 8448 phba->sli4_hba.max_cfg_param.vpi_base = 8449 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 8450 phba->sli4_hba.max_cfg_param.max_rpi = 8451 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 8452 phba->sli4_hba.max_cfg_param.rpi_base = 8453 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 8454 phba->sli4_hba.max_cfg_param.max_vfi = 8455 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 8456 phba->sli4_hba.max_cfg_param.vfi_base = 8457 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 8458 phba->sli4_hba.max_cfg_param.max_fcfi = 8459 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 8460 phba->sli4_hba.max_cfg_param.max_eq = 8461 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 8462 phba->sli4_hba.max_cfg_param.max_rq = 8463 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 8464 phba->sli4_hba.max_cfg_param.max_wq = 8465 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 8466 phba->sli4_hba.max_cfg_param.max_cq = 8467 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 8468 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 8469 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 8470 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 8471 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 8472 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 8473 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 8474 phba->max_vports = phba->max_vpi; 8475 lpfc_map_topology(phba, rd_config); 8476 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8477 "2003 cfg params Extents? %d " 8478 "XRI(B:%d M:%d), " 8479 "VPI(B:%d M:%d) " 8480 "VFI(B:%d M:%d) " 8481 "RPI(B:%d M:%d) " 8482 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 8483 phba->sli4_hba.extents_in_use, 8484 phba->sli4_hba.max_cfg_param.xri_base, 8485 phba->sli4_hba.max_cfg_param.max_xri, 8486 phba->sli4_hba.max_cfg_param.vpi_base, 8487 phba->sli4_hba.max_cfg_param.max_vpi, 8488 phba->sli4_hba.max_cfg_param.vfi_base, 8489 phba->sli4_hba.max_cfg_param.max_vfi, 8490 phba->sli4_hba.max_cfg_param.rpi_base, 8491 phba->sli4_hba.max_cfg_param.max_rpi, 8492 phba->sli4_hba.max_cfg_param.max_fcfi, 8493 phba->sli4_hba.max_cfg_param.max_eq, 8494 phba->sli4_hba.max_cfg_param.max_cq, 8495 phba->sli4_hba.max_cfg_param.max_wq, 8496 phba->sli4_hba.max_cfg_param.max_rq); 8497 8498 /* 8499 * Calculate queue resources based on how 8500 * many WQ/CQ/EQs are available. 8501 */ 8502 qmin = phba->sli4_hba.max_cfg_param.max_wq; 8503 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 8504 qmin = phba->sli4_hba.max_cfg_param.max_cq; 8505 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 8506 qmin = phba->sli4_hba.max_cfg_param.max_eq; 8507 /* 8508 * Whats left after this can go toward NVME / FCP. 8509 * The minus 4 accounts for ELS, NVME LS, MBOX 8510 * plus one extra. When configured for 8511 * NVMET, FCP io channel WQs are not created. 
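 * As an illustration only: a port reporting 128 WQs, 128 CQs and 64 EQs
 * gives qmin = 64 - 4 = 60, and cfg_irq_chann / cfg_hdw_queue are each
 * clamped to that value below if they were configured larger.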
8512 */ 8513 qmin -= 4; 8514 8515 /* Check to see if there is enough for NVME */ 8516 if ((phba->cfg_irq_chann > qmin) || 8517 (phba->cfg_hdw_queue > qmin)) { 8518 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8519 "2005 Reducing Queues: " 8520 "WQ %d CQ %d EQ %d: min %d: " 8521 "IRQ %d HDWQ %d\n", 8522 phba->sli4_hba.max_cfg_param.max_wq, 8523 phba->sli4_hba.max_cfg_param.max_cq, 8524 phba->sli4_hba.max_cfg_param.max_eq, 8525 qmin, phba->cfg_irq_chann, 8526 phba->cfg_hdw_queue); 8527 8528 if (phba->cfg_irq_chann > qmin) 8529 phba->cfg_irq_chann = qmin; 8530 if (phba->cfg_hdw_queue > qmin) 8531 phba->cfg_hdw_queue = qmin; 8532 } 8533 } 8534 8535 if (rc) 8536 goto read_cfg_out; 8537 8538 /* Update link speed if forced link speed is supported */ 8539 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8540 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8541 forced_link_speed = 8542 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8543 if (forced_link_speed) { 8544 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8545 8546 switch (forced_link_speed) { 8547 case LINK_SPEED_1G: 8548 phba->cfg_link_speed = 8549 LPFC_USER_LINK_SPEED_1G; 8550 break; 8551 case LINK_SPEED_2G: 8552 phba->cfg_link_speed = 8553 LPFC_USER_LINK_SPEED_2G; 8554 break; 8555 case LINK_SPEED_4G: 8556 phba->cfg_link_speed = 8557 LPFC_USER_LINK_SPEED_4G; 8558 break; 8559 case LINK_SPEED_8G: 8560 phba->cfg_link_speed = 8561 LPFC_USER_LINK_SPEED_8G; 8562 break; 8563 case LINK_SPEED_10G: 8564 phba->cfg_link_speed = 8565 LPFC_USER_LINK_SPEED_10G; 8566 break; 8567 case LINK_SPEED_16G: 8568 phba->cfg_link_speed = 8569 LPFC_USER_LINK_SPEED_16G; 8570 break; 8571 case LINK_SPEED_32G: 8572 phba->cfg_link_speed = 8573 LPFC_USER_LINK_SPEED_32G; 8574 break; 8575 case LINK_SPEED_64G: 8576 phba->cfg_link_speed = 8577 LPFC_USER_LINK_SPEED_64G; 8578 break; 8579 case 0xffff: 8580 phba->cfg_link_speed = 8581 LPFC_USER_LINK_SPEED_AUTO; 8582 break; 8583 default: 8584 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8585 "0047 Unrecognized link " 8586 "speed : %d\n", 8587 forced_link_speed); 8588 phba->cfg_link_speed = 8589 LPFC_USER_LINK_SPEED_AUTO; 8590 } 8591 } 8592 } 8593 8594 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8595 length = phba->sli4_hba.max_cfg_param.max_xri - 8596 lpfc_sli4_get_els_iocb_cnt(phba); 8597 if (phba->cfg_hba_queue_depth > length) { 8598 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8599 "3361 HBA queue depth changed from %d to %d\n", 8600 phba->cfg_hba_queue_depth, length); 8601 phba->cfg_hba_queue_depth = length; 8602 } 8603 8604 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8605 LPFC_SLI_INTF_IF_TYPE_2) 8606 goto read_cfg_out; 8607 8608 /* get the pf# and vf# for SLI4 if_type 2 port */ 8609 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8610 sizeof(struct lpfc_sli4_cfg_mhdr)); 8611 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8612 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8613 length, LPFC_SLI4_MBX_EMBED); 8614 8615 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8616 shdr = (union lpfc_sli4_cfg_shdr *) 8617 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8618 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8619 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8620 if (rc2 || shdr_status || shdr_add_status) { 8621 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8622 "3026 Mailbox failed , mbxCmd x%x " 8623 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8624 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8625 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8626 goto read_cfg_out; 8627 } 8628 8629 /* search 
for fc_fcoe resrouce descriptor */ 8630 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8631 8632 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8633 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8634 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8635 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8636 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8637 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8638 goto read_cfg_out; 8639 8640 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8641 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8642 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8643 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8644 phba->sli4_hba.iov.pf_number = 8645 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8646 phba->sli4_hba.iov.vf_number = 8647 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8648 break; 8649 } 8650 } 8651 8652 if (i < LPFC_RSRC_DESC_MAX_NUM) 8653 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8654 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8655 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8656 phba->sli4_hba.iov.vf_number); 8657 else 8658 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8659 "3028 GET_FUNCTION_CONFIG: failed to find " 8660 "Resource Descriptor:x%x\n", 8661 LPFC_RSRC_DESC_TYPE_FCFCOE); 8662 8663 read_cfg_out: 8664 mempool_free(pmb, phba->mbox_mem_pool); 8665 return rc; 8666 } 8667 8668 /** 8669 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8670 * @phba: pointer to lpfc hba data structure. 8671 * 8672 * This routine is invoked to setup the port-side endian order when 8673 * the port if_type is 0. This routine has no function for other 8674 * if_types. 8675 * 8676 * Return codes 8677 * 0 - successful 8678 * -ENOMEM - No available memory 8679 * -EIO - The mailbox failed to complete successfully. 8680 **/ 8681 static int 8682 lpfc_setup_endian_order(struct lpfc_hba *phba) 8683 { 8684 LPFC_MBOXQ_t *mboxq; 8685 uint32_t if_type, rc = 0; 8686 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8687 HOST_ENDIAN_HIGH_WORD1}; 8688 8689 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8690 switch (if_type) { 8691 case LPFC_SLI_INTF_IF_TYPE_0: 8692 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8693 GFP_KERNEL); 8694 if (!mboxq) { 8695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8696 "0492 Unable to allocate memory for " 8697 "issuing SLI_CONFIG_SPECIAL mailbox " 8698 "command\n"); 8699 return -ENOMEM; 8700 } 8701 8702 /* 8703 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8704 * two words to contain special data values and no other data. 8705 */ 8706 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8707 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8708 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8709 if (rc != MBX_SUCCESS) { 8710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8711 "0493 SLI_CONFIG_SPECIAL mailbox " 8712 "failed with status x%x\n", 8713 rc); 8714 rc = -EIO; 8715 } 8716 mempool_free(mboxq, phba->mbox_mem_pool); 8717 break; 8718 case LPFC_SLI_INTF_IF_TYPE_6: 8719 case LPFC_SLI_INTF_IF_TYPE_2: 8720 case LPFC_SLI_INTF_IF_TYPE_1: 8721 default: 8722 break; 8723 } 8724 return rc; 8725 } 8726 8727 /** 8728 * lpfc_sli4_queue_verify - Verify and update EQ counts 8729 * @phba: pointer to lpfc hba data structure. 8730 * 8731 * This routine is invoked to check the user settable queue counts for EQs. 
8732 * After this routine is called the counts will be set to valid values that 8733 * adhere to the constraints of the system's interrupt vectors and the port's 8734 * queue resources. 8735 * 8736 * Return codes 8737 * 0 - successful 8738 * -ENOMEM - No available memory 8739 **/ 8740 static int 8741 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8742 { 8743 /* 8744 * Sanity check for configured queue parameters against the run-time 8745 * device parameters 8746 */ 8747 8748 if (phba->nvmet_support) { 8749 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 8750 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 8751 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8752 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8753 } 8754 8755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8756 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8757 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8758 phba->cfg_nvmet_mrq); 8759 8760 /* Get EQ depth from module parameter, fake the default for now */ 8761 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8762 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8763 8764 /* Get CQ depth from module parameter, fake the default for now */ 8765 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8766 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8767 return 0; 8768 } 8769 8770 static int 8771 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 8772 { 8773 struct lpfc_queue *qdesc; 8774 u32 wqesize; 8775 int cpu; 8776 8777 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 8778 /* Create Fast Path IO CQs */ 8779 if (phba->enab_exp_wqcq_pages) 8780 /* Increase the CQ size when WQEs contain an embedded cdb */ 8781 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8782 phba->sli4_hba.cq_esize, 8783 LPFC_CQE_EXP_COUNT, cpu); 8784 8785 else 8786 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8787 phba->sli4_hba.cq_esize, 8788 phba->sli4_hba.cq_ecount, cpu); 8789 if (!qdesc) { 8790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8791 "0499 Failed allocate fast-path IO CQ (%d)\n", idx); 8792 return 1; 8793 } 8794 qdesc->qe_valid = 1; 8795 qdesc->hdwq = idx; 8796 qdesc->chann = cpu; 8797 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 8798 8799 /* Create Fast Path IO WQs */ 8800 if (phba->enab_exp_wqcq_pages) { 8801 /* Increase the WQ size when WQEs contain an embedded cdb */ 8802 wqesize = (phba->fcp_embed_io) ? 8803 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8804 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8805 wqesize, 8806 LPFC_WQE_EXP_COUNT, cpu); 8807 } else 8808 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8809 phba->sli4_hba.wq_esize, 8810 phba->sli4_hba.wq_ecount, cpu); 8811 8812 if (!qdesc) { 8813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8814 "0503 Failed allocate fast-path IO WQ (%d)\n", 8815 idx); 8816 return 1; 8817 } 8818 qdesc->hdwq = idx; 8819 qdesc->chann = cpu; 8820 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 8821 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8822 return 0; 8823 } 8824 8825 /** 8826 * lpfc_sli4_queue_create - Create all the SLI4 queues 8827 * @phba: pointer to lpfc hba data structure. 8828 * 8829 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8830 * operation. For each SLI4 queue type, the parameters such as queue entry 8831 * count (queue depth) shall be taken from the module parameter. For now, 8832 * we just use some constant number as place holder. 8833 * 8834 * Return codes 8835 * 0 - successful 8836 * -ENOMEM - No availble memory 8837 * -EIO - The mailbox failed to complete successfully. 
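 * Only one EQ is allocated per interrupt vector: the allocation loop skips
 * CPUs that are not flagged LPFC_CPU_FIRST_IRQ, and hardware queues whose
 * CPUs share a vector are later pointed at that vector's EQ.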
8838 **/ 8839 int 8840 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8841 { 8842 struct lpfc_queue *qdesc; 8843 int idx, cpu, eqcpu; 8844 struct lpfc_sli4_hdw_queue *qp; 8845 struct lpfc_vector_map_info *cpup; 8846 struct lpfc_vector_map_info *eqcpup; 8847 struct lpfc_eq_intr_info *eqi; 8848 8849 /* 8850 * Create HBA Record arrays. 8851 * Both NVME and FCP will share that same vectors / EQs 8852 */ 8853 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8854 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8855 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8856 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8857 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8858 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8859 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8860 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8861 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8862 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8863 8864 if (!phba->sli4_hba.hdwq) { 8865 phba->sli4_hba.hdwq = kcalloc( 8866 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8867 GFP_KERNEL); 8868 if (!phba->sli4_hba.hdwq) { 8869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8870 "6427 Failed allocate memory for " 8871 "fast-path Hardware Queue array\n"); 8872 goto out_error; 8873 } 8874 /* Prepare hardware queues to take IO buffers */ 8875 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8876 qp = &phba->sli4_hba.hdwq[idx]; 8877 spin_lock_init(&qp->io_buf_list_get_lock); 8878 spin_lock_init(&qp->io_buf_list_put_lock); 8879 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 8880 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 8881 qp->get_io_bufs = 0; 8882 qp->put_io_bufs = 0; 8883 qp->total_io_bufs = 0; 8884 spin_lock_init(&qp->abts_io_buf_list_lock); 8885 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 8886 qp->abts_scsi_io_bufs = 0; 8887 qp->abts_nvme_io_bufs = 0; 8888 INIT_LIST_HEAD(&qp->sgl_list); 8889 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 8890 spin_lock_init(&qp->hdwq_lock); 8891 } 8892 } 8893 8894 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8895 if (phba->nvmet_support) { 8896 phba->sli4_hba.nvmet_cqset = kcalloc( 8897 phba->cfg_nvmet_mrq, 8898 sizeof(struct lpfc_queue *), 8899 GFP_KERNEL); 8900 if (!phba->sli4_hba.nvmet_cqset) { 8901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8902 "3121 Fail allocate memory for " 8903 "fast-path CQ set array\n"); 8904 goto out_error; 8905 } 8906 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8907 phba->cfg_nvmet_mrq, 8908 sizeof(struct lpfc_queue *), 8909 GFP_KERNEL); 8910 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8912 "3122 Fail allocate memory for " 8913 "fast-path RQ set hdr array\n"); 8914 goto out_error; 8915 } 8916 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8917 phba->cfg_nvmet_mrq, 8918 sizeof(struct lpfc_queue *), 8919 GFP_KERNEL); 8920 if (!phba->sli4_hba.nvmet_mrq_data) { 8921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8922 "3124 Fail allocate memory for " 8923 "fast-path RQ set data array\n"); 8924 goto out_error; 8925 } 8926 } 8927 } 8928 8929 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8930 8931 /* Create HBA Event Queues (EQs) */ 8932 for_each_present_cpu(cpu) { 8933 /* We only want to create 1 EQ per vector, even though 8934 * multiple CPUs might be using that vector. so only 8935 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
8936 */ 8937 cpup = &phba->sli4_hba.cpu_map[cpu]; 8938 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 8939 continue; 8940 8941 /* Get a ptr to the Hardware Queue associated with this CPU */ 8942 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8943 8944 /* Allocate an EQ */ 8945 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8946 phba->sli4_hba.eq_esize, 8947 phba->sli4_hba.eq_ecount, cpu); 8948 if (!qdesc) { 8949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8950 "0497 Failed allocate EQ (%d)\n", 8951 cpup->hdwq); 8952 goto out_error; 8953 } 8954 qdesc->qe_valid = 1; 8955 qdesc->hdwq = cpup->hdwq; 8956 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 8957 qdesc->last_cpu = qdesc->chann; 8958 8959 /* Save the allocated EQ in the Hardware Queue */ 8960 qp->hba_eq = qdesc; 8961 8962 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 8963 list_add(&qdesc->cpu_list, &eqi->list); 8964 } 8965 8966 /* Now we need to populate the other Hardware Queues, that share 8967 * an IRQ vector, with the associated EQ ptr. 8968 */ 8969 for_each_present_cpu(cpu) { 8970 cpup = &phba->sli4_hba.cpu_map[cpu]; 8971 8972 /* Check for EQ already allocated in previous loop */ 8973 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 8974 continue; 8975 8976 /* Check for multiple CPUs per hdwq */ 8977 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8978 if (qp->hba_eq) 8979 continue; 8980 8981 /* We need to share an EQ for this hdwq */ 8982 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 8983 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 8984 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 8985 } 8986 8987 /* Allocate IO Path SLI4 CQ/WQs */ 8988 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8989 if (lpfc_alloc_io_wq_cq(phba, idx)) 8990 goto out_error; 8991 } 8992 8993 if (phba->nvmet_support) { 8994 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8995 cpu = lpfc_find_cpu_handle(phba, idx, 8996 LPFC_FIND_BY_HDWQ); 8997 qdesc = lpfc_sli4_queue_alloc(phba, 8998 LPFC_DEFAULT_PAGE_SIZE, 8999 phba->sli4_hba.cq_esize, 9000 phba->sli4_hba.cq_ecount, 9001 cpu); 9002 if (!qdesc) { 9003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9004 "3142 Failed allocate NVME " 9005 "CQ Set (%d)\n", idx); 9006 goto out_error; 9007 } 9008 qdesc->qe_valid = 1; 9009 qdesc->hdwq = idx; 9010 qdesc->chann = cpu; 9011 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 9012 } 9013 } 9014 9015 /* 9016 * Create Slow Path Completion Queues (CQs) 9017 */ 9018 9019 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 9020 /* Create slow-path Mailbox Command Complete Queue */ 9021 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9022 phba->sli4_hba.cq_esize, 9023 phba->sli4_hba.cq_ecount, cpu); 9024 if (!qdesc) { 9025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9026 "0500 Failed allocate slow-path mailbox CQ\n"); 9027 goto out_error; 9028 } 9029 qdesc->qe_valid = 1; 9030 phba->sli4_hba.mbx_cq = qdesc; 9031 9032 /* Create slow-path ELS Complete Queue */ 9033 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9034 phba->sli4_hba.cq_esize, 9035 phba->sli4_hba.cq_ecount, cpu); 9036 if (!qdesc) { 9037 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9038 "0501 Failed allocate slow-path ELS CQ\n"); 9039 goto out_error; 9040 } 9041 qdesc->qe_valid = 1; 9042 qdesc->chann = cpu; 9043 phba->sli4_hba.els_cq = qdesc; 9044 9045 9046 /* 9047 * Create Slow Path Work Queues (WQs) 9048 */ 9049 9050 /* Create Mailbox Command Queue */ 9051 9052 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9053 phba->sli4_hba.mq_esize, 9054 phba->sli4_hba.mq_ecount, 
cpu); 9055 if (!qdesc) { 9056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9057 "0505 Failed allocate slow-path MQ\n"); 9058 goto out_error; 9059 } 9060 qdesc->chann = cpu; 9061 phba->sli4_hba.mbx_wq = qdesc; 9062 9063 /* 9064 * Create ELS Work Queues 9065 */ 9066 9067 /* Create slow-path ELS Work Queue */ 9068 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9069 phba->sli4_hba.wq_esize, 9070 phba->sli4_hba.wq_ecount, cpu); 9071 if (!qdesc) { 9072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9073 "0504 Failed allocate slow-path ELS WQ\n"); 9074 goto out_error; 9075 } 9076 qdesc->chann = cpu; 9077 phba->sli4_hba.els_wq = qdesc; 9078 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9079 9080 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9081 /* Create NVME LS Complete Queue */ 9082 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9083 phba->sli4_hba.cq_esize, 9084 phba->sli4_hba.cq_ecount, cpu); 9085 if (!qdesc) { 9086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9087 "6079 Failed allocate NVME LS CQ\n"); 9088 goto out_error; 9089 } 9090 qdesc->chann = cpu; 9091 qdesc->qe_valid = 1; 9092 phba->sli4_hba.nvmels_cq = qdesc; 9093 9094 /* Create NVME LS Work Queue */ 9095 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9096 phba->sli4_hba.wq_esize, 9097 phba->sli4_hba.wq_ecount, cpu); 9098 if (!qdesc) { 9099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9100 "6080 Failed allocate NVME LS WQ\n"); 9101 goto out_error; 9102 } 9103 qdesc->chann = cpu; 9104 phba->sli4_hba.nvmels_wq = qdesc; 9105 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9106 } 9107 9108 /* 9109 * Create Receive Queue (RQ) 9110 */ 9111 9112 /* Create Receive Queue for header */ 9113 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9114 phba->sli4_hba.rq_esize, 9115 phba->sli4_hba.rq_ecount, cpu); 9116 if (!qdesc) { 9117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9118 "0506 Failed allocate receive HRQ\n"); 9119 goto out_error; 9120 } 9121 phba->sli4_hba.hdr_rq = qdesc; 9122 9123 /* Create Receive Queue for data */ 9124 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9125 phba->sli4_hba.rq_esize, 9126 phba->sli4_hba.rq_ecount, cpu); 9127 if (!qdesc) { 9128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9129 "0507 Failed allocate receive DRQ\n"); 9130 goto out_error; 9131 } 9132 phba->sli4_hba.dat_rq = qdesc; 9133 9134 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9135 phba->nvmet_support) { 9136 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9137 cpu = lpfc_find_cpu_handle(phba, idx, 9138 LPFC_FIND_BY_HDWQ); 9139 /* Create NVMET Receive Queue for header */ 9140 qdesc = lpfc_sli4_queue_alloc(phba, 9141 LPFC_DEFAULT_PAGE_SIZE, 9142 phba->sli4_hba.rq_esize, 9143 LPFC_NVMET_RQE_DEF_COUNT, 9144 cpu); 9145 if (!qdesc) { 9146 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9147 "3146 Failed allocate " 9148 "receive HRQ\n"); 9149 goto out_error; 9150 } 9151 qdesc->hdwq = idx; 9152 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9153 9154 /* Only needed for header of RQ pair */ 9155 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 9156 GFP_KERNEL, 9157 cpu_to_node(cpu)); 9158 if (qdesc->rqbp == NULL) { 9159 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9160 "6131 Failed allocate " 9161 "Header RQBP\n"); 9162 goto out_error; 9163 } 9164 9165 /* Put list in known state in case driver load fails. 
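			 * An empty rqb_buffer_list lets the teardown path walk
			 * it safely even if no buffers were ever posted.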
*/ 9166 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9167 9168 /* Create NVMET Receive Queue for data */ 9169 qdesc = lpfc_sli4_queue_alloc(phba, 9170 LPFC_DEFAULT_PAGE_SIZE, 9171 phba->sli4_hba.rq_esize, 9172 LPFC_NVMET_RQE_DEF_COUNT, 9173 cpu); 9174 if (!qdesc) { 9175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9176 "3156 Failed allocate " 9177 "receive DRQ\n"); 9178 goto out_error; 9179 } 9180 qdesc->hdwq = idx; 9181 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9182 } 9183 } 9184 9185 /* Clear NVME stats */ 9186 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9187 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9188 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9189 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9190 } 9191 } 9192 9193 /* Clear SCSI stats */ 9194 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9195 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9196 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9197 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9198 } 9199 } 9200 9201 return 0; 9202 9203 out_error: 9204 lpfc_sli4_queue_destroy(phba); 9205 return -ENOMEM; 9206 } 9207 9208 static inline void 9209 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9210 { 9211 if (*qp != NULL) { 9212 lpfc_sli4_queue_free(*qp); 9213 *qp = NULL; 9214 } 9215 } 9216 9217 static inline void 9218 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9219 { 9220 int idx; 9221 9222 if (*qs == NULL) 9223 return; 9224 9225 for (idx = 0; idx < max; idx++) 9226 __lpfc_sli4_release_queue(&(*qs)[idx]); 9227 9228 kfree(*qs); 9229 *qs = NULL; 9230 } 9231 9232 static inline void 9233 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9234 { 9235 struct lpfc_sli4_hdw_queue *hdwq; 9236 struct lpfc_queue *eq; 9237 uint32_t idx; 9238 9239 hdwq = phba->sli4_hba.hdwq; 9240 9241 /* Loop thru all Hardware Queues */ 9242 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9243 /* Free the CQ/WQ corresponding to the Hardware Queue */ 9244 lpfc_sli4_queue_free(hdwq[idx].io_cq); 9245 lpfc_sli4_queue_free(hdwq[idx].io_wq); 9246 hdwq[idx].io_cq = NULL; 9247 hdwq[idx].io_wq = NULL; 9248 if (phba->cfg_xpsgl && !phba->nvmet_support) 9249 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 9250 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 9251 } 9252 /* Loop thru all IRQ vectors */ 9253 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 9254 /* Free the EQ corresponding to the IRQ vector */ 9255 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 9256 lpfc_sli4_queue_free(eq); 9257 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 9258 } 9259 } 9260 9261 /** 9262 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9263 * @phba: pointer to lpfc hba data structure. 9264 * 9265 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9266 * operation. 9267 * 9268 * Return codes 9269 * 0 - successful 9270 * -ENOMEM - No available memory 9271 * -EIO - The mailbox failed to complete successfully. 9272 **/ 9273 void 9274 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9275 { 9276 /* 9277 * Set FREE_INIT before beginning to free the queues. 9278 * Wait until the users of queues to acknowledge to 9279 * release queues by clearing FREE_WAIT. 
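	 * Any context still walking a queue keeps LPFC_QUEUE_FREE_WAIT set;
	 * we poll below in 20ms steps until that flag clears before freeing.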
9280 */ 9281 spin_lock_irq(&phba->hbalock); 9282 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 9283 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 9284 spin_unlock_irq(&phba->hbalock); 9285 msleep(20); 9286 spin_lock_irq(&phba->hbalock); 9287 } 9288 spin_unlock_irq(&phba->hbalock); 9289 9290 lpfc_sli4_cleanup_poll_list(phba); 9291 9292 /* Release HBA eqs */ 9293 if (phba->sli4_hba.hdwq) 9294 lpfc_sli4_release_hdwq(phba); 9295 9296 if (phba->nvmet_support) { 9297 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 9298 phba->cfg_nvmet_mrq); 9299 9300 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 9301 phba->cfg_nvmet_mrq); 9302 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 9303 phba->cfg_nvmet_mrq); 9304 } 9305 9306 /* Release mailbox command work queue */ 9307 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 9308 9309 /* Release ELS work queue */ 9310 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 9311 9312 /* Release ELS work queue */ 9313 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 9314 9315 /* Release unsolicited receive queue */ 9316 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 9317 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 9318 9319 /* Release ELS complete queue */ 9320 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 9321 9322 /* Release NVME LS complete queue */ 9323 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 9324 9325 /* Release mailbox command complete queue */ 9326 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 9327 9328 /* Everything on this list has been freed */ 9329 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9330 9331 /* Done with freeing the queues */ 9332 spin_lock_irq(&phba->hbalock); 9333 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 9334 spin_unlock_irq(&phba->hbalock); 9335 } 9336 9337 int 9338 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 9339 { 9340 struct lpfc_rqb *rqbp; 9341 struct lpfc_dmabuf *h_buf; 9342 struct rqb_dmabuf *rqb_buffer; 9343 9344 rqbp = rq->rqbp; 9345 while (!list_empty(&rqbp->rqb_buffer_list)) { 9346 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 9347 struct lpfc_dmabuf, list); 9348 9349 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 9350 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 9351 rqbp->buffer_count--; 9352 } 9353 return 1; 9354 } 9355 9356 static int 9357 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 9358 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 9359 int qidx, uint32_t qtype) 9360 { 9361 struct lpfc_sli_ring *pring; 9362 int rc; 9363 9364 if (!eq || !cq || !wq) { 9365 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9366 "6085 Fast-path %s (%d) not allocated\n", 9367 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9368 return -ENOMEM; 9369 } 9370 9371 /* create the Cq first */ 9372 rc = lpfc_cq_create(phba, cq, eq, 9373 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 9374 if (rc) { 9375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9376 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9377 qidx, (uint32_t)rc); 9378 return rc; 9379 } 9380 9381 if (qtype != LPFC_MBOX) { 9382 /* Setup cq_map for fast lookup */ 9383 if (cq_map) 9384 *cq_map = cq->queue_id; 9385 9386 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9387 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9388 qidx, cq->queue_id, qidx, eq->queue_id); 9389 9390 /* create the wq */ 9391 rc = lpfc_wq_create(phba, wq, cq, qtype); 9392 if (rc) { 9393 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9394 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9395 qidx, (uint32_t)rc); 9396 /* no need to tear down cq - caller will do so */ 9397 return rc; 9398 } 9399 9400 /* Bind this CQ/WQ to the NVME ring */ 9401 pring = wq->pring; 9402 pring->sli.sli4.wqp = (void *)wq; 9403 cq->pring = pring; 9404 9405 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9406 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9407 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9408 } else { 9409 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9410 if (rc) { 9411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9412 "0539 Failed setup of slow-path MQ: " 9413 "rc = 0x%x\n", rc); 9414 /* no need to tear down cq - caller will do so */ 9415 return rc; 9416 } 9417 9418 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9419 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9420 phba->sli4_hba.mbx_wq->queue_id, 9421 phba->sli4_hba.mbx_cq->queue_id); 9422 } 9423 9424 return 0; 9425 } 9426 9427 /** 9428 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9429 * @phba: pointer to lpfc hba data structure. 9430 * 9431 * This routine will populate the cq_lookup table by all 9432 * available CQ queue_id's. 9433 **/ 9434 static void 9435 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9436 { 9437 struct lpfc_queue *eq, *childq; 9438 int qidx; 9439 9440 memset(phba->sli4_hba.cq_lookup, 0, 9441 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9442 /* Loop thru all IRQ vectors */ 9443 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9444 /* Get the EQ corresponding to the IRQ vector */ 9445 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9446 if (!eq) 9447 continue; 9448 /* Loop through all CQs associated with that EQ */ 9449 list_for_each_entry(childq, &eq->child_list, list) { 9450 if (childq->queue_id > phba->sli4_hba.cq_max) 9451 continue; 9452 if (childq->subtype == LPFC_IO) 9453 phba->sli4_hba.cq_lookup[childq->queue_id] = 9454 childq; 9455 } 9456 } 9457 } 9458 9459 /** 9460 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9461 * @phba: pointer to lpfc hba data structure. 9462 * 9463 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9464 * operation. 9465 * 9466 * Return codes 9467 * 0 - successful 9468 * -ENOMEM - No available memory 9469 * -EIO - The mailbox failed to complete successfully. 
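 *
 * Setup order: the EQs are created first (one per IRQ vector), then the
 * per-hardware-queue IO CQ/WQ pairs, the slow-path MBX/ELS/NVME LS queues,
 * the receive queues, and finally the CQ lookup table.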
9470 **/ 9471 int 9472 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9473 { 9474 uint32_t shdr_status, shdr_add_status; 9475 union lpfc_sli4_cfg_shdr *shdr; 9476 struct lpfc_vector_map_info *cpup; 9477 struct lpfc_sli4_hdw_queue *qp; 9478 LPFC_MBOXQ_t *mboxq; 9479 int qidx, cpu; 9480 uint32_t length, usdelay; 9481 int rc = -ENOMEM; 9482 9483 /* Check for dual-ULP support */ 9484 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9485 if (!mboxq) { 9486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9487 "3249 Unable to allocate memory for " 9488 "QUERY_FW_CFG mailbox command\n"); 9489 return -ENOMEM; 9490 } 9491 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9492 sizeof(struct lpfc_sli4_cfg_mhdr)); 9493 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9494 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9495 length, LPFC_SLI4_MBX_EMBED); 9496 9497 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9498 9499 shdr = (union lpfc_sli4_cfg_shdr *) 9500 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9501 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9502 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9503 if (shdr_status || shdr_add_status || rc) { 9504 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9505 "3250 QUERY_FW_CFG mailbox failed with status " 9506 "x%x add_status x%x, mbx status x%x\n", 9507 shdr_status, shdr_add_status, rc); 9508 if (rc != MBX_TIMEOUT) 9509 mempool_free(mboxq, phba->mbox_mem_pool); 9510 rc = -ENXIO; 9511 goto out_error; 9512 } 9513 9514 phba->sli4_hba.fw_func_mode = 9515 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9516 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9517 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9518 phba->sli4_hba.physical_port = 9519 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9520 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9521 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9522 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9523 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9524 9525 if (rc != MBX_TIMEOUT) 9526 mempool_free(mboxq, phba->mbox_mem_pool); 9527 9528 /* 9529 * Set up HBA Event Queues (EQs) 9530 */ 9531 qp = phba->sli4_hba.hdwq; 9532 9533 /* Set up HBA event queue */ 9534 if (!qp) { 9535 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9536 "3147 Fast-path EQs not allocated\n"); 9537 rc = -ENOMEM; 9538 goto out_error; 9539 } 9540 9541 /* Loop thru all IRQ vectors */ 9542 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9543 /* Create HBA Event Queues (EQs) in order */ 9544 for_each_present_cpu(cpu) { 9545 cpup = &phba->sli4_hba.cpu_map[cpu]; 9546 9547 /* Look for the CPU thats using that vector with 9548 * LPFC_CPU_FIRST_IRQ set. 
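			 * Only that first CPU's hdwq owns the EQ, so exactly
			 * one lpfc_eq_create() is issued per IRQ vector.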
9549 */ 9550 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9551 continue; 9552 if (qidx != cpup->eq) 9553 continue; 9554 9555 /* Create an EQ for that vector */ 9556 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 9557 phba->cfg_fcp_imax); 9558 if (rc) { 9559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9560 "0523 Failed setup of fast-path" 9561 " EQ (%d), rc = 0x%x\n", 9562 cpup->eq, (uint32_t)rc); 9563 goto out_destroy; 9564 } 9565 9566 /* Save the EQ for that vector in the hba_eq_hdl */ 9567 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 9568 qp[cpup->hdwq].hba_eq; 9569 9570 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9571 "2584 HBA EQ setup: queue[%d]-id=%d\n", 9572 cpup->eq, 9573 qp[cpup->hdwq].hba_eq->queue_id); 9574 } 9575 } 9576 9577 /* Loop thru all Hardware Queues */ 9578 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9579 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 9580 cpup = &phba->sli4_hba.cpu_map[cpu]; 9581 9582 /* Create the CQ/WQ corresponding to the Hardware Queue */ 9583 rc = lpfc_create_wq_cq(phba, 9584 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 9585 qp[qidx].io_cq, 9586 qp[qidx].io_wq, 9587 &phba->sli4_hba.hdwq[qidx].io_cq_map, 9588 qidx, 9589 LPFC_IO); 9590 if (rc) { 9591 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9592 "0535 Failed to setup fastpath " 9593 "IO WQ/CQ (%d), rc = 0x%x\n", 9594 qidx, (uint32_t)rc); 9595 goto out_destroy; 9596 } 9597 } 9598 9599 /* 9600 * Set up Slow Path Complete Queues (CQs) 9601 */ 9602 9603 /* Set up slow-path MBOX CQ/MQ */ 9604 9605 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9607 "0528 %s not allocated\n", 9608 phba->sli4_hba.mbx_cq ? 9609 "Mailbox WQ" : "Mailbox CQ"); 9610 rc = -ENOMEM; 9611 goto out_destroy; 9612 } 9613 9614 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9615 phba->sli4_hba.mbx_cq, 9616 phba->sli4_hba.mbx_wq, 9617 NULL, 0, LPFC_MBOX); 9618 if (rc) { 9619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9620 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9621 (uint32_t)rc); 9622 goto out_destroy; 9623 } 9624 if (phba->nvmet_support) { 9625 if (!phba->sli4_hba.nvmet_cqset) { 9626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9627 "3165 Fast-path NVME CQ Set " 9628 "array not allocated\n"); 9629 rc = -ENOMEM; 9630 goto out_destroy; 9631 } 9632 if (phba->cfg_nvmet_mrq > 1) { 9633 rc = lpfc_cq_create_set(phba, 9634 phba->sli4_hba.nvmet_cqset, 9635 qp, 9636 LPFC_WCQ, LPFC_NVMET); 9637 if (rc) { 9638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9639 "3164 Failed setup of NVME CQ " 9640 "Set, rc = 0x%x\n", 9641 (uint32_t)rc); 9642 goto out_destroy; 9643 } 9644 } else { 9645 /* Set up NVMET Receive Complete Queue */ 9646 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9647 qp[0].hba_eq, 9648 LPFC_WCQ, LPFC_NVMET); 9649 if (rc) { 9650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9651 "6089 Failed setup NVMET CQ: " 9652 "rc = 0x%x\n", (uint32_t)rc); 9653 goto out_destroy; 9654 } 9655 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9656 9657 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9658 "6090 NVMET CQ setup: cq-id=%d, " 9659 "parent eq-id=%d\n", 9660 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9661 qp[0].hba_eq->queue_id); 9662 } 9663 } 9664 9665 /* Set up slow-path ELS WQ/CQ */ 9666 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9668 "0530 ELS %s not allocated\n", 9669 phba->sli4_hba.els_cq ? 
"WQ" : "CQ"); 9670 rc = -ENOMEM; 9671 goto out_destroy; 9672 } 9673 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9674 phba->sli4_hba.els_cq, 9675 phba->sli4_hba.els_wq, 9676 NULL, 0, LPFC_ELS); 9677 if (rc) { 9678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9679 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9680 (uint32_t)rc); 9681 goto out_destroy; 9682 } 9683 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9684 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9685 phba->sli4_hba.els_wq->queue_id, 9686 phba->sli4_hba.els_cq->queue_id); 9687 9688 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9689 /* Set up NVME LS Complete Queue */ 9690 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9691 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9692 "6091 LS %s not allocated\n", 9693 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 9694 rc = -ENOMEM; 9695 goto out_destroy; 9696 } 9697 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9698 phba->sli4_hba.nvmels_cq, 9699 phba->sli4_hba.nvmels_wq, 9700 NULL, 0, LPFC_NVME_LS); 9701 if (rc) { 9702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9703 "0526 Failed setup of NVVME LS WQ/CQ: " 9704 "rc = 0x%x\n", (uint32_t)rc); 9705 goto out_destroy; 9706 } 9707 9708 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9709 "6096 ELS WQ setup: wq-id=%d, " 9710 "parent cq-id=%d\n", 9711 phba->sli4_hba.nvmels_wq->queue_id, 9712 phba->sli4_hba.nvmels_cq->queue_id); 9713 } 9714 9715 /* 9716 * Create NVMET Receive Queue (RQ) 9717 */ 9718 if (phba->nvmet_support) { 9719 if ((!phba->sli4_hba.nvmet_cqset) || 9720 (!phba->sli4_hba.nvmet_mrq_hdr) || 9721 (!phba->sli4_hba.nvmet_mrq_data)) { 9722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9723 "6130 MRQ CQ Queues not " 9724 "allocated\n"); 9725 rc = -ENOMEM; 9726 goto out_destroy; 9727 } 9728 if (phba->cfg_nvmet_mrq > 1) { 9729 rc = lpfc_mrq_create(phba, 9730 phba->sli4_hba.nvmet_mrq_hdr, 9731 phba->sli4_hba.nvmet_mrq_data, 9732 phba->sli4_hba.nvmet_cqset, 9733 LPFC_NVMET); 9734 if (rc) { 9735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9736 "6098 Failed setup of NVMET " 9737 "MRQ: rc = 0x%x\n", 9738 (uint32_t)rc); 9739 goto out_destroy; 9740 } 9741 9742 } else { 9743 rc = lpfc_rq_create(phba, 9744 phba->sli4_hba.nvmet_mrq_hdr[0], 9745 phba->sli4_hba.nvmet_mrq_data[0], 9746 phba->sli4_hba.nvmet_cqset[0], 9747 LPFC_NVMET); 9748 if (rc) { 9749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9750 "6057 Failed setup of NVMET " 9751 "Receive Queue: rc = 0x%x\n", 9752 (uint32_t)rc); 9753 goto out_destroy; 9754 } 9755 9756 lpfc_printf_log( 9757 phba, KERN_INFO, LOG_INIT, 9758 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9759 "dat-rq-id=%d parent cq-id=%d\n", 9760 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9761 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9762 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9763 9764 } 9765 } 9766 9767 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9769 "0540 Receive Queue not allocated\n"); 9770 rc = -ENOMEM; 9771 goto out_destroy; 9772 } 9773 9774 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9775 phba->sli4_hba.els_cq, LPFC_USOL); 9776 if (rc) { 9777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9778 "0541 Failed setup of Receive Queue: " 9779 "rc = 0x%x\n", (uint32_t)rc); 9780 goto out_destroy; 9781 } 9782 9783 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9784 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9785 "parent cq-id=%d\n", 9786 phba->sli4_hba.hdr_rq->queue_id, 9787 phba->sli4_hba.dat_rq->queue_id, 9788 phba->sli4_hba.els_cq->queue_id); 9789 9790 if 
(phba->cfg_fcp_imax) 9791 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 9792 else 9793 usdelay = 0; 9794 9795 for (qidx = 0; qidx < phba->cfg_irq_chann; 9796 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9797 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9798 usdelay); 9799 9800 if (phba->sli4_hba.cq_max) { 9801 kfree(phba->sli4_hba.cq_lookup); 9802 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9803 sizeof(struct lpfc_queue *), GFP_KERNEL); 9804 if (!phba->sli4_hba.cq_lookup) { 9805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9806 "0549 Failed setup of CQ Lookup table: " 9807 "size 0x%x\n", phba->sli4_hba.cq_max); 9808 rc = -ENOMEM; 9809 goto out_destroy; 9810 } 9811 lpfc_setup_cq_lookup(phba); 9812 } 9813 return 0; 9814 9815 out_destroy: 9816 lpfc_sli4_queue_unset(phba); 9817 out_error: 9818 return rc; 9819 } 9820 9821 /** 9822 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9823 * @phba: pointer to lpfc hba data structure. 9824 * 9825 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 9826 * operation. 9827 * 9828 * Return codes 9829 * 0 - successful 9830 * -ENOMEM - No available memory 9831 * -EIO - The mailbox failed to complete successfully. 9832 **/ 9833 void 9834 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9835 { 9836 struct lpfc_sli4_hdw_queue *qp; 9837 struct lpfc_queue *eq; 9838 int qidx; 9839 9840 /* Unset mailbox command work queue */ 9841 if (phba->sli4_hba.mbx_wq) 9842 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9843 9844 /* Unset NVME LS work queue */ 9845 if (phba->sli4_hba.nvmels_wq) 9846 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9847 9848 /* Unset ELS work queue */ 9849 if (phba->sli4_hba.els_wq) 9850 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9851 9852 /* Unset unsolicited receive queue */ 9853 if (phba->sli4_hba.hdr_rq) 9854 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9855 phba->sli4_hba.dat_rq); 9856 9857 /* Unset mailbox command complete queue */ 9858 if (phba->sli4_hba.mbx_cq) 9859 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9860 9861 /* Unset ELS complete queue */ 9862 if (phba->sli4_hba.els_cq) 9863 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9864 9865 /* Unset NVME LS complete queue */ 9866 if (phba->sli4_hba.nvmels_cq) 9867 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9868 9869 if (phba->nvmet_support) { 9870 /* Unset NVMET MRQ queue */ 9871 if (phba->sli4_hba.nvmet_mrq_hdr) { 9872 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9873 lpfc_rq_destroy( 9874 phba, 9875 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9876 phba->sli4_hba.nvmet_mrq_data[qidx]); 9877 } 9878 9879 /* Unset NVMET CQ Set complete queue */ 9880 if (phba->sli4_hba.nvmet_cqset) { 9881 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9882 lpfc_cq_destroy( 9883 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9884 } 9885 } 9886 9887 /* Unset fast-path SLI4 queues */ 9888 if (phba->sli4_hba.hdwq) { 9889 /* Loop thru all Hardware Queues */ 9890 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9891 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 9892 qp = &phba->sli4_hba.hdwq[qidx]; 9893 lpfc_wq_destroy(phba, qp->io_wq); 9894 lpfc_cq_destroy(phba, qp->io_cq); 9895 } 9896 /* Loop thru all IRQ vectors */ 9897 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9898 /* Destroy the EQ corresponding to the IRQ vector */ 9899 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9900 lpfc_eq_destroy(phba, eq); 9901 } 9902 } 9903 9904 kfree(phba->sli4_hba.cq_lookup); 9905 phba->sli4_hba.cq_lookup = NULL; 9906 phba->sli4_hba.cq_max = 0; 9907 } 
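
/*
 * Informational summary of the SLI4 queue lifecycle handled above:
 *
 *   lpfc_sli4_queue_create()  - allocate host memory for every queue
 *   lpfc_sli4_queue_setup()   - mailbox commands to create the queues on the port
 *   lpfc_sli4_queue_unset()   - mailbox commands to destroy the queues on the port
 *   lpfc_sli4_queue_destroy() - free the host memory
 */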
9908 9909 /** 9910 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9911 * @phba: pointer to lpfc hba data structure. 9912 * 9913 * This routine is invoked to allocate and set up a pool of completion queue 9914 * events. The body of the completion queue event is a completion queue entry 9915 * CQE. For now, this pool is used for the interrupt service routine to queue 9916 * the following HBA completion queue events for the worker thread to process: 9917 * - Mailbox asynchronous events 9918 * - Receive queue completion unsolicited events 9919 * Later, this can be used for all the slow-path events. 9920 * 9921 * Return codes 9922 * 0 - successful 9923 * -ENOMEM - No available memory 9924 **/ 9925 static int 9926 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9927 { 9928 struct lpfc_cq_event *cq_event; 9929 int i; 9930 9931 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9932 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9933 if (!cq_event) 9934 goto out_pool_create_fail; 9935 list_add_tail(&cq_event->list, 9936 &phba->sli4_hba.sp_cqe_event_pool); 9937 } 9938 return 0; 9939 9940 out_pool_create_fail: 9941 lpfc_sli4_cq_event_pool_destroy(phba); 9942 return -ENOMEM; 9943 } 9944 9945 /** 9946 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9947 * @phba: pointer to lpfc hba data structure. 9948 * 9949 * This routine is invoked to free the pool of completion queue events at 9950 * driver unload time. Note that, it is the responsibility of the driver 9951 * cleanup routine to free all the outstanding completion-queue events 9952 * allocated from this pool back into the pool before invoking this routine 9953 * to destroy the pool. 9954 **/ 9955 static void 9956 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9957 { 9958 struct lpfc_cq_event *cq_event, *next_cq_event; 9959 9960 list_for_each_entry_safe(cq_event, next_cq_event, 9961 &phba->sli4_hba.sp_cqe_event_pool, list) { 9962 list_del(&cq_event->list); 9963 kfree(cq_event); 9964 } 9965 } 9966 9967 /** 9968 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9969 * @phba: pointer to lpfc hba data structure. 9970 * 9971 * This routine is the lock free version of the API invoked to allocate a 9972 * completion-queue event from the free pool. 9973 * 9974 * Return: Pointer to the newly allocated completion-queue event if successful 9975 * NULL otherwise. 9976 **/ 9977 struct lpfc_cq_event * 9978 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9979 { 9980 struct lpfc_cq_event *cq_event = NULL; 9981 9982 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 9983 struct lpfc_cq_event, list); 9984 return cq_event; 9985 } 9986 9987 /** 9988 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9989 * @phba: pointer to lpfc hba data structure. 9990 * 9991 * This routine is the lock version of the API invoked to allocate a 9992 * completion-queue event from the free pool. 9993 * 9994 * Return: Pointer to the newly allocated completion-queue event if successful 9995 * NULL otherwise. 
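 *
 * This variant takes phba->hbalock internally, so it must not be called with
 * that lock already held; use __lpfc_sli4_cq_event_alloc() in that case.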
9996 **/ 9997 struct lpfc_cq_event * 9998 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9999 { 10000 struct lpfc_cq_event *cq_event; 10001 unsigned long iflags; 10002 10003 spin_lock_irqsave(&phba->hbalock, iflags); 10004 cq_event = __lpfc_sli4_cq_event_alloc(phba); 10005 spin_unlock_irqrestore(&phba->hbalock, iflags); 10006 return cq_event; 10007 } 10008 10009 /** 10010 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 10011 * @phba: pointer to lpfc hba data structure. 10012 * @cq_event: pointer to the completion queue event to be freed. 10013 * 10014 * This routine is the lock free version of the API invoked to release a 10015 * completion-queue event back into the free pool. 10016 **/ 10017 void 10018 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 10019 struct lpfc_cq_event *cq_event) 10020 { 10021 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 10022 } 10023 10024 /** 10025 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 10026 * @phba: pointer to lpfc hba data structure. 10027 * @cq_event: pointer to the completion queue event to be freed. 10028 * 10029 * This routine is the lock version of the API invoked to release a 10030 * completion-queue event back into the free pool. 10031 **/ 10032 void 10033 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 10034 struct lpfc_cq_event *cq_event) 10035 { 10036 unsigned long iflags; 10037 spin_lock_irqsave(&phba->hbalock, iflags); 10038 __lpfc_sli4_cq_event_release(phba, cq_event); 10039 spin_unlock_irqrestore(&phba->hbalock, iflags); 10040 } 10041 10042 /** 10043 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 10044 * @phba: pointer to lpfc hba data structure. 10045 * 10046 * This routine is to free all the pending completion-queue events to the 10047 * back into the free pool for device reset. 10048 **/ 10049 static void 10050 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 10051 { 10052 LIST_HEAD(cqelist); 10053 struct lpfc_cq_event *cqe; 10054 unsigned long iflags; 10055 10056 /* Retrieve all the pending WCQEs from pending WCQE lists */ 10057 spin_lock_irqsave(&phba->hbalock, iflags); 10058 /* Pending FCP XRI abort events */ 10059 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 10060 &cqelist); 10061 /* Pending ELS XRI abort events */ 10062 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 10063 &cqelist); 10064 /* Pending asynnc events */ 10065 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 10066 &cqelist); 10067 spin_unlock_irqrestore(&phba->hbalock, iflags); 10068 10069 while (!list_empty(&cqelist)) { 10070 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 10071 lpfc_sli4_cq_event_release(phba, cqe); 10072 } 10073 } 10074 10075 /** 10076 * lpfc_pci_function_reset - Reset pci function. 10077 * @phba: pointer to lpfc hba data structure. 10078 * 10079 * This routine is invoked to request a PCI function reset. It will destroys 10080 * all resources assigned to the PCI function which originates this request. 10081 * 10082 * Return codes 10083 * 0 - successful 10084 * -ENOMEM - No available memory 10085 * -EIO - The mailbox failed to complete successfully. 
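 *
 * The reset method depends on the interface type: if_type 0 issues a
 * FUNCTION_RESET mailbox command, while if_type 2/6 write INIT_PORT to the
 * SLIPORT control register and poll the port status register for RDY for up
 * to 30 seconds.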
10086 **/ 10087 int 10088 lpfc_pci_function_reset(struct lpfc_hba *phba) 10089 { 10090 LPFC_MBOXQ_t *mboxq; 10091 uint32_t rc = 0, if_type; 10092 uint32_t shdr_status, shdr_add_status; 10093 uint32_t rdy_chk; 10094 uint32_t port_reset = 0; 10095 union lpfc_sli4_cfg_shdr *shdr; 10096 struct lpfc_register reg_data; 10097 uint16_t devid; 10098 10099 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10100 switch (if_type) { 10101 case LPFC_SLI_INTF_IF_TYPE_0: 10102 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10103 GFP_KERNEL); 10104 if (!mboxq) { 10105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10106 "0494 Unable to allocate memory for " 10107 "issuing SLI_FUNCTION_RESET mailbox " 10108 "command\n"); 10109 return -ENOMEM; 10110 } 10111 10112 /* Setup PCI function reset mailbox-ioctl command */ 10113 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10114 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 10115 LPFC_SLI4_MBX_EMBED); 10116 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10117 shdr = (union lpfc_sli4_cfg_shdr *) 10118 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10119 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10120 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 10121 &shdr->response); 10122 if (rc != MBX_TIMEOUT) 10123 mempool_free(mboxq, phba->mbox_mem_pool); 10124 if (shdr_status || shdr_add_status || rc) { 10125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10126 "0495 SLI_FUNCTION_RESET mailbox " 10127 "failed with status x%x add_status x%x," 10128 " mbx status x%x\n", 10129 shdr_status, shdr_add_status, rc); 10130 rc = -ENXIO; 10131 } 10132 break; 10133 case LPFC_SLI_INTF_IF_TYPE_2: 10134 case LPFC_SLI_INTF_IF_TYPE_6: 10135 wait: 10136 /* 10137 * Poll the Port Status Register and wait for RDY for 10138 * up to 30 seconds. If the port doesn't respond, treat 10139 * it as an error. 10140 */ 10141 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 10142 if (lpfc_readl(phba->sli4_hba.u.if_type2. 10143 STATUSregaddr, ®_data.word0)) { 10144 rc = -ENODEV; 10145 goto out; 10146 } 10147 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 10148 break; 10149 msleep(20); 10150 } 10151 10152 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 10153 phba->work_status[0] = readl( 10154 phba->sli4_hba.u.if_type2.ERR1regaddr); 10155 phba->work_status[1] = readl( 10156 phba->sli4_hba.u.if_type2.ERR2regaddr); 10157 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10158 "2890 Port not ready, port status reg " 10159 "0x%x error 1=0x%x, error 2=0x%x\n", 10160 reg_data.word0, 10161 phba->work_status[0], 10162 phba->work_status[1]); 10163 rc = -ENODEV; 10164 goto out; 10165 } 10166 10167 if (!port_reset) { 10168 /* 10169 * Reset the port now 10170 */ 10171 reg_data.word0 = 0; 10172 bf_set(lpfc_sliport_ctrl_end, ®_data, 10173 LPFC_SLIPORT_LITTLE_ENDIAN); 10174 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10175 LPFC_SLIPORT_INIT_PORT); 10176 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10177 CTRLregaddr); 10178 /* flush */ 10179 pci_read_config_word(phba->pcidev, 10180 PCI_DEVICE_ID, &devid); 10181 10182 port_reset = 1; 10183 msleep(20); 10184 goto wait; 10185 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10186 rc = -ENODEV; 10187 goto out; 10188 } 10189 break; 10190 10191 case LPFC_SLI_INTF_IF_TYPE_1: 10192 default: 10193 break; 10194 } 10195 10196 out: 10197 /* Catch the not-ready port failure after a port reset. 
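	 * The operator can then retry recovery via the board_mode sysfs
	 * attribute, e.g. "echo fw_reset > board_mode" as suggested in the
	 * 3317 message below.
	 */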
*/ 10198 if (rc) { 10199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10200 "3317 HBA not functional: IP Reset Failed " 10201 "try: echo fw_reset > board_mode\n"); 10202 rc = -ENODEV; 10203 } 10204 10205 return rc; 10206 } 10207 10208 /** 10209 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10210 * @phba: pointer to lpfc hba data structure. 10211 * 10212 * This routine is invoked to set up the PCI device memory space for device 10213 * with SLI-4 interface spec. 10214 * 10215 * Return codes 10216 * 0 - successful 10217 * other values - error 10218 **/ 10219 static int 10220 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10221 { 10222 struct pci_dev *pdev = phba->pcidev; 10223 unsigned long bar0map_len, bar1map_len, bar2map_len; 10224 int error; 10225 uint32_t if_type; 10226 10227 if (!pdev) 10228 return -ENODEV; 10229 10230 /* Set the device DMA mask size */ 10231 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10232 if (error) 10233 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10234 if (error) 10235 return error; 10236 10237 /* 10238 * The BARs and register set definitions and offset locations are 10239 * dependent on the if_type. 10240 */ 10241 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10242 &phba->sli4_hba.sli_intf.word0)) { 10243 return -ENODEV; 10244 } 10245 10246 /* There is no SLI3 failback for SLI4 devices. */ 10247 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10248 LPFC_SLI_INTF_VALID) { 10249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10250 "2894 SLI_INTF reg contents invalid " 10251 "sli_intf reg 0x%x\n", 10252 phba->sli4_hba.sli_intf.word0); 10253 return -ENODEV; 10254 } 10255 10256 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10257 /* 10258 * Get the bus address of SLI4 device Bar regions and the 10259 * number of bytes required by each mapping. The mapping of the 10260 * particular PCI BARs regions is dependent on the type of 10261 * SLI4 device. 
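	 * Roughly: PCI_64BIT_BAR0 always holds the SLI4 config registers;
	 * if_type 0 also maps PCI_64BIT_BAR2 (control registers) and
	 * PCI_64BIT_BAR4 (doorbells), while if_type 6 maps PCI_64BIT_BAR2
	 * (doorbells) and PCI_64BIT_BAR4 (DPP).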
10262 */ 10263 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10264 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10265 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10266 10267 /* 10268 * Map SLI4 PCI Config Space Register base to a kernel virtual 10269 * addr 10270 */ 10271 phba->sli4_hba.conf_regs_memmap_p = 10272 ioremap(phba->pci_bar0_map, bar0map_len); 10273 if (!phba->sli4_hba.conf_regs_memmap_p) { 10274 dev_printk(KERN_ERR, &pdev->dev, 10275 "ioremap failed for SLI4 PCI config " 10276 "registers.\n"); 10277 return -ENODEV; 10278 } 10279 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10280 /* Set up BAR0 PCI config space register memory map */ 10281 lpfc_sli4_bar0_register_memmap(phba, if_type); 10282 } else { 10283 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10284 bar0map_len = pci_resource_len(pdev, 1); 10285 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10286 dev_printk(KERN_ERR, &pdev->dev, 10287 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10288 return -ENODEV; 10289 } 10290 phba->sli4_hba.conf_regs_memmap_p = 10291 ioremap(phba->pci_bar0_map, bar0map_len); 10292 if (!phba->sli4_hba.conf_regs_memmap_p) { 10293 dev_printk(KERN_ERR, &pdev->dev, 10294 "ioremap failed for SLI4 PCI config " 10295 "registers.\n"); 10296 return -ENODEV; 10297 } 10298 lpfc_sli4_bar0_register_memmap(phba, if_type); 10299 } 10300 10301 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10302 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10303 /* 10304 * Map SLI4 if type 0 HBA Control Register base to a 10305 * kernel virtual address and setup the registers. 10306 */ 10307 phba->pci_bar1_map = pci_resource_start(pdev, 10308 PCI_64BIT_BAR2); 10309 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10310 phba->sli4_hba.ctrl_regs_memmap_p = 10311 ioremap(phba->pci_bar1_map, 10312 bar1map_len); 10313 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10314 dev_err(&pdev->dev, 10315 "ioremap failed for SLI4 HBA " 10316 "control registers.\n"); 10317 error = -ENOMEM; 10318 goto out_iounmap_conf; 10319 } 10320 phba->pci_bar2_memmap_p = 10321 phba->sli4_hba.ctrl_regs_memmap_p; 10322 lpfc_sli4_bar1_register_memmap(phba, if_type); 10323 } else { 10324 error = -ENOMEM; 10325 goto out_iounmap_conf; 10326 } 10327 } 10328 10329 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10330 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10331 /* 10332 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10333 * virtual address and setup the registers. 10334 */ 10335 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10336 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10337 phba->sli4_hba.drbl_regs_memmap_p = 10338 ioremap(phba->pci_bar1_map, bar1map_len); 10339 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10340 dev_err(&pdev->dev, 10341 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10342 error = -ENOMEM; 10343 goto out_iounmap_conf; 10344 } 10345 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10346 lpfc_sli4_bar1_register_memmap(phba, if_type); 10347 } 10348 10349 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10350 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10351 /* 10352 * Map SLI4 if type 0 HBA Doorbell Register base to 10353 * a kernel virtual address and setup the registers. 
10354 */ 10355 phba->pci_bar2_map = pci_resource_start(pdev, 10356 PCI_64BIT_BAR4); 10357 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10358 phba->sli4_hba.drbl_regs_memmap_p = 10359 ioremap(phba->pci_bar2_map, 10360 bar2map_len); 10361 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10362 dev_err(&pdev->dev, 10363 "ioremap failed for SLI4 HBA" 10364 " doorbell registers.\n"); 10365 error = -ENOMEM; 10366 goto out_iounmap_ctrl; 10367 } 10368 phba->pci_bar4_memmap_p = 10369 phba->sli4_hba.drbl_regs_memmap_p; 10370 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 10371 if (error) 10372 goto out_iounmap_all; 10373 } else { 10374 error = -ENOMEM; 10375 goto out_iounmap_all; 10376 } 10377 } 10378 10379 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 10380 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10381 /* 10382 * Map SLI4 if type 6 HBA DPP Register base to a kernel 10383 * virtual address and setup the registers. 10384 */ 10385 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 10386 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10387 phba->sli4_hba.dpp_regs_memmap_p = 10388 ioremap(phba->pci_bar2_map, bar2map_len); 10389 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10390 dev_err(&pdev->dev, 10391 "ioremap failed for SLI4 HBA dpp registers.\n"); 10392 error = -ENOMEM; 10393 goto out_iounmap_ctrl; 10394 } 10395 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10396 } 10397 10398 /* Set up the EQ/CQ register handeling functions now */ 10399 switch (if_type) { 10400 case LPFC_SLI_INTF_IF_TYPE_0: 10401 case LPFC_SLI_INTF_IF_TYPE_2: 10402 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 10403 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 10404 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 10405 break; 10406 case LPFC_SLI_INTF_IF_TYPE_6: 10407 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 10408 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 10409 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 10410 break; 10411 default: 10412 break; 10413 } 10414 10415 return 0; 10416 10417 out_iounmap_all: 10418 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10419 out_iounmap_ctrl: 10420 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10421 out_iounmap_conf: 10422 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10423 10424 return error; 10425 } 10426 10427 /** 10428 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 10429 * @phba: pointer to lpfc hba data structure. 10430 * 10431 * This routine is invoked to unset the PCI device memory space for device 10432 * with SLI-4 interface spec. 
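 *
 * Only the BAR regions that lpfc_sli4_pci_mem_setup() mapped for the
 * detected interface type are unmapped here.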
10433 **/ 10434 static void 10435 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10436 { 10437 uint32_t if_type; 10438 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10439 10440 switch (if_type) { 10441 case LPFC_SLI_INTF_IF_TYPE_0: 10442 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10443 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10444 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10445 break; 10446 case LPFC_SLI_INTF_IF_TYPE_2: 10447 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10448 break; 10449 case LPFC_SLI_INTF_IF_TYPE_6: 10450 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10451 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10452 break; 10453 case LPFC_SLI_INTF_IF_TYPE_1: 10454 default: 10455 dev_printk(KERN_ERR, &phba->pcidev->dev, 10456 "FATAL - unsupported SLI4 interface type - %d\n", 10457 if_type); 10458 break; 10459 } 10460 } 10461 10462 /** 10463 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10464 * @phba: pointer to lpfc hba data structure. 10465 * 10466 * This routine is invoked to enable the MSI-X interrupt vectors to device 10467 * with SLI-3 interface specs. 10468 * 10469 * Return codes 10470 * 0 - successful 10471 * other values - error 10472 **/ 10473 static int 10474 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10475 { 10476 int rc; 10477 LPFC_MBOXQ_t *pmb; 10478 10479 /* Set up MSI-X multi-message vectors */ 10480 rc = pci_alloc_irq_vectors(phba->pcidev, 10481 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10482 if (rc < 0) { 10483 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10484 "0420 PCI enable MSI-X failed (%d)\n", rc); 10485 goto vec_fail_out; 10486 } 10487 10488 /* 10489 * Assign MSI-X vectors to interrupt handlers 10490 */ 10491 10492 /* vector-0 is associated to slow-path handler */ 10493 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10494 &lpfc_sli_sp_intr_handler, 0, 10495 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10496 if (rc) { 10497 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10498 "0421 MSI-X slow-path request_irq failed " 10499 "(%d)\n", rc); 10500 goto msi_fail_out; 10501 } 10502 10503 /* vector-1 is associated to fast-path handler */ 10504 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10505 &lpfc_sli_fp_intr_handler, 0, 10506 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10507 10508 if (rc) { 10509 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10510 "0429 MSI-X fast-path request_irq failed " 10511 "(%d)\n", rc); 10512 goto irq_fail_out; 10513 } 10514 10515 /* 10516 * Configure HBA MSI-X attention conditions to messages 10517 */ 10518 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10519 10520 if (!pmb) { 10521 rc = -ENOMEM; 10522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10523 "0474 Unable to allocate memory for issuing " 10524 "MBOX_CONFIG_MSI command\n"); 10525 goto mem_fail_out; 10526 } 10527 rc = lpfc_config_msi(phba, pmb); 10528 if (rc) 10529 goto mbx_fail_out; 10530 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10531 if (rc != MBX_SUCCESS) { 10532 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10533 "0351 Config MSI mailbox command failed, " 10534 "mbxCmd x%x, mbxStatus x%x\n", 10535 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10536 goto mbx_fail_out; 10537 } 10538 10539 /* Free memory allocated for mailbox command */ 10540 mempool_free(pmb, phba->mbox_mem_pool); 10541 return rc; 10542 10543 mbx_fail_out: 10544 /* Free memory allocated for mailbox command */ 10545 mempool_free(pmb, phba->mbox_mem_pool); 10546 10547 mem_fail_out: 10548 /* free the irq already requested */ 10549 
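	/* Vector 1 is the fast-path handler registered above; vector 0 is
	 * released at irq_fail_out.
	 */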
free_irq(pci_irq_vector(phba->pcidev, 1), phba); 10550 10551 irq_fail_out: 10552 /* free the irq already requested */ 10553 free_irq(pci_irq_vector(phba->pcidev, 0), phba); 10554 10555 msi_fail_out: 10556 /* Unconfigure MSI-X capability structure */ 10557 pci_free_irq_vectors(phba->pcidev); 10558 10559 vec_fail_out: 10560 return rc; 10561 } 10562 10563 /** 10564 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 10565 * @phba: pointer to lpfc hba data structure. 10566 * 10567 * This routine is invoked to enable the MSI interrupt mode to device with 10568 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 10569 * enable the MSI vector. The device driver is responsible for calling the 10570 * request_irq() to register MSI vector with a interrupt the handler, which 10571 * is done in this function. 10572 * 10573 * Return codes 10574 * 0 - successful 10575 * other values - error 10576 */ 10577 static int 10578 lpfc_sli_enable_msi(struct lpfc_hba *phba) 10579 { 10580 int rc; 10581 10582 rc = pci_enable_msi(phba->pcidev); 10583 if (!rc) 10584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10585 "0462 PCI enable MSI mode success.\n"); 10586 else { 10587 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10588 "0471 PCI enable MSI mode failed (%d)\n", rc); 10589 return rc; 10590 } 10591 10592 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10593 0, LPFC_DRIVER_NAME, phba); 10594 if (rc) { 10595 pci_disable_msi(phba->pcidev); 10596 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10597 "0478 MSI request_irq failed (%d)\n", rc); 10598 } 10599 return rc; 10600 } 10601 10602 /** 10603 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 10604 * @phba: pointer to lpfc hba data structure. 10605 * 10606 * This routine is invoked to enable device interrupt and associate driver's 10607 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 10608 * spec. Depends on the interrupt mode configured to the driver, the driver 10609 * will try to fallback from the configured interrupt mode to an interrupt 10610 * mode which is supported by the platform, kernel, and device in the order 10611 * of: 10612 * MSI-X -> MSI -> IRQ. 
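 *
 * The value returned encodes the mode actually enabled: 2 for MSI-X, 1 for
 * MSI, 0 for INTx, or LPFC_INTR_ERROR when no interrupt mode could be set up.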
10613 * 10614 * Return codes 10615 * 0 - successful 10616 * other values - error 10617 **/ 10618 static uint32_t 10619 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10620 { 10621 uint32_t intr_mode = LPFC_INTR_ERROR; 10622 int retval; 10623 10624 if (cfg_mode == 2) { 10625 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 10626 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 10627 if (!retval) { 10628 /* Now, try to enable MSI-X interrupt mode */ 10629 retval = lpfc_sli_enable_msix(phba); 10630 if (!retval) { 10631 /* Indicate initialization to MSI-X mode */ 10632 phba->intr_type = MSIX; 10633 intr_mode = 2; 10634 } 10635 } 10636 } 10637 10638 /* Fallback to MSI if MSI-X initialization failed */ 10639 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10640 retval = lpfc_sli_enable_msi(phba); 10641 if (!retval) { 10642 /* Indicate initialization to MSI mode */ 10643 phba->intr_type = MSI; 10644 intr_mode = 1; 10645 } 10646 } 10647 10648 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 10649 if (phba->intr_type == NONE) { 10650 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10651 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10652 if (!retval) { 10653 /* Indicate initialization to INTx mode */ 10654 phba->intr_type = INTx; 10655 intr_mode = 0; 10656 } 10657 } 10658 return intr_mode; 10659 } 10660 10661 /** 10662 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 10663 * @phba: pointer to lpfc hba data structure. 10664 * 10665 * This routine is invoked to disable device interrupt and disassociate the 10666 * driver's interrupt handler(s) from interrupt vector(s) to device with 10667 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 10668 * release the interrupt vector(s) for the message signaled interrupt. 10669 **/ 10670 static void 10671 lpfc_sli_disable_intr(struct lpfc_hba *phba) 10672 { 10673 int nr_irqs, i; 10674 10675 if (phba->intr_type == MSIX) 10676 nr_irqs = LPFC_MSIX_VECTORS; 10677 else 10678 nr_irqs = 1; 10679 10680 for (i = 0; i < nr_irqs; i++) 10681 free_irq(pci_irq_vector(phba->pcidev, i), phba); 10682 pci_free_irq_vectors(phba->pcidev); 10683 10684 /* Reset interrupt management states */ 10685 phba->intr_type = NONE; 10686 phba->sli.slistat.sli_intr = 0; 10687 } 10688 10689 /** 10690 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 10691 * @phba: pointer to lpfc hba data structure. 10692 * @id: EQ vector index or Hardware Queue index 10693 * @match: LPFC_FIND_BY_EQ = match by EQ 10694 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 10695 * Return the CPU that matches the selection criteria 10696 */ 10697 static uint16_t 10698 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 10699 { 10700 struct lpfc_vector_map_info *cpup; 10701 int cpu; 10702 10703 /* Loop through all CPUs */ 10704 for_each_present_cpu(cpu) { 10705 cpup = &phba->sli4_hba.cpu_map[cpu]; 10706 10707 /* If we are matching by EQ, there may be multiple CPUs using 10708 * using the same vector, so select the one with 10709 * LPFC_CPU_FIRST_IRQ set. 
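		 * For example, when two CPUs share EQ vector 3, both have
		 * cpup->eq == 3 but only the first carries LPFC_CPU_FIRST_IRQ
		 * and is the CPU returned here.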
10710 */ 10711 if ((match == LPFC_FIND_BY_EQ) && 10712 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 10713 (cpup->eq == id)) 10714 return cpu; 10715 10716 /* If matching by HDWQ, select the first CPU that matches */ 10717 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10718 return cpu; 10719 } 10720 return 0; 10721 } 10722 10723 #ifdef CONFIG_X86 10724 /** 10725 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10726 * @phba: pointer to lpfc hba data structure. 10727 * @cpu: CPU map index 10728 * @phys_id: CPU package physical id 10729 * @core_id: CPU core id 10730 */ 10731 static int 10732 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10733 uint16_t phys_id, uint16_t core_id) 10734 { 10735 struct lpfc_vector_map_info *cpup; 10736 int idx; 10737 10738 for_each_present_cpu(idx) { 10739 cpup = &phba->sli4_hba.cpu_map[idx]; 10740 /* Does the cpup match the one we are looking for */ 10741 if ((cpup->phys_id == phys_id) && 10742 (cpup->core_id == core_id) && 10743 (cpu != idx)) 10744 return 1; 10745 } 10746 return 0; 10747 } 10748 #endif 10749 10750 /* 10751 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 10752 * @phba: pointer to lpfc hba data structure. 10753 * @eqidx: index for eq and irq vector 10754 * @flag: flags to set for vector_map structure 10755 * @cpu: cpu used to index vector_map structure 10756 * 10757 * The routine assigns eq info into vector_map structure 10758 */ 10759 static inline void 10760 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 10761 unsigned int cpu) 10762 { 10763 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 10764 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 10765 10766 cpup->eq = eqidx; 10767 cpup->flag |= flag; 10768 10769 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10770 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 10771 cpu, eqhdl->irq, cpup->eq, cpup->flag); 10772 } 10773 10774 /** 10775 * lpfc_cpu_map_array_init - Initialize cpu_map structure 10776 * @phba: pointer to lpfc hba data structure. 10777 * 10778 * The routine initializes the cpu_map array structure 10779 */ 10780 static void 10781 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 10782 { 10783 struct lpfc_vector_map_info *cpup; 10784 struct lpfc_eq_intr_info *eqi; 10785 int cpu; 10786 10787 for_each_possible_cpu(cpu) { 10788 cpup = &phba->sli4_hba.cpu_map[cpu]; 10789 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 10790 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 10791 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 10792 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 10793 cpup->flag = 0; 10794 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 10795 INIT_LIST_HEAD(&eqi->list); 10796 eqi->icnt = 0; 10797 } 10798 } 10799 10800 /** 10801 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 10802 * @phba: pointer to lpfc hba data structure. 10803 * 10804 * The routine initializes the hba_eq_hdl array structure 10805 */ 10806 static void 10807 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 10808 { 10809 struct lpfc_hba_eq_hdl *eqhdl; 10810 int i; 10811 10812 for (i = 0; i < phba->cfg_irq_chann; i++) { 10813 eqhdl = lpfc_get_eq_hdl(i); 10814 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; 10815 eqhdl->phba = phba; 10816 } 10817 } 10818 10819 /** 10820 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10821 * @phba: pointer to lpfc hba data structure. 10822 * @vectors: number of msix vectors allocated. 10823 * 10824 * The routine will figure out the CPU affinity assignment for every 10825 * MSI-X vector allocated for the HBA. 
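 * It runs in several passes: record each CPU's phys/core id, assign any CPU
 * the kernel left unaffinitized to an EQ on the same phys_id (then to any
 * EQ), and finally hand out hdwq indices.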
10826 * In addition, the CPU to IO channel mapping will be calculated 10827 * and the phba->sli4_hba.cpu_map array will reflect this. 10828 */ 10829 static void 10830 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 10831 { 10832 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 10833 int max_phys_id, min_phys_id; 10834 int max_core_id, min_core_id; 10835 struct lpfc_vector_map_info *cpup; 10836 struct lpfc_vector_map_info *new_cpup; 10837 #ifdef CONFIG_X86 10838 struct cpuinfo_x86 *cpuinfo; 10839 #endif 10840 10841 max_phys_id = 0; 10842 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 10843 max_core_id = 0; 10844 min_core_id = LPFC_VECTOR_MAP_EMPTY; 10845 10846 /* Update CPU map with physical id and core id of each CPU */ 10847 for_each_present_cpu(cpu) { 10848 cpup = &phba->sli4_hba.cpu_map[cpu]; 10849 #ifdef CONFIG_X86 10850 cpuinfo = &cpu_data(cpu); 10851 cpup->phys_id = cpuinfo->phys_proc_id; 10852 cpup->core_id = cpuinfo->cpu_core_id; 10853 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 10854 cpup->flag |= LPFC_CPU_MAP_HYPER; 10855 #else 10856 /* No distinction between CPUs for other platforms */ 10857 cpup->phys_id = 0; 10858 cpup->core_id = cpu; 10859 #endif 10860 10861 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10862 "3328 CPU %d physid %d coreid %d flag x%x\n", 10863 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 10864 10865 if (cpup->phys_id > max_phys_id) 10866 max_phys_id = cpup->phys_id; 10867 if (cpup->phys_id < min_phys_id) 10868 min_phys_id = cpup->phys_id; 10869 10870 if (cpup->core_id > max_core_id) 10871 max_core_id = cpup->core_id; 10872 if (cpup->core_id < min_core_id) 10873 min_core_id = cpup->core_id; 10874 } 10875 10876 /* After looking at each irq vector assigned to this pcidev, its 10877 * possible to see that not ALL CPUs have been accounted for. 10878 * Next we will set any unassigned (unaffinitized) cpu map 10879 * entries to a IRQ on the same phys_id. 10880 */ 10881 first_cpu = cpumask_first(cpu_present_mask); 10882 start_cpu = first_cpu; 10883 10884 for_each_present_cpu(cpu) { 10885 cpup = &phba->sli4_hba.cpu_map[cpu]; 10886 10887 /* Is this CPU entry unassigned */ 10888 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 10889 /* Mark CPU as IRQ not assigned by the kernel */ 10890 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 10891 10892 /* If so, find a new_cpup thats on the the SAME 10893 * phys_id as cpup. start_cpu will start where we 10894 * left off so all unassigned entries don't get assgined 10895 * the IRQ of the first entry. 10896 */ 10897 new_cpu = start_cpu; 10898 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10899 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10900 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 10901 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 10902 (new_cpup->phys_id == cpup->phys_id)) 10903 goto found_same; 10904 new_cpu = cpumask_next( 10905 new_cpu, cpu_present_mask); 10906 if (new_cpu == nr_cpumask_bits) 10907 new_cpu = first_cpu; 10908 } 10909 /* At this point, we leave the CPU as unassigned */ 10910 continue; 10911 found_same: 10912 /* We found a matching phys_id, so copy the IRQ info */ 10913 cpup->eq = new_cpup->eq; 10914 10915 /* Bump start_cpu to the next slot to minmize the 10916 * chance of having multiple unassigned CPU entries 10917 * selecting the same IRQ. 
10918 */ 10919 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10920 if (start_cpu == nr_cpumask_bits) 10921 start_cpu = first_cpu; 10922 10923 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10924 "3337 Set Affinity: CPU %d " 10925 "eq %d from peer cpu %d same " 10926 "phys_id (%d)\n", 10927 cpu, cpup->eq, new_cpu, 10928 cpup->phys_id); 10929 } 10930 } 10931 10932 /* Set any unassigned cpu map entries to a IRQ on any phys_id */ 10933 start_cpu = first_cpu; 10934 10935 for_each_present_cpu(cpu) { 10936 cpup = &phba->sli4_hba.cpu_map[cpu]; 10937 10938 /* Is this entry unassigned */ 10939 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 10940 /* Mark it as IRQ not assigned by the kernel */ 10941 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 10942 10943 /* If so, find a new_cpup thats on ANY phys_id 10944 * as the cpup. start_cpu will start where we 10945 * left off so all unassigned entries don't get 10946 * assigned the IRQ of the first entry. 10947 */ 10948 new_cpu = start_cpu; 10949 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10950 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10951 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 10952 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) 10953 goto found_any; 10954 new_cpu = cpumask_next( 10955 new_cpu, cpu_present_mask); 10956 if (new_cpu == nr_cpumask_bits) 10957 new_cpu = first_cpu; 10958 } 10959 /* We should never leave an entry unassigned */ 10960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10961 "3339 Set Affinity: CPU %d " 10962 "eq %d UNASSIGNED\n", 10963 cpup->hdwq, cpup->eq); 10964 continue; 10965 found_any: 10966 /* We found an available entry, copy the IRQ info */ 10967 cpup->eq = new_cpup->eq; 10968 10969 /* Bump start_cpu to the next slot to minmize the 10970 * chance of having multiple unassigned CPU entries 10971 * selecting the same IRQ. 10972 */ 10973 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10974 if (start_cpu == nr_cpumask_bits) 10975 start_cpu = first_cpu; 10976 10977 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10978 "3338 Set Affinity: CPU %d " 10979 "eq %d from peer cpu %d (%d/%d)\n", 10980 cpu, cpup->eq, new_cpu, 10981 new_cpup->phys_id, new_cpup->core_id); 10982 } 10983 } 10984 10985 /* Assign hdwq indices that are unique across all cpus in the map 10986 * that are also FIRST_CPUs. 10987 */ 10988 idx = 0; 10989 for_each_present_cpu(cpu) { 10990 cpup = &phba->sli4_hba.cpu_map[cpu]; 10991 10992 /* Only FIRST IRQs get a hdwq index assignment. */ 10993 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10994 continue; 10995 10996 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ 10997 cpup->hdwq = idx; 10998 idx++; 10999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11000 "3333 Set Affinity: CPU %d (phys %d core %d): " 11001 "hdwq %d eq %d flg x%x\n", 11002 cpu, cpup->phys_id, cpup->core_id, 11003 cpup->hdwq, cpup->eq, cpup->flag); 11004 } 11005 /* Associate a hdwq with each cpu_map entry 11006 * This will be 1 to 1 - hdwq to cpu, unless there are less 11007 * hardware queues then CPUs. For that case we will just round-robin 11008 * the available hardware queues as they get assigned to CPUs. 11009 * The next_idx is the idx from the FIRST_CPU loop above to account 11010 * for irq_chann < hdwq. The idx is used for round-robin assignments 11011 * and needs to start at 0. 11012 */ 11013 next_idx = idx; 11014 start_cpu = 0; 11015 idx = 0; 11016 for_each_present_cpu(cpu) { 11017 cpup = &phba->sli4_hba.cpu_map[cpu]; 11018 11019 /* FIRST cpus are already mapped. 
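		 * They keep the hdwq index assigned in the loop above.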
*/ 11020 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 11021 continue; 11022 11023 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 11024 * of the unassigned cpus to the next idx so that all 11025 * hdw queues are fully utilized. 11026 */ 11027 if (next_idx < phba->cfg_hdw_queue) { 11028 cpup->hdwq = next_idx; 11029 next_idx++; 11030 continue; 11031 } 11032 11033 /* Not a First CPU and all hdw_queues are used. Reuse a 11034 * Hardware Queue for another CPU, so be smart about it 11035 * and pick one that has its IRQ/EQ mapped to the same phys_id 11036 * (CPU package) and core_id. 11037 */ 11038 new_cpu = start_cpu; 11039 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11040 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11041 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11042 new_cpup->phys_id == cpup->phys_id && 11043 new_cpup->core_id == cpup->core_id) { 11044 goto found_hdwq; 11045 } 11046 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11047 if (new_cpu == nr_cpumask_bits) 11048 new_cpu = first_cpu; 11049 } 11050 11051 /* If we can't match both phys_id and core_id, 11052 * settle for just a phys_id match. 11053 */ 11054 new_cpu = start_cpu; 11055 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 11056 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 11057 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 11058 new_cpup->phys_id == cpup->phys_id) 11059 goto found_hdwq; 11060 11061 new_cpu = cpumask_next(new_cpu, cpu_present_mask); 11062 if (new_cpu == nr_cpumask_bits) 11063 new_cpu = first_cpu; 11064 } 11065 11066 /* Otherwise just round robin on cfg_hdw_queue */ 11067 cpup->hdwq = idx % phba->cfg_hdw_queue; 11068 idx++; 11069 goto logit; 11070 found_hdwq: 11071 /* We found an available entry, copy the IRQ info */ 11072 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 11073 if (start_cpu == nr_cpumask_bits) 11074 start_cpu = first_cpu; 11075 cpup->hdwq = new_cpup->hdwq; 11076 logit: 11077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11078 "3335 Set Affinity: CPU %d (phys %d core %d): " 11079 "hdwq %d eq %d flg x%x\n", 11080 cpu, cpup->phys_id, cpup->core_id, 11081 cpup->hdwq, cpup->eq, cpup->flag); 11082 } 11083 11084 /* 11085 * Initialize the cpu_map slots for not-present cpus in case 11086 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 11087 */ 11088 idx = 0; 11089 for_each_possible_cpu(cpu) { 11090 cpup = &phba->sli4_hba.cpu_map[cpu]; 11091 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 11092 continue; 11093 11094 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 11095 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11096 "3340 Set Affinity: not present " 11097 "CPU %d hdwq %d\n", 11098 cpu, cpup->hdwq); 11099 } 11100 11101 /* The cpu_map array will be used later during initialization 11102 * when EQ / CQ / WQs are allocated and configured. 11103 */ 11104 return; 11105 } 11106 11107 /** 11108 * lpfc_cpuhp_get_eq 11109 * 11110 * @phba: pointer to lpfc hba data structure. 11111 * @cpu: cpu going offline 11112 * @eqlist: 11113 */ 11114 static void 11115 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, 11116 struct list_head *eqlist) 11117 { 11118 const struct cpumask *maskp; 11119 struct lpfc_queue *eq; 11120 cpumask_t tmp; 11121 u16 idx; 11122 11123 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11124 maskp = pci_irq_get_affinity(phba->pcidev, idx); 11125 if (!maskp) 11126 continue; 11127 /* 11128 * if irq is not affinitized to the cpu going 11129 * then we don't need to poll the eq attached 11130 * to it. 
11131 */ 11132 if (!cpumask_and(&tmp, maskp, cpumask_of(cpu))) 11133 continue; 11134 /* get the cpus that are online and are affini- 11135 * tized to this irq vector. If the count is 11136 * more than 1 then cpuhp is not going to shut- 11137 * down this vector. Since this cpu has not 11138 * gone offline yet, we need >1. 11139 */ 11140 cpumask_and(&tmp, maskp, cpu_online_mask); 11141 if (cpumask_weight(&tmp) > 1) 11142 continue; 11143 11144 /* Now that we have an irq to shutdown, get the eq 11145 * mapped to this irq. Note: multiple hdwq's in 11146 * the software can share an eq, but eventually 11147 * only eq will be mapped to this vector 11148 */ 11149 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 11150 list_add(&eq->_poll_list, eqlist); 11151 } 11152 } 11153 11154 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) 11155 { 11156 if (phba->sli_rev != LPFC_SLI_REV4) 11157 return; 11158 11159 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, 11160 &phba->cpuhp); 11161 /* 11162 * unregistering the instance doesn't stop the polling 11163 * timer. Wait for the poll timer to retire. 11164 */ 11165 synchronize_rcu(); 11166 del_timer_sync(&phba->cpuhp_poll_timer); 11167 } 11168 11169 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 11170 { 11171 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 11172 return; 11173 11174 __lpfc_cpuhp_remove(phba); 11175 } 11176 11177 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 11178 { 11179 if (phba->sli_rev != LPFC_SLI_REV4) 11180 return; 11181 11182 rcu_read_lock(); 11183 11184 if (!list_empty(&phba->poll_list)) { 11185 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 11186 mod_timer(&phba->cpuhp_poll_timer, 11187 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 11188 } 11189 11190 rcu_read_unlock(); 11191 11192 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 11193 &phba->cpuhp); 11194 } 11195 11196 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 11197 { 11198 if (phba->pport->load_flag & FC_UNLOADING) { 11199 *retval = -EAGAIN; 11200 return true; 11201 } 11202 11203 if (phba->sli_rev != LPFC_SLI_REV4) { 11204 *retval = 0; 11205 return true; 11206 } 11207 11208 /* proceed with the hotplug */ 11209 return false; 11210 } 11211 11212 /** 11213 * lpfc_irq_set_aff - set IRQ affinity 11214 * @eqhdl: EQ handle 11215 * @cpu: cpu to set affinity 11216 * 11217 **/ 11218 static inline void 11219 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 11220 { 11221 cpumask_clear(&eqhdl->aff_mask); 11222 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 11223 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11224 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 11225 } 11226 11227 /** 11228 * lpfc_irq_clear_aff - clear IRQ affinity 11229 * @eqhdl: EQ handle 11230 * 11231 **/ 11232 static inline void 11233 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 11234 { 11235 cpumask_clear(&eqhdl->aff_mask); 11236 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 11237 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); 11238 } 11239 11240 /** 11241 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 11242 * @phba: pointer to HBA context object. 11243 * @cpu: cpu going offline/online 11244 * @offline: true, cpu is going offline. false, cpu is coming online. 11245 * 11246 * If cpu is going offline, we'll try our best effort to find the next 11247 * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities. 11248 * 11249 * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu. 
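 * Only CPUs flagged LPFC_CPU_FIRST_IRQ are rebalanced here; the other
 * CPUs share a vector with their first CPU and need no action.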
11250 * 11251 * Note: Call only if cfg_irq_numa is enabled, otherwise rely on 11252 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 11253 * 11254 **/ 11255 static void 11256 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 11257 { 11258 struct lpfc_vector_map_info *cpup; 11259 struct cpumask *aff_mask; 11260 unsigned int cpu_select, cpu_next, idx; 11261 const struct cpumask *numa_mask; 11262 11263 if (!phba->cfg_irq_numa) 11264 return; 11265 11266 numa_mask = &phba->sli4_hba.numa_mask; 11267 11268 if (!cpumask_test_cpu(cpu, numa_mask)) 11269 return; 11270 11271 cpup = &phba->sli4_hba.cpu_map[cpu]; 11272 11273 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11274 return; 11275 11276 if (offline) { 11277 /* Find next online CPU on NUMA node */ 11278 cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true); 11279 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next); 11280 11281 /* Found a valid CPU */ 11282 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 11283 /* Go through each eqhdl and ensure offlining 11284 * cpu aff_mask is migrated 11285 */ 11286 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11287 aff_mask = lpfc_get_aff_mask(idx); 11288 11289 /* Migrate affinity */ 11290 if (cpumask_test_cpu(cpu, aff_mask)) 11291 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 11292 cpu_select); 11293 } 11294 } else { 11295 /* Rely on irqbalance if no online CPUs left on NUMA */ 11296 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 11297 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 11298 } 11299 } else { 11300 /* Migrate affinity back to this CPU */ 11301 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 11302 } 11303 } 11304 11305 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 11306 { 11307 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11308 struct lpfc_queue *eq, *next; 11309 LIST_HEAD(eqlist); 11310 int retval; 11311 11312 if (!phba) { 11313 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11314 return 0; 11315 } 11316 11317 if (__lpfc_cpuhp_checks(phba, &retval)) 11318 return retval; 11319 11320 lpfc_irq_rebalance(phba, cpu, true); 11321 11322 lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 11323 11324 /* start polling on these eq's */ 11325 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 11326 list_del_init(&eq->_poll_list); 11327 lpfc_sli4_start_polling(eq); 11328 } 11329 11330 return 0; 11331 } 11332 11333 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 11334 { 11335 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 11336 struct lpfc_queue *eq, *next; 11337 unsigned int n; 11338 int retval; 11339 11340 if (!phba) { 11341 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 11342 return 0; 11343 } 11344 11345 if (__lpfc_cpuhp_checks(phba, &retval)) 11346 return retval; 11347 11348 lpfc_irq_rebalance(phba, cpu, false); 11349 11350 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 11351 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 11352 if (n == cpu) 11353 lpfc_sli4_stop_polling(eq); 11354 } 11355 11356 return 0; 11357 } 11358 11359 /** 11360 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 11361 * @phba: pointer to lpfc hba data structure. 11362 * 11363 * This routine is invoked to enable the MSI-X interrupt vectors to device 11364 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 11365 * to cpus on the system. 
11366 * 11367 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 11368 * the number of cpus on the same numa node as this adapter. The vectors are 11369 * allocated without requesting OS affinity mapping. A vector will be 11370 * allocated and assigned to each online and offline cpu. If the cpu is 11371 * online, then affinity will be set to that cpu. If the cpu is offline, then 11372 * affinity will be set to the nearest peer cpu within the numa node that is 11373 * online. If there are no online cpus within the numa node, affinity is not 11374 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 11375 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 11376 * configured. 11377 * 11378 * If numa mode is not enabled and there is more than 1 vector allocated, then 11379 * the driver relies on the managed irq interface where the OS assigns vector to 11380 * cpu affinity. The driver will then use that affinity mapping to setup its 11381 * cpu mapping table. 11382 * 11383 * Return codes 11384 * 0 - successful 11385 * other values - error 11386 **/ 11387 static int 11388 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 11389 { 11390 int vectors, rc, index; 11391 char *name; 11392 const struct cpumask *numa_mask = NULL; 11393 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 11394 struct lpfc_hba_eq_hdl *eqhdl; 11395 const struct cpumask *maskp; 11396 bool first; 11397 unsigned int flags = PCI_IRQ_MSIX; 11398 11399 /* Set up MSI-X multi-message vectors */ 11400 vectors = phba->cfg_irq_chann; 11401 11402 if (phba->cfg_irq_numa) { 11403 numa_mask = &phba->sli4_hba.numa_mask; 11404 cpu_cnt = cpumask_weight(numa_mask); 11405 vectors = min(phba->cfg_irq_chann, cpu_cnt); 11406 11407 /* cpu: iterates over numa_mask including offline or online 11408 * cpu_select: iterates over online numa_mask to set affinity 11409 */ 11410 cpu = cpumask_first(numa_mask); 11411 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu); 11412 } else { 11413 flags |= PCI_IRQ_AFFINITY; 11414 } 11415 11416 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 11417 if (rc < 0) { 11418 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11419 "0484 PCI enable MSI-X failed (%d)\n", rc); 11420 goto vec_fail_out; 11421 } 11422 vectors = rc; 11423 11424 /* Assign MSI-X vectors to interrupt handlers */ 11425 for (index = 0; index < vectors; index++) { 11426 eqhdl = lpfc_get_eq_hdl(index); 11427 name = eqhdl->handler_name; 11428 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 11429 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 11430 LPFC_DRIVER_HANDLER_NAME"%d", index); 11431 11432 eqhdl->idx = index; 11433 rc = request_irq(pci_irq_vector(phba->pcidev, index), 11434 &lpfc_sli4_hba_intr_handler, 0, 11435 name, eqhdl); 11436 if (rc) { 11437 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11438 "0486 MSI-X fast-path (%d) " 11439 "request_irq failed (%d)\n", index, rc); 11440 goto cfg_fail_out; 11441 } 11442 11443 eqhdl->irq = pci_irq_vector(phba->pcidev, index); 11444 11445 if (phba->cfg_irq_numa) { 11446 /* If found a neighboring online cpu, set affinity */ 11447 if (cpu_select < nr_cpu_ids) 11448 lpfc_irq_set_aff(eqhdl, cpu_select); 11449 11450 /* Assign EQ to cpu_map */ 11451 lpfc_assign_eq_map_info(phba, index, 11452 LPFC_CPU_FIRST_IRQ, 11453 cpu); 11454 11455 /* Iterate to next offline or online cpu in numa_mask */ 11456 cpu = cpumask_next(cpu, numa_mask); 11457 11458 /* Find next online cpu in numa_mask to set affinity */ 11459 cpu_select = 
lpfc_next_online_numa_cpu(numa_mask, cpu); 11460 } else if (vectors == 1) { 11461 cpu = cpumask_first(cpu_present_mask); 11462 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 11463 cpu); 11464 } else { 11465 maskp = pci_irq_get_affinity(phba->pcidev, index); 11466 11467 first = true; 11468 /* Loop through all CPUs associated with vector index */ 11469 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 11470 /* If this is the first CPU thats assigned to 11471 * this vector, set LPFC_CPU_FIRST_IRQ. 11472 */ 11473 lpfc_assign_eq_map_info(phba, index, 11474 first ? 11475 LPFC_CPU_FIRST_IRQ : 0, 11476 cpu); 11477 if (first) 11478 first = false; 11479 } 11480 } 11481 } 11482 11483 if (vectors != phba->cfg_irq_chann) { 11484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11485 "3238 Reducing IO channels to match number of " 11486 "MSI-X vectors, requested %d got %d\n", 11487 phba->cfg_irq_chann, vectors); 11488 if (phba->cfg_irq_chann > vectors) 11489 phba->cfg_irq_chann = vectors; 11490 } 11491 11492 return rc; 11493 11494 cfg_fail_out: 11495 /* free the irq already requested */ 11496 for (--index; index >= 0; index--) { 11497 eqhdl = lpfc_get_eq_hdl(index); 11498 lpfc_irq_clear_aff(eqhdl); 11499 irq_set_affinity_hint(eqhdl->irq, NULL); 11500 free_irq(eqhdl->irq, eqhdl); 11501 } 11502 11503 /* Unconfigure MSI-X capability structure */ 11504 pci_free_irq_vectors(phba->pcidev); 11505 11506 vec_fail_out: 11507 return rc; 11508 } 11509 11510 /** 11511 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 11512 * @phba: pointer to lpfc hba data structure. 11513 * 11514 * This routine is invoked to enable the MSI interrupt mode to device with 11515 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 11516 * called to enable the MSI vector. The device driver is responsible for 11517 * calling the request_irq() to register MSI vector with a interrupt the 11518 * handler, which is done in this function. 11519 * 11520 * Return codes 11521 * 0 - successful 11522 * other values - error 11523 **/ 11524 static int 11525 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 11526 { 11527 int rc, index; 11528 unsigned int cpu; 11529 struct lpfc_hba_eq_hdl *eqhdl; 11530 11531 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 11532 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 11533 if (rc > 0) 11534 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11535 "0487 PCI enable MSI mode success.\n"); 11536 else { 11537 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11538 "0488 PCI enable MSI mode failed (%d)\n", rc); 11539 return rc ? rc : -1; 11540 } 11541 11542 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11543 0, LPFC_DRIVER_NAME, phba); 11544 if (rc) { 11545 pci_free_irq_vectors(phba->pcidev); 11546 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11547 "0490 MSI request_irq failed (%d)\n", rc); 11548 return rc; 11549 } 11550 11551 eqhdl = lpfc_get_eq_hdl(0); 11552 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 11553 11554 cpu = cpumask_first(cpu_present_mask); 11555 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 11556 11557 for (index = 0; index < phba->cfg_irq_chann; index++) { 11558 eqhdl = lpfc_get_eq_hdl(index); 11559 eqhdl->idx = index; 11560 } 11561 11562 return 0; 11563 } 11564 11565 /** 11566 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 11567 * @phba: pointer to lpfc hba data structure. 
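 * @cfg_mode: configured interrupt mode to attempt first
 *            (2 - MSI-X, 1 - MSI, 0 - INTx).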
11568 * 11569 * This routine is invoked to enable device interrupt and associate driver's 11570 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 11571 * interface spec. Depends on the interrupt mode configured to the driver, 11572 * the driver will try to fallback from the configured interrupt mode to an 11573 * interrupt mode which is supported by the platform, kernel, and device in 11574 * the order of: 11575 * MSI-X -> MSI -> IRQ. 11576 * 11577 * Return codes 11578 * 0 - successful 11579 * other values - error 11580 **/ 11581 static uint32_t 11582 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 11583 { 11584 uint32_t intr_mode = LPFC_INTR_ERROR; 11585 int retval, idx; 11586 11587 if (cfg_mode == 2) { 11588 /* Preparation before conf_msi mbox cmd */ 11589 retval = 0; 11590 if (!retval) { 11591 /* Now, try to enable MSI-X interrupt mode */ 11592 retval = lpfc_sli4_enable_msix(phba); 11593 if (!retval) { 11594 /* Indicate initialization to MSI-X mode */ 11595 phba->intr_type = MSIX; 11596 intr_mode = 2; 11597 } 11598 } 11599 } 11600 11601 /* Fallback to MSI if MSI-X initialization failed */ 11602 if (cfg_mode >= 1 && phba->intr_type == NONE) { 11603 retval = lpfc_sli4_enable_msi(phba); 11604 if (!retval) { 11605 /* Indicate initialization to MSI mode */ 11606 phba->intr_type = MSI; 11607 intr_mode = 1; 11608 } 11609 } 11610 11611 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 11612 if (phba->intr_type == NONE) { 11613 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11614 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 11615 if (!retval) { 11616 struct lpfc_hba_eq_hdl *eqhdl; 11617 unsigned int cpu; 11618 11619 /* Indicate initialization to INTx mode */ 11620 phba->intr_type = INTx; 11621 intr_mode = 0; 11622 11623 eqhdl = lpfc_get_eq_hdl(0); 11624 eqhdl->irq = pci_irq_vector(phba->pcidev, 0); 11625 11626 cpu = cpumask_first(cpu_present_mask); 11627 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 11628 cpu); 11629 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11630 eqhdl = lpfc_get_eq_hdl(idx); 11631 eqhdl->idx = idx; 11632 } 11633 } 11634 } 11635 return intr_mode; 11636 } 11637 11638 /** 11639 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 11640 * @phba: pointer to lpfc hba data structure. 11641 * 11642 * This routine is invoked to disable device interrupt and disassociate 11643 * the driver's interrupt handler(s) from interrupt vector(s) to device 11644 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 11645 * will release the interrupt vector(s) for the message signaled interrupt. 11646 **/ 11647 static void 11648 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 11649 { 11650 /* Disable the currently initialized interrupt mode */ 11651 if (phba->intr_type == MSIX) { 11652 int index; 11653 struct lpfc_hba_eq_hdl *eqhdl; 11654 11655 /* Free up MSI-X multi-message vectors */ 11656 for (index = 0; index < phba->cfg_irq_chann; index++) { 11657 eqhdl = lpfc_get_eq_hdl(index); 11658 lpfc_irq_clear_aff(eqhdl); 11659 irq_set_affinity_hint(eqhdl->irq, NULL); 11660 free_irq(eqhdl->irq, eqhdl); 11661 } 11662 } else { 11663 free_irq(phba->pcidev->irq, phba); 11664 } 11665 11666 pci_free_irq_vectors(phba->pcidev); 11667 11668 /* Reset interrupt management states */ 11669 phba->intr_type = NONE; 11670 phba->sli.slistat.sli_intr = 0; 11671 } 11672 11673 /** 11674 * lpfc_unset_hba - Unset SLI3 hba device initialization 11675 * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of device's XRIs exchange busy. It will check the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, log error message, and wait forever. Only when
 * all XRI exchange busy complete, the driver unload shall proceed with
 * invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	int idx, ccnt;
	int wait_time = 0;
	int io_xri_cmpl = 1;
	int nvmet_xri_cmpl = 1;
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Driver just aborted IOs during the hba_unset process.  Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */
	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);

	/* Wait for NVME pending IO to flush back to transport.
*/ 11736 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11737 lpfc_nvme_wait_for_io_drain(phba); 11738 11739 ccnt = 0; 11740 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11741 qp = &phba->sli4_hba.hdwq[idx]; 11742 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 11743 if (!io_xri_cmpl) /* if list is NOT empty */ 11744 ccnt++; 11745 } 11746 if (ccnt) 11747 io_xri_cmpl = 0; 11748 11749 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11750 nvmet_xri_cmpl = 11751 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11752 } 11753 11754 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 11755 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11756 if (!nvmet_xri_cmpl) 11757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11758 "6424 NVMET XRI exchange busy " 11759 "wait time: %d seconds.\n", 11760 wait_time/1000); 11761 if (!io_xri_cmpl) 11762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11763 "6100 IO XRI exchange busy " 11764 "wait time: %d seconds.\n", 11765 wait_time/1000); 11766 if (!els_xri_cmpl) 11767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11768 "2878 ELS XRI exchange busy " 11769 "wait time: %d seconds.\n", 11770 wait_time/1000); 11771 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 11772 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 11773 } else { 11774 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 11775 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 11776 } 11777 11778 ccnt = 0; 11779 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11780 qp = &phba->sli4_hba.hdwq[idx]; 11781 io_xri_cmpl = list_empty( 11782 &qp->lpfc_abts_io_buf_list); 11783 if (!io_xri_cmpl) /* if list is NOT empty */ 11784 ccnt++; 11785 } 11786 if (ccnt) 11787 io_xri_cmpl = 0; 11788 11789 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11790 nvmet_xri_cmpl = list_empty( 11791 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11792 } 11793 els_xri_cmpl = 11794 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11795 11796 } 11797 } 11798 11799 /** 11800 * lpfc_sli4_hba_unset - Unset the fcoe hba 11801 * @phba: Pointer to HBA context object. 11802 * 11803 * This function is called in the SLI4 code path to reset the HBA's FCoE 11804 * function. The caller is not required to hold any lock. This routine 11805 * issues PCI function reset mailbox command to reset the FCoE function. 11806 * At the end of the function, it calls lpfc_hba_down_post function to 11807 * free any pending commands. 11808 **/ 11809 static void 11810 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 11811 { 11812 int wait_cnt = 0; 11813 LPFC_MBOXQ_t *mboxq; 11814 struct pci_dev *pdev = phba->pcidev; 11815 11816 lpfc_stop_hba_timers(phba); 11817 if (phba->pport) 11818 phba->sli4_hba.intr_enable = 0; 11819 11820 /* 11821 * Gracefully wait out the potential current outstanding asynchronous 11822 * mailbox command. 
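 * New async mailbox posts are blocked first; the active command is then
 * polled for up to LPFC_ACTIVE_MBOX_WAIT_CNT iterations and, if still
 * outstanding, completed by hand with MBX_NOT_FINISHED status.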
11823 */ 11824 11825 /* First, block any pending async mailbox command from posted */ 11826 spin_lock_irq(&phba->hbalock); 11827 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11828 spin_unlock_irq(&phba->hbalock); 11829 /* Now, trying to wait it out if we can */ 11830 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11831 msleep(10); 11832 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 11833 break; 11834 } 11835 /* Forcefully release the outstanding mailbox command if timed out */ 11836 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11837 spin_lock_irq(&phba->hbalock); 11838 mboxq = phba->sli.mbox_active; 11839 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 11840 __lpfc_mbox_cmpl_put(phba, mboxq); 11841 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11842 phba->sli.mbox_active = NULL; 11843 spin_unlock_irq(&phba->hbalock); 11844 } 11845 11846 /* Abort all iocbs associated with the hba */ 11847 lpfc_sli_hba_iocb_abort(phba); 11848 11849 /* Wait for completion of device XRI exchange busy */ 11850 lpfc_sli4_xri_exchange_busy_wait(phba); 11851 11852 /* per-phba callback de-registration for hotplug event */ 11853 lpfc_cpuhp_remove(phba); 11854 11855 /* Disable PCI subsystem interrupt */ 11856 lpfc_sli4_disable_intr(phba); 11857 11858 /* Disable SR-IOV if enabled */ 11859 if (phba->cfg_sriov_nr_virtfn) 11860 pci_disable_sriov(pdev); 11861 11862 /* Stop kthread signal shall trigger work_done one more time */ 11863 kthread_stop(phba->worker_thread); 11864 11865 /* Disable FW logging to host memory */ 11866 lpfc_ras_stop_fwlog(phba); 11867 11868 /* Unset the queues shared with the hardware then release all 11869 * allocated resources. 11870 */ 11871 lpfc_sli4_queue_unset(phba); 11872 lpfc_sli4_queue_destroy(phba); 11873 11874 /* Reset SLI4 HBA FCoE function */ 11875 lpfc_pci_function_reset(phba); 11876 11877 /* Free RAS DMA memory */ 11878 if (phba->ras_fwlog.ras_enabled) 11879 lpfc_sli4_ras_dma_free(phba); 11880 11881 /* Stop the SLI4 device port */ 11882 if (phba->pport) 11883 phba->pport->work_port_events = 0; 11884 } 11885 11886 /** 11887 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 11888 * @phba: Pointer to HBA context object. 11889 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11890 * 11891 * This function is called in the SLI4 code path to read the port's 11892 * sli4 capabilities. 11893 * 11894 * This function may be be called from any context that can block-wait 11895 * for the completion. The expectation is that this routine is called 11896 * typically from probe_one or from the online routine. 
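 *
 * Return codes
 *	0 - successful
 *	1 - the mailbox command to read the parameters failed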
11897 **/ 11898 int 11899 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11900 { 11901 int rc; 11902 struct lpfc_mqe *mqe; 11903 struct lpfc_pc_sli4_params *sli4_params; 11904 uint32_t mbox_tmo; 11905 11906 rc = 0; 11907 mqe = &mboxq->u.mqe; 11908 11909 /* Read the port's SLI4 Parameters port capabilities */ 11910 lpfc_pc_sli4_params(mboxq); 11911 if (!phba->sli4_hba.intr_enable) 11912 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11913 else { 11914 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11915 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11916 } 11917 11918 if (unlikely(rc)) 11919 return 1; 11920 11921 sli4_params = &phba->sli4_hba.pc_sli4_params; 11922 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 11923 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 11924 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 11925 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 11926 &mqe->un.sli4_params); 11927 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 11928 &mqe->un.sli4_params); 11929 sli4_params->proto_types = mqe->un.sli4_params.word3; 11930 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 11931 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 11932 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 11933 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 11934 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 11935 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 11936 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 11937 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 11938 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 11939 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 11940 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 11941 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 11942 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 11943 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 11944 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 11945 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 11946 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 11947 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 11948 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 11949 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 11950 11951 /* Make sure that sge_supp_len can be handled by the driver */ 11952 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11953 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11954 11955 return rc; 11956 } 11957 11958 /** 11959 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 11960 * @phba: Pointer to HBA context object. 11961 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11962 * 11963 * This function is called in the SLI4 code path to read the port's 11964 * sli4 capabilities. 11965 * 11966 * This function may be be called from any context that can block-wait 11967 * for the completion. The expectation is that this routine is called 11968 * typically from probe_one or from the online routine. 
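 *
 * Return codes
 *	0 - successful
 *	-ENODEV - the firmware does not support NVME and the FCP FC4 type
 *	          is not enabled either
 *	other values - error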
11969 **/ 11970 int 11971 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11972 { 11973 int rc; 11974 struct lpfc_mqe *mqe = &mboxq->u.mqe; 11975 struct lpfc_pc_sli4_params *sli4_params; 11976 uint32_t mbox_tmo; 11977 int length; 11978 bool exp_wqcq_pages = true; 11979 struct lpfc_sli4_parameters *mbx_sli4_parameters; 11980 11981 /* 11982 * By default, the driver assumes the SLI4 port requires RPI 11983 * header postings. The SLI4_PARAM response will correct this 11984 * assumption. 11985 */ 11986 phba->sli4_hba.rpi_hdrs_in_use = 1; 11987 11988 /* Read the port's SLI4 Config Parameters */ 11989 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 11990 sizeof(struct lpfc_sli4_cfg_mhdr)); 11991 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11992 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 11993 length, LPFC_SLI4_MBX_EMBED); 11994 if (!phba->sli4_hba.intr_enable) 11995 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11996 else { 11997 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11998 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11999 } 12000 if (unlikely(rc)) 12001 return rc; 12002 sli4_params = &phba->sli4_hba.pc_sli4_params; 12003 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 12004 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 12005 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 12006 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 12007 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 12008 mbx_sli4_parameters); 12009 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 12010 mbx_sli4_parameters); 12011 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 12012 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 12013 else 12014 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 12015 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 12016 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 12017 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 12018 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 12019 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 12020 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 12021 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 12022 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 12023 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 12024 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 12025 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 12026 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 12027 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 12028 mbx_sli4_parameters); 12029 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 12030 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 12031 mbx_sli4_parameters); 12032 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 12033 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 12034 12035 /* Check for Extended Pre-Registered SGL support */ 12036 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 12037 12038 /* Check for firmware nvme support */ 12039 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 12040 bf_get(cfg_xib, mbx_sli4_parameters)); 12041 12042 if (rc) { 12043 /* Save this to indicate the Firmware supports NVME */ 12044 sli4_params->nvme = 1; 12045 12046 /* Firmware NVME support, check driver FC4 NVME support */ 12047 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 12048 lpfc_printf_log(phba, 
KERN_INFO, LOG_INIT | LOG_NVME, 12049 "6133 Disabling NVME support: " 12050 "FC4 type not supported: x%x\n", 12051 phba->cfg_enable_fc4_type); 12052 goto fcponly; 12053 } 12054 } else { 12055 /* No firmware NVME support, check driver FC4 NVME support */ 12056 sli4_params->nvme = 0; 12057 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 12059 "6101 Disabling NVME support: Not " 12060 "supported by firmware (%d %d) x%x\n", 12061 bf_get(cfg_nvme, mbx_sli4_parameters), 12062 bf_get(cfg_xib, mbx_sli4_parameters), 12063 phba->cfg_enable_fc4_type); 12064 fcponly: 12065 phba->nvme_support = 0; 12066 phba->nvmet_support = 0; 12067 phba->cfg_nvmet_mrq = 0; 12068 phba->cfg_nvme_seg_cnt = 0; 12069 12070 /* If no FC4 type support, move to just SCSI support */ 12071 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 12072 return -ENODEV; 12073 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 12074 } 12075 } 12076 12077 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 12078 * accommodate 512K and 1M IOs in a single nvme buf. 12079 */ 12080 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12081 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 12082 12083 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ 12084 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 12085 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) 12086 phba->cfg_enable_pbde = 0; 12087 12088 /* 12089 * To support Suppress Response feature we must satisfy 3 conditions. 12090 * lpfc_suppress_rsp module parameter must be set (default). 12091 * In SLI4-Parameters Descriptor: 12092 * Extended Inline Buffers (XIB) must be supported. 12093 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 12094 * (double negative). 12095 */ 12096 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 12097 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 12098 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 12099 else 12100 phba->cfg_suppress_rsp = 0; 12101 12102 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 12103 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 12104 12105 /* Make sure that sge_supp_len can be handled by the driver */ 12106 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 12107 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 12108 12109 /* 12110 * Check whether the adapter supports an embedded copy of the 12111 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 12112 * to use this option, 128-byte WQEs must be used. 
12113 */ 12114 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 12115 phba->fcp_embed_io = 1; 12116 else 12117 phba->fcp_embed_io = 0; 12118 12119 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 12120 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 12121 bf_get(cfg_xib, mbx_sli4_parameters), 12122 phba->cfg_enable_pbde, 12123 phba->fcp_embed_io, phba->nvme_support, 12124 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 12125 12126 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 12127 LPFC_SLI_INTF_IF_TYPE_2) && 12128 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 12129 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 12130 exp_wqcq_pages = false; 12131 12132 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 12133 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 12134 exp_wqcq_pages && 12135 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 12136 phba->enab_exp_wqcq_pages = 1; 12137 else 12138 phba->enab_exp_wqcq_pages = 0; 12139 /* 12140 * Check if the SLI port supports MDS Diagnostics 12141 */ 12142 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 12143 phba->mds_diags_support = 1; 12144 else 12145 phba->mds_diags_support = 0; 12146 12147 /* 12148 * Check if the SLI port supports NSLER 12149 */ 12150 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 12151 phba->nsler = 1; 12152 else 12153 phba->nsler = 0; 12154 12155 return 0; 12156 } 12157 12158 /** 12159 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 12160 * @pdev: pointer to PCI device 12161 * @pid: pointer to PCI device identifier 12162 * 12163 * This routine is to be called to attach a device with SLI-3 interface spec 12164 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 12165 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 12166 * information of the device and driver to see if the driver state that it can 12167 * support this kind of device. If the match is successful, the driver core 12168 * invokes this routine. If this routine determines it can claim the HBA, it 12169 * does all the initialization that it needs to do to handle the HBA properly. 
12170 * 12171 * Return code 12172 * 0 - driver can claim the device 12173 * negative value - driver can not claim the device 12174 **/ 12175 static int 12176 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 12177 { 12178 struct lpfc_hba *phba; 12179 struct lpfc_vport *vport = NULL; 12180 struct Scsi_Host *shost = NULL; 12181 int error; 12182 uint32_t cfg_mode, intr_mode; 12183 12184 /* Allocate memory for HBA structure */ 12185 phba = lpfc_hba_alloc(pdev); 12186 if (!phba) 12187 return -ENOMEM; 12188 12189 /* Perform generic PCI device enabling operation */ 12190 error = lpfc_enable_pci_dev(phba); 12191 if (error) 12192 goto out_free_phba; 12193 12194 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 12195 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 12196 if (error) 12197 goto out_disable_pci_dev; 12198 12199 /* Set up SLI-3 specific device PCI memory space */ 12200 error = lpfc_sli_pci_mem_setup(phba); 12201 if (error) { 12202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12203 "1402 Failed to set up pci memory space.\n"); 12204 goto out_disable_pci_dev; 12205 } 12206 12207 /* Set up SLI-3 specific device driver resources */ 12208 error = lpfc_sli_driver_resource_setup(phba); 12209 if (error) { 12210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12211 "1404 Failed to set up driver resource.\n"); 12212 goto out_unset_pci_mem_s3; 12213 } 12214 12215 /* Initialize and populate the iocb list per host */ 12216 12217 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 12218 if (error) { 12219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12220 "1405 Failed to initialize iocb list.\n"); 12221 goto out_unset_driver_resource_s3; 12222 } 12223 12224 /* Set up common device driver resources */ 12225 error = lpfc_setup_driver_resource_phase2(phba); 12226 if (error) { 12227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12228 "1406 Failed to set up driver resource.\n"); 12229 goto out_free_iocb_list; 12230 } 12231 12232 /* Get the default values for Model Name and Description */ 12233 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12234 12235 /* Create SCSI host to the physical port */ 12236 error = lpfc_create_shost(phba); 12237 if (error) { 12238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12239 "1407 Failed to create scsi host.\n"); 12240 goto out_unset_driver_resource; 12241 } 12242 12243 /* Configure sysfs attributes */ 12244 vport = phba->pport; 12245 error = lpfc_alloc_sysfs_attr(vport); 12246 if (error) { 12247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12248 "1476 Failed to allocate sysfs attr\n"); 12249 goto out_destroy_shost; 12250 } 12251 12252 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12253 /* Now, trying to enable interrupt and bring up the device */ 12254 cfg_mode = phba->cfg_use_msi; 12255 while (true) { 12256 /* Put device to a known state before enabling interrupt */ 12257 lpfc_stop_port(phba); 12258 /* Configure and enable interrupt */ 12259 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 12260 if (intr_mode == LPFC_INTR_ERROR) { 12261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12262 "0431 Failed to enable interrupt.\n"); 12263 error = -ENODEV; 12264 goto out_free_sysfs_attr; 12265 } 12266 /* SLI-3 HBA setup */ 12267 if (lpfc_sli_hba_setup(phba)) { 12268 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12269 "1477 Failed to set up hba\n"); 12270 error = -ENODEV; 12271 goto out_remove_device; 12272 } 12273 12274 /* Wait 50ms for the interrupts of previous mailbox commands */ 12275 msleep(50); 12276 /* 
Check active interrupts on message signaled interrupts */ 12277 if (intr_mode == 0 || 12278 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 12279 /* Log the current active interrupt mode */ 12280 phba->intr_mode = intr_mode; 12281 lpfc_log_intr_mode(phba, intr_mode); 12282 break; 12283 } else { 12284 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12285 "0447 Configure interrupt mode (%d) " 12286 "failed active interrupt test.\n", 12287 intr_mode); 12288 /* Disable the current interrupt mode */ 12289 lpfc_sli_disable_intr(phba); 12290 /* Try next level of interrupt mode */ 12291 cfg_mode = --intr_mode; 12292 } 12293 } 12294 12295 /* Perform post initialization setup */ 12296 lpfc_post_init_setup(phba); 12297 12298 /* Check if there are static vports to be created. */ 12299 lpfc_create_static_vport(phba); 12300 12301 return 0; 12302 12303 out_remove_device: 12304 lpfc_unset_hba(phba); 12305 out_free_sysfs_attr: 12306 lpfc_free_sysfs_attr(vport); 12307 out_destroy_shost: 12308 lpfc_destroy_shost(phba); 12309 out_unset_driver_resource: 12310 lpfc_unset_driver_resource_phase2(phba); 12311 out_free_iocb_list: 12312 lpfc_free_iocb_list(phba); 12313 out_unset_driver_resource_s3: 12314 lpfc_sli_driver_resource_unset(phba); 12315 out_unset_pci_mem_s3: 12316 lpfc_sli_pci_mem_unset(phba); 12317 out_disable_pci_dev: 12318 lpfc_disable_pci_dev(phba); 12319 if (shost) 12320 scsi_host_put(shost); 12321 out_free_phba: 12322 lpfc_hba_free(phba); 12323 return error; 12324 } 12325 12326 /** 12327 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 12328 * @pdev: pointer to PCI device 12329 * 12330 * This routine is to be called to disattach a device with SLI-3 interface 12331 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 12332 * removed from PCI bus, it performs all the necessary cleanup for the HBA 12333 * device to be removed from the PCI subsystem properly. 12334 **/ 12335 static void 12336 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 12337 { 12338 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12339 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 12340 struct lpfc_vport **vports; 12341 struct lpfc_hba *phba = vport->phba; 12342 int i; 12343 12344 spin_lock_irq(&phba->hbalock); 12345 vport->load_flag |= FC_UNLOADING; 12346 spin_unlock_irq(&phba->hbalock); 12347 12348 lpfc_free_sysfs_attr(vport); 12349 12350 /* Release all the vports against this physical port */ 12351 vports = lpfc_create_vport_work_array(phba); 12352 if (vports != NULL) 12353 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 12354 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 12355 continue; 12356 fc_vport_terminate(vports[i]->fc_vport); 12357 } 12358 lpfc_destroy_vport_work_array(phba, vports); 12359 12360 /* Remove FC host and then SCSI host with the physical port */ 12361 fc_remove_host(shost); 12362 scsi_remove_host(shost); 12363 12364 lpfc_cleanup(vport); 12365 12366 /* 12367 * Bring down the SLI Layer. This step disable all interrupts, 12368 * clears the rings, discards all mailbox commands, and resets 12369 * the HBA. 
12370 */ 12371 12372 /* HBA interrupt will be disabled after this call */ 12373 lpfc_sli_hba_down(phba); 12374 /* Stop kthread signal shall trigger work_done one more time */ 12375 kthread_stop(phba->worker_thread); 12376 /* Final cleanup of txcmplq and reset the HBA */ 12377 lpfc_sli_brdrestart(phba); 12378 12379 kfree(phba->vpi_bmask); 12380 kfree(phba->vpi_ids); 12381 12382 lpfc_stop_hba_timers(phba); 12383 spin_lock_irq(&phba->port_list_lock); 12384 list_del_init(&vport->listentry); 12385 spin_unlock_irq(&phba->port_list_lock); 12386 12387 lpfc_debugfs_terminate(vport); 12388 12389 /* Disable SR-IOV if enabled */ 12390 if (phba->cfg_sriov_nr_virtfn) 12391 pci_disable_sriov(pdev); 12392 12393 /* Disable interrupt */ 12394 lpfc_sli_disable_intr(phba); 12395 12396 scsi_host_put(shost); 12397 12398 /* 12399 * Call scsi_free before mem_free since scsi bufs are released to their 12400 * corresponding pools here. 12401 */ 12402 lpfc_scsi_free(phba); 12403 lpfc_free_iocb_list(phba); 12404 12405 lpfc_mem_free_all(phba); 12406 12407 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 12408 phba->hbqslimp.virt, phba->hbqslimp.phys); 12409 12410 /* Free resources associated with SLI2 interface */ 12411 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 12412 phba->slim2p.virt, phba->slim2p.phys); 12413 12414 /* unmap adapter SLIM and Control Registers */ 12415 iounmap(phba->ctrl_regs_memmap_p); 12416 iounmap(phba->slim_memmap_p); 12417 12418 lpfc_hba_free(phba); 12419 12420 pci_release_mem_regions(pdev); 12421 pci_disable_device(pdev); 12422 } 12423 12424 /** 12425 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 12426 * @pdev: pointer to PCI device 12427 * @msg: power management message 12428 * 12429 * This routine is to be called from the kernel's PCI subsystem to support 12430 * system Power Management (PM) to device with SLI-3 interface spec. When 12431 * PM invokes this method, it quiesces the device by stopping the driver's 12432 * worker thread for the device, turning off device's interrupt and DMA, 12433 * and bring the device offline. Note that as the driver implements the 12434 * minimum PM requirements to a power-aware driver's PM support for the 12435 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 12436 * to the suspend() method call will be treated as SUSPEND and the driver will 12437 * fully reinitialize its device during resume() method call, the driver will 12438 * set device to PCI_D3hot state in PCI config space instead of setting it 12439 * according to the @msg provided by the PM. 
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter.
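	 * The worker thread was stopped in the suspend path, so it must be
	 * recreated here before the HBA is brought back online.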
*/ 12513 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12514 "lpfc_worker_%d", phba->brd_no); 12515 if (IS_ERR(phba->worker_thread)) { 12516 error = PTR_ERR(phba->worker_thread); 12517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12518 "0434 PM resume failed to start worker " 12519 "thread: error=x%x.\n", error); 12520 return error; 12521 } 12522 12523 /* Configure and enable interrupt */ 12524 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12525 if (intr_mode == LPFC_INTR_ERROR) { 12526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12527 "0430 PM resume Failed to enable interrupt\n"); 12528 return -EIO; 12529 } else 12530 phba->intr_mode = intr_mode; 12531 12532 /* Restart HBA and bring it online */ 12533 lpfc_sli_brdrestart(phba); 12534 lpfc_online(phba); 12535 12536 /* Log the current active interrupt mode */ 12537 lpfc_log_intr_mode(phba, phba->intr_mode); 12538 12539 return 0; 12540 } 12541 12542 /** 12543 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 12544 * @phba: pointer to lpfc hba data structure. 12545 * 12546 * This routine is called to prepare the SLI3 device for PCI slot recover. It 12547 * aborts all the outstanding SCSI I/Os to the pci device. 12548 **/ 12549 static void 12550 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 12551 { 12552 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12553 "2723 PCI channel I/O abort preparing for recovery\n"); 12554 12555 /* 12556 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12557 * and let the SCSI mid-layer to retry them to recover. 12558 */ 12559 lpfc_sli_abort_fcp_rings(phba); 12560 } 12561 12562 /** 12563 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 12564 * @phba: pointer to lpfc hba data structure. 12565 * 12566 * This routine is called to prepare the SLI3 device for PCI slot reset. It 12567 * disables the device interrupt and pci device, and aborts the internal FCP 12568 * pending I/Os. 12569 **/ 12570 static void 12571 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 12572 { 12573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12574 "2710 PCI channel disable preparing for reset\n"); 12575 12576 /* Block any management I/Os to the device */ 12577 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 12578 12579 /* Block all SCSI devices' I/Os on the host */ 12580 lpfc_scsi_dev_block(phba); 12581 12582 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12583 lpfc_sli_flush_io_rings(phba); 12584 12585 /* stop all timers */ 12586 lpfc_stop_hba_timers(phba); 12587 12588 /* Disable interrupt and pci device */ 12589 lpfc_sli_disable_intr(phba); 12590 pci_disable_device(phba->pcidev); 12591 } 12592 12593 /** 12594 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 12595 * @phba: pointer to lpfc hba data structure. 12596 * 12597 * This routine is called to prepare the SLI3 device for PCI slot permanently 12598 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12599 * pending I/Os. 
12600 **/ 12601 static void 12602 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12603 { 12604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12605 "2711 PCI channel permanent disable for failure\n"); 12606 /* Block all SCSI devices' I/Os on the host */ 12607 lpfc_scsi_dev_block(phba); 12608 12609 /* stop all timers */ 12610 lpfc_stop_hba_timers(phba); 12611 12612 /* Clean up all driver's outstanding SCSI I/Os */ 12613 lpfc_sli_flush_io_rings(phba); 12614 } 12615 12616 /** 12617 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 12618 * @pdev: pointer to PCI device. 12619 * @state: the current PCI connection state. 12620 * 12621 * This routine is called from the PCI subsystem for I/O error handling to 12622 * device with SLI-3 interface spec. This function is called by the PCI 12623 * subsystem after a PCI bus error affecting this device has been detected. 12624 * When this function is invoked, it will need to stop all the I/Os and 12625 * interrupt(s) to the device. Once that is done, it will return 12626 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 12627 * as desired. 12628 * 12629 * Return codes 12630 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 12631 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12632 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12633 **/ 12634 static pci_ers_result_t 12635 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 12636 { 12637 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12638 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12639 12640 switch (state) { 12641 case pci_channel_io_normal: 12642 /* Non-fatal error, prepare for recovery */ 12643 lpfc_sli_prep_dev_for_recover(phba); 12644 return PCI_ERS_RESULT_CAN_RECOVER; 12645 case pci_channel_io_frozen: 12646 /* Fatal error, prepare for slot reset */ 12647 lpfc_sli_prep_dev_for_reset(phba); 12648 return PCI_ERS_RESULT_NEED_RESET; 12649 case pci_channel_io_perm_failure: 12650 /* Permanent failure, prepare for device down */ 12651 lpfc_sli_prep_dev_for_perm_failure(phba); 12652 return PCI_ERS_RESULT_DISCONNECT; 12653 default: 12654 /* Unknown state, prepare and request slot reset */ 12655 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12656 "0472 Unknown PCI error state: x%x\n", state); 12657 lpfc_sli_prep_dev_for_reset(phba); 12658 return PCI_ERS_RESULT_NEED_RESET; 12659 } 12660 } 12661 12662 /** 12663 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 12664 * @pdev: pointer to PCI device. 12665 * 12666 * This routine is called from the PCI subsystem for error handling to 12667 * device with SLI-3 interface spec. This is called after PCI bus has been 12668 * reset to restart the PCI card from scratch, as if from a cold-boot. 12669 * During the PCI subsystem error recovery, after driver returns 12670 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12671 * recovery and then call this routine before calling the .resume method 12672 * to recover the device. This function will initialize the HBA device, 12673 * enable the interrupt, but it will just put the HBA to offline state 12674 * without passing any I/O traffic. 
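 *
 * Concretely, the reset path below re-enables the device's PCI memory
 * resources, restores and re-saves the PCI config space, restores bus
 * mastering, clears LPFC_SLI_ACTIVE, re-enables the interrupt in the
 * previously active mode, and then runs lpfc_offline_prep()/lpfc_offline()
 * and lpfc_sli_brdrestart() so the HBA is left reset but offline.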
12675 * 12676 * Return codes 12677 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12678 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12679 */ 12680 static pci_ers_result_t 12681 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 12682 { 12683 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12684 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12685 struct lpfc_sli *psli = &phba->sli; 12686 uint32_t intr_mode; 12687 12688 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12689 if (pci_enable_device_mem(pdev)) { 12690 printk(KERN_ERR "lpfc: Cannot re-enable " 12691 "PCI device after reset.\n"); 12692 return PCI_ERS_RESULT_DISCONNECT; 12693 } 12694 12695 pci_restore_state(pdev); 12696 12697 /* 12698 * As the new kernel behavior of pci_restore_state() API call clears 12699 * device saved_state flag, need to save the restored state again. 12700 */ 12701 pci_save_state(pdev); 12702 12703 if (pdev->is_busmaster) 12704 pci_set_master(pdev); 12705 12706 spin_lock_irq(&phba->hbalock); 12707 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12708 spin_unlock_irq(&phba->hbalock); 12709 12710 /* Configure and enable interrupt */ 12711 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12712 if (intr_mode == LPFC_INTR_ERROR) { 12713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12714 "0427 Cannot re-enable interrupt after " 12715 "slot reset.\n"); 12716 return PCI_ERS_RESULT_DISCONNECT; 12717 } else 12718 phba->intr_mode = intr_mode; 12719 12720 /* Take device offline, it will perform cleanup */ 12721 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12722 lpfc_offline(phba); 12723 lpfc_sli_brdrestart(phba); 12724 12725 /* Log the current active interrupt mode */ 12726 lpfc_log_intr_mode(phba, phba->intr_mode); 12727 12728 return PCI_ERS_RESULT_RECOVERED; 12729 } 12730 12731 /** 12732 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 12733 * @pdev: pointer to PCI device 12734 * 12735 * This routine is called from the PCI subsystem for error handling to device 12736 * with SLI-3 interface spec. It is called when kernel error recovery tells 12737 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12738 * error recovery. After this call, traffic can start to flow from this device 12739 * again. 12740 */ 12741 static void 12742 lpfc_io_resume_s3(struct pci_dev *pdev) 12743 { 12744 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12745 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12746 12747 /* Bring device online, it will be no-op for non-fatal error resume */ 12748 lpfc_online(phba); 12749 } 12750 12751 /** 12752 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 12753 * @phba: pointer to lpfc hba data structure. 12754 * 12755 * returns the number of ELS/CT IOCBs to reserve 12756 **/ 12757 int 12758 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 12759 { 12760 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 12761 12762 if (phba->sli_rev == LPFC_SLI_REV4) { 12763 if (max_xri <= 100) 12764 return 10; 12765 else if (max_xri <= 256) 12766 return 25; 12767 else if (max_xri <= 512) 12768 return 50; 12769 else if (max_xri <= 1024) 12770 return 100; 12771 else if (max_xri <= 1536) 12772 return 150; 12773 else if (max_xri <= 2048) 12774 return 200; 12775 else 12776 return 250; 12777 } else 12778 return 0; 12779 } 12780 12781 /** 12782 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12783 * @phba: pointer to lpfc hba data structure. 
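 *
 * Worked example (using the tiers in lpfc_sli4_get_els_iocb_cnt() above):
 * a port with max_xri of 1024 reserves 100 ELS/CT IOCBs; when
 * nvmet_support is set, LPFC_NVMET_BUF_POST is added on top of that.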
12784 * 12785 * returns the number of ELS/CT + NVMET IOCBs to reserve 12786 **/ 12787 int 12788 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 12789 { 12790 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 12791 12792 if (phba->nvmet_support) 12793 max_xri += LPFC_NVMET_BUF_POST; 12794 return max_xri; 12795 } 12796 12797 12798 static int 12799 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 12800 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 12801 const struct firmware *fw) 12802 { 12803 int rc; 12804 12805 /* Three cases: (1) FW was not supported on the detected adapter. 12806 * (2) FW update has been locked out administratively. 12807 * (3) Some other error during FW update. 12808 * In each case, an unmaskable message is written to the console 12809 * for admin diagnosis. 12810 */ 12811 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 12812 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && 12813 magic_number != MAGIC_NUMBER_G6) || 12814 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && 12815 magic_number != MAGIC_NUMBER_G7)) { 12816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12817 "3030 This firmware version is not supported on" 12818 " this HBA model. Device:%x Magic:%x Type:%x " 12819 "ID:%x Size %d %zd\n", 12820 phba->pcidev->device, magic_number, ftype, fid, 12821 fsize, fw->size); 12822 rc = -EINVAL; 12823 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 12824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12825 "3021 Firmware downloads have been prohibited " 12826 "by a system configuration setting on " 12827 "Device:%x Magic:%x Type:%x ID:%x Size %d " 12828 "%zd\n", 12829 phba->pcidev->device, magic_number, ftype, fid, 12830 fsize, fw->size); 12831 rc = -EACCES; 12832 } else { 12833 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12834 "3022 FW Download failed. Add Status x%x " 12835 "Device:%x Magic:%x Type:%x ID:%x Size %d " 12836 "%zd\n", 12837 offset, phba->pcidev->device, magic_number, 12838 ftype, fid, fsize, fw->size); 12839 rc = -EIO; 12840 } 12841 return rc; 12842 } 12843 12844 /** 12845 * lpfc_write_firmware - attempt to write a firmware image to the port 12846 * @fw: pointer to firmware image returned from request_firmware. 12847 * @context: pointer to firmware image returned from request_firmware. 12848 * @ret: return value this routine provides to the caller. 
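 *
 * If the image's revision differs from the running firmware, the data is
 * streamed to the port in chunks: up to LPFC_MBX_WR_CONFIG_MAX_BDE DMA
 * buffers of SLI4_PAGE_SIZE bytes are filled per pass and handed to
 * lpfc_wr_object() until the whole image has been written; otherwise the
 * update is skipped.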
12849 * 12850 **/ 12851 static void 12852 lpfc_write_firmware(const struct firmware *fw, void *context) 12853 { 12854 struct lpfc_hba *phba = (struct lpfc_hba *)context; 12855 char fwrev[FW_REV_STR_SIZE]; 12856 struct lpfc_grp_hdr *image; 12857 struct list_head dma_buffer_list; 12858 int i, rc = 0; 12859 struct lpfc_dmabuf *dmabuf, *next; 12860 uint32_t offset = 0, temp_offset = 0; 12861 uint32_t magic_number, ftype, fid, fsize; 12862 12863 /* It can be null in no-wait mode, sanity check */ 12864 if (!fw) { 12865 rc = -ENXIO; 12866 goto out; 12867 } 12868 image = (struct lpfc_grp_hdr *)fw->data; 12869 12870 magic_number = be32_to_cpu(image->magic_number); 12871 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 12872 fid = bf_get_be32(lpfc_grp_hdr_id, image); 12873 fsize = be32_to_cpu(image->size); 12874 12875 INIT_LIST_HEAD(&dma_buffer_list); 12876 lpfc_decode_firmware_rev(phba, fwrev, 1); 12877 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 12878 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12879 "3023 Updating Firmware, Current Version:%s " 12880 "New Version:%s\n", 12881 fwrev, image->revision); 12882 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 12883 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 12884 GFP_KERNEL); 12885 if (!dmabuf) { 12886 rc = -ENOMEM; 12887 goto release_out; 12888 } 12889 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12890 SLI4_PAGE_SIZE, 12891 &dmabuf->phys, 12892 GFP_KERNEL); 12893 if (!dmabuf->virt) { 12894 kfree(dmabuf); 12895 rc = -ENOMEM; 12896 goto release_out; 12897 } 12898 list_add_tail(&dmabuf->list, &dma_buffer_list); 12899 } 12900 while (offset < fw->size) { 12901 temp_offset = offset; 12902 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 12903 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 12904 memcpy(dmabuf->virt, 12905 fw->data + temp_offset, 12906 fw->size - temp_offset); 12907 temp_offset = fw->size; 12908 break; 12909 } 12910 memcpy(dmabuf->virt, fw->data + temp_offset, 12911 SLI4_PAGE_SIZE); 12912 temp_offset += SLI4_PAGE_SIZE; 12913 } 12914 rc = lpfc_wr_object(phba, &dma_buffer_list, 12915 (fw->size - offset), &offset); 12916 if (rc) { 12917 rc = lpfc_log_write_firmware_error(phba, offset, 12918 magic_number, 12919 ftype, 12920 fid, 12921 fsize, 12922 fw); 12923 goto release_out; 12924 } 12925 } 12926 rc = offset; 12927 } else 12928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12929 "3029 Skipped Firmware update, Current " 12930 "Version:%s New Version:%s\n", 12931 fwrev, image->revision); 12932 12933 release_out: 12934 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 12935 list_del(&dmabuf->list); 12936 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 12937 dmabuf->virt, dmabuf->phys); 12938 kfree(dmabuf); 12939 } 12940 release_firmware(fw); 12941 out: 12942 if (rc < 0) 12943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12944 "3062 Firmware update error, status %d.\n", rc); 12945 else 12946 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12947 "3024 Firmware update success: size %d.\n", rc); 12948 } 12949 12950 /** 12951 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 12952 * @phba: pointer to lpfc hba data structure. 12953 * 12954 * This routine is called to perform Linux generic firmware upgrade on device 12955 * that supports such feature. 
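 *
 * The @fw_upgrade argument selects the mode: INT_FW_UPGRADE requests the
 * image asynchronously via request_firmware_nowait(), while RUN_FW_UPGRADE
 * fetches it synchronously with request_firmware() and writes it
 * immediately. The firmware file name is "<ModelName>.grp"; for example, a
 * (hypothetical) model string of "LPe32000" would look up "LPe32000.grp".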
12956 **/ 12957 int 12958 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 12959 { 12960 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 12961 int ret; 12962 const struct firmware *fw; 12963 12964 /* Only supported on SLI4 interface type 2 for now */ 12965 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 12966 LPFC_SLI_INTF_IF_TYPE_2) 12967 return -EPERM; 12968 12969 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 12970 12971 if (fw_upgrade == INT_FW_UPGRADE) { 12972 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 12973 file_name, &phba->pcidev->dev, 12974 GFP_KERNEL, (void *)phba, 12975 lpfc_write_firmware); 12976 } else if (fw_upgrade == RUN_FW_UPGRADE) { 12977 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 12978 if (!ret) 12979 lpfc_write_firmware(fw, (void *)phba); 12980 } else { 12981 ret = -EINVAL; 12982 } 12983 12984 return ret; 12985 } 12986 12987 /** 12988 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 12989 * @pdev: pointer to PCI device 12990 * @pid: pointer to PCI device identifier 12991 * 12992 * This routine is called from the kernel's PCI subsystem to device with 12993 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 12994 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 12995 * information of the device and driver to see if the driver state that it 12996 * can support this kind of device. If the match is successful, the driver 12997 * core invokes this routine. If this routine determines it can claim the HBA, 12998 * it does all the initialization that it needs to do to handle the HBA 12999 * properly. 13000 * 13001 * Return code 13002 * 0 - driver can claim the device 13003 * negative value - driver can not claim the device 13004 **/ 13005 static int 13006 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 13007 { 13008 struct lpfc_hba *phba; 13009 struct lpfc_vport *vport = NULL; 13010 struct Scsi_Host *shost = NULL; 13011 int error; 13012 uint32_t cfg_mode, intr_mode; 13013 13014 /* Allocate memory for HBA structure */ 13015 phba = lpfc_hba_alloc(pdev); 13016 if (!phba) 13017 return -ENOMEM; 13018 13019 /* Perform generic PCI device enabling operation */ 13020 error = lpfc_enable_pci_dev(phba); 13021 if (error) 13022 goto out_free_phba; 13023 13024 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 13025 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 13026 if (error) 13027 goto out_disable_pci_dev; 13028 13029 /* Set up SLI-4 specific device PCI memory space */ 13030 error = lpfc_sli4_pci_mem_setup(phba); 13031 if (error) { 13032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13033 "1410 Failed to set up pci memory space.\n"); 13034 goto out_disable_pci_dev; 13035 } 13036 13037 /* Set up SLI-4 Specific device driver resources */ 13038 error = lpfc_sli4_driver_resource_setup(phba); 13039 if (error) { 13040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13041 "1412 Failed to set up driver resource.\n"); 13042 goto out_unset_pci_mem_s4; 13043 } 13044 13045 INIT_LIST_HEAD(&phba->active_rrq_list); 13046 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 13047 13048 /* Set up common device driver resources */ 13049 error = lpfc_setup_driver_resource_phase2(phba); 13050 if (error) { 13051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13052 "1414 Failed to set up driver resource.\n"); 13053 goto out_unset_driver_resource_s4; 13054 } 13055 13056 /* Get the default values for Model Name and 
Description */ 13057 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 13058 13059 /* Now, trying to enable interrupt and bring up the device */ 13060 cfg_mode = phba->cfg_use_msi; 13061 13062 /* Put device to a known state before enabling interrupt */ 13063 phba->pport = NULL; 13064 lpfc_stop_port(phba); 13065 13066 /* Init cpu_map array */ 13067 lpfc_cpu_map_array_init(phba); 13068 13069 /* Init hba_eq_hdl array */ 13070 lpfc_hba_eq_hdl_array_init(phba); 13071 13072 /* Configure and enable interrupt */ 13073 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 13074 if (intr_mode == LPFC_INTR_ERROR) { 13075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13076 "0426 Failed to enable interrupt.\n"); 13077 error = -ENODEV; 13078 goto out_unset_driver_resource; 13079 } 13080 /* Default to single EQ for non-MSI-X */ 13081 if (phba->intr_type != MSIX) { 13082 phba->cfg_irq_chann = 1; 13083 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13084 if (phba->nvmet_support) 13085 phba->cfg_nvmet_mrq = 1; 13086 } 13087 } 13088 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 13089 13090 /* Create SCSI host to the physical port */ 13091 error = lpfc_create_shost(phba); 13092 if (error) { 13093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13094 "1415 Failed to create scsi host.\n"); 13095 goto out_disable_intr; 13096 } 13097 vport = phba->pport; 13098 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 13099 13100 /* Configure sysfs attributes */ 13101 error = lpfc_alloc_sysfs_attr(vport); 13102 if (error) { 13103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13104 "1416 Failed to allocate sysfs attr\n"); 13105 goto out_destroy_shost; 13106 } 13107 13108 /* Set up SLI-4 HBA */ 13109 if (lpfc_sli4_hba_setup(phba)) { 13110 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13111 "1421 Failed to set up hba\n"); 13112 error = -ENODEV; 13113 goto out_free_sysfs_attr; 13114 } 13115 13116 /* Log the current active interrupt mode */ 13117 phba->intr_mode = intr_mode; 13118 lpfc_log_intr_mode(phba, intr_mode); 13119 13120 /* Perform post initialization setup */ 13121 lpfc_post_init_setup(phba); 13122 13123 /* NVME support in FW earlier in the driver load corrects the 13124 * FC4 type making a check for nvme_support unnecessary. 13125 */ 13126 if (phba->nvmet_support == 0) { 13127 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13128 /* Create NVME binding with nvme_fc_transport. This 13129 * ensures the vport is initialized. If the localport 13130 * create fails, it should not unload the driver to 13131 * support field issues. 13132 */ 13133 error = lpfc_nvme_create_localport(vport); 13134 if (error) { 13135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13136 "6004 NVME registration " 13137 "failed, error x%x\n", 13138 error); 13139 } 13140 } 13141 } 13142 13143 /* check for firmware upgrade or downgrade */ 13144 if (phba->cfg_request_firmware_upgrade) 13145 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 13146 13147 /* Check if there are static vports to be created. 
*/ 13148 lpfc_create_static_vport(phba); 13149 13150 /* Enable RAS FW log support */ 13151 lpfc_sli4_ras_setup(phba); 13152 13153 INIT_LIST_HEAD(&phba->poll_list); 13154 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 13155 13156 return 0; 13157 13158 out_free_sysfs_attr: 13159 lpfc_free_sysfs_attr(vport); 13160 out_destroy_shost: 13161 lpfc_destroy_shost(phba); 13162 out_disable_intr: 13163 lpfc_sli4_disable_intr(phba); 13164 out_unset_driver_resource: 13165 lpfc_unset_driver_resource_phase2(phba); 13166 out_unset_driver_resource_s4: 13167 lpfc_sli4_driver_resource_unset(phba); 13168 out_unset_pci_mem_s4: 13169 lpfc_sli4_pci_mem_unset(phba); 13170 out_disable_pci_dev: 13171 lpfc_disable_pci_dev(phba); 13172 if (shost) 13173 scsi_host_put(shost); 13174 out_free_phba: 13175 lpfc_hba_free(phba); 13176 return error; 13177 } 13178 13179 /** 13180 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 13181 * @pdev: pointer to PCI device 13182 * 13183 * This routine is called from the kernel's PCI subsystem to device with 13184 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 13185 * removed from PCI bus, it performs all the necessary cleanup for the HBA 13186 * device to be removed from the PCI subsystem properly. 13187 **/ 13188 static void 13189 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 13190 { 13191 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13192 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 13193 struct lpfc_vport **vports; 13194 struct lpfc_hba *phba = vport->phba; 13195 int i; 13196 13197 /* Mark the device unloading flag */ 13198 spin_lock_irq(&phba->hbalock); 13199 vport->load_flag |= FC_UNLOADING; 13200 spin_unlock_irq(&phba->hbalock); 13201 13202 /* Free the HBA sysfs attributes */ 13203 lpfc_free_sysfs_attr(vport); 13204 13205 /* Release all the vports against this physical port */ 13206 vports = lpfc_create_vport_work_array(phba); 13207 if (vports != NULL) 13208 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 13209 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 13210 continue; 13211 fc_vport_terminate(vports[i]->fc_vport); 13212 } 13213 lpfc_destroy_vport_work_array(phba, vports); 13214 13215 /* Remove FC host and then SCSI host with the physical port */ 13216 fc_remove_host(shost); 13217 scsi_remove_host(shost); 13218 13219 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 13220 * localports are destroyed after to cleanup all transport memory. 13221 */ 13222 lpfc_cleanup(vport); 13223 lpfc_nvmet_destroy_targetport(phba); 13224 lpfc_nvme_destroy_localport(vport); 13225 13226 /* De-allocate multi-XRI pools */ 13227 if (phba->cfg_xri_rebalancing) 13228 lpfc_destroy_multixri_pools(phba); 13229 13230 /* 13231 * Bring down the SLI Layer. This step disables all interrupts, 13232 * clears the rings, discards all mailbox commands, and resets 13233 * the HBA FCoE function. 13234 */ 13235 lpfc_debugfs_terminate(vport); 13236 13237 lpfc_stop_hba_timers(phba); 13238 spin_lock_irq(&phba->port_list_lock); 13239 list_del_init(&vport->listentry); 13240 spin_unlock_irq(&phba->port_list_lock); 13241 13242 /* Perform scsi free before driver resource_unset since scsi 13243 * buffers are released to their corresponding pools here. 
13244 */ 13245 lpfc_io_free(phba); 13246 lpfc_free_iocb_list(phba); 13247 lpfc_sli4_hba_unset(phba); 13248 13249 lpfc_unset_driver_resource_phase2(phba); 13250 lpfc_sli4_driver_resource_unset(phba); 13251 13252 /* Unmap adapter Control and Doorbell registers */ 13253 lpfc_sli4_pci_mem_unset(phba); 13254 13255 /* Release PCI resources and disable device's PCI function */ 13256 scsi_host_put(shost); 13257 lpfc_disable_pci_dev(phba); 13258 13259 /* Finally, free the driver's device data structure */ 13260 lpfc_hba_free(phba); 13261 13262 return; 13263 } 13264 13265 /** 13266 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 13267 * @pdev: pointer to PCI device 13268 * @msg: power management message 13269 * 13270 * This routine is called from the kernel's PCI subsystem to support system 13271 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 13272 * this method, it quiesces the device by stopping the driver's worker 13273 * thread for the device, turning off device's interrupt and DMA, and bring 13274 * the device offline. Note that as the driver implements the minimum PM 13275 * requirements to a power-aware driver's PM support for suspend/resume -- all 13276 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 13277 * method call will be treated as SUSPEND and the driver will fully 13278 * reinitialize its device during resume() method call, the driver will set 13279 * device to PCI_D3hot state in PCI config space instead of setting it 13280 * according to the @msg provided by the PM. 13281 * 13282 * Return code 13283 * 0 - driver suspended the device 13284 * Error otherwise 13285 **/ 13286 static int 13287 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 13288 { 13289 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13290 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13291 13292 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13293 "2843 PCI device Power Management suspend.\n"); 13294 13295 /* Bring down the device */ 13296 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13297 lpfc_offline(phba); 13298 kthread_stop(phba->worker_thread); 13299 13300 /* Disable interrupt from device */ 13301 lpfc_sli4_disable_intr(phba); 13302 lpfc_sli4_queue_destroy(phba); 13303 13304 /* Save device state to PCI config space */ 13305 pci_save_state(pdev); 13306 pci_set_power_state(pdev, PCI_D3hot); 13307 13308 return 0; 13309 } 13310 13311 /** 13312 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 13313 * @pdev: pointer to PCI device 13314 * 13315 * This routine is called from the kernel's PCI subsystem to support system 13316 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 13317 * this method, it restores the device's PCI config space state and fully 13318 * reinitializes the device and brings it online. Note that as the driver 13319 * implements the minimum PM requirements to a power-aware driver's PM for 13320 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 13321 * to the suspend() method call will be treated as SUSPEND and the driver 13322 * will fully reinitialize its device during resume() method call, the device 13323 * will be set to PCI_D0 directly in PCI config space before restoring the 13324 * state. 
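 *
 * The resume path below restores and then re-saves the PCI config space
 * (pci_restore_state() clears the saved_state flag), restarts the worker
 * thread, re-enables the interrupt in the previously active mode, and
 * finally restarts and onlines the HBA.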
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	 /* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
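 *
 * Unlike the SLI-3 variant, this path also tears down the SLI-4 queue set
 * with lpfc_sli4_queue_destroy() after disabling the interrupt, and it
 * blocks management I/O with LPFC_MBX_NO_WAIT rather than LPFC_MBX_WAIT.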
13410 **/ 13411 static void 13412 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 13413 { 13414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13415 "2826 PCI channel disable preparing for reset\n"); 13416 13417 /* Block any management I/Os to the device */ 13418 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 13419 13420 /* Block all SCSI devices' I/Os on the host */ 13421 lpfc_scsi_dev_block(phba); 13422 13423 /* Flush all driver's outstanding I/Os as we are to reset */ 13424 lpfc_sli_flush_io_rings(phba); 13425 13426 /* stop all timers */ 13427 lpfc_stop_hba_timers(phba); 13428 13429 /* Disable interrupt and pci device */ 13430 lpfc_sli4_disable_intr(phba); 13431 lpfc_sli4_queue_destroy(phba); 13432 pci_disable_device(phba->pcidev); 13433 } 13434 13435 /** 13436 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 13437 * @phba: pointer to lpfc hba data structure. 13438 * 13439 * This routine is called to prepare the SLI4 device for PCI slot permanently 13440 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 13441 * pending I/Os. 13442 **/ 13443 static void 13444 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 13445 { 13446 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13447 "2827 PCI channel permanent disable for failure\n"); 13448 13449 /* Block all SCSI devices' I/Os on the host */ 13450 lpfc_scsi_dev_block(phba); 13451 13452 /* stop all timers */ 13453 lpfc_stop_hba_timers(phba); 13454 13455 /* Clean up all driver's outstanding I/Os */ 13456 lpfc_sli_flush_io_rings(phba); 13457 } 13458 13459 /** 13460 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 13461 * @pdev: pointer to PCI device. 13462 * @state: the current PCI connection state. 13463 * 13464 * This routine is called from the PCI subsystem for error handling to device 13465 * with SLI-4 interface spec. This function is called by the PCI subsystem 13466 * after a PCI bus error affecting this device has been detected. When this 13467 * function is invoked, it will need to stop all the I/Os and interrupt(s) 13468 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 13469 * for the PCI subsystem to perform proper recovery as desired. 
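 *
 * The mapping applied below is:
 *   pci_channel_io_normal       - prepare for recovery, return CAN_RECOVER
 *   pci_channel_io_frozen       - prepare for reset, return NEED_RESET
 *   pci_channel_io_perm_failure - prepare for device down, return DISCONNECT
 *   any other state             - prepare for reset, return NEED_RESET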
13470 * 13471 * Return codes 13472 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 13473 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13474 **/ 13475 static pci_ers_result_t 13476 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 13477 { 13478 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13479 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13480 13481 switch (state) { 13482 case pci_channel_io_normal: 13483 /* Non-fatal error, prepare for recovery */ 13484 lpfc_sli4_prep_dev_for_recover(phba); 13485 return PCI_ERS_RESULT_CAN_RECOVER; 13486 case pci_channel_io_frozen: 13487 /* Fatal error, prepare for slot reset */ 13488 lpfc_sli4_prep_dev_for_reset(phba); 13489 return PCI_ERS_RESULT_NEED_RESET; 13490 case pci_channel_io_perm_failure: 13491 /* Permanent failure, prepare for device down */ 13492 lpfc_sli4_prep_dev_for_perm_failure(phba); 13493 return PCI_ERS_RESULT_DISCONNECT; 13494 default: 13495 /* Unknown state, prepare and request slot reset */ 13496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13497 "2825 Unknown PCI error state: x%x\n", state); 13498 lpfc_sli4_prep_dev_for_reset(phba); 13499 return PCI_ERS_RESULT_NEED_RESET; 13500 } 13501 } 13502 13503 /** 13504 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 13505 * @pdev: pointer to PCI device. 13506 * 13507 * This routine is called from the PCI subsystem for error handling to device 13508 * with SLI-4 interface spec. It is called after PCI bus has been reset to 13509 * restart the PCI card from scratch, as if from a cold-boot. During the 13510 * PCI subsystem error recovery, after the driver returns 13511 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 13512 * recovery and then call this routine before calling the .resume method to 13513 * recover the device. This function will initialize the HBA device, enable 13514 * the interrupt, but it will just put the HBA to offline state without 13515 * passing any I/O traffic. 13516 * 13517 * Return codes 13518 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13519 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13520 */ 13521 static pci_ers_result_t 13522 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 13523 { 13524 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13525 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13526 struct lpfc_sli *psli = &phba->sli; 13527 uint32_t intr_mode; 13528 13529 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 13530 if (pci_enable_device_mem(pdev)) { 13531 printk(KERN_ERR "lpfc: Cannot re-enable " 13532 "PCI device after reset.\n"); 13533 return PCI_ERS_RESULT_DISCONNECT; 13534 } 13535 13536 pci_restore_state(pdev); 13537 13538 /* 13539 * As the new kernel behavior of pci_restore_state() API call clears 13540 * device saved_state flag, need to save the restored state again. 
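	 * Re-saving keeps a valid saved copy in the pci_dev for any later
	 * pci_restore_state() call.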
13541 */ 13542 pci_save_state(pdev); 13543 13544 if (pdev->is_busmaster) 13545 pci_set_master(pdev); 13546 13547 spin_lock_irq(&phba->hbalock); 13548 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 13549 spin_unlock_irq(&phba->hbalock); 13550 13551 /* Configure and enable interrupt */ 13552 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13553 if (intr_mode == LPFC_INTR_ERROR) { 13554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13555 "2824 Cannot re-enable interrupt after " 13556 "slot reset.\n"); 13557 return PCI_ERS_RESULT_DISCONNECT; 13558 } else 13559 phba->intr_mode = intr_mode; 13560 13561 /* Log the current active interrupt mode */ 13562 lpfc_log_intr_mode(phba, phba->intr_mode); 13563 13564 return PCI_ERS_RESULT_RECOVERED; 13565 } 13566 13567 /** 13568 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 13569 * @pdev: pointer to PCI device 13570 * 13571 * This routine is called from the PCI subsystem for error handling to device 13572 * with SLI-4 interface spec. It is called when kernel error recovery tells 13573 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 13574 * error recovery. After this call, traffic can start to flow from this device 13575 * again. 13576 **/ 13577 static void 13578 lpfc_io_resume_s4(struct pci_dev *pdev) 13579 { 13580 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13581 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13582 13583 /* 13584 * In case of slot reset, as function reset is performed through 13585 * mailbox command which needs DMA to be enabled, this operation 13586 * has to be moved to the io resume phase. Taking device offline 13587 * will perform the necessary cleanup. 13588 */ 13589 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 13590 /* Perform device reset */ 13591 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13592 lpfc_offline(phba); 13593 lpfc_sli_brdrestart(phba); 13594 /* Bring the device back online */ 13595 lpfc_online(phba); 13596 } 13597 } 13598 13599 /** 13600 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 13601 * @pdev: pointer to PCI device 13602 * @pid: pointer to PCI device identifier 13603 * 13604 * This routine is to be registered to the kernel's PCI subsystem. When an 13605 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 13606 * at PCI device-specific information of the device and driver to see if the 13607 * driver state that it can support this kind of device. If the match is 13608 * successful, the driver core invokes this routine. This routine dispatches 13609 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 13610 * do all the initialization that it needs to do to handle the HBA device 13611 * properly. 
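 *
 * The dispatch decision is made by reading the LPFC_SLI_INTF register from
 * PCI config space: a valid interface word whose SLI revision field is
 * LPFC_SLI_INTF_REV_SLI4 is routed to lpfc_pci_probe_one_s4(), anything
 * else to lpfc_pci_probe_one_s3().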
13612 * 13613 * Return code 13614 * 0 - driver can claim the device 13615 * negative value - driver can not claim the device 13616 **/ 13617 static int 13618 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 13619 { 13620 int rc; 13621 struct lpfc_sli_intf intf; 13622 13623 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 13624 return -ENODEV; 13625 13626 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 13627 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 13628 rc = lpfc_pci_probe_one_s4(pdev, pid); 13629 else 13630 rc = lpfc_pci_probe_one_s3(pdev, pid); 13631 13632 return rc; 13633 } 13634 13635 /** 13636 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 13637 * @pdev: pointer to PCI device 13638 * 13639 * This routine is to be registered to the kernel's PCI subsystem. When an 13640 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 13641 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 13642 * remove routine, which will perform all the necessary cleanup for the 13643 * device to be removed from the PCI subsystem properly. 13644 **/ 13645 static void 13646 lpfc_pci_remove_one(struct pci_dev *pdev) 13647 { 13648 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13649 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13650 13651 switch (phba->pci_dev_grp) { 13652 case LPFC_PCI_DEV_LP: 13653 lpfc_pci_remove_one_s3(pdev); 13654 break; 13655 case LPFC_PCI_DEV_OC: 13656 lpfc_pci_remove_one_s4(pdev); 13657 break; 13658 default: 13659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13660 "1424 Invalid PCI device group: 0x%x\n", 13661 phba->pci_dev_grp); 13662 break; 13663 } 13664 return; 13665 } 13666 13667 /** 13668 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 13669 * @pdev: pointer to PCI device 13670 * @msg: power management message 13671 * 13672 * This routine is to be registered to the kernel's PCI subsystem to support 13673 * system Power Management (PM). When PM invokes this method, it dispatches 13674 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 13675 * suspend the device. 13676 * 13677 * Return code 13678 * 0 - driver suspended the device 13679 * Error otherwise 13680 **/ 13681 static int 13682 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 13683 { 13684 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13685 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13686 int rc = -ENODEV; 13687 13688 switch (phba->pci_dev_grp) { 13689 case LPFC_PCI_DEV_LP: 13690 rc = lpfc_pci_suspend_one_s3(pdev, msg); 13691 break; 13692 case LPFC_PCI_DEV_OC: 13693 rc = lpfc_pci_suspend_one_s4(pdev, msg); 13694 break; 13695 default: 13696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13697 "1425 Invalid PCI device group: 0x%x\n", 13698 phba->pci_dev_grp); 13699 break; 13700 } 13701 return rc; 13702 } 13703 13704 /** 13705 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 13706 * @pdev: pointer to PCI device 13707 * 13708 * This routine is to be registered to the kernel's PCI subsystem to support 13709 * system Power Management (PM). When PM invokes this method, it dispatches 13710 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 13711 * resume the device. 
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
13787 * 13788 * Return codes 13789 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13790 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13791 **/ 13792 static pci_ers_result_t 13793 lpfc_io_slot_reset(struct pci_dev *pdev) 13794 { 13795 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13796 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13797 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13798 13799 switch (phba->pci_dev_grp) { 13800 case LPFC_PCI_DEV_LP: 13801 rc = lpfc_io_slot_reset_s3(pdev); 13802 break; 13803 case LPFC_PCI_DEV_OC: 13804 rc = lpfc_io_slot_reset_s4(pdev); 13805 break; 13806 default: 13807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13808 "1428 Invalid PCI device group: 0x%x\n", 13809 phba->pci_dev_grp); 13810 break; 13811 } 13812 return rc; 13813 } 13814 13815 /** 13816 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 13817 * @pdev: pointer to PCI device 13818 * 13819 * This routine is registered to the PCI subsystem for error handling. It 13820 * is called when kernel error recovery tells the lpfc driver that it is 13821 * OK to resume normal PCI operation after PCI bus error recovery. When 13822 * this routine is invoked, it dispatches the action to the proper SLI-3 13823 * or SLI-4 device io_resume routine, which will resume the device operation. 13824 **/ 13825 static void 13826 lpfc_io_resume(struct pci_dev *pdev) 13827 { 13828 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13829 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13830 13831 switch (phba->pci_dev_grp) { 13832 case LPFC_PCI_DEV_LP: 13833 lpfc_io_resume_s3(pdev); 13834 break; 13835 case LPFC_PCI_DEV_OC: 13836 lpfc_io_resume_s4(pdev); 13837 break; 13838 default: 13839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13840 "1429 Invalid PCI device group: 0x%x\n", 13841 phba->pci_dev_grp); 13842 break; 13843 } 13844 return; 13845 } 13846 13847 /** 13848 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 13849 * @phba: pointer to lpfc hba data structure. 13850 * 13851 * This routine checks to see if OAS is supported for this adapter. If 13852 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 13853 * the enable oas flag is cleared and the pool created for OAS device data 13854 * is destroyed. 13855 * 13856 **/ 13857 static void 13858 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 13859 { 13860 13861 if (!phba->cfg_EnableXLane) 13862 return; 13863 13864 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 13865 phba->cfg_fof = 1; 13866 } else { 13867 phba->cfg_fof = 0; 13868 mempool_destroy(phba->device_data_mem_pool); 13869 phba->device_data_mem_pool = NULL; 13870 } 13871 13872 return; 13873 } 13874 13875 /** 13876 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 13877 * @phba: pointer to lpfc hba data structure. 13878 * 13879 * This routine checks to see if RAS is supported by the adapter. Check the 13880 * function through which RAS support enablement is to be done. 
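 *
 * RAS firmware logging is hardware-supported only on Lancer G6 and G7
 * adapters; it is enabled when cfg_ras_fwlog_func matches this device's
 * PCI function number and cfg_ras_fwlog_buffsize is non-zero.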
13881 **/ 13882 void 13883 lpfc_sli4_ras_init(struct lpfc_hba *phba) 13884 { 13885 switch (phba->pcidev->device) { 13886 case PCI_DEVICE_ID_LANCER_G6_FC: 13887 case PCI_DEVICE_ID_LANCER_G7_FC: 13888 phba->ras_fwlog.ras_hwsupport = true; 13889 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 13890 phba->cfg_ras_fwlog_buffsize) 13891 phba->ras_fwlog.ras_enabled = true; 13892 else 13893 phba->ras_fwlog.ras_enabled = false; 13894 break; 13895 default: 13896 phba->ras_fwlog.ras_hwsupport = false; 13897 } 13898 } 13899 13900 13901 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 13902 13903 static const struct pci_error_handlers lpfc_err_handler = { 13904 .error_detected = lpfc_io_error_detected, 13905 .slot_reset = lpfc_io_slot_reset, 13906 .resume = lpfc_io_resume, 13907 }; 13908 13909 static struct pci_driver lpfc_driver = { 13910 .name = LPFC_DRIVER_NAME, 13911 .id_table = lpfc_id_table, 13912 .probe = lpfc_pci_probe_one, 13913 .remove = lpfc_pci_remove_one, 13914 .shutdown = lpfc_pci_remove_one, 13915 .suspend = lpfc_pci_suspend_one, 13916 .resume = lpfc_pci_resume_one, 13917 .err_handler = &lpfc_err_handler, 13918 }; 13919 13920 static const struct file_operations lpfc_mgmt_fop = { 13921 .owner = THIS_MODULE, 13922 }; 13923 13924 static struct miscdevice lpfc_mgmt_dev = { 13925 .minor = MISC_DYNAMIC_MINOR, 13926 .name = "lpfcmgmt", 13927 .fops = &lpfc_mgmt_fop, 13928 }; 13929 13930 /** 13931 * lpfc_init - lpfc module initialization routine 13932 * 13933 * This routine is to be invoked when the lpfc module is loaded into the 13934 * kernel. The special kernel macro module_init() is used to indicate the 13935 * role of this routine to the kernel as lpfc module entry point. 13936 * 13937 * Return codes 13938 * 0 - successful 13939 * -ENOMEM - FC attach transport failed 13940 * all others - failed 13941 */ 13942 static int __init 13943 lpfc_init(void) 13944 { 13945 int error = 0; 13946 13947 printk(LPFC_MODULE_DESC "\n"); 13948 printk(LPFC_COPYRIGHT "\n"); 13949 13950 error = misc_register(&lpfc_mgmt_dev); 13951 if (error) 13952 printk(KERN_ERR "Could not register lpfcmgmt device, " 13953 "misc_register returned with status %d", error); 13954 13955 lpfc_transport_functions.vport_create = lpfc_vport_create; 13956 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 13957 lpfc_transport_template = 13958 fc_attach_transport(&lpfc_transport_functions); 13959 if (lpfc_transport_template == NULL) 13960 return -ENOMEM; 13961 lpfc_vport_transport_template = 13962 fc_attach_transport(&lpfc_vport_transport_functions); 13963 if (lpfc_vport_transport_template == NULL) { 13964 fc_release_transport(lpfc_transport_template); 13965 return -ENOMEM; 13966 } 13967 lpfc_nvme_cmd_template(); 13968 lpfc_nvmet_cmd_template(); 13969 13970 /* Initialize in case vector mapping is needed */ 13971 lpfc_present_cpu = num_present_cpus(); 13972 13973 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 13974 "lpfc/sli4:online", 13975 lpfc_cpu_online, lpfc_cpu_offline); 13976 if (error < 0) 13977 goto cpuhp_failure; 13978 lpfc_cpuhp_state = error; 13979 13980 error = pci_register_driver(&lpfc_driver); 13981 if (error) 13982 goto unwind; 13983 13984 return error; 13985 13986 unwind: 13987 cpuhp_remove_multi_state(lpfc_cpuhp_state); 13988 cpuhp_failure: 13989 fc_release_transport(lpfc_transport_template); 13990 fc_release_transport(lpfc_vport_transport_template); 13991 13992 return error; 13993 } 13994 13995 /** 13996 * lpfc_exit - lpfc module removal routine 13997 * 13998 * This routine is invoked when the lpfc 
module is removed from the kernel. 13999 * The special kernel macro module_exit() is used to indicate the role of 14000 * this routine to the kernel as lpfc module exit point. 14001 */ 14002 static void __exit 14003 lpfc_exit(void) 14004 { 14005 misc_deregister(&lpfc_mgmt_dev); 14006 pci_unregister_driver(&lpfc_driver); 14007 cpuhp_remove_multi_state(lpfc_cpuhp_state); 14008 fc_release_transport(lpfc_transport_template); 14009 fc_release_transport(lpfc_vport_transport_template); 14010 idr_destroy(&lpfc_hba_index); 14011 } 14012 14013 module_init(lpfc_init); 14014 module_exit(lpfc_exit); 14015 MODULE_LICENSE("GPL"); 14016 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 14017 MODULE_AUTHOR("Broadcom"); 14018 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 14019
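
/*
 * Summary of the module bring-up and teardown ordering implemented by
 * lpfc_init() and lpfc_exit() above, in outline:
 *
 *   init:  misc_register(lpfcmgmt) -> fc_attach_transport (physical and
 *          vport templates) -> cpuhp_setup_state_multi("lpfc/sli4:online")
 *          -> pci_register_driver(&lpfc_driver)
 *   exit:  misc_deregister -> pci_unregister_driver ->
 *          cpuhp_remove_multi_state -> fc_release_transport (both) ->
 *          idr_destroy(&lpfc_hba_index)
 */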