/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
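
/*
 * For reference, a short summary of the VPD retrieval loop above: each
 * DUMP_MEM mailbox command returns one chunk of VPD data starting at
 * 'offset'. A mailbox error forces word_cnt to 0, and word_cnt == 0 ends
 * the loop; word_cnt is also clamped so the copy into lpfc_vpd_data never
 * runs past DMP_VPD_SIZE bytes before lpfc_parse_vpd() is called on the
 * accumulated data.
 */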

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get the
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
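
/*
 * For reference, a worked example of the decoding above (values are
 * illustrative only): with prg->ver = 1, prg->rev = 0, prg->lev = 3,
 * prg->dist = 2 ('b' from dist_char[]) and prg->num = 4, the resulting
 * OptionROMVersion string is "1.03b4". When dist == 3 and num == 0 the
 * distribution suffix is omitted, giving "1.03".
 */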

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.
*/ 439 rc = lpfc_read_sparam(phba, pmb, 0); 440 if (rc) { 441 mempool_free(pmb, phba->mbox_mem_pool); 442 return -ENOMEM; 443 } 444 445 pmb->vport = vport; 446 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 448 "0448 Adapter failed init, mbxCmd x%x " 449 "READ_SPARM mbxStatus x%x\n", 450 mb->mbxCommand, mb->mbxStatus); 451 phba->link_state = LPFC_HBA_ERROR; 452 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 453 mempool_free(pmb, phba->mbox_mem_pool); 454 lpfc_mbuf_free(phba, mp->virt, mp->phys); 455 kfree(mp); 456 return -EIO; 457 } 458 459 mp = (struct lpfc_dmabuf *)pmb->ctx_buf; 460 461 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 462 lpfc_mbuf_free(phba, mp->virt, mp->phys); 463 kfree(mp); 464 pmb->ctx_buf = NULL; 465 lpfc_update_vport_wwn(vport); 466 467 /* Update the fc_host data structures with new wwn. */ 468 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 469 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 470 fc_host_max_npiv_vports(shost) = phba->max_vpi; 471 472 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 473 /* This should be consolidated into parse_vpd ? - mr */ 474 if (phba->SerialNumber[0] == 0) { 475 uint8_t *outptr; 476 477 outptr = &vport->fc_nodename.u.s.IEEE[0]; 478 for (i = 0; i < 12; i++) { 479 status = *outptr++; 480 j = ((status & 0xf0) >> 4); 481 if (j <= 9) 482 phba->SerialNumber[i] = 483 (char)((uint8_t) 0x30 + (uint8_t) j); 484 else 485 phba->SerialNumber[i] = 486 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 487 i++; 488 j = (status & 0xf); 489 if (j <= 9) 490 phba->SerialNumber[i] = 491 (char)((uint8_t) 0x30 + (uint8_t) j); 492 else 493 phba->SerialNumber[i] = 494 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 495 } 496 } 497 498 lpfc_read_config(phba, pmb); 499 pmb->vport = vport; 500 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 502 "0453 Adapter failed to init, mbxCmd x%x " 503 "READ_CONFIG, mbxStatus x%x\n", 504 mb->mbxCommand, mb->mbxStatus); 505 phba->link_state = LPFC_HBA_ERROR; 506 mempool_free( pmb, phba->mbox_mem_pool); 507 return -EIO; 508 } 509 510 /* Check if the port is disabled */ 511 lpfc_sli_read_link_ste(phba); 512 513 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 514 i = (mb->un.varRdConfig.max_xri + 1); 515 if (phba->cfg_hba_queue_depth > i) { 516 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 517 "3359 HBA queue depth changed from %d to %d\n", 518 phba->cfg_hba_queue_depth, i); 519 phba->cfg_hba_queue_depth = i; 520 } 521 522 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 523 i = (mb->un.varRdConfig.max_xri >> 3); 524 if (phba->pport->cfg_lun_queue_depth > i) { 525 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 526 "3360 LUN queue depth changed from %d to %d\n", 527 phba->pport->cfg_lun_queue_depth, i); 528 phba->pport->cfg_lun_queue_depth = i; 529 } 530 531 phba->lmt = mb->un.varRdConfig.lmt; 532 533 /* Get the default values for Model Name and Description */ 534 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 535 536 phba->link_state = LPFC_LINK_DOWN; 537 538 /* Only process IOCBs on ELS ring till hba_state is READY */ 539 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 540 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 541 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 542 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 543 544 /* Post receive buffers for desired rings */ 545 if 
(phba->sli_rev != 3) 546 lpfc_post_rcv_buf(phba); 547 548 /* 549 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 550 */ 551 if (phba->intr_type == MSIX) { 552 rc = lpfc_config_msi(phba, pmb); 553 if (rc) { 554 mempool_free(pmb, phba->mbox_mem_pool); 555 return -EIO; 556 } 557 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 558 if (rc != MBX_SUCCESS) { 559 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 560 "0352 Config MSI mailbox command " 561 "failed, mbxCmd x%x, mbxStatus x%x\n", 562 pmb->u.mb.mbxCommand, 563 pmb->u.mb.mbxStatus); 564 mempool_free(pmb, phba->mbox_mem_pool); 565 return -EIO; 566 } 567 } 568 569 spin_lock_irq(&phba->hbalock); 570 /* Initialize ERATT handling flag */ 571 phba->hba_flag &= ~HBA_ERATT_HANDLED; 572 573 /* Enable appropriate host interrupts */ 574 if (lpfc_readl(phba->HCregaddr, &status)) { 575 spin_unlock_irq(&phba->hbalock); 576 return -EIO; 577 } 578 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 579 if (psli->num_rings > 0) 580 status |= HC_R0INT_ENA; 581 if (psli->num_rings > 1) 582 status |= HC_R1INT_ENA; 583 if (psli->num_rings > 2) 584 status |= HC_R2INT_ENA; 585 if (psli->num_rings > 3) 586 status |= HC_R3INT_ENA; 587 588 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 589 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 590 status &= ~(HC_R0INT_ENA); 591 592 writel(status, phba->HCregaddr); 593 readl(phba->HCregaddr); /* flush */ 594 spin_unlock_irq(&phba->hbalock); 595 596 /* Set up ring-0 (ELS) timer */ 597 timeout = phba->fc_ratov * 2; 598 mod_timer(&vport->els_tmofunc, 599 jiffies + msecs_to_jiffies(1000 * timeout)); 600 /* Set up heart beat (HB) timer */ 601 mod_timer(&phba->hb_tmofunc, 602 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 603 phba->hb_outstanding = 0; 604 phba->last_completion_time = jiffies; 605 /* Set up error attention (ERATT) polling timer */ 606 mod_timer(&phba->eratt_poll, 607 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 608 609 if (phba->hba_flag & LINK_DISABLED) { 610 lpfc_printf_log(phba, 611 KERN_ERR, LOG_INIT, 612 "2598 Adapter Link is disabled.\n"); 613 lpfc_down_link(phba, pmb); 614 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 615 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 616 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 617 lpfc_printf_log(phba, 618 KERN_ERR, LOG_INIT, 619 "2599 Adapter failed to issue DOWN_LINK" 620 " mbox command rc 0x%x\n", rc); 621 622 mempool_free(pmb, phba->mbox_mem_pool); 623 return -EIO; 624 } 625 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 626 mempool_free(pmb, phba->mbox_mem_pool); 627 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 628 if (rc) 629 return rc; 630 } 631 /* MBOX buffer will be freed in mbox compl */ 632 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 633 if (!pmb) { 634 phba->link_state = LPFC_HBA_ERROR; 635 return -ENOMEM; 636 } 637 638 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 639 pmb->mbox_cmpl = lpfc_config_async_cmpl; 640 pmb->vport = phba->pport; 641 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 642 643 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 644 lpfc_printf_log(phba, 645 KERN_ERR, 646 LOG_INIT, 647 "0456 Adapter failed to issue " 648 "ASYNCEVT_ENABLE mbox status x%x\n", 649 rc); 650 mempool_free(pmb, phba->mbox_mem_pool); 651 } 652 653 /* Get Option rom version */ 654 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 655 if (!pmb) { 656 phba->link_state = LPFC_HBA_ERROR; 657 return -ENOMEM; 658 } 659 660 lpfc_dump_wakeup_param(phba, pmb); 661 pmb->mbox_cmpl = 
lpfc_dump_wakeup_param_cmpl; 662 pmb->vport = phba->pport; 663 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 664 665 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 667 "to get Option ROM version status x%x\n", rc); 668 mempool_free(pmb, phba->mbox_mem_pool); 669 } 670 671 return 0; 672 } 673 674 /** 675 * lpfc_hba_init_link - Initialize the FC link 676 * @phba: pointer to lpfc hba data structure. 677 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 678 * 679 * This routine will issue the INIT_LINK mailbox command call. 680 * It is available to other drivers through the lpfc_hba data 681 * structure for use as a delayed link up mechanism with the 682 * module parameter lpfc_suppress_link_up. 683 * 684 * Return code 685 * 0 - success 686 * Any other value - error 687 **/ 688 static int 689 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 690 { 691 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 692 } 693 694 /** 695 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 696 * @phba: pointer to lpfc hba data structure. 697 * @fc_topology: desired fc topology. 698 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 699 * 700 * This routine will issue the INIT_LINK mailbox command call. 701 * It is available to other drivers through the lpfc_hba data 702 * structure for use as a delayed link up mechanism with the 703 * module parameter lpfc_suppress_link_up. 704 * 705 * Return code 706 * 0 - success 707 * Any other value - error 708 **/ 709 int 710 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 711 uint32_t flag) 712 { 713 struct lpfc_vport *vport = phba->pport; 714 LPFC_MBOXQ_t *pmb; 715 MAILBOX_t *mb; 716 int rc; 717 718 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 719 if (!pmb) { 720 phba->link_state = LPFC_HBA_ERROR; 721 return -ENOMEM; 722 } 723 mb = &pmb->u.mb; 724 pmb->vport = vport; 725 726 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 727 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 728 !(phba->lmt & LMT_1Gb)) || 729 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 730 !(phba->lmt & LMT_2Gb)) || 731 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 732 !(phba->lmt & LMT_4Gb)) || 733 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 734 !(phba->lmt & LMT_8Gb)) || 735 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 736 !(phba->lmt & LMT_10Gb)) || 737 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 738 !(phba->lmt & LMT_16Gb)) || 739 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 740 !(phba->lmt & LMT_32Gb)) || 741 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 742 !(phba->lmt & LMT_64Gb))) { 743 /* Reset link speed to auto */ 744 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 745 "1302 Invalid speed for this board:%d " 746 "Reset link speed to auto.\n", 747 phba->cfg_link_speed); 748 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 749 } 750 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 751 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 752 if (phba->sli_rev < LPFC_SLI_REV4) 753 lpfc_set_loopback_flag(phba); 754 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 755 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 757 "0498 Adapter failed to init, mbxCmd x%x " 758 "INIT_LINK, mbxStatus x%x\n", 759 mb->mbxCommand, mb->mbxStatus); 760 if (phba->sli_rev <= LPFC_SLI_REV3) { 761 /* Clear 
all interrupt enable conditions */ 762 writel(0, phba->HCregaddr); 763 readl(phba->HCregaddr); /* flush */ 764 /* Clear all pending interrupts */ 765 writel(0xffffffff, phba->HAregaddr); 766 readl(phba->HAregaddr); /* flush */ 767 } 768 phba->link_state = LPFC_HBA_ERROR; 769 if (rc != MBX_BUSY || flag == MBX_POLL) 770 mempool_free(pmb, phba->mbox_mem_pool); 771 return -EIO; 772 } 773 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 774 if (flag == MBX_POLL) 775 mempool_free(pmb, phba->mbox_mem_pool); 776 777 return 0; 778 } 779 780 /** 781 * lpfc_hba_down_link - this routine downs the FC link 782 * @phba: pointer to lpfc hba data structure. 783 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 784 * 785 * This routine will issue the DOWN_LINK mailbox command call. 786 * It is available to other drivers through the lpfc_hba data 787 * structure for use to stop the link. 788 * 789 * Return code 790 * 0 - success 791 * Any other value - error 792 **/ 793 static int 794 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 795 { 796 LPFC_MBOXQ_t *pmb; 797 int rc; 798 799 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 800 if (!pmb) { 801 phba->link_state = LPFC_HBA_ERROR; 802 return -ENOMEM; 803 } 804 805 lpfc_printf_log(phba, 806 KERN_ERR, LOG_INIT, 807 "0491 Adapter Link is disabled.\n"); 808 lpfc_down_link(phba, pmb); 809 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 810 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 811 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 812 lpfc_printf_log(phba, 813 KERN_ERR, LOG_INIT, 814 "2522 Adapter failed to issue DOWN_LINK" 815 " mbox command rc 0x%x\n", rc); 816 817 mempool_free(pmb, phba->mbox_mem_pool); 818 return -EIO; 819 } 820 if (flag == MBX_POLL) 821 mempool_free(pmb, phba->mbox_mem_pool); 822 823 return 0; 824 } 825 826 /** 827 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 828 * @phba: pointer to lpfc HBA data structure. 829 * 830 * This routine will do LPFC uninitialization before the HBA is reset when 831 * bringing down the SLI Layer. 832 * 833 * Return codes 834 * 0 - success. 835 * Any other value - error. 836 **/ 837 int 838 lpfc_hba_down_prep(struct lpfc_hba *phba) 839 { 840 struct lpfc_vport **vports; 841 int i; 842 843 if (phba->sli_rev <= LPFC_SLI_REV3) { 844 /* Disable interrupts */ 845 writel(0, phba->HCregaddr); 846 readl(phba->HCregaddr); /* flush */ 847 } 848 849 if (phba->pport->load_flag & FC_UNLOADING) 850 lpfc_cleanup_discovery_resources(phba->pport); 851 else { 852 vports = lpfc_create_vport_work_array(phba); 853 if (vports != NULL) 854 for (i = 0; i <= phba->max_vports && 855 vports[i] != NULL; i++) 856 lpfc_cleanup_discovery_resources(vports[i]); 857 lpfc_destroy_vport_work_array(phba, vports); 858 } 859 return 0; 860 } 861 862 /** 863 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 864 * rspiocb which got deferred 865 * 866 * @phba: pointer to lpfc HBA data structure. 867 * 868 * This routine will cleanup completed slow path events after HBA is reset 869 * when bringing down the SLI Layer. 870 * 871 * 872 * Return codes 873 * void. 
874 **/ 875 static void 876 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 877 { 878 struct lpfc_iocbq *rspiocbq; 879 struct hbq_dmabuf *dmabuf; 880 struct lpfc_cq_event *cq_event; 881 882 spin_lock_irq(&phba->hbalock); 883 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 884 spin_unlock_irq(&phba->hbalock); 885 886 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 887 /* Get the response iocb from the head of work queue */ 888 spin_lock_irq(&phba->hbalock); 889 list_remove_head(&phba->sli4_hba.sp_queue_event, 890 cq_event, struct lpfc_cq_event, list); 891 spin_unlock_irq(&phba->hbalock); 892 893 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 894 case CQE_CODE_COMPL_WQE: 895 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 896 cq_event); 897 lpfc_sli_release_iocbq(phba, rspiocbq); 898 break; 899 case CQE_CODE_RECEIVE: 900 case CQE_CODE_RECEIVE_V1: 901 dmabuf = container_of(cq_event, struct hbq_dmabuf, 902 cq_event); 903 lpfc_in_buf_free(phba, &dmabuf->dbuf); 904 } 905 } 906 } 907 908 /** 909 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 910 * @phba: pointer to lpfc HBA data structure. 911 * 912 * This routine will cleanup posted ELS buffers after the HBA is reset 913 * when bringing down the SLI Layer. 914 * 915 * 916 * Return codes 917 * void. 918 **/ 919 static void 920 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 921 { 922 struct lpfc_sli *psli = &phba->sli; 923 struct lpfc_sli_ring *pring; 924 struct lpfc_dmabuf *mp, *next_mp; 925 LIST_HEAD(buflist); 926 int count; 927 928 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 929 lpfc_sli_hbqbuf_free_all(phba); 930 else { 931 /* Cleanup preposted buffers on the ELS ring */ 932 pring = &psli->sli3_ring[LPFC_ELS_RING]; 933 spin_lock_irq(&phba->hbalock); 934 list_splice_init(&pring->postbufq, &buflist); 935 spin_unlock_irq(&phba->hbalock); 936 937 count = 0; 938 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 939 list_del(&mp->list); 940 count++; 941 lpfc_mbuf_free(phba, mp->virt, mp->phys); 942 kfree(mp); 943 } 944 945 spin_lock_irq(&phba->hbalock); 946 pring->postbufq_cnt -= count; 947 spin_unlock_irq(&phba->hbalock); 948 } 949 } 950 951 /** 952 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 953 * @phba: pointer to lpfc HBA data structure. 954 * 955 * This routine will cleanup the txcmplq after the HBA is reset when bringing 956 * down the SLI Layer. 957 * 958 * Return codes 959 * void 960 **/ 961 static void 962 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 963 { 964 struct lpfc_sli *psli = &phba->sli; 965 struct lpfc_queue *qp = NULL; 966 struct lpfc_sli_ring *pring; 967 LIST_HEAD(completions); 968 int i; 969 struct lpfc_iocbq *piocb, *next_iocb; 970 971 if (phba->sli_rev != LPFC_SLI_REV4) { 972 for (i = 0; i < psli->num_rings; i++) { 973 pring = &psli->sli3_ring[i]; 974 spin_lock_irq(&phba->hbalock); 975 /* At this point in time the HBA is either reset or DOA 976 * Nothing should be on txcmplq as it will 977 * NEVER complete. 
978 */ 979 list_splice_init(&pring->txcmplq, &completions); 980 pring->txcmplq_cnt = 0; 981 spin_unlock_irq(&phba->hbalock); 982 983 lpfc_sli_abort_iocb_ring(phba, pring); 984 } 985 /* Cancel all the IOCBs from the completions list */ 986 lpfc_sli_cancel_iocbs(phba, &completions, 987 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 988 return; 989 } 990 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 991 pring = qp->pring; 992 if (!pring) 993 continue; 994 spin_lock_irq(&pring->ring_lock); 995 list_for_each_entry_safe(piocb, next_iocb, 996 &pring->txcmplq, list) 997 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 998 list_splice_init(&pring->txcmplq, &completions); 999 pring->txcmplq_cnt = 0; 1000 spin_unlock_irq(&pring->ring_lock); 1001 lpfc_sli_abort_iocb_ring(phba, pring); 1002 } 1003 /* Cancel all the IOCBs from the completions list */ 1004 lpfc_sli_cancel_iocbs(phba, &completions, 1005 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1006 } 1007 1008 /** 1009 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 1010 int i; 1011 * @phba: pointer to lpfc HBA data structure. 1012 * 1013 * This routine will do uninitialization after the HBA is reset when bring 1014 * down the SLI Layer. 1015 * 1016 * Return codes 1017 * 0 - success. 1018 * Any other value - error. 1019 **/ 1020 static int 1021 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1022 { 1023 lpfc_hba_free_post_buf(phba); 1024 lpfc_hba_clean_txcmplq(phba); 1025 return 0; 1026 } 1027 1028 /** 1029 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1030 * @phba: pointer to lpfc HBA data structure. 1031 * 1032 * This routine will do uninitialization after the HBA is reset when bring 1033 * down the SLI Layer. 1034 * 1035 * Return codes 1036 * 0 - success. 1037 * Any other value - error. 1038 **/ 1039 static int 1040 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1041 { 1042 struct lpfc_io_buf *psb, *psb_next; 1043 struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next; 1044 struct lpfc_sli4_hdw_queue *qp; 1045 LIST_HEAD(aborts); 1046 LIST_HEAD(nvme_aborts); 1047 LIST_HEAD(nvmet_aborts); 1048 struct lpfc_sglq *sglq_entry = NULL; 1049 int cnt, idx; 1050 1051 1052 lpfc_sli_hbqbuf_free_all(phba); 1053 lpfc_hba_clean_txcmplq(phba); 1054 1055 /* At this point in time the HBA is either reset or DOA. Either 1056 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1057 * on the lpfc_els_sgl_list so that it can either be freed if the 1058 * driver is unloading or reposted if the driver is restarting 1059 * the port. 1060 */ 1061 spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */ 1062 /* scsl_buf_list */ 1063 /* sgl_list_lock required because worker thread uses this 1064 * list. 1065 */ 1066 spin_lock(&phba->sli4_hba.sgl_list_lock); 1067 list_for_each_entry(sglq_entry, 1068 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1069 sglq_entry->state = SGL_FREED; 1070 1071 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1072 &phba->sli4_hba.lpfc_els_sgl_list); 1073 1074 1075 spin_unlock(&phba->sli4_hba.sgl_list_lock); 1076 1077 /* abts_xxxx_buf_list_lock required because worker thread uses this 1078 * list. 
1079 */ 1080 cnt = 0; 1081 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1082 qp = &phba->sli4_hba.hdwq[idx]; 1083 1084 spin_lock(&qp->abts_scsi_buf_list_lock); 1085 list_splice_init(&qp->lpfc_abts_scsi_buf_list, 1086 &aborts); 1087 1088 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1089 psb->pCmd = NULL; 1090 psb->status = IOSTAT_SUCCESS; 1091 cnt++; 1092 } 1093 spin_lock(&qp->io_buf_list_put_lock); 1094 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1095 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1096 qp->abts_scsi_io_bufs = 0; 1097 spin_unlock(&qp->io_buf_list_put_lock); 1098 spin_unlock(&qp->abts_scsi_buf_list_lock); 1099 1100 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1101 spin_lock(&qp->abts_nvme_buf_list_lock); 1102 list_splice_init(&qp->lpfc_abts_nvme_buf_list, 1103 &nvme_aborts); 1104 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, 1105 list) { 1106 psb->pCmd = NULL; 1107 psb->status = IOSTAT_SUCCESS; 1108 cnt++; 1109 } 1110 spin_lock(&qp->io_buf_list_put_lock); 1111 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1112 qp->abts_nvme_io_bufs = 0; 1113 list_splice_init(&nvme_aborts, 1114 &qp->lpfc_io_buf_list_put); 1115 spin_unlock(&qp->io_buf_list_put_lock); 1116 spin_unlock(&qp->abts_nvme_buf_list_lock); 1117 1118 } 1119 } 1120 spin_unlock_irq(&phba->hbalock); 1121 1122 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1123 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1124 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1125 &nvmet_aborts); 1126 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1127 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1128 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1129 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1130 } 1131 } 1132 1133 lpfc_sli4_free_sp_events(phba); 1134 return cnt; 1135 } 1136 1137 /** 1138 * lpfc_hba_down_post - Wrapper func for hba down post routine 1139 * @phba: pointer to lpfc HBA data structure. 1140 * 1141 * This routine wraps the actual SLI3 or SLI4 routine for performing 1142 * uninitialization after the HBA is reset when bring down the SLI Layer. 1143 * 1144 * Return codes 1145 * 0 - success. 1146 * Any other value - error. 1147 **/ 1148 int 1149 lpfc_hba_down_post(struct lpfc_hba *phba) 1150 { 1151 return (*phba->lpfc_hba_down_post)(phba); 1152 } 1153 1154 /** 1155 * lpfc_hb_timeout - The HBA-timer timeout handler 1156 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1157 * 1158 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1159 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1160 * work-port-events bitmap and the worker thread is notified. This timeout 1161 * event will be used by the worker thread to invoke the actual timeout 1162 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1163 * be performed in the timeout handler and the HBA timeout event bit shall 1164 * be cleared by the worker thread after it has taken the event bitmap out. 
1165 **/ 1166 static void 1167 lpfc_hb_timeout(struct timer_list *t) 1168 { 1169 struct lpfc_hba *phba; 1170 uint32_t tmo_posted; 1171 unsigned long iflag; 1172 1173 phba = from_timer(phba, t, hb_tmofunc); 1174 1175 /* Check for heart beat timeout conditions */ 1176 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1177 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1178 if (!tmo_posted) 1179 phba->pport->work_port_events |= WORKER_HB_TMO; 1180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1181 1182 /* Tell the worker thread there is work to do */ 1183 if (!tmo_posted) 1184 lpfc_worker_wake_up(phba); 1185 return; 1186 } 1187 1188 /** 1189 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1190 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1191 * 1192 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1193 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1194 * work-port-events bitmap and the worker thread is notified. This timeout 1195 * event will be used by the worker thread to invoke the actual timeout 1196 * handler routine, lpfc_rrq_handler. Any periodical operations will 1197 * be performed in the timeout handler and the RRQ timeout event bit shall 1198 * be cleared by the worker thread after it has taken the event bitmap out. 1199 **/ 1200 static void 1201 lpfc_rrq_timeout(struct timer_list *t) 1202 { 1203 struct lpfc_hba *phba; 1204 unsigned long iflag; 1205 1206 phba = from_timer(phba, t, rrq_tmr); 1207 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1208 if (!(phba->pport->load_flag & FC_UNLOADING)) 1209 phba->hba_flag |= HBA_RRQ_ACTIVE; 1210 else 1211 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1212 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1213 1214 if (!(phba->pport->load_flag & FC_UNLOADING)) 1215 lpfc_worker_wake_up(phba); 1216 } 1217 1218 /** 1219 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1220 * @phba: pointer to lpfc hba data structure. 1221 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1222 * 1223 * This is the callback function to the lpfc heart-beat mailbox command. 1224 * If configured, the lpfc driver issues the heart-beat mailbox command to 1225 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1226 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1227 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1228 * heart-beat outstanding state. Once the mailbox command comes back and 1229 * no error conditions detected, the heart-beat mailbox command timer is 1230 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1231 * state is cleared for the next heart-beat. If the timer expired with the 1232 * heart-beat outstanding state set, the driver will put the HBA offline. 
1233 **/ 1234 static void 1235 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1236 { 1237 unsigned long drvr_flag; 1238 1239 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1240 phba->hb_outstanding = 0; 1241 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1242 1243 /* Check and reset heart-beat timer is necessary */ 1244 mempool_free(pmboxq, phba->mbox_mem_pool); 1245 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1246 !(phba->link_state == LPFC_HBA_ERROR) && 1247 !(phba->pport->load_flag & FC_UNLOADING)) 1248 mod_timer(&phba->hb_tmofunc, 1249 jiffies + 1250 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1251 return; 1252 } 1253 1254 static void 1255 lpfc_hb_eq_delay_work(struct work_struct *work) 1256 { 1257 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1258 struct lpfc_hba, eq_delay_work); 1259 struct lpfc_eq_intr_info *eqi, *eqi_new; 1260 struct lpfc_queue *eq, *eq_next; 1261 unsigned char *eqcnt = NULL; 1262 uint32_t usdelay; 1263 int i; 1264 1265 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1266 return; 1267 1268 if (phba->link_state == LPFC_HBA_ERROR || 1269 phba->pport->fc_flag & FC_OFFLINE_MODE) 1270 goto requeue; 1271 1272 eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char), 1273 GFP_KERNEL); 1274 if (!eqcnt) 1275 goto requeue; 1276 1277 /* Loop thru all IRQ vectors */ 1278 for (i = 0; i < phba->cfg_irq_chann; i++) { 1279 /* Get the EQ corresponding to the IRQ vector */ 1280 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1281 if (eq && eqcnt[eq->last_cpu] < 2) 1282 eqcnt[eq->last_cpu]++; 1283 continue; 1284 } 1285 1286 for_each_present_cpu(i) { 1287 if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2) 1288 continue; 1289 1290 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1291 1292 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) * 1293 LPFC_EQ_DELAY_STEP; 1294 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1295 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1296 1297 eqi->icnt = 0; 1298 1299 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1300 if (eq->last_cpu != i) { 1301 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1302 eq->last_cpu); 1303 list_move_tail(&eq->cpu_list, &eqi_new->list); 1304 continue; 1305 } 1306 if (usdelay != eq->q_mode) 1307 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1308 usdelay); 1309 } 1310 } 1311 1312 kfree(eqcnt); 1313 1314 requeue: 1315 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1316 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1317 } 1318 1319 /** 1320 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1321 * @phba: pointer to lpfc hba data structure. 1322 * 1323 * For each heartbeat, this routine does some heuristic methods to adjust 1324 * XRI distribution. The goal is to fully utilize free XRIs. 1325 **/ 1326 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1327 { 1328 u32 i; 1329 u32 hwq_count; 1330 1331 hwq_count = phba->cfg_hdw_queue; 1332 for (i = 0; i < hwq_count; i++) { 1333 /* Adjust XRIs in private pool */ 1334 lpfc_adjust_pvt_pool_count(phba, i); 1335 1336 /* Adjust high watermark */ 1337 lpfc_adjust_high_watermark(phba, i); 1338 1339 #ifdef LPFC_MXP_STAT 1340 /* Snapshot pbl, pvt and busy count */ 1341 lpfc_snapshot_mxp(phba, i); 1342 #endif 1343 } 1344 } 1345 1346 /** 1347 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1348 * @phba: pointer to lpfc hba data structure. 1349 * 1350 * This is the actual HBA-timer timeout handler to be invoked by the worker 1351 * thread whenever the HBA timer fired and HBA-timeout event posted. 
This 1352 * handler performs any periodic operations needed for the device. If such 1353 * periodic event has already been attended to either in the interrupt handler 1354 * or by processing slow-ring or fast-ring events within the HBA-timer 1355 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1356 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1357 * is configured and there is no heart-beat mailbox command outstanding, a 1358 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1359 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1360 * to offline. 1361 **/ 1362 void 1363 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1364 { 1365 struct lpfc_vport **vports; 1366 LPFC_MBOXQ_t *pmboxq; 1367 struct lpfc_dmabuf *buf_ptr; 1368 int retval, i; 1369 struct lpfc_sli *psli = &phba->sli; 1370 LIST_HEAD(completions); 1371 1372 if (phba->cfg_xri_rebalancing) { 1373 /* Multi-XRI pools handler */ 1374 lpfc_hb_mxp_handler(phba); 1375 } 1376 1377 vports = lpfc_create_vport_work_array(phba); 1378 if (vports != NULL) 1379 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1380 lpfc_rcv_seq_check_edtov(vports[i]); 1381 lpfc_fdmi_num_disc_check(vports[i]); 1382 } 1383 lpfc_destroy_vport_work_array(phba, vports); 1384 1385 if ((phba->link_state == LPFC_HBA_ERROR) || 1386 (phba->pport->load_flag & FC_UNLOADING) || 1387 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1388 return; 1389 1390 spin_lock_irq(&phba->pport->work_port_lock); 1391 1392 if (time_after(phba->last_completion_time + 1393 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1394 jiffies)) { 1395 spin_unlock_irq(&phba->pport->work_port_lock); 1396 if (!phba->hb_outstanding) 1397 mod_timer(&phba->hb_tmofunc, 1398 jiffies + 1399 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1400 else 1401 mod_timer(&phba->hb_tmofunc, 1402 jiffies + 1403 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1404 return; 1405 } 1406 spin_unlock_irq(&phba->pport->work_port_lock); 1407 1408 if (phba->elsbuf_cnt && 1409 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1410 spin_lock_irq(&phba->hbalock); 1411 list_splice_init(&phba->elsbuf, &completions); 1412 phba->elsbuf_cnt = 0; 1413 phba->elsbuf_prev_cnt = 0; 1414 spin_unlock_irq(&phba->hbalock); 1415 1416 while (!list_empty(&completions)) { 1417 list_remove_head(&completions, buf_ptr, 1418 struct lpfc_dmabuf, list); 1419 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1420 kfree(buf_ptr); 1421 } 1422 } 1423 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1424 1425 /* If there is no heart beat outstanding, issue a heartbeat command */ 1426 if (phba->cfg_enable_hba_heartbeat) { 1427 if (!phba->hb_outstanding) { 1428 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1429 (list_empty(&psli->mboxq))) { 1430 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1431 GFP_KERNEL); 1432 if (!pmboxq) { 1433 mod_timer(&phba->hb_tmofunc, 1434 jiffies + 1435 msecs_to_jiffies(1000 * 1436 LPFC_HB_MBOX_INTERVAL)); 1437 return; 1438 } 1439 1440 lpfc_heart_beat(phba, pmboxq); 1441 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1442 pmboxq->vport = phba->pport; 1443 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1444 MBX_NOWAIT); 1445 1446 if (retval != MBX_BUSY && 1447 retval != MBX_SUCCESS) { 1448 mempool_free(pmboxq, 1449 phba->mbox_mem_pool); 1450 mod_timer(&phba->hb_tmofunc, 1451 jiffies + 1452 msecs_to_jiffies(1000 * 1453 LPFC_HB_MBOX_INTERVAL)); 1454 return; 1455 } 1456 phba->skipped_hb = 0; 1457 phba->hb_outstanding = 1; 1458 } else if 
(time_before_eq(phba->last_completion_time, 1459 phba->skipped_hb)) { 1460 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1461 "2857 Last completion time not " 1462 " updated in %d ms\n", 1463 jiffies_to_msecs(jiffies 1464 - phba->last_completion_time)); 1465 } else 1466 phba->skipped_hb = jiffies; 1467 1468 mod_timer(&phba->hb_tmofunc, 1469 jiffies + 1470 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1471 return; 1472 } else { 1473 /* 1474 * If heart beat timeout called with hb_outstanding set 1475 * we need to give the hb mailbox cmd a chance to 1476 * complete or TMO. 1477 */ 1478 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1479 "0459 Adapter heartbeat still out" 1480 "standing:last compl time was %d ms.\n", 1481 jiffies_to_msecs(jiffies 1482 - phba->last_completion_time)); 1483 mod_timer(&phba->hb_tmofunc, 1484 jiffies + 1485 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1486 } 1487 } else { 1488 mod_timer(&phba->hb_tmofunc, 1489 jiffies + 1490 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1491 } 1492 } 1493 1494 /** 1495 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1496 * @phba: pointer to lpfc hba data structure. 1497 * 1498 * This routine is called to bring the HBA offline when HBA hardware error 1499 * other than Port Error 6 has been detected. 1500 **/ 1501 static void 1502 lpfc_offline_eratt(struct lpfc_hba *phba) 1503 { 1504 struct lpfc_sli *psli = &phba->sli; 1505 1506 spin_lock_irq(&phba->hbalock); 1507 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1508 spin_unlock_irq(&phba->hbalock); 1509 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1510 1511 lpfc_offline(phba); 1512 lpfc_reset_barrier(phba); 1513 spin_lock_irq(&phba->hbalock); 1514 lpfc_sli_brdreset(phba); 1515 spin_unlock_irq(&phba->hbalock); 1516 lpfc_hba_down_post(phba); 1517 lpfc_sli_brdready(phba, HS_MBRDY); 1518 lpfc_unblock_mgmt_io(phba); 1519 phba->link_state = LPFC_HBA_ERROR; 1520 return; 1521 } 1522 1523 /** 1524 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1525 * @phba: pointer to lpfc hba data structure. 1526 * 1527 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1528 * other than Port Error 6 has been detected. 1529 **/ 1530 void 1531 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1532 { 1533 spin_lock_irq(&phba->hbalock); 1534 phba->link_state = LPFC_HBA_ERROR; 1535 spin_unlock_irq(&phba->hbalock); 1536 1537 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1538 lpfc_offline(phba); 1539 lpfc_hba_down_post(phba); 1540 lpfc_unblock_mgmt_io(phba); 1541 } 1542 1543 /** 1544 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1545 * @phba: pointer to lpfc hba data structure. 1546 * 1547 * This routine is invoked to handle the deferred HBA hardware error 1548 * conditions. This type of error is indicated by HBA by setting ER1 1549 * and another ER bit in the host status register. The driver will 1550 * wait until the ER1 bit clears before handling the error condition. 1551 **/ 1552 static void 1553 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1554 { 1555 uint32_t old_host_status = phba->work_hs; 1556 struct lpfc_sli *psli = &phba->sli; 1557 1558 /* If the pci channel is offline, ignore possible errors, 1559 * since we cannot communicate with the pci card anyway. 
1560 */ 1561 if (pci_channel_offline(phba->pcidev)) { 1562 spin_lock_irq(&phba->hbalock); 1563 phba->hba_flag &= ~DEFER_ERATT; 1564 spin_unlock_irq(&phba->hbalock); 1565 return; 1566 } 1567 1568 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1569 "0479 Deferred Adapter Hardware Error " 1570 "Data: x%x x%x x%x\n", 1571 phba->work_hs, 1572 phba->work_status[0], phba->work_status[1]); 1573 1574 spin_lock_irq(&phba->hbalock); 1575 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1576 spin_unlock_irq(&phba->hbalock); 1577 1578 1579 /* 1580 * Firmware stops when it triggred erratt. That could cause the I/Os 1581 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1582 * SCSI layer retry it after re-establishing link. 1583 */ 1584 lpfc_sli_abort_fcp_rings(phba); 1585 1586 /* 1587 * There was a firmware error. Take the hba offline and then 1588 * attempt to restart it. 1589 */ 1590 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1591 lpfc_offline(phba); 1592 1593 /* Wait for the ER1 bit to clear.*/ 1594 while (phba->work_hs & HS_FFER1) { 1595 msleep(100); 1596 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1597 phba->work_hs = UNPLUG_ERR ; 1598 break; 1599 } 1600 /* If driver is unloading let the worker thread continue */ 1601 if (phba->pport->load_flag & FC_UNLOADING) { 1602 phba->work_hs = 0; 1603 break; 1604 } 1605 } 1606 1607 /* 1608 * This is to ptrotect against a race condition in which 1609 * first write to the host attention register clear the 1610 * host status register. 1611 */ 1612 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1613 phba->work_hs = old_host_status & ~HS_FFER1; 1614 1615 spin_lock_irq(&phba->hbalock); 1616 phba->hba_flag &= ~DEFER_ERATT; 1617 spin_unlock_irq(&phba->hbalock); 1618 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1619 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1620 } 1621 1622 static void 1623 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1624 { 1625 struct lpfc_board_event_header board_event; 1626 struct Scsi_Host *shost; 1627 1628 board_event.event_type = FC_REG_BOARD_EVENT; 1629 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1630 shost = lpfc_shost_from_vport(phba->pport); 1631 fc_host_post_vendor_event(shost, fc_get_event_number(), 1632 sizeof(board_event), 1633 (char *) &board_event, 1634 LPFC_NL_VENDOR_ID); 1635 } 1636 1637 /** 1638 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1639 * @phba: pointer to lpfc hba data structure. 1640 * 1641 * This routine is invoked to handle the following HBA hardware error 1642 * conditions: 1643 * 1 - HBA error attention interrupt 1644 * 2 - DMA ring index out of range 1645 * 3 - Mailbox command came back as unknown 1646 **/ 1647 static void 1648 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1649 { 1650 struct lpfc_vport *vport = phba->pport; 1651 struct lpfc_sli *psli = &phba->sli; 1652 uint32_t event_data; 1653 unsigned long temperature; 1654 struct temp_event temp_event_data; 1655 struct Scsi_Host *shost; 1656 1657 /* If the pci channel is offline, ignore possible errors, 1658 * since we cannot communicate with the pci card anyway. 
1659 */ 1660 if (pci_channel_offline(phba->pcidev)) { 1661 spin_lock_irq(&phba->hbalock); 1662 phba->hba_flag &= ~DEFER_ERATT; 1663 spin_unlock_irq(&phba->hbalock); 1664 return; 1665 } 1666 1667 /* If resets are disabled then leave the HBA alone and return */ 1668 if (!phba->cfg_enable_hba_reset) 1669 return; 1670 1671 /* Send an internal error event to mgmt application */ 1672 lpfc_board_errevt_to_mgmt(phba); 1673 1674 if (phba->hba_flag & DEFER_ERATT) 1675 lpfc_handle_deferred_eratt(phba); 1676 1677 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1678 if (phba->work_hs & HS_FFER6) 1679 /* Re-establishing Link */ 1680 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1681 "1301 Re-establishing Link " 1682 "Data: x%x x%x x%x\n", 1683 phba->work_hs, phba->work_status[0], 1684 phba->work_status[1]); 1685 if (phba->work_hs & HS_FFER8) 1686 /* Device Zeroization */ 1687 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1688 "2861 Host Authentication device " 1689 "zeroization Data:x%x x%x x%x\n", 1690 phba->work_hs, phba->work_status[0], 1691 phba->work_status[1]); 1692 1693 spin_lock_irq(&phba->hbalock); 1694 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1695 spin_unlock_irq(&phba->hbalock); 1696 1697 /* 1698 * Firmware stops when it triggled erratt with HS_FFER6. 1699 * That could cause the I/Os dropped by the firmware. 1700 * Error iocb (I/O) on txcmplq and let the SCSI layer 1701 * retry it after re-establishing link. 1702 */ 1703 lpfc_sli_abort_fcp_rings(phba); 1704 1705 /* 1706 * There was a firmware error. Take the hba offline and then 1707 * attempt to restart it. 1708 */ 1709 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1710 lpfc_offline(phba); 1711 lpfc_sli_brdrestart(phba); 1712 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1713 lpfc_unblock_mgmt_io(phba); 1714 return; 1715 } 1716 lpfc_unblock_mgmt_io(phba); 1717 } else if (phba->work_hs & HS_CRIT_TEMP) { 1718 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1719 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1720 temp_event_data.event_code = LPFC_CRIT_TEMP; 1721 temp_event_data.data = (uint32_t)temperature; 1722 1723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1724 "0406 Adapter maximum temperature exceeded " 1725 "(%ld), taking this port offline " 1726 "Data: x%x x%x x%x\n", 1727 temperature, phba->work_hs, 1728 phba->work_status[0], phba->work_status[1]); 1729 1730 shost = lpfc_shost_from_vport(phba->pport); 1731 fc_host_post_vendor_event(shost, fc_get_event_number(), 1732 sizeof(temp_event_data), 1733 (char *) &temp_event_data, 1734 SCSI_NL_VID_TYPE_PCI 1735 | PCI_VENDOR_ID_EMULEX); 1736 1737 spin_lock_irq(&phba->hbalock); 1738 phba->over_temp_state = HBA_OVER_TEMP; 1739 spin_unlock_irq(&phba->hbalock); 1740 lpfc_offline_eratt(phba); 1741 1742 } else { 1743 /* The if clause above forces this code path when the status 1744 * failure is a value other than FFER6. Do not call the offline 1745 * twice. This is the adapter hardware error path. 
1746 */ 1747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1748 "0457 Adapter Hardware Error " 1749 "Data: x%x x%x x%x\n", 1750 phba->work_hs, 1751 phba->work_status[0], phba->work_status[1]); 1752 1753 event_data = FC_REG_DUMP_EVENT; 1754 shost = lpfc_shost_from_vport(vport); 1755 fc_host_post_vendor_event(shost, fc_get_event_number(), 1756 sizeof(event_data), (char *) &event_data, 1757 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1758 1759 lpfc_offline_eratt(phba); 1760 } 1761 return; 1762 } 1763 1764 /** 1765 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1766 * @phba: pointer to lpfc hba data structure. 1767 * @mbx_action: flag for mailbox shutdown action. 1768 * 1769 * This routine is invoked to perform an SLI4 port PCI function reset in 1770 * response to port status register polling attention. It waits for port 1771 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1772 * During this process, interrupt vectors are freed and later requested 1773 * for handling possible port resource change. 1774 **/ 1775 static int 1776 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1777 bool en_rn_msg) 1778 { 1779 int rc; 1780 uint32_t intr_mode; 1781 1782 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1783 LPFC_SLI_INTF_IF_TYPE_2) { 1784 /* 1785 * On error status condition, driver need to wait for port 1786 * ready before performing reset. 1787 */ 1788 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1789 if (rc) 1790 return rc; 1791 } 1792 1793 /* need reset: attempt for port recovery */ 1794 if (en_rn_msg) 1795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1796 "2887 Reset Needed: Attempting Port " 1797 "Recovery...\n"); 1798 lpfc_offline_prep(phba, mbx_action); 1799 lpfc_offline(phba); 1800 /* release interrupt for possible resource change */ 1801 lpfc_sli4_disable_intr(phba); 1802 rc = lpfc_sli_brdrestart(phba); 1803 if (rc) { 1804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1805 "6309 Failed to restart board\n"); 1806 return rc; 1807 } 1808 /* request and enable interrupt */ 1809 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1810 if (intr_mode == LPFC_INTR_ERROR) { 1811 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1812 "3175 Failed to enable interrupt\n"); 1813 return -EIO; 1814 } 1815 phba->intr_mode = intr_mode; 1816 rc = lpfc_online(phba); 1817 if (rc == 0) 1818 lpfc_unblock_mgmt_io(phba); 1819 1820 return rc; 1821 } 1822 1823 /** 1824 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1825 * @phba: pointer to lpfc hba data structure. 1826 * 1827 * This routine is invoked to handle the SLI4 HBA hardware error attention 1828 * conditions. 1829 **/ 1830 static void 1831 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1832 { 1833 struct lpfc_vport *vport = phba->pport; 1834 uint32_t event_data; 1835 struct Scsi_Host *shost; 1836 uint32_t if_type; 1837 struct lpfc_register portstat_reg = {0}; 1838 uint32_t reg_err1, reg_err2; 1839 uint32_t uerrlo_reg, uemasklo_reg; 1840 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1841 bool en_rn_msg = true; 1842 struct temp_event temp_event_data; 1843 struct lpfc_register portsmphr_reg; 1844 int rc, i; 1845 1846 /* If the pci channel is offline, ignore possible errors, since 1847 * we cannot communicate with the pci card anyway. 
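/*
 * The recovery path in lpfc_sli4_port_sta_fn_reset() above follows a fixed
 * order: wait for the port to report ready, take the port offline, drop the
 * interrupt vectors (resources may change across reset), restart the board,
 * re-request interrupts, bring the port back online, and unblock management
 * I/O only on success.  A standalone sketch of that ordering with stubbed
 * steps is below; none of the stub names are lpfc functions.
 */
#include <stdio.h>

static int  port_wait_ready(void)  { return 0; }
static void port_offline(void)     { puts("offline"); }
static void irq_release(void)      { puts("irq release"); }
static int  board_restart(void)    { puts("restart"); return 0; }
static int  irq_request(void)      { puts("irq request"); return 0; }
static int  port_online(void)      { puts("online"); return 0; }
static void mgmt_io_unblock(void)  { puts("unblock mgmt io"); }

static int port_fn_reset(void)
{
	int rc = port_wait_ready();

	if (rc)
		return rc;
	port_offline();
	irq_release();			/* vectors may change across reset */
	rc = board_restart();
	if (rc)
		return rc;
	rc = irq_request();
	if (rc)
		return rc;
	rc = port_online();
	if (rc == 0)
		mgmt_io_unblock();	/* only unblock after a clean online */
	return rc;
}

int main(void) { return port_fn_reset(); }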
1848 */ 1849 if (pci_channel_offline(phba->pcidev)) { 1850 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1851 "3166 pci channel is offline\n"); 1852 lpfc_sli4_offline_eratt(phba); 1853 return; 1854 } 1855 1856 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1857 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1858 switch (if_type) { 1859 case LPFC_SLI_INTF_IF_TYPE_0: 1860 pci_rd_rc1 = lpfc_readl( 1861 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1862 &uerrlo_reg); 1863 pci_rd_rc2 = lpfc_readl( 1864 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1865 &uemasklo_reg); 1866 /* consider PCI bus read error as pci_channel_offline */ 1867 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1868 return; 1869 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1870 lpfc_sli4_offline_eratt(phba); 1871 return; 1872 } 1873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1874 "7623 Checking UE recoverable"); 1875 1876 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1877 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1878 &portsmphr_reg.word0)) 1879 continue; 1880 1881 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1882 &portsmphr_reg); 1883 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1884 LPFC_PORT_SEM_UE_RECOVERABLE) 1885 break; 1886 /*Sleep for 1Sec, before checking SEMAPHORE */ 1887 msleep(1000); 1888 } 1889 1890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1891 "4827 smphr_port_status x%x : Waited %dSec", 1892 smphr_port_status, i); 1893 1894 /* Recoverable UE, reset the HBA device */ 1895 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1896 LPFC_PORT_SEM_UE_RECOVERABLE) { 1897 for (i = 0; i < 20; i++) { 1898 msleep(1000); 1899 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1900 &portsmphr_reg.word0) && 1901 (LPFC_POST_STAGE_PORT_READY == 1902 bf_get(lpfc_port_smphr_port_status, 1903 &portsmphr_reg))) { 1904 rc = lpfc_sli4_port_sta_fn_reset(phba, 1905 LPFC_MBX_NO_WAIT, en_rn_msg); 1906 if (rc == 0) 1907 return; 1908 lpfc_printf_log(phba, 1909 KERN_ERR, LOG_INIT, 1910 "4215 Failed to recover UE"); 1911 break; 1912 } 1913 } 1914 } 1915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1916 "7624 Firmware not ready: Failing UE recovery," 1917 " waited %dSec", i); 1918 lpfc_sli4_offline_eratt(phba); 1919 break; 1920 1921 case LPFC_SLI_INTF_IF_TYPE_2: 1922 case LPFC_SLI_INTF_IF_TYPE_6: 1923 pci_rd_rc1 = lpfc_readl( 1924 phba->sli4_hba.u.if_type2.STATUSregaddr, 1925 &portstat_reg.word0); 1926 /* consider PCI bus read error as pci_channel_offline */ 1927 if (pci_rd_rc1 == -EIO) { 1928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1929 "3151 PCI bus read access failure: x%x\n", 1930 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1931 lpfc_sli4_offline_eratt(phba); 1932 return; 1933 } 1934 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1935 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1936 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1938 "2889 Port Overtemperature event, " 1939 "taking port offline Data: x%x x%x\n", 1940 reg_err1, reg_err2); 1941 1942 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1943 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1944 temp_event_data.event_code = LPFC_CRIT_TEMP; 1945 temp_event_data.data = 0xFFFFFFFF; 1946 1947 shost = lpfc_shost_from_vport(phba->pport); 1948 fc_host_post_vendor_event(shost, fc_get_event_number(), 1949 sizeof(temp_event_data), 1950 (char *)&temp_event_data, 1951 SCSI_NL_VID_TYPE_PCI 1952 | PCI_VENDOR_ID_EMULEX); 1953 1954 spin_lock_irq(&phba->hbalock); 1955 
phba->over_temp_state = HBA_OVER_TEMP; 1956 spin_unlock_irq(&phba->hbalock); 1957 lpfc_sli4_offline_eratt(phba); 1958 return; 1959 } 1960 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1961 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1962 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1963 "3143 Port Down: Firmware Update " 1964 "Detected\n"); 1965 en_rn_msg = false; 1966 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1967 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1969 "3144 Port Down: Debug Dump\n"); 1970 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1971 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1973 "3145 Port Down: Provisioning\n"); 1974 1975 /* If resets are disabled then leave the HBA alone and return */ 1976 if (!phba->cfg_enable_hba_reset) 1977 return; 1978 1979 /* Check port status register for function reset */ 1980 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1981 en_rn_msg); 1982 if (rc == 0) { 1983 /* don't report event on forced debug dump */ 1984 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1985 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1986 return; 1987 else 1988 break; 1989 } 1990 /* fall through for not able to recover */ 1991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1992 "3152 Unrecoverable error, bring the port " 1993 "offline\n"); 1994 lpfc_sli4_offline_eratt(phba); 1995 break; 1996 case LPFC_SLI_INTF_IF_TYPE_1: 1997 default: 1998 break; 1999 } 2000 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2001 "3123 Report dump event to upper layer\n"); 2002 /* Send an internal error event to mgmt application */ 2003 lpfc_board_errevt_to_mgmt(phba); 2004 2005 event_data = FC_REG_DUMP_EVENT; 2006 shost = lpfc_shost_from_vport(vport); 2007 fc_host_post_vendor_event(shost, fc_get_event_number(), 2008 sizeof(event_data), (char *) &event_data, 2009 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2010 } 2011 2012 /** 2013 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2014 * @phba: pointer to lpfc HBA data structure. 2015 * 2016 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2017 * routine from the API jump table function pointer from the lpfc_hba struct. 2018 * 2019 * Return codes 2020 * 0 - success. 2021 * Any other value - error. 2022 **/ 2023 void 2024 lpfc_handle_eratt(struct lpfc_hba *phba) 2025 { 2026 (*phba->lpfc_handle_eratt)(phba); 2027 } 2028 2029 /** 2030 * lpfc_handle_latt - The HBA link event handler 2031 * @phba: pointer to lpfc hba data structure. 2032 * 2033 * This routine is invoked from the worker thread to handle a HBA host 2034 * attention link event. SLI3 only. 
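/*
 * The lpfc_handle_eratt() wrapper above dispatches through a per-adapter
 * function pointer bound to either the SLI-3 or the SLI-4 handler when the
 * API jump table is set up.  A compact standalone sketch of that dispatch
 * pattern follows; struct hba_ops and the handler names are illustrative
 * stand-ins, not the driver's real jump table.
 */
#include <stdio.h>

struct hba_stub;
struct hba_ops {
	void (*handle_eratt)(struct hba_stub *);
};
struct hba_stub {
	const struct hba_ops *ops;
};

static void handle_eratt_s3(struct hba_stub *hba) { (void)hba; puts("SLI-3 eratt"); }
static void handle_eratt_s4(struct hba_stub *hba) { (void)hba; puts("SLI-4 eratt"); }

static const struct hba_ops sli3_ops = { .handle_eratt = handle_eratt_s3 };
static const struct hba_ops sli4_ops = { .handle_eratt = handle_eratt_s4 };

int main(int argc, char **argv)
{
	/* binding chosen once, at "probe" time */
	struct hba_stub hba = { .ops = (argc > 1) ? &sli3_ops : &sli4_ops };

	(void)argv;
	hba.ops->handle_eratt(&hba);	/* wrapper-style indirect call */
	return 0;
}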
2035 **/ 2036 void 2037 lpfc_handle_latt(struct lpfc_hba *phba) 2038 { 2039 struct lpfc_vport *vport = phba->pport; 2040 struct lpfc_sli *psli = &phba->sli; 2041 LPFC_MBOXQ_t *pmb; 2042 volatile uint32_t control; 2043 struct lpfc_dmabuf *mp; 2044 int rc = 0; 2045 2046 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2047 if (!pmb) { 2048 rc = 1; 2049 goto lpfc_handle_latt_err_exit; 2050 } 2051 2052 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2053 if (!mp) { 2054 rc = 2; 2055 goto lpfc_handle_latt_free_pmb; 2056 } 2057 2058 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 2059 if (!mp->virt) { 2060 rc = 3; 2061 goto lpfc_handle_latt_free_mp; 2062 } 2063 2064 /* Cleanup any outstanding ELS commands */ 2065 lpfc_els_flush_all_cmd(phba); 2066 2067 psli->slistat.link_event++; 2068 lpfc_read_topology(phba, pmb, mp); 2069 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2070 pmb->vport = vport; 2071 /* Block ELS IOCBs until we have processed this mbox command */ 2072 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2073 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2074 if (rc == MBX_NOT_FINISHED) { 2075 rc = 4; 2076 goto lpfc_handle_latt_free_mbuf; 2077 } 2078 2079 /* Clear Link Attention in HA REG */ 2080 spin_lock_irq(&phba->hbalock); 2081 writel(HA_LATT, phba->HAregaddr); 2082 readl(phba->HAregaddr); /* flush */ 2083 spin_unlock_irq(&phba->hbalock); 2084 2085 return; 2086 2087 lpfc_handle_latt_free_mbuf: 2088 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2089 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2090 lpfc_handle_latt_free_mp: 2091 kfree(mp); 2092 lpfc_handle_latt_free_pmb: 2093 mempool_free(pmb, phba->mbox_mem_pool); 2094 lpfc_handle_latt_err_exit: 2095 /* Enable Link attention interrupts */ 2096 spin_lock_irq(&phba->hbalock); 2097 psli->sli_flag |= LPFC_PROCESS_LA; 2098 control = readl(phba->HCregaddr); 2099 control |= HC_LAINT_ENA; 2100 writel(control, phba->HCregaddr); 2101 readl(phba->HCregaddr); /* flush */ 2102 2103 /* Clear Link Attention in HA REG */ 2104 writel(HA_LATT, phba->HAregaddr); 2105 readl(phba->HAregaddr); /* flush */ 2106 spin_unlock_irq(&phba->hbalock); 2107 lpfc_linkdown(phba); 2108 phba->link_state = LPFC_HBA_ERROR; 2109 2110 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2111 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2112 2113 return; 2114 } 2115 2116 /** 2117 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2118 * @phba: pointer to lpfc hba data structure. 2119 * @vpd: pointer to the vital product data. 2120 * @len: length of the vital product data in bytes. 2121 * 2122 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2123 * an array of characters. In this routine, the ModelName, ProgramType, and 2124 * ModelDesc, etc. fields of the phba data structure will be populated. 
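/*
 * lpfc_handle_latt() above uses the common kernel unwind idiom: each
 * allocation gets its own failure label, and the labels release resources in
 * the reverse order of acquisition so every exit path frees exactly what was
 * obtained.  A self-contained userspace sketch of that ladder is below; the
 * resource names are invented for illustration (in the driver the buffers
 * are handed off to the mailbox completion on success).
 */
#include <stdio.h>
#include <stdlib.h>

static int do_three_step_setup(void)
{
	char *cmd_buf, *dma_desc, *dma_mem;
	int rc = 0;

	cmd_buf = malloc(64);
	if (!cmd_buf) {
		rc = 1;
		goto err_exit;
	}
	dma_desc = malloc(32);
	if (!dma_desc) {
		rc = 2;
		goto err_free_cmd;
	}
	dma_mem = malloc(128);
	if (!dma_mem) {
		rc = 3;
		goto err_free_desc;
	}

	puts("setup complete");	/* freed here only to keep the sketch leak-free */
	free(dma_mem);
	free(dma_desc);
	free(cmd_buf);
	return 0;

err_free_desc:
	free(dma_desc);
err_free_cmd:
	free(cmd_buf);
err_exit:
	fprintf(stderr, "setup failed at step %d\n", rc);
	return rc;
}

int main(void) { return do_three_step_setup(); }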
2125 * 2126 * Return codes 2127 * 0 - pointer to the VPD passed in is NULL 2128 * 1 - success 2129 **/ 2130 int 2131 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2132 { 2133 uint8_t lenlo, lenhi; 2134 int Length; 2135 int i, j; 2136 int finished = 0; 2137 int index = 0; 2138 2139 if (!vpd) 2140 return 0; 2141 2142 /* Vital Product */ 2143 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2144 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2145 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2146 (uint32_t) vpd[3]); 2147 while (!finished && (index < (len - 4))) { 2148 switch (vpd[index]) { 2149 case 0x82: 2150 case 0x91: 2151 index += 1; 2152 lenlo = vpd[index]; 2153 index += 1; 2154 lenhi = vpd[index]; 2155 index += 1; 2156 i = ((((unsigned short)lenhi) << 8) + lenlo); 2157 index += i; 2158 break; 2159 case 0x90: 2160 index += 1; 2161 lenlo = vpd[index]; 2162 index += 1; 2163 lenhi = vpd[index]; 2164 index += 1; 2165 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2166 if (Length > len - index) 2167 Length = len - index; 2168 while (Length > 0) { 2169 /* Look for Serial Number */ 2170 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 2171 index += 2; 2172 i = vpd[index]; 2173 index += 1; 2174 j = 0; 2175 Length -= (3+i); 2176 while(i--) { 2177 phba->SerialNumber[j++] = vpd[index++]; 2178 if (j == 31) 2179 break; 2180 } 2181 phba->SerialNumber[j] = 0; 2182 continue; 2183 } 2184 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 2185 phba->vpd_flag |= VPD_MODEL_DESC; 2186 index += 2; 2187 i = vpd[index]; 2188 index += 1; 2189 j = 0; 2190 Length -= (3+i); 2191 while(i--) { 2192 phba->ModelDesc[j++] = vpd[index++]; 2193 if (j == 255) 2194 break; 2195 } 2196 phba->ModelDesc[j] = 0; 2197 continue; 2198 } 2199 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 2200 phba->vpd_flag |= VPD_MODEL_NAME; 2201 index += 2; 2202 i = vpd[index]; 2203 index += 1; 2204 j = 0; 2205 Length -= (3+i); 2206 while(i--) { 2207 phba->ModelName[j++] = vpd[index++]; 2208 if (j == 79) 2209 break; 2210 } 2211 phba->ModelName[j] = 0; 2212 continue; 2213 } 2214 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2215 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2216 index += 2; 2217 i = vpd[index]; 2218 index += 1; 2219 j = 0; 2220 Length -= (3+i); 2221 while(i--) { 2222 phba->ProgramType[j++] = vpd[index++]; 2223 if (j == 255) 2224 break; 2225 } 2226 phba->ProgramType[j] = 0; 2227 continue; 2228 } 2229 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2230 phba->vpd_flag |= VPD_PORT; 2231 index += 2; 2232 i = vpd[index]; 2233 index += 1; 2234 j = 0; 2235 Length -= (3+i); 2236 while(i--) { 2237 if ((phba->sli_rev == LPFC_SLI_REV4) && 2238 (phba->sli4_hba.pport_name_sta == 2239 LPFC_SLI4_PPNAME_GET)) { 2240 j++; 2241 index++; 2242 } else 2243 phba->Port[j++] = vpd[index++]; 2244 if (j == 19) 2245 break; 2246 } 2247 if ((phba->sli_rev != LPFC_SLI_REV4) || 2248 (phba->sli4_hba.pport_name_sta == 2249 LPFC_SLI4_PPNAME_NON)) 2250 phba->Port[j] = 0; 2251 continue; 2252 } 2253 else { 2254 index += 2; 2255 i = vpd[index]; 2256 index += 1; 2257 index += i; 2258 Length -= (3 + i); 2259 } 2260 } 2261 finished = 0; 2262 break; 2263 case 0x78: 2264 finished = 1; 2265 break; 2266 default: 2267 index ++; 2268 break; 2269 } 2270 } 2271 2272 return(1); 2273 } 2274 2275 /** 2276 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2277 * @phba: pointer to lpfc hba data structure. 2278 * @mdp: pointer to the data structure to hold the derived model name. 
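/*
 * The VPD walk in lpfc_parse_vpd() above follows the PCI VPD layout: a
 * resource tag byte (0x82 identifier string, 0x90 read-only data, 0x78 end
 * tag), a two-byte little-endian length, then payload; within the 0x90
 * section each field is a two-character keyword ("SN", "V1", ...), a
 * one-byte length, and that many data bytes.  The standalone sketch below
 * parses just the keyword layer of a 0x90 section; it mirrors, rather than
 * reuses, the driver logic, and the sample bytes are made up.
 */
#include <stdio.h>

static void parse_vpd_r(const unsigned char *p, int len)
{
	int idx = 0;

	while (idx + 3 <= len) {
		char kw[3] = { p[idx], p[idx + 1], 0 };
		int flen = p[idx + 2];

		idx += 3;
		if (idx + flen > len)
			break;			/* truncated field */
		printf("%s = %.*s\n", kw, flen, (const char *)&p[idx]);
		idx += flen;
	}
}

int main(void)
{
	/* "SN" len 4 "1234", then "V2" len 6 "LPe123" */
	const unsigned char vpd_r[] = {
		'S', 'N', 4, '1', '2', '3', '4',
		'V', '2', 6, 'L', 'P', 'e', '1', '2', '3',
	};

	parse_vpd_r(vpd_r, (int)sizeof(vpd_r));
	return 0;
}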
2279 * @descp: pointer to the data structure to hold the derived description. 2280 * 2281 * This routine retrieves HBA's description based on its registered PCI device 2282 * ID. The @descp passed into this function points to an array of 256 chars. It 2283 * shall be returned with the model name, maximum speed, and the host bus type. 2284 * The @mdp passed into this function points to an array of 80 chars. When the 2285 * function returns, the @mdp will be filled with the model name. 2286 **/ 2287 static void 2288 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2289 { 2290 lpfc_vpd_t *vp; 2291 uint16_t dev_id = phba->pcidev->device; 2292 int max_speed; 2293 int GE = 0; 2294 int oneConnect = 0; /* default is not a oneConnect */ 2295 struct { 2296 char *name; 2297 char *bus; 2298 char *function; 2299 } m = {"<Unknown>", "", ""}; 2300 2301 if (mdp && mdp[0] != '\0' 2302 && descp && descp[0] != '\0') 2303 return; 2304 2305 if (phba->lmt & LMT_64Gb) 2306 max_speed = 64; 2307 else if (phba->lmt & LMT_32Gb) 2308 max_speed = 32; 2309 else if (phba->lmt & LMT_16Gb) 2310 max_speed = 16; 2311 else if (phba->lmt & LMT_10Gb) 2312 max_speed = 10; 2313 else if (phba->lmt & LMT_8Gb) 2314 max_speed = 8; 2315 else if (phba->lmt & LMT_4Gb) 2316 max_speed = 4; 2317 else if (phba->lmt & LMT_2Gb) 2318 max_speed = 2; 2319 else if (phba->lmt & LMT_1Gb) 2320 max_speed = 1; 2321 else 2322 max_speed = 0; 2323 2324 vp = &phba->vpd; 2325 2326 switch (dev_id) { 2327 case PCI_DEVICE_ID_FIREFLY: 2328 m = (typeof(m)){"LP6000", "PCI", 2329 "Obsolete, Unsupported Fibre Channel Adapter"}; 2330 break; 2331 case PCI_DEVICE_ID_SUPERFLY: 2332 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2333 m = (typeof(m)){"LP7000", "PCI", ""}; 2334 else 2335 m = (typeof(m)){"LP7000E", "PCI", ""}; 2336 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2337 break; 2338 case PCI_DEVICE_ID_DRAGONFLY: 2339 m = (typeof(m)){"LP8000", "PCI", 2340 "Obsolete, Unsupported Fibre Channel Adapter"}; 2341 break; 2342 case PCI_DEVICE_ID_CENTAUR: 2343 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2344 m = (typeof(m)){"LP9002", "PCI", ""}; 2345 else 2346 m = (typeof(m)){"LP9000", "PCI", ""}; 2347 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2348 break; 2349 case PCI_DEVICE_ID_RFLY: 2350 m = (typeof(m)){"LP952", "PCI", 2351 "Obsolete, Unsupported Fibre Channel Adapter"}; 2352 break; 2353 case PCI_DEVICE_ID_PEGASUS: 2354 m = (typeof(m)){"LP9802", "PCI-X", 2355 "Obsolete, Unsupported Fibre Channel Adapter"}; 2356 break; 2357 case PCI_DEVICE_ID_THOR: 2358 m = (typeof(m)){"LP10000", "PCI-X", 2359 "Obsolete, Unsupported Fibre Channel Adapter"}; 2360 break; 2361 case PCI_DEVICE_ID_VIPER: 2362 m = (typeof(m)){"LPX1000", "PCI-X", 2363 "Obsolete, Unsupported Fibre Channel Adapter"}; 2364 break; 2365 case PCI_DEVICE_ID_PFLY: 2366 m = (typeof(m)){"LP982", "PCI-X", 2367 "Obsolete, Unsupported Fibre Channel Adapter"}; 2368 break; 2369 case PCI_DEVICE_ID_TFLY: 2370 m = (typeof(m)){"LP1050", "PCI-X", 2371 "Obsolete, Unsupported Fibre Channel Adapter"}; 2372 break; 2373 case PCI_DEVICE_ID_HELIOS: 2374 m = (typeof(m)){"LP11000", "PCI-X2", 2375 "Obsolete, Unsupported Fibre Channel Adapter"}; 2376 break; 2377 case PCI_DEVICE_ID_HELIOS_SCSP: 2378 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2379 "Obsolete, Unsupported Fibre Channel Adapter"}; 2380 break; 2381 case PCI_DEVICE_ID_HELIOS_DCSP: 2382 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2383 "Obsolete, Unsupported Fibre Channel Adapter"}; 2384 break; 2385 case 
PCI_DEVICE_ID_NEPTUNE: 2386 m = (typeof(m)){"LPe1000", "PCIe", 2387 "Obsolete, Unsupported Fibre Channel Adapter"}; 2388 break; 2389 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2390 m = (typeof(m)){"LPe1000-SP", "PCIe", 2391 "Obsolete, Unsupported Fibre Channel Adapter"}; 2392 break; 2393 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2394 m = (typeof(m)){"LPe1002-SP", "PCIe", 2395 "Obsolete, Unsupported Fibre Channel Adapter"}; 2396 break; 2397 case PCI_DEVICE_ID_BMID: 2398 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2399 break; 2400 case PCI_DEVICE_ID_BSMB: 2401 m = (typeof(m)){"LP111", "PCI-X2", 2402 "Obsolete, Unsupported Fibre Channel Adapter"}; 2403 break; 2404 case PCI_DEVICE_ID_ZEPHYR: 2405 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2406 break; 2407 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2408 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2409 break; 2410 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2411 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2412 GE = 1; 2413 break; 2414 case PCI_DEVICE_ID_ZMID: 2415 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2416 break; 2417 case PCI_DEVICE_ID_ZSMB: 2418 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2419 break; 2420 case PCI_DEVICE_ID_LP101: 2421 m = (typeof(m)){"LP101", "PCI-X", 2422 "Obsolete, Unsupported Fibre Channel Adapter"}; 2423 break; 2424 case PCI_DEVICE_ID_LP10000S: 2425 m = (typeof(m)){"LP10000-S", "PCI", 2426 "Obsolete, Unsupported Fibre Channel Adapter"}; 2427 break; 2428 case PCI_DEVICE_ID_LP11000S: 2429 m = (typeof(m)){"LP11000-S", "PCI-X2", 2430 "Obsolete, Unsupported Fibre Channel Adapter"}; 2431 break; 2432 case PCI_DEVICE_ID_LPE11000S: 2433 m = (typeof(m)){"LPe11000-S", "PCIe", 2434 "Obsolete, Unsupported Fibre Channel Adapter"}; 2435 break; 2436 case PCI_DEVICE_ID_SAT: 2437 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2438 break; 2439 case PCI_DEVICE_ID_SAT_MID: 2440 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2441 break; 2442 case PCI_DEVICE_ID_SAT_SMB: 2443 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2444 break; 2445 case PCI_DEVICE_ID_SAT_DCSP: 2446 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2447 break; 2448 case PCI_DEVICE_ID_SAT_SCSP: 2449 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2450 break; 2451 case PCI_DEVICE_ID_SAT_S: 2452 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2453 break; 2454 case PCI_DEVICE_ID_HORNET: 2455 m = (typeof(m)){"LP21000", "PCIe", 2456 "Obsolete, Unsupported FCoE Adapter"}; 2457 GE = 1; 2458 break; 2459 case PCI_DEVICE_ID_PROTEUS_VF: 2460 m = (typeof(m)){"LPev12000", "PCIe IOV", 2461 "Obsolete, Unsupported Fibre Channel Adapter"}; 2462 break; 2463 case PCI_DEVICE_ID_PROTEUS_PF: 2464 m = (typeof(m)){"LPev12000", "PCIe IOV", 2465 "Obsolete, Unsupported Fibre Channel Adapter"}; 2466 break; 2467 case PCI_DEVICE_ID_PROTEUS_S: 2468 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2469 "Obsolete, Unsupported Fibre Channel Adapter"}; 2470 break; 2471 case PCI_DEVICE_ID_TIGERSHARK: 2472 oneConnect = 1; 2473 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2474 break; 2475 case PCI_DEVICE_ID_TOMCAT: 2476 oneConnect = 1; 2477 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2478 break; 2479 case PCI_DEVICE_ID_FALCON: 2480 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2481 "EmulexSecure Fibre"}; 2482 break; 2483 case PCI_DEVICE_ID_BALIUS: 2484 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2485 "Obsolete, Unsupported Fibre Channel Adapter"}; 2486 break; 2487 
case PCI_DEVICE_ID_LANCER_FC: 2488 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2489 break; 2490 case PCI_DEVICE_ID_LANCER_FC_VF: 2491 m = (typeof(m)){"LPe16000", "PCIe", 2492 "Obsolete, Unsupported Fibre Channel Adapter"}; 2493 break; 2494 case PCI_DEVICE_ID_LANCER_FCOE: 2495 oneConnect = 1; 2496 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2497 break; 2498 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2499 oneConnect = 1; 2500 m = (typeof(m)){"OCe15100", "PCIe", 2501 "Obsolete, Unsupported FCoE"}; 2502 break; 2503 case PCI_DEVICE_ID_LANCER_G6_FC: 2504 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2505 break; 2506 case PCI_DEVICE_ID_LANCER_G7_FC: 2507 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2508 break; 2509 case PCI_DEVICE_ID_SKYHAWK: 2510 case PCI_DEVICE_ID_SKYHAWK_VF: 2511 oneConnect = 1; 2512 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2513 break; 2514 default: 2515 m = (typeof(m)){"Unknown", "", ""}; 2516 break; 2517 } 2518 2519 if (mdp && mdp[0] == '\0') 2520 snprintf(mdp, 79,"%s", m.name); 2521 /* 2522 * oneConnect hba requires special processing, they are all initiators 2523 * and we put the port number on the end 2524 */ 2525 if (descp && descp[0] == '\0') { 2526 if (oneConnect) 2527 snprintf(descp, 255, 2528 "Emulex OneConnect %s, %s Initiator %s", 2529 m.name, m.function, 2530 phba->Port); 2531 else if (max_speed == 0) 2532 snprintf(descp, 255, 2533 "Emulex %s %s %s", 2534 m.name, m.bus, m.function); 2535 else 2536 snprintf(descp, 255, 2537 "Emulex %s %d%s %s %s", 2538 m.name, max_speed, (GE) ? "GE" : "Gb", 2539 m.bus, m.function); 2540 } 2541 } 2542 2543 /** 2544 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2545 * @phba: pointer to lpfc hba data structure. 2546 * @pring: pointer to a IOCB ring. 2547 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2548 * 2549 * This routine posts a given number of IOCBs with the associated DMA buffer 2550 * descriptors specified by the cnt argument to the given IOCB ring. 2551 * 2552 * Return codes 2553 * The number of IOCBs NOT able to be posted to the IOCB ring. 
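/*
 * A worked example of the description formatting at the end of
 * lpfc_get_hba_model_desc() above: a 32 Gb Lancer G6 FC adapter yields
 * "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter", while a OneConnect FCoE
 * function drops the speed and appends the port string instead.  The
 * standalone sketch below reproduces only the final snprintf() step with
 * made-up inputs.
 */
#include <stdio.h>

int main(void)
{
	char descp[256];
	const char *name = "LPe32000", *bus = "PCIe",
		   *function = "Fibre Channel Adapter";
	int max_speed = 32, ge = 0;

	if (max_speed == 0)
		snprintf(descp, sizeof(descp), "Emulex %s %s %s",
			 name, bus, function);
	else
		snprintf(descp, sizeof(descp), "Emulex %s %d%s %s %s",
			 name, max_speed, ge ? "GE" : "Gb", bus, function);

	puts(descp);	/* Emulex LPe32000 32Gb PCIe Fibre Channel Adapter */
	return 0;
}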
2554 **/ 2555 int 2556 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2557 { 2558 IOCB_t *icmd; 2559 struct lpfc_iocbq *iocb; 2560 struct lpfc_dmabuf *mp1, *mp2; 2561 2562 cnt += pring->missbufcnt; 2563 2564 /* While there are buffers to post */ 2565 while (cnt > 0) { 2566 /* Allocate buffer for command iocb */ 2567 iocb = lpfc_sli_get_iocbq(phba); 2568 if (iocb == NULL) { 2569 pring->missbufcnt = cnt; 2570 return cnt; 2571 } 2572 icmd = &iocb->iocb; 2573 2574 /* 2 buffers can be posted per command */ 2575 /* Allocate buffer to post */ 2576 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2577 if (mp1) 2578 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2579 if (!mp1 || !mp1->virt) { 2580 kfree(mp1); 2581 lpfc_sli_release_iocbq(phba, iocb); 2582 pring->missbufcnt = cnt; 2583 return cnt; 2584 } 2585 2586 INIT_LIST_HEAD(&mp1->list); 2587 /* Allocate buffer to post */ 2588 if (cnt > 1) { 2589 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2590 if (mp2) 2591 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2592 &mp2->phys); 2593 if (!mp2 || !mp2->virt) { 2594 kfree(mp2); 2595 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2596 kfree(mp1); 2597 lpfc_sli_release_iocbq(phba, iocb); 2598 pring->missbufcnt = cnt; 2599 return cnt; 2600 } 2601 2602 INIT_LIST_HEAD(&mp2->list); 2603 } else { 2604 mp2 = NULL; 2605 } 2606 2607 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2608 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2609 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2610 icmd->ulpBdeCount = 1; 2611 cnt--; 2612 if (mp2) { 2613 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2614 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2615 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2616 cnt--; 2617 icmd->ulpBdeCount = 2; 2618 } 2619 2620 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2621 icmd->ulpLe = 1; 2622 2623 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2624 IOCB_ERROR) { 2625 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2626 kfree(mp1); 2627 cnt++; 2628 if (mp2) { 2629 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2630 kfree(mp2); 2631 cnt++; 2632 } 2633 lpfc_sli_release_iocbq(phba, iocb); 2634 pring->missbufcnt = cnt; 2635 return cnt; 2636 } 2637 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2638 if (mp2) 2639 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2640 } 2641 pring->missbufcnt = 0; 2642 return 0; 2643 } 2644 2645 /** 2646 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2647 * @phba: pointer to lpfc hba data structure. 2648 * 2649 * This routine posts initial receive IOCB buffers to the ELS ring. The 2650 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2651 * set to 64 IOCBs. SLI3 only. 2652 * 2653 * Return codes 2654 * 0 - success (currently always success) 2655 **/ 2656 static int 2657 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2658 { 2659 struct lpfc_sli *psli = &phba->sli; 2660 2661 /* Ring 0, ELS / CT buffers */ 2662 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2663 /* Ring 2 - FCP no buffers needed */ 2664 2665 return 0; 2666 } 2667 2668 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2669 2670 /** 2671 * lpfc_sha_init - Set up initial array of hash table entries 2672 * @HashResultPointer: pointer to an array as hash table. 2673 * 2674 * This routine sets up the initial values to the array of hash table entries 2675 * for the LC HBAs. 
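/*
 * The S(N,V) macro defined just above is a 32-bit rotate-left by N bits, the
 * primitive used throughout lpfc_sha_iterate().  A tiny standalone check of
 * that identity (assuming 0 < N < 32 and a 32-bit operand):
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ROTL32(n, v) ((uint32_t)(((v) << (n)) | ((v) >> (32 - (n)))))

int main(void)
{
	assert(ROTL32(1, 0x80000001u) == 0x00000003u);	/* top bit wraps to bit 0 */
	assert(ROTL32(5, 0x12345678u) == 0x468ACF02u);
	printf("S(5, 0x12345678) = 0x%08X\n", ROTL32(5, 0x12345678u));
	return 0;
}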
2676 **/ 2677 static void 2678 lpfc_sha_init(uint32_t * HashResultPointer) 2679 { 2680 HashResultPointer[0] = 0x67452301; 2681 HashResultPointer[1] = 0xEFCDAB89; 2682 HashResultPointer[2] = 0x98BADCFE; 2683 HashResultPointer[3] = 0x10325476; 2684 HashResultPointer[4] = 0xC3D2E1F0; 2685 } 2686 2687 /** 2688 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2689 * @HashResultPointer: pointer to an initial/result hash table. 2690 * @HashWorkingPointer: pointer to an working hash table. 2691 * 2692 * This routine iterates an initial hash table pointed by @HashResultPointer 2693 * with the values from the working hash table pointeed by @HashWorkingPointer. 2694 * The results are putting back to the initial hash table, returned through 2695 * the @HashResultPointer as the result hash table. 2696 **/ 2697 static void 2698 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2699 { 2700 int t; 2701 uint32_t TEMP; 2702 uint32_t A, B, C, D, E; 2703 t = 16; 2704 do { 2705 HashWorkingPointer[t] = 2706 S(1, 2707 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2708 8] ^ 2709 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2710 } while (++t <= 79); 2711 t = 0; 2712 A = HashResultPointer[0]; 2713 B = HashResultPointer[1]; 2714 C = HashResultPointer[2]; 2715 D = HashResultPointer[3]; 2716 E = HashResultPointer[4]; 2717 2718 do { 2719 if (t < 20) { 2720 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2721 } else if (t < 40) { 2722 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2723 } else if (t < 60) { 2724 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2725 } else { 2726 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2727 } 2728 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2729 E = D; 2730 D = C; 2731 C = S(30, B); 2732 B = A; 2733 A = TEMP; 2734 } while (++t <= 79); 2735 2736 HashResultPointer[0] += A; 2737 HashResultPointer[1] += B; 2738 HashResultPointer[2] += C; 2739 HashResultPointer[3] += D; 2740 HashResultPointer[4] += E; 2741 2742 } 2743 2744 /** 2745 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2746 * @RandomChallenge: pointer to the entry of host challenge random number array. 2747 * @HashWorking: pointer to the entry of the working hash array. 2748 * 2749 * This routine calculates the working hash array referred by @HashWorking 2750 * from the challenge random numbers associated with the host, referred by 2751 * @RandomChallenge. The result is put into the entry of the working hash 2752 * array and returned by reference through @HashWorking. 2753 **/ 2754 static void 2755 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2756 { 2757 *HashWorking = (*RandomChallenge ^ *HashWorking); 2758 } 2759 2760 /** 2761 * lpfc_hba_init - Perform special handling for LC HBA initialization 2762 * @phba: pointer to lpfc hba data structure. 2763 * @hbainit: pointer to an array of unsigned 32-bit integers. 2764 * 2765 * This routine performs the special handling for LC HBA initialization. 
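/*
 * lpfc_hba_init() below builds an 80-word working buffer, seeds words 0/78
 * and 1/79 with the adapter WWNN, XORs in the 7-word random challenge via
 * lpfc_challenge_key(), and then runs the SHA-1-style lpfc_sha_init()/
 * lpfc_sha_iterate() pair over it (the constants in lpfc_sha_init() are the
 * standard SHA-1 initial state).  The standalone sketch below shows only
 * that data flow with a trivial stand-in mix function and made-up values;
 * it is not the driver's hash.
 */
#include <stdint.h>
#include <stdio.h>

static void stand_in_mix(uint32_t digest[5], const uint32_t work[80])
{
	int i;

	for (i = 0; i < 80; i++)
		digest[i % 5] ^= work[i];	/* placeholder, not SHA-1 */
}

int main(void)
{
	uint32_t work[80] = { 0 }, digest[5] = { 0 };
	uint32_t wwnn[2] = { 0x20000090u, 0xfa942779u };  /* made-up WWNN   */
	uint32_t challenge[7] = { 1, 2, 3, 4, 5, 6, 7 };  /* made-up random */
	int t;

	work[0] = work[78] = wwnn[0];
	work[1] = work[79] = wwnn[1];
	for (t = 0; t < 7; t++)
		work[t] ^= challenge[t];	/* the lpfc_challenge_key() step */

	stand_in_mix(digest, work);
	printf("digest[0] = 0x%08X\n", digest[0]);
	return 0;
}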
2766 **/ 2767 void 2768 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2769 { 2770 int t; 2771 uint32_t *HashWorking; 2772 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2773 2774 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2775 if (!HashWorking) 2776 return; 2777 2778 HashWorking[0] = HashWorking[78] = *pwwnn++; 2779 HashWorking[1] = HashWorking[79] = *pwwnn; 2780 2781 for (t = 0; t < 7; t++) 2782 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2783 2784 lpfc_sha_init(hbainit); 2785 lpfc_sha_iterate(hbainit, HashWorking); 2786 kfree(HashWorking); 2787 } 2788 2789 /** 2790 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2791 * @vport: pointer to a virtual N_Port data structure. 2792 * 2793 * This routine performs the necessary cleanups before deleting the @vport. 2794 * It invokes the discovery state machine to perform necessary state 2795 * transitions and to release the ndlps associated with the @vport. Note, 2796 * the physical port is treated as @vport 0. 2797 **/ 2798 void 2799 lpfc_cleanup(struct lpfc_vport *vport) 2800 { 2801 struct lpfc_hba *phba = vport->phba; 2802 struct lpfc_nodelist *ndlp, *next_ndlp; 2803 int i = 0; 2804 2805 if (phba->link_state > LPFC_LINK_DOWN) 2806 lpfc_port_link_failure(vport); 2807 2808 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2809 if (!NLP_CHK_NODE_ACT(ndlp)) { 2810 ndlp = lpfc_enable_node(vport, ndlp, 2811 NLP_STE_UNUSED_NODE); 2812 if (!ndlp) 2813 continue; 2814 spin_lock_irq(&phba->ndlp_lock); 2815 NLP_SET_FREE_REQ(ndlp); 2816 spin_unlock_irq(&phba->ndlp_lock); 2817 /* Trigger the release of the ndlp memory */ 2818 lpfc_nlp_put(ndlp); 2819 continue; 2820 } 2821 spin_lock_irq(&phba->ndlp_lock); 2822 if (NLP_CHK_FREE_REQ(ndlp)) { 2823 /* The ndlp should not be in memory free mode already */ 2824 spin_unlock_irq(&phba->ndlp_lock); 2825 continue; 2826 } else 2827 /* Indicate request for freeing ndlp memory */ 2828 NLP_SET_FREE_REQ(ndlp); 2829 spin_unlock_irq(&phba->ndlp_lock); 2830 2831 if (vport->port_type != LPFC_PHYSICAL_PORT && 2832 ndlp->nlp_DID == Fabric_DID) { 2833 /* Just free up ndlp with Fabric_DID for vports */ 2834 lpfc_nlp_put(ndlp); 2835 continue; 2836 } 2837 2838 /* take care of nodes in unused state before the state 2839 * machine taking action. 2840 */ 2841 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2842 lpfc_nlp_put(ndlp); 2843 continue; 2844 } 2845 2846 if (ndlp->nlp_type & NLP_FABRIC) 2847 lpfc_disc_state_machine(vport, ndlp, NULL, 2848 NLP_EVT_DEVICE_RECOVERY); 2849 2850 lpfc_disc_state_machine(vport, ndlp, NULL, 2851 NLP_EVT_DEVICE_RM); 2852 } 2853 2854 /* At this point, ALL ndlp's should be gone 2855 * because of the previous NLP_EVT_DEVICE_RM. 2856 * Lets wait for this to happen, if needed. 2857 */ 2858 while (!list_empty(&vport->fc_nodes)) { 2859 if (i++ > 3000) { 2860 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2861 "0233 Nodelist not empty\n"); 2862 list_for_each_entry_safe(ndlp, next_ndlp, 2863 &vport->fc_nodes, nlp_listp) { 2864 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2865 LOG_NODE, 2866 "0282 did:x%x ndlp:x%p " 2867 "usgmap:x%x refcnt:%d\n", 2868 ndlp->nlp_DID, (void *)ndlp, 2869 ndlp->nlp_usg_map, 2870 kref_read(&ndlp->kref)); 2871 } 2872 break; 2873 } 2874 2875 /* Wait for any activity on ndlps to settle */ 2876 msleep(10); 2877 } 2878 lpfc_cleanup_vports_rrqs(vport, NULL); 2879 } 2880 2881 /** 2882 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2883 * @vport: pointer to a virtual N_Port data structure. 
2884 * 2885 * This routine stops all the timers associated with a @vport. This function 2886 * is invoked before disabling or deleting a @vport. Note that the physical 2887 * port is treated as @vport 0. 2888 **/ 2889 void 2890 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2891 { 2892 del_timer_sync(&vport->els_tmofunc); 2893 del_timer_sync(&vport->delayed_disc_tmo); 2894 lpfc_can_disctmo(vport); 2895 return; 2896 } 2897 2898 /** 2899 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2900 * @phba: pointer to lpfc hba data structure. 2901 * 2902 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2903 * caller of this routine should already hold the host lock. 2904 **/ 2905 void 2906 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2907 { 2908 /* Clear pending FCF rediscovery wait flag */ 2909 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2910 2911 /* Now, try to stop the timer */ 2912 del_timer(&phba->fcf.redisc_wait); 2913 } 2914 2915 /** 2916 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2917 * @phba: pointer to lpfc hba data structure. 2918 * 2919 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2920 * checks whether the FCF rediscovery wait timer is pending with the host 2921 * lock held before proceeding with disabling the timer and clearing the 2922 * wait timer pendig flag. 2923 **/ 2924 void 2925 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2926 { 2927 spin_lock_irq(&phba->hbalock); 2928 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2929 /* FCF rediscovery timer already fired or stopped */ 2930 spin_unlock_irq(&phba->hbalock); 2931 return; 2932 } 2933 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2934 /* Clear failover in progress flags */ 2935 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2936 spin_unlock_irq(&phba->hbalock); 2937 } 2938 2939 /** 2940 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2941 * @phba: pointer to lpfc hba data structure. 2942 * 2943 * This routine stops all the timers associated with a HBA. This function is 2944 * invoked before either putting a HBA offline or unloading the driver. 2945 **/ 2946 void 2947 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2948 { 2949 if (phba->pport) 2950 lpfc_stop_vport_timers(phba->pport); 2951 cancel_delayed_work_sync(&phba->eq_delay_work); 2952 del_timer_sync(&phba->sli.mbox_tmo); 2953 del_timer_sync(&phba->fabric_block_timer); 2954 del_timer_sync(&phba->eratt_poll); 2955 del_timer_sync(&phba->hb_tmofunc); 2956 if (phba->sli_rev == LPFC_SLI_REV4) { 2957 del_timer_sync(&phba->rrq_tmr); 2958 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2959 } 2960 phba->hb_outstanding = 0; 2961 2962 switch (phba->pci_dev_grp) { 2963 case LPFC_PCI_DEV_LP: 2964 /* Stop any LightPulse device specific driver timers */ 2965 del_timer_sync(&phba->fcp_poll_timer); 2966 break; 2967 case LPFC_PCI_DEV_OC: 2968 /* Stop any OneConnect device specific driver timers */ 2969 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2970 break; 2971 default: 2972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2973 "0297 Invalid device group (x%x)\n", 2974 phba->pci_dev_grp); 2975 break; 2976 } 2977 return; 2978 } 2979 2980 /** 2981 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2982 * @phba: pointer to lpfc hba data structure. 2983 * 2984 * This routine marks a HBA's management interface as blocked. 
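/*
 * The __lpfc_sli4_stop_fcf_redisc_wait_timer()/lpfc_sli4_stop_fcf_redisc_wait_timer()
 * pair above follows the usual kernel convention: the double-underscore
 * helper assumes the caller already holds the lock, while the plain-named
 * wrapper takes the lock, re-checks the pending state, and then calls the
 * helper.  A standalone pthread sketch of that split is below; the names are
 * illustrative only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool redisc_pending = true;

/* Caller must hold state_lock. */
static void __stop_redisc_timer(void)
{
	redisc_pending = false;
	puts("timer stopped");		/* del_timer() equivalent */
}

static void stop_redisc_timer(void)
{
	pthread_mutex_lock(&state_lock);
	if (!redisc_pending) {		/* already fired or stopped */
		pthread_mutex_unlock(&state_lock);
		return;
	}
	__stop_redisc_timer();
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	stop_redisc_timer();
	stop_redisc_timer();		/* second call is a no-op */
	return 0;
}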
Once the HBA's 2985 * management interface is marked as blocked, all the user space access to 2986 * the HBA, whether they are from sysfs interface or libdfc interface will 2987 * all be blocked. The HBA is set to block the management interface when the 2988 * driver prepares the HBA interface for online or offline. 2989 **/ 2990 static void 2991 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2992 { 2993 unsigned long iflag; 2994 uint8_t actcmd = MBX_HEARTBEAT; 2995 unsigned long timeout; 2996 2997 spin_lock_irqsave(&phba->hbalock, iflag); 2998 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2999 spin_unlock_irqrestore(&phba->hbalock, iflag); 3000 if (mbx_action == LPFC_MBX_NO_WAIT) 3001 return; 3002 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 3003 spin_lock_irqsave(&phba->hbalock, iflag); 3004 if (phba->sli.mbox_active) { 3005 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 3006 /* Determine how long we might wait for the active mailbox 3007 * command to be gracefully completed by firmware. 3008 */ 3009 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 3010 phba->sli.mbox_active) * 1000) + jiffies; 3011 } 3012 spin_unlock_irqrestore(&phba->hbalock, iflag); 3013 3014 /* Wait for the outstnading mailbox command to complete */ 3015 while (phba->sli.mbox_active) { 3016 /* Check active mailbox complete status every 2ms */ 3017 msleep(2); 3018 if (time_after(jiffies, timeout)) { 3019 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3020 "2813 Mgmt IO is Blocked %x " 3021 "- mbox cmd %x still active\n", 3022 phba->sli.sli_flag, actcmd); 3023 break; 3024 } 3025 } 3026 } 3027 3028 /** 3029 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3030 * @phba: pointer to lpfc hba data structure. 3031 * 3032 * Allocate RPIs for all active remote nodes. This is needed whenever 3033 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3034 * is to fixup the temporary rpi assignments. 3035 **/ 3036 void 3037 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3038 { 3039 struct lpfc_nodelist *ndlp, *next_ndlp; 3040 struct lpfc_vport **vports; 3041 int i, rpi; 3042 unsigned long flags; 3043 3044 if (phba->sli_rev != LPFC_SLI_REV4) 3045 return; 3046 3047 vports = lpfc_create_vport_work_array(phba); 3048 if (vports == NULL) 3049 return; 3050 3051 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3052 if (vports[i]->load_flag & FC_UNLOADING) 3053 continue; 3054 3055 list_for_each_entry_safe(ndlp, next_ndlp, 3056 &vports[i]->fc_nodes, 3057 nlp_listp) { 3058 if (!NLP_CHK_NODE_ACT(ndlp)) 3059 continue; 3060 rpi = lpfc_sli4_alloc_rpi(phba); 3061 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3062 spin_lock_irqsave(&phba->ndlp_lock, flags); 3063 NLP_CLR_NODE_ACT(ndlp); 3064 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3065 continue; 3066 } 3067 ndlp->nlp_rpi = rpi; 3068 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3069 "0009 rpi:%x DID:%x " 3070 "flg:%x map:%x %p\n", ndlp->nlp_rpi, 3071 ndlp->nlp_DID, ndlp->nlp_flag, 3072 ndlp->nlp_usg_map, ndlp); 3073 } 3074 } 3075 lpfc_destroy_vport_work_array(phba, vports); 3076 } 3077 3078 /** 3079 * lpfc_create_expedite_pool - create expedite pool 3080 * @phba: pointer to lpfc hba data structure. 3081 * 3082 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3083 * to expedite pool. Mark them as expedite. 
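/*
 * lpfc_block_mgmt_io() above sets a deadline (current jiffies plus the
 * mailbox timeout converted to jiffies) and then polls every 2 ms until the
 * active mailbox drains or time_after() says the deadline has passed.  The
 * standalone sketch below shows the same deadline-then-poll shape using a
 * monotonic clock; mbox_active_sim() and the 100 ms budget are invented
 * stand-ins for the driver's mbox_active check and mailbox timeout.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool mbox_active_sim(void)
{
	static int polls_left = 5;

	return polls_left-- > 0;	/* pretend the mailbox drains soon */
}

static double now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

int main(void)
{
	const double deadline = now_ms() + 100.0;		/* deadline first */
	const struct timespec delay = { 0, 2 * 1000 * 1000 };	/* 2 ms poll */

	while (mbox_active_sim()) {
		nanosleep(&delay, NULL);
		if (now_ms() > deadline) {			/* time_after() analogue */
			puts("mailbox still active, giving up");
			return 1;
		}
	}
	puts("mailbox drained");
	return 0;
}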
**/ 3085 static void lpfc_create_expedite_pool(struct lpfc_hba *phba) 3086 { 3087 struct lpfc_sli4_hdw_queue *qp; 3088 struct lpfc_io_buf *lpfc_ncmd; 3089 struct lpfc_io_buf *lpfc_ncmd_next; 3090 struct lpfc_epd_pool *epd_pool; 3091 unsigned long iflag; 3092 3093 epd_pool = &phba->epd_pool; 3094 qp = &phba->sli4_hba.hdwq[0]; 3095 3096 spin_lock_init(&epd_pool->lock); 3097 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3098 spin_lock(&epd_pool->lock); 3099 INIT_LIST_HEAD(&epd_pool->list); 3100 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3101 &qp->lpfc_io_buf_list_put, list) { 3102 list_move_tail(&lpfc_ncmd->list, &epd_pool->list); 3103 lpfc_ncmd->expedite = true; 3104 qp->put_io_bufs--; 3105 epd_pool->count++; 3106 if (epd_pool->count >= XRI_BATCH) 3107 break; 3108 } 3109 spin_unlock(&epd_pool->lock); 3110 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3111 } 3112 3113 /** 3114 * lpfc_destroy_expedite_pool - destroy expedite pool 3115 * @phba: pointer to lpfc hba data structure. 3116 * 3117 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put 3118 * of HWQ 0 and clears the expedite mark. 3119 **/ 3120 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) 3121 { 3122 struct lpfc_sli4_hdw_queue *qp; 3123 struct lpfc_io_buf *lpfc_ncmd; 3124 struct lpfc_io_buf *lpfc_ncmd_next; 3125 struct lpfc_epd_pool *epd_pool; 3126 unsigned long iflag; 3127 3128 epd_pool = &phba->epd_pool; 3129 qp = &phba->sli4_hba.hdwq[0]; 3130 3131 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3132 spin_lock(&epd_pool->lock); 3133 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3134 &epd_pool->list, list) { 3135 list_move_tail(&lpfc_ncmd->list, 3136 &qp->lpfc_io_buf_list_put); 3137 lpfc_ncmd->flags = false; 3138 qp->put_io_bufs++; 3139 epd_pool->count--; 3140 } 3141 spin_unlock(&epd_pool->lock); 3142 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3143 } 3144 3145 /** 3146 * lpfc_create_multixri_pools - create multi-XRI pools 3147 * @phba: pointer to lpfc hba data structure. 3148 * 3149 * This routine initializes the public and private XRI pools for each HWQ, 3150 * then moves XRIs from lpfc_io_buf_list_put to the public pool. High and 3151 * low watermarks are also initialized.
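/*
 * Worked example of the sizing done in lpfc_create_multixri_pools() below:
 * with, say, 2048 common XRIs spread over 16 hardware queues, each HWQ gets
 * xri_limit = 2048 / 16 = 128, a private-pool high watermark of
 * 128 / 2 = 64, and a low watermark of XRI_BATCH.  The numbers here are
 * hypothetical; only the formulas come from the code below.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int io_xri_cnt = 2048, hwq_count = 16, xri_batch = 16;
	unsigned int xri_limit = io_xri_cnt / hwq_count;

	printf("xri_limit=%u high_watermark=%u low_watermark=%u\n",
	       xri_limit, xri_limit / 2, xri_batch);
	return 0;
}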
3152 **/ 3153 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3154 { 3155 u32 i, j; 3156 u32 hwq_count; 3157 u32 count_per_hwq; 3158 struct lpfc_io_buf *lpfc_ncmd; 3159 struct lpfc_io_buf *lpfc_ncmd_next; 3160 unsigned long iflag; 3161 struct lpfc_sli4_hdw_queue *qp; 3162 struct lpfc_multixri_pool *multixri_pool; 3163 struct lpfc_pbl_pool *pbl_pool; 3164 struct lpfc_pvt_pool *pvt_pool; 3165 3166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3167 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3168 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3169 phba->sli4_hba.io_xri_cnt); 3170 3171 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3172 lpfc_create_expedite_pool(phba); 3173 3174 hwq_count = phba->cfg_hdw_queue; 3175 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3176 3177 for (i = 0; i < hwq_count; i++) { 3178 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3179 3180 if (!multixri_pool) { 3181 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3182 "1238 Failed to allocate memory for " 3183 "multixri_pool\n"); 3184 3185 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3186 lpfc_destroy_expedite_pool(phba); 3187 3188 j = 0; 3189 while (j < i) { 3190 qp = &phba->sli4_hba.hdwq[j]; 3191 kfree(qp->p_multixri_pool); 3192 j++; 3193 } 3194 phba->cfg_xri_rebalancing = 0; 3195 return; 3196 } 3197 3198 qp = &phba->sli4_hba.hdwq[i]; 3199 qp->p_multixri_pool = multixri_pool; 3200 3201 multixri_pool->xri_limit = count_per_hwq; 3202 multixri_pool->rrb_next_hwqid = i; 3203 3204 /* Deal with public free xri pool */ 3205 pbl_pool = &multixri_pool->pbl_pool; 3206 spin_lock_init(&pbl_pool->lock); 3207 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3208 spin_lock(&pbl_pool->lock); 3209 INIT_LIST_HEAD(&pbl_pool->list); 3210 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3211 &qp->lpfc_io_buf_list_put, list) { 3212 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3213 qp->put_io_bufs--; 3214 pbl_pool->count++; 3215 } 3216 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3217 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3218 pbl_pool->count, i); 3219 spin_unlock(&pbl_pool->lock); 3220 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3221 3222 /* Deal with private free xri pool */ 3223 pvt_pool = &multixri_pool->pvt_pool; 3224 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3225 pvt_pool->low_watermark = XRI_BATCH; 3226 spin_lock_init(&pvt_pool->lock); 3227 spin_lock_irqsave(&pvt_pool->lock, iflag); 3228 INIT_LIST_HEAD(&pvt_pool->list); 3229 pvt_pool->count = 0; 3230 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3231 } 3232 } 3233 3234 /** 3235 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3236 * @phba: pointer to lpfc hba data structure. 3237 * 3238 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
3239 **/ 3240 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3241 { 3242 u32 i; 3243 u32 hwq_count; 3244 struct lpfc_io_buf *lpfc_ncmd; 3245 struct lpfc_io_buf *lpfc_ncmd_next; 3246 unsigned long iflag; 3247 struct lpfc_sli4_hdw_queue *qp; 3248 struct lpfc_multixri_pool *multixri_pool; 3249 struct lpfc_pbl_pool *pbl_pool; 3250 struct lpfc_pvt_pool *pvt_pool; 3251 3252 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3253 lpfc_destroy_expedite_pool(phba); 3254 3255 if (!(phba->pport->load_flag & FC_UNLOADING)) { 3256 lpfc_sli_flush_fcp_rings(phba); 3257 3258 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3259 lpfc_sli_flush_nvme_rings(phba); 3260 } 3261 3262 hwq_count = phba->cfg_hdw_queue; 3263 3264 for (i = 0; i < hwq_count; i++) { 3265 qp = &phba->sli4_hba.hdwq[i]; 3266 multixri_pool = qp->p_multixri_pool; 3267 if (!multixri_pool) 3268 continue; 3269 3270 qp->p_multixri_pool = NULL; 3271 3272 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3273 3274 /* Deal with public free xri pool */ 3275 pbl_pool = &multixri_pool->pbl_pool; 3276 spin_lock(&pbl_pool->lock); 3277 3278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3279 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3280 pbl_pool->count, i); 3281 3282 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3283 &pbl_pool->list, list) { 3284 list_move_tail(&lpfc_ncmd->list, 3285 &qp->lpfc_io_buf_list_put); 3286 qp->put_io_bufs++; 3287 pbl_pool->count--; 3288 } 3289 3290 INIT_LIST_HEAD(&pbl_pool->list); 3291 pbl_pool->count = 0; 3292 3293 spin_unlock(&pbl_pool->lock); 3294 3295 /* Deal with private free xri pool */ 3296 pvt_pool = &multixri_pool->pvt_pool; 3297 spin_lock(&pvt_pool->lock); 3298 3299 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3300 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3301 pvt_pool->count, i); 3302 3303 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3304 &pvt_pool->list, list) { 3305 list_move_tail(&lpfc_ncmd->list, 3306 &qp->lpfc_io_buf_list_put); 3307 qp->put_io_bufs++; 3308 pvt_pool->count--; 3309 } 3310 3311 INIT_LIST_HEAD(&pvt_pool->list); 3312 pvt_pool->count = 0; 3313 3314 spin_unlock(&pvt_pool->lock); 3315 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3316 3317 kfree(multixri_pool); 3318 } 3319 } 3320 3321 /** 3322 * lpfc_online - Initialize and bring a HBA online 3323 * @phba: pointer to lpfc hba data structure. 3324 * 3325 * This routine initializes the HBA and brings a HBA online. During this 3326 * process, the management interface is blocked to prevent user space access 3327 * to the HBA interfering with the driver initialization. 3328 * 3329 * Return codes 3330 * 0 - successful 3331 * 1 - failed 3332 **/ 3333 int 3334 lpfc_online(struct lpfc_hba *phba) 3335 { 3336 struct lpfc_vport *vport; 3337 struct lpfc_vport **vports; 3338 int i, error = 0; 3339 bool vpis_cleared = false; 3340 3341 if (!phba) 3342 return 0; 3343 vport = phba->pport; 3344 3345 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 3346 return 0; 3347 3348 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3349 "0458 Bring Adapter online\n"); 3350 3351 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3352 3353 if (phba->sli_rev == LPFC_SLI_REV4) { 3354 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3355 lpfc_unblock_mgmt_io(phba); 3356 return 1; 3357 } 3358 spin_lock_irq(&phba->hbalock); 3359 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3360 vpis_cleared = true; 3361 spin_unlock_irq(&phba->hbalock); 3362 3363 /* Reestablish the local initiator port. 
3364 * The offline process destroyed the previous lport. 3365 */ 3366 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3367 !phba->nvmet_support) { 3368 error = lpfc_nvme_create_localport(phba->pport); 3369 if (error) 3370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3371 "6132 NVME restore reg failed " 3372 "on nvmei error x%x\n", error); 3373 } 3374 } else { 3375 lpfc_sli_queue_init(phba); 3376 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3377 lpfc_unblock_mgmt_io(phba); 3378 return 1; 3379 } 3380 } 3381 3382 vports = lpfc_create_vport_work_array(phba); 3383 if (vports != NULL) { 3384 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3385 struct Scsi_Host *shost; 3386 shost = lpfc_shost_from_vport(vports[i]); 3387 spin_lock_irq(shost->host_lock); 3388 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 3389 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3390 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3391 if (phba->sli_rev == LPFC_SLI_REV4) { 3392 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 3393 if ((vpis_cleared) && 3394 (vports[i]->port_type != 3395 LPFC_PHYSICAL_PORT)) 3396 vports[i]->vpi = 0; 3397 } 3398 spin_unlock_irq(shost->host_lock); 3399 } 3400 } 3401 lpfc_destroy_vport_work_array(phba, vports); 3402 3403 if (phba->cfg_xri_rebalancing) 3404 lpfc_create_multixri_pools(phba); 3405 3406 lpfc_unblock_mgmt_io(phba); 3407 return 0; 3408 } 3409 3410 /** 3411 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3412 * @phba: pointer to lpfc hba data structure. 3413 * 3414 * This routine marks a HBA's management interface as not blocked. Once the 3415 * HBA's management interface is marked as not blocked, all the user space 3416 * access to the HBA, whether they are from sysfs interface or libdfc 3417 * interface will be allowed. The HBA is set to block the management interface 3418 * when the driver prepares the HBA interface for online or offline and then 3419 * set to unblock the management interface afterwards. 3420 **/ 3421 void 3422 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3423 { 3424 unsigned long iflag; 3425 3426 spin_lock_irqsave(&phba->hbalock, iflag); 3427 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3428 spin_unlock_irqrestore(&phba->hbalock, iflag); 3429 } 3430 3431 /** 3432 * lpfc_offline_prep - Prepare a HBA to be brought offline 3433 * @phba: pointer to lpfc hba data structure. 3434 * 3435 * This routine is invoked to prepare a HBA to be brought offline. It performs 3436 * unregistration login to all the nodes on all vports and flushes the mailbox 3437 * queue to make it ready to be brought offline. 
3438 **/ 3439 void 3440 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3441 { 3442 struct lpfc_vport *vport = phba->pport; 3443 struct lpfc_nodelist *ndlp, *next_ndlp; 3444 struct lpfc_vport **vports; 3445 struct Scsi_Host *shost; 3446 int i; 3447 3448 if (vport->fc_flag & FC_OFFLINE_MODE) 3449 return; 3450 3451 lpfc_block_mgmt_io(phba, mbx_action); 3452 3453 lpfc_linkdown(phba); 3454 3455 /* Issue an unreg_login to all nodes on all vports */ 3456 vports = lpfc_create_vport_work_array(phba); 3457 if (vports != NULL) { 3458 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3459 if (vports[i]->load_flag & FC_UNLOADING) 3460 continue; 3461 shost = lpfc_shost_from_vport(vports[i]); 3462 spin_lock_irq(shost->host_lock); 3463 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3464 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3465 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3466 spin_unlock_irq(shost->host_lock); 3467 3468 shost = lpfc_shost_from_vport(vports[i]); 3469 list_for_each_entry_safe(ndlp, next_ndlp, 3470 &vports[i]->fc_nodes, 3471 nlp_listp) { 3472 if (!NLP_CHK_NODE_ACT(ndlp)) 3473 continue; 3474 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3475 continue; 3476 if (ndlp->nlp_type & NLP_FABRIC) { 3477 lpfc_disc_state_machine(vports[i], ndlp, 3478 NULL, NLP_EVT_DEVICE_RECOVERY); 3479 lpfc_disc_state_machine(vports[i], ndlp, 3480 NULL, NLP_EVT_DEVICE_RM); 3481 } 3482 spin_lock_irq(shost->host_lock); 3483 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3484 spin_unlock_irq(shost->host_lock); 3485 /* 3486 * Whenever an SLI4 port goes offline, free the 3487 * RPI. Get a new RPI when the adapter port 3488 * comes back online. 3489 */ 3490 if (phba->sli_rev == LPFC_SLI_REV4) { 3491 lpfc_printf_vlog(ndlp->vport, 3492 KERN_INFO, LOG_NODE, 3493 "0011 lpfc_offline: " 3494 "ndlp:x%p did %x " 3495 "usgmap:x%x rpi:%x\n", 3496 ndlp, ndlp->nlp_DID, 3497 ndlp->nlp_usg_map, 3498 ndlp->nlp_rpi); 3499 3500 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3501 } 3502 lpfc_unreg_rpi(vports[i], ndlp); 3503 } 3504 } 3505 } 3506 lpfc_destroy_vport_work_array(phba, vports); 3507 3508 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3509 3510 if (phba->wq) 3511 flush_workqueue(phba->wq); 3512 } 3513 3514 /** 3515 * lpfc_offline - Bring a HBA offline 3516 * @phba: pointer to lpfc hba data structure. 3517 * 3518 * This routine actually brings a HBA offline. It stops all the timers 3519 * associated with the HBA, brings down the SLI layer, and eventually 3520 * marks the HBA as in offline state for the upper layer protocol. 3521 **/ 3522 void 3523 lpfc_offline(struct lpfc_hba *phba) 3524 { 3525 struct Scsi_Host *shost; 3526 struct lpfc_vport **vports; 3527 int i; 3528 3529 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3530 return; 3531 3532 /* stop port and all timers associated with this hba */ 3533 lpfc_stop_port(phba); 3534 3535 /* Tear down the local and target port registrations. The 3536 * nvme transports need to cleanup. 3537 */ 3538 lpfc_nvmet_destroy_targetport(phba); 3539 lpfc_nvme_destroy_localport(phba->pport); 3540 3541 vports = lpfc_create_vport_work_array(phba); 3542 if (vports != NULL) 3543 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3544 lpfc_stop_vport_timers(vports[i]); 3545 lpfc_destroy_vport_work_array(phba, vports); 3546 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3547 "0460 Bring Adapter offline\n"); 3548 /* Bring down the SLI Layer and cleanup. The HBA is offline 3549 now. 
*/ 3550 lpfc_sli_hba_down(phba); 3551 spin_lock_irq(&phba->hbalock); 3552 phba->work_ha = 0; 3553 spin_unlock_irq(&phba->hbalock); 3554 vports = lpfc_create_vport_work_array(phba); 3555 if (vports != NULL) 3556 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3557 shost = lpfc_shost_from_vport(vports[i]); 3558 spin_lock_irq(shost->host_lock); 3559 vports[i]->work_port_events = 0; 3560 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3561 spin_unlock_irq(shost->host_lock); 3562 } 3563 lpfc_destroy_vport_work_array(phba, vports); 3564 3565 if (phba->cfg_xri_rebalancing) 3566 lpfc_destroy_multixri_pools(phba); 3567 } 3568 3569 /** 3570 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3571 * @phba: pointer to lpfc hba data structure. 3572 * 3573 * This routine is to free all the SCSI buffers and IOCBs from the driver 3574 * list back to kernel. It is called from lpfc_pci_remove_one to free 3575 * the internal resources before the device is removed from the system. 3576 **/ 3577 static void 3578 lpfc_scsi_free(struct lpfc_hba *phba) 3579 { 3580 struct lpfc_io_buf *sb, *sb_next; 3581 3582 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3583 return; 3584 3585 spin_lock_irq(&phba->hbalock); 3586 3587 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3588 3589 spin_lock(&phba->scsi_buf_list_put_lock); 3590 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3591 list) { 3592 list_del(&sb->list); 3593 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3594 sb->dma_handle); 3595 kfree(sb); 3596 phba->total_scsi_bufs--; 3597 } 3598 spin_unlock(&phba->scsi_buf_list_put_lock); 3599 3600 spin_lock(&phba->scsi_buf_list_get_lock); 3601 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3602 list) { 3603 list_del(&sb->list); 3604 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3605 sb->dma_handle); 3606 kfree(sb); 3607 phba->total_scsi_bufs--; 3608 } 3609 spin_unlock(&phba->scsi_buf_list_get_lock); 3610 spin_unlock_irq(&phba->hbalock); 3611 } 3612 3613 /** 3614 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 3615 * @phba: pointer to lpfc hba data structure. 3616 * 3617 * This routine is to free all the IO buffers and IOCBs from the driver 3618 * list back to kernel. It is called from lpfc_pci_remove_one to free 3619 * the internal resources before the device is removed from the system. 3620 **/ 3621 void 3622 lpfc_io_free(struct lpfc_hba *phba) 3623 { 3624 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 3625 struct lpfc_sli4_hdw_queue *qp; 3626 int idx; 3627 3628 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3629 qp = &phba->sli4_hba.hdwq[idx]; 3630 /* Release all the lpfc_nvme_bufs maintained by this host. 
*/ 3631 spin_lock(&qp->io_buf_list_put_lock); 3632 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3633 &qp->lpfc_io_buf_list_put, 3634 list) { 3635 list_del(&lpfc_ncmd->list); 3636 qp->put_io_bufs--; 3637 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3638 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3639 kfree(lpfc_ncmd); 3640 qp->total_io_bufs--; 3641 } 3642 spin_unlock(&qp->io_buf_list_put_lock); 3643 3644 spin_lock(&qp->io_buf_list_get_lock); 3645 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3646 &qp->lpfc_io_buf_list_get, 3647 list) { 3648 list_del(&lpfc_ncmd->list); 3649 qp->get_io_bufs--; 3650 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3651 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3652 kfree(lpfc_ncmd); 3653 qp->total_io_bufs--; 3654 } 3655 spin_unlock(&qp->io_buf_list_get_lock); 3656 } 3657 } 3658 3659 /** 3660 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping 3661 * @phba: pointer to lpfc hba data structure. 3662 * 3663 * This routine first calculates the sizes of the current els and allocated 3664 * scsi sgl lists, and then goes through all sgls to updates the physical 3665 * XRIs assigned due to port function reset. During port initialization, the 3666 * current els and allocated scsi sgl lists are 0s. 3667 * 3668 * Return codes 3669 * 0 - successful (for now, it always returns 0) 3670 **/ 3671 int 3672 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 3673 { 3674 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3675 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3676 LIST_HEAD(els_sgl_list); 3677 int rc; 3678 3679 /* 3680 * update on pci function's els xri-sgl list 3681 */ 3682 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3683 3684 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3685 /* els xri-sgl expanded */ 3686 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3687 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3688 "3157 ELS xri-sgl count increased from " 3689 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3690 els_xri_cnt); 3691 /* allocate the additional els sgls */ 3692 for (i = 0; i < xri_cnt; i++) { 3693 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3694 GFP_KERNEL); 3695 if (sglq_entry == NULL) { 3696 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3697 "2562 Failure to allocate an " 3698 "ELS sgl entry:%d\n", i); 3699 rc = -ENOMEM; 3700 goto out_free_mem; 3701 } 3702 sglq_entry->buff_type = GEN_BUFF_TYPE; 3703 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3704 &sglq_entry->phys); 3705 if (sglq_entry->virt == NULL) { 3706 kfree(sglq_entry); 3707 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3708 "2563 Failure to allocate an " 3709 "ELS mbuf:%d\n", i); 3710 rc = -ENOMEM; 3711 goto out_free_mem; 3712 } 3713 sglq_entry->sgl = sglq_entry->virt; 3714 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3715 sglq_entry->state = SGL_FREED; 3716 list_add_tail(&sglq_entry->list, &els_sgl_list); 3717 } 3718 spin_lock_irq(&phba->hbalock); 3719 spin_lock(&phba->sli4_hba.sgl_list_lock); 3720 list_splice_init(&els_sgl_list, 3721 &phba->sli4_hba.lpfc_els_sgl_list); 3722 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3723 spin_unlock_irq(&phba->hbalock); 3724 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3725 /* els xri-sgl shrinked */ 3726 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3727 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3728 "3158 ELS xri-sgl count decreased from " 3729 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3730 els_xri_cnt); 3731 spin_lock_irq(&phba->hbalock); 3732 spin_lock(&phba->sli4_hba.sgl_list_lock); 3733 
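/* Temporarily move the active ELS sgl list onto a local list; the first xri_cnt entries are freed and the remainder is spliced back below. */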
list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 3734 &els_sgl_list); 3735 /* release extra els sgls from list */ 3736 for (i = 0; i < xri_cnt; i++) { 3737 list_remove_head(&els_sgl_list, 3738 sglq_entry, struct lpfc_sglq, list); 3739 if (sglq_entry) { 3740 __lpfc_mbuf_free(phba, sglq_entry->virt, 3741 sglq_entry->phys); 3742 kfree(sglq_entry); 3743 } 3744 } 3745 list_splice_init(&els_sgl_list, 3746 &phba->sli4_hba.lpfc_els_sgl_list); 3747 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3748 spin_unlock_irq(&phba->hbalock); 3749 } else 3750 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3751 "3163 ELS xri-sgl count unchanged: %d\n", 3752 els_xri_cnt); 3753 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3754 3755 /* update xris to els sgls on the list */ 3756 sglq_entry = NULL; 3757 sglq_entry_next = NULL; 3758 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3759 &phba->sli4_hba.lpfc_els_sgl_list, list) { 3760 lxri = lpfc_sli4_next_xritag(phba); 3761 if (lxri == NO_XRI) { 3762 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3763 "2400 Failed to allocate xri for " 3764 "ELS sgl\n"); 3765 rc = -ENOMEM; 3766 goto out_free_mem; 3767 } 3768 sglq_entry->sli4_lxritag = lxri; 3769 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3770 } 3771 return 0; 3772 3773 out_free_mem: 3774 lpfc_free_els_sgl_list(phba); 3775 return rc; 3776 } 3777 3778 /** 3779 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping 3780 * @phba: pointer to lpfc hba data structure. 3781 * 3782 * This routine first calculates the sizes of the current els and allocated 3783 * scsi sgl lists, and then goes through all sgls to updates the physical 3784 * XRIs assigned due to port function reset. During port initialization, the 3785 * current els and allocated scsi sgl lists are 0s. 3786 * 3787 * Return codes 3788 * 0 - successful (for now, it always returns 0) 3789 **/ 3790 int 3791 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 3792 { 3793 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3794 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3795 uint16_t nvmet_xri_cnt; 3796 LIST_HEAD(nvmet_sgl_list); 3797 int rc; 3798 3799 /* 3800 * update on pci function's nvmet xri-sgl list 3801 */ 3802 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3803 3804 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 3805 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3806 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3807 /* els xri-sgl expanded */ 3808 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 3809 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3810 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 3811 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 3812 /* allocate the additional nvmet sgls */ 3813 for (i = 0; i < xri_cnt; i++) { 3814 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3815 GFP_KERNEL); 3816 if (sglq_entry == NULL) { 3817 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3818 "6303 Failure to allocate an " 3819 "NVMET sgl entry:%d\n", i); 3820 rc = -ENOMEM; 3821 goto out_free_mem; 3822 } 3823 sglq_entry->buff_type = NVMET_BUFF_TYPE; 3824 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 3825 &sglq_entry->phys); 3826 if (sglq_entry->virt == NULL) { 3827 kfree(sglq_entry); 3828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3829 "6304 Failure to allocate an " 3830 "NVMET buf:%d\n", i); 3831 rc = -ENOMEM; 3832 goto out_free_mem; 3833 } 3834 sglq_entry->sgl = sglq_entry->virt; 3835 memset(sglq_entry->sgl, 0, 3836 phba->cfg_sg_dma_buf_size); 3837 sglq_entry->state = SGL_FREED; 3838 
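/* Queue the new sgl on the local list; it is spliced onto the driver's nvmet sgl list under the hbalock and sgl_list_lock below. */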
list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 3839 } 3840 spin_lock_irq(&phba->hbalock); 3841 spin_lock(&phba->sli4_hba.sgl_list_lock); 3842 list_splice_init(&nvmet_sgl_list, 3843 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3844 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3845 spin_unlock_irq(&phba->hbalock); 3846 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 3847 /* nvmet xri-sgl shrunk */ 3848 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 3849 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3850 "6305 NVMET xri-sgl count decreased from " 3851 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 3852 nvmet_xri_cnt); 3853 spin_lock_irq(&phba->hbalock); 3854 spin_lock(&phba->sli4_hba.sgl_list_lock); 3855 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 3856 &nvmet_sgl_list); 3857 /* release extra nvmet sgls from list */ 3858 for (i = 0; i < xri_cnt; i++) { 3859 list_remove_head(&nvmet_sgl_list, 3860 sglq_entry, struct lpfc_sglq, list); 3861 if (sglq_entry) { 3862 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 3863 sglq_entry->phys); 3864 kfree(sglq_entry); 3865 } 3866 } 3867 list_splice_init(&nvmet_sgl_list, 3868 &phba->sli4_hba.lpfc_nvmet_sgl_list); 3869 spin_unlock(&phba->sli4_hba.sgl_list_lock); 3870 spin_unlock_irq(&phba->hbalock); 3871 } else 3872 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3873 "6306 NVMET xri-sgl count unchanged: %d\n", 3874 nvmet_xri_cnt); 3875 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 3876 3877 /* update xris to nvmet sgls on the list */ 3878 sglq_entry = NULL; 3879 sglq_entry_next = NULL; 3880 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3881 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 3882 lxri = lpfc_sli4_next_xritag(phba); 3883 if (lxri == NO_XRI) { 3884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3885 "6307 Failed to allocate xri for " 3886 "NVMET sgl\n"); 3887 rc = -ENOMEM; 3888 goto out_free_mem; 3889 } 3890 sglq_entry->sli4_lxritag = lxri; 3891 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3892 } 3893 return 0; 3894 3895 out_free_mem: 3896 lpfc_free_nvmet_sgl_list(phba); 3897 return rc; 3898 } 3899 3900 int 3901 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 3902 { 3903 LIST_HEAD(blist); 3904 struct lpfc_sli4_hdw_queue *qp; 3905 struct lpfc_io_buf *lpfc_cmd; 3906 struct lpfc_io_buf *iobufp, *prev_iobufp; 3907 int idx, cnt, xri, inserted; 3908 3909 cnt = 0; 3910 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3911 qp = &phba->sli4_hba.hdwq[idx]; 3912 spin_lock_irq(&qp->io_buf_list_get_lock); 3913 spin_lock(&qp->io_buf_list_put_lock); 3914 3915 /* Take everything off the get and put lists */ 3916 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 3917 list_splice(&qp->lpfc_io_buf_list_put, &blist); 3918 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 3919 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 3920 cnt += qp->get_io_bufs + qp->put_io_bufs; 3921 qp->get_io_bufs = 0; 3922 qp->put_io_bufs = 0; 3923 qp->total_io_bufs = 0; 3924 spin_unlock(&qp->io_buf_list_put_lock); 3925 spin_unlock_irq(&qp->io_buf_list_get_lock); 3926 } 3927 3928 /* 3929 * Take IO buffers off blist and put on cbuf sorted by XRI. 3930 * This is because POST_SGL takes a sequential range of XRIs 3931 * to post to the firmware. 
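 * The buffers are placed on cbuf in ascending XRI order using a simple insertion scan.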
3932 */ 3933 for (idx = 0; idx < cnt; idx++) { 3934 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 3935 if (!lpfc_cmd) 3936 return cnt; 3937 if (idx == 0) { 3938 list_add_tail(&lpfc_cmd->list, cbuf); 3939 continue; 3940 } 3941 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 3942 inserted = 0; 3943 prev_iobufp = NULL; 3944 list_for_each_entry(iobufp, cbuf, list) { 3945 if (xri < iobufp->cur_iocbq.sli4_xritag) { 3946 if (prev_iobufp) 3947 list_add(&lpfc_cmd->list, 3948 &prev_iobufp->list); 3949 else 3950 list_add(&lpfc_cmd->list, cbuf); 3951 inserted = 1; 3952 break; 3953 } 3954 prev_iobufp = iobufp; 3955 } 3956 if (!inserted) 3957 list_add_tail(&lpfc_cmd->list, cbuf); 3958 } 3959 return cnt; 3960 } 3961 3962 int 3963 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 3964 { 3965 struct lpfc_sli4_hdw_queue *qp; 3966 struct lpfc_io_buf *lpfc_cmd; 3967 int idx, cnt; 3968 3969 qp = phba->sli4_hba.hdwq; 3970 cnt = 0; 3971 while (!list_empty(cbuf)) { 3972 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 3973 list_remove_head(cbuf, lpfc_cmd, 3974 struct lpfc_io_buf, list); 3975 if (!lpfc_cmd) 3976 return cnt; 3977 cnt++; 3978 qp = &phba->sli4_hba.hdwq[idx]; 3979 lpfc_cmd->hdwq_no = idx; 3980 lpfc_cmd->hdwq = qp; 3981 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; 3982 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 3983 spin_lock(&qp->io_buf_list_put_lock); 3984 list_add_tail(&lpfc_cmd->list, 3985 &qp->lpfc_io_buf_list_put); 3986 qp->put_io_bufs++; 3987 qp->total_io_bufs++; 3988 spin_unlock(&qp->io_buf_list_put_lock); 3989 } 3990 } 3991 return cnt; 3992 } 3993 3994 /** 3995 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 3996 * @phba: pointer to lpfc hba data structure. 3997 * 3998 * This routine first calculates the sizes of the current els and allocated 3999 * scsi sgl lists, and then goes through all sgls to updates the physical 4000 * XRIs assigned due to port function reset. During port initialization, the 4001 * current els and allocated scsi sgl lists are 0s. 
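 * The routine flushes all allocated IO buffers onto a local list, frees any buffers in excess of the new maximum, reassigns XRIs to the remaining buffers, and then redistributes them across the hardware queues.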
4002 * 4003 * Return codes 4004 * 0 - successful, -ENOMEM - failed to allocate an XRI (IO buffers are released) 4005 **/ 4006 int 4007 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) 4008 { 4009 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 4010 uint16_t i, lxri, els_xri_cnt; 4011 uint16_t io_xri_cnt, io_xri_max; 4012 LIST_HEAD(io_sgl_list); 4013 int rc, cnt; 4014 4015 /* 4016 * update on pci function's allocated nvme xri-sgl list 4017 */ 4018 4019 /* maximum number of xris available for nvme buffers */ 4020 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4021 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4022 phba->sli4_hba.io_xri_max = io_xri_max; 4023 4024 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4025 "6074 Current allocated XRI sgl count:%d, " 4026 "maximum XRI count:%d\n", 4027 phba->sli4_hba.io_xri_cnt, 4028 phba->sli4_hba.io_xri_max); 4029 4030 cnt = lpfc_io_buf_flush(phba, &io_sgl_list); 4031 4032 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { 4033 /* max nvme xri shrunk below the allocated nvme buffers */ 4034 io_xri_cnt = phba->sli4_hba.io_xri_cnt - 4035 phba->sli4_hba.io_xri_max; 4036 /* release the extra allocated nvme buffers */ 4037 for (i = 0; i < io_xri_cnt; i++) { 4038 list_remove_head(&io_sgl_list, lpfc_ncmd, 4039 struct lpfc_io_buf, list); 4040 if (lpfc_ncmd) { 4041 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4042 lpfc_ncmd->data, 4043 lpfc_ncmd->dma_handle); 4044 kfree(lpfc_ncmd); 4045 } 4046 } 4047 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; 4048 } 4049 4050 /* update xris associated to remaining allocated nvme buffers */ 4051 lpfc_ncmd = NULL; 4052 lpfc_ncmd_next = NULL; 4053 phba->sli4_hba.io_xri_cnt = cnt; 4054 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 4055 &io_sgl_list, list) { 4056 lxri = lpfc_sli4_next_xritag(phba); 4057 if (lxri == NO_XRI) { 4058 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4059 "6075 Failed to allocate xri for " 4060 "nvme buffer\n"); 4061 rc = -ENOMEM; 4062 goto out_free_mem; 4063 } 4064 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 4065 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4066 } 4067 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); 4068 return 0; 4069 4070 out_free_mem: 4071 lpfc_io_free(phba); 4072 return rc; 4073 } 4074 4075 /** 4076 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec 4077 * @phba: pointer to lpfc hba data structure. 4078 * @num_to_alloc: The requested number of buffers to allocate. 4079 * 4080 * This routine allocates nvme buffers for a device with SLI-4 interface spec; 4081 * the nvme buffer contains all the necessary information needed to initiate 4082 * an I/O. After allocating up to @num_to_alloc IO buffers and putting 4083 * them on a list, it posts them to the port by using SGL block post. 4084 * 4085 * Return codes: 4086 * int - number of IO buffers that were allocated and posted. 4087 * 0 = failure, less than num_to_alloc is a partial failure.
4088 **/ 4089 int 4090 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4091 { 4092 struct lpfc_io_buf *lpfc_ncmd; 4093 struct lpfc_iocbq *pwqeq; 4094 uint16_t iotag, lxri = 0; 4095 int bcnt, num_posted; 4096 LIST_HEAD(prep_nblist); 4097 LIST_HEAD(post_nblist); 4098 LIST_HEAD(nvme_nblist); 4099 4100 /* Sanity check to ensure our sizing is right for both SCSI and NVME */ 4101 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) { 4102 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4103 "6426 Common buffer size %zd exceeds %d\n", 4104 sizeof(struct lpfc_io_buf), 4105 LPFC_COMMON_IO_BUF_SZ); 4106 return 0; 4107 } 4108 4109 phba->sli4_hba.io_xri_cnt = 0; 4110 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4111 lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL); 4112 if (!lpfc_ncmd) 4113 break; 4114 /* 4115 * Get memory from the pci pool to map the virt space to 4116 * pci bus space for an I/O. The DMA buffer includes the 4117 * number of SGE's necessary to support the sg_tablesize. 4118 */ 4119 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4120 GFP_KERNEL, 4121 &lpfc_ncmd->dma_handle); 4122 if (!lpfc_ncmd->data) { 4123 kfree(lpfc_ncmd); 4124 break; 4125 } 4126 4127 /* 4128 * 4K Page alignment is CRITICAL to BlockGuard, double check 4129 * to be sure. 4130 */ 4131 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4132 (((unsigned long)(lpfc_ncmd->data) & 4133 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4134 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4135 "3369 Memory alignment err: addr=%lx\n", 4136 (unsigned long)lpfc_ncmd->data); 4137 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4138 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4139 kfree(lpfc_ncmd); 4140 break; 4141 } 4142 4143 lxri = lpfc_sli4_next_xritag(phba); 4144 if (lxri == NO_XRI) { 4145 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4146 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4147 kfree(lpfc_ncmd); 4148 break; 4149 } 4150 pwqeq = &lpfc_ncmd->cur_iocbq; 4151 4152 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4153 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4154 if (iotag == 0) { 4155 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4156 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4157 kfree(lpfc_ncmd); 4158 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 4159 "6121 Failed to allocate IOTAG for" 4160 " XRI:0x%x\n", lxri); 4161 lpfc_sli4_free_xri(phba, lxri); 4162 break; 4163 } 4164 pwqeq->sli4_lxritag = lxri; 4165 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4166 pwqeq->context1 = lpfc_ncmd; 4167 4168 /* Initialize local short-hand pointers. 
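The dma_sgl and dma_phys_sgl fields alias the virtual and bus addresses of the DMA buffer allocated above.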
*/ 4169 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4170 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4171 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; 4172 spin_lock_init(&lpfc_ncmd->buf_lock); 4173 4174 /* add the nvme buffer to a post list */ 4175 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4176 phba->sli4_hba.io_xri_cnt++; 4177 } 4178 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4179 "6114 Allocate %d out of %d requested new NVME " 4180 "buffers\n", bcnt, num_to_alloc); 4181 4182 /* post the list of nvme buffer sgls to port if available */ 4183 if (!list_empty(&post_nblist)) 4184 num_posted = lpfc_sli4_post_io_sgl_list( 4185 phba, &post_nblist, bcnt); 4186 else 4187 num_posted = 0; 4188 4189 return num_posted; 4190 } 4191 4192 static uint64_t 4193 lpfc_get_wwpn(struct lpfc_hba *phba) 4194 { 4195 uint64_t wwn; 4196 int rc; 4197 LPFC_MBOXQ_t *mboxq; 4198 MAILBOX_t *mb; 4199 4200 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4201 GFP_KERNEL); 4202 if (!mboxq) 4203 return (uint64_t)-1; 4204 4205 /* First get WWN of HBA instance */ 4206 lpfc_read_nv(phba, mboxq); 4207 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4208 if (rc != MBX_SUCCESS) { 4209 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4210 "6019 Mailbox failed , mbxCmd x%x " 4211 "READ_NV, mbxStatus x%x\n", 4212 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4213 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4214 mempool_free(mboxq, phba->mbox_mem_pool); 4215 return (uint64_t) -1; 4216 } 4217 mb = &mboxq->u.mb; 4218 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4219 /* wwn is WWPN of HBA instance */ 4220 mempool_free(mboxq, phba->mbox_mem_pool); 4221 if (phba->sli_rev == LPFC_SLI_REV4) 4222 return be64_to_cpu(wwn); 4223 else 4224 return rol64(wwn, 32); 4225 } 4226 4227 /** 4228 * lpfc_create_port - Create an FC port 4229 * @phba: pointer to lpfc hba data structure. 4230 * @instance: a unique integer ID to this FC port. 4231 * @dev: pointer to the device data structure. 4232 * 4233 * This routine creates a FC port for the upper layer protocol. The FC port 4234 * can be created on top of either a physical port or a virtual port provided 4235 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4236 * and associates the FC port created before adding the shost into the SCSI 4237 * layer. 4238 * 4239 * Return codes 4240 * @vport - pointer to the virtual N_Port data structure. 4241 * NULL - port create failed. 
4242 **/ 4243 struct lpfc_vport * 4244 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4245 { 4246 struct lpfc_vport *vport; 4247 struct Scsi_Host *shost = NULL; 4248 int error = 0; 4249 int i; 4250 uint64_t wwn; 4251 bool use_no_reset_hba = false; 4252 int rc; 4253 4254 if (lpfc_no_hba_reset_cnt) { 4255 if (phba->sli_rev < LPFC_SLI_REV4 && 4256 dev == &phba->pcidev->dev) { 4257 /* Reset the port first */ 4258 lpfc_sli_brdrestart(phba); 4259 rc = lpfc_sli_chipset_init(phba); 4260 if (rc) 4261 return NULL; 4262 } 4263 wwn = lpfc_get_wwpn(phba); 4264 } 4265 4266 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4267 if (wwn == lpfc_no_hba_reset[i]) { 4268 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4269 "6020 Setting use_no_reset port=%llx\n", 4270 wwn); 4271 use_no_reset_hba = true; 4272 break; 4273 } 4274 } 4275 4276 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4277 if (dev != &phba->pcidev->dev) { 4278 shost = scsi_host_alloc(&lpfc_vport_template, 4279 sizeof(struct lpfc_vport)); 4280 } else { 4281 if (!use_no_reset_hba) 4282 shost = scsi_host_alloc(&lpfc_template, 4283 sizeof(struct lpfc_vport)); 4284 else 4285 shost = scsi_host_alloc(&lpfc_template_no_hr, 4286 sizeof(struct lpfc_vport)); 4287 } 4288 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 4289 shost = scsi_host_alloc(&lpfc_template_nvme, 4290 sizeof(struct lpfc_vport)); 4291 } 4292 if (!shost) 4293 goto out; 4294 4295 vport = (struct lpfc_vport *) shost->hostdata; 4296 vport->phba = phba; 4297 vport->load_flag |= FC_LOADING; 4298 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4299 vport->fc_rscn_flush = 0; 4300 lpfc_get_vport_cfgparam(vport); 4301 4302 /* Adjust value in vport */ 4303 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4304 4305 shost->unique_id = instance; 4306 shost->max_id = LPFC_MAX_TARGET; 4307 shost->max_lun = vport->cfg_max_luns; 4308 shost->this_id = -1; 4309 shost->max_cmd_len = 16; 4310 4311 if (phba->sli_rev == LPFC_SLI_REV4) { 4312 if (!phba->cfg_fcp_mq_threshold || 4313 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4314 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4315 4316 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4317 phba->cfg_fcp_mq_threshold); 4318 4319 shost->dma_boundary = 4320 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4321 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4322 } else 4323 /* SLI-3 has a limited number of hardware queues (3), 4324 * thus there is only one for FCP processing. 4325 */ 4326 shost->nr_hw_queues = 1; 4327 4328 /* 4329 * Set initial can_queue value since 0 is no longer supported and 4330 * scsi_add_host will fail. This will be adjusted later based on the 4331 * max xri value determined in hba setup. 4332 */ 4333 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4334 if (dev != &phba->pcidev->dev) { 4335 shost->transportt = lpfc_vport_transport_template; 4336 vport->port_type = LPFC_NPIV_PORT; 4337 } else { 4338 shost->transportt = lpfc_transport_template; 4339 vport->port_type = LPFC_PHYSICAL_PORT; 4340 } 4341 4342 /* Initialize all internally managed lists. 
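This covers the node and receive buffer lists as well as the discovery, ELS and delayed-discovery timers set up below.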
*/ 4343 INIT_LIST_HEAD(&vport->fc_nodes); 4344 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4345 spin_lock_init(&vport->work_port_lock); 4346 4347 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4348 4349 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4350 4351 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4352 4353 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4354 lpfc_setup_bg(phba, shost); 4355 4356 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4357 if (error) 4358 goto out_put_shost; 4359 4360 spin_lock_irq(&phba->port_list_lock); 4361 list_add_tail(&vport->listentry, &phba->port_list); 4362 spin_unlock_irq(&phba->port_list_lock); 4363 return vport; 4364 4365 out_put_shost: 4366 scsi_host_put(shost); 4367 out: 4368 return NULL; 4369 } 4370 4371 /** 4372 * destroy_port - destroy an FC port 4373 * @vport: pointer to an lpfc virtual N_Port data structure. 4374 * 4375 * This routine destroys a FC port from the upper layer protocol. All the 4376 * resources associated with the port are released. 4377 **/ 4378 void 4379 destroy_port(struct lpfc_vport *vport) 4380 { 4381 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4382 struct lpfc_hba *phba = vport->phba; 4383 4384 lpfc_debugfs_terminate(vport); 4385 fc_remove_host(shost); 4386 scsi_remove_host(shost); 4387 4388 spin_lock_irq(&phba->port_list_lock); 4389 list_del_init(&vport->listentry); 4390 spin_unlock_irq(&phba->port_list_lock); 4391 4392 lpfc_cleanup(vport); 4393 return; 4394 } 4395 4396 /** 4397 * lpfc_get_instance - Get a unique integer ID 4398 * 4399 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4400 * uses the kernel idr facility to perform the task. 4401 * 4402 * Return codes: 4403 * instance - a unique integer ID allocated as the new instance. 4404 * -1 - lpfc get instance failed. 4405 **/ 4406 int 4407 lpfc_get_instance(void) 4408 { 4409 int ret; 4410 4411 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4412 return ret < 0 ? -1 : ret; 4413 } 4414 4415 /** 4416 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4417 * @shost: pointer to SCSI host data structure. 4418 * @time: elapsed time of the scan in jiffies. 4419 * 4420 * This routine is called by the SCSI layer with a SCSI host to determine 4421 * whether the scan host is finished. 4422 * 4423 * Note: there is no scan_start function as adapter initialization will have 4424 * asynchronously kicked off the link initialization. 4425 * 4426 * Return codes 4427 * 0 - SCSI host scan is not over yet. 4428 * 1 - SCSI host scan is over. 4429 **/ 4430 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4431 { 4432 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4433 struct lpfc_hba *phba = vport->phba; 4434 int stat = 0; 4435 4436 spin_lock_irq(shost->host_lock); 4437 4438 if (vport->load_flag & FC_UNLOADING) { 4439 stat = 1; 4440 goto finished; 4441 } 4442 if (time >= msecs_to_jiffies(30 * 1000)) { 4443 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4444 "0461 Scanning longer than 30 " 4445 "seconds. Continuing initialization\n"); 4446 stat = 1; 4447 goto finished; 4448 } 4449 if (time >= msecs_to_jiffies(15 * 1000) && 4450 phba->link_state <= LPFC_LINK_DOWN) { 4451 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4452 "0465 Link down longer than 15 " 4453 "seconds. 
Continuing initialization\n"); 4454 stat = 1; 4455 goto finished; 4456 } 4457 4458 if (vport->port_state != LPFC_VPORT_READY) 4459 goto finished; 4460 if (vport->num_disc_nodes || vport->fc_prli_sent) 4461 goto finished; 4462 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 4463 goto finished; 4464 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4465 goto finished; 4466 4467 stat = 1; 4468 4469 finished: 4470 spin_unlock_irq(shost->host_lock); 4471 return stat; 4472 } 4473 4474 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4475 { 4476 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4477 struct lpfc_hba *phba = vport->phba; 4478 4479 fc_host_supported_speeds(shost) = 0; 4480 if (phba->lmt & LMT_128Gb) 4481 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4482 if (phba->lmt & LMT_64Gb) 4483 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4484 if (phba->lmt & LMT_32Gb) 4485 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4486 if (phba->lmt & LMT_16Gb) 4487 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4488 if (phba->lmt & LMT_10Gb) 4489 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4490 if (phba->lmt & LMT_8Gb) 4491 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4492 if (phba->lmt & LMT_4Gb) 4493 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4494 if (phba->lmt & LMT_2Gb) 4495 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 4496 if (phba->lmt & LMT_1Gb) 4497 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 4498 } 4499 4500 /** 4501 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 4502 * @shost: pointer to SCSI host data structure. 4503 * 4504 * This routine initializes a given SCSI host attributes on a FC port. The 4505 * SCSI host can be either on top of a physical port or a virtual port. 4506 **/ 4507 void lpfc_host_attrib_init(struct Scsi_Host *shost) 4508 { 4509 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4510 struct lpfc_hba *phba = vport->phba; 4511 /* 4512 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 4513 */ 4514 4515 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4516 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4517 fc_host_supported_classes(shost) = FC_COS_CLASS3; 4518 4519 memset(fc_host_supported_fc4s(shost), 0, 4520 sizeof(fc_host_supported_fc4s(shost))); 4521 fc_host_supported_fc4s(shost)[2] = 1; 4522 fc_host_supported_fc4s(shost)[7] = 1; 4523 4524 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 4525 sizeof fc_host_symbolic_name(shost)); 4526 4527 lpfc_host_supported_speeds_set(shost); 4528 4529 fc_host_maxframe_size(shost) = 4530 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 4531 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 4532 4533 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 4534 4535 /* This value is also unchanging */ 4536 memset(fc_host_active_fc4s(shost), 0, 4537 sizeof(fc_host_active_fc4s(shost))); 4538 fc_host_active_fc4s(shost)[2] = 1; 4539 fc_host_active_fc4s(shost)[7] = 1; 4540 4541 fc_host_max_npiv_vports(shost) = phba->max_vpi; 4542 spin_lock_irq(shost->host_lock); 4543 vport->load_flag &= ~FC_LOADING; 4544 spin_unlock_irq(shost->host_lock); 4545 } 4546 4547 /** 4548 * lpfc_stop_port_s3 - Stop SLI3 device port 4549 * @phba: pointer to lpfc hba data structure. 
4550 * 4551 * This routine is invoked to stop an SLI3 device port. It stops the device 4552 * from generating interrupts and stops the device driver's timers for the 4553 * device. 4554 **/ 4555 static void 4556 lpfc_stop_port_s3(struct lpfc_hba *phba) 4557 { 4558 /* Clear all interrupt enable conditions */ 4559 writel(0, phba->HCregaddr); 4560 readl(phba->HCregaddr); /* flush */ 4561 /* Clear all pending interrupts */ 4562 writel(0xffffffff, phba->HAregaddr); 4563 readl(phba->HAregaddr); /* flush */ 4564 4565 /* Reset some HBA SLI setup states */ 4566 lpfc_stop_hba_timers(phba); 4567 phba->pport->work_port_events = 0; 4568 } 4569 4570 /** 4571 * lpfc_stop_port_s4 - Stop SLI4 device port 4572 * @phba: pointer to lpfc hba data structure. 4573 * 4574 * This routine is invoked to stop an SLI4 device port. It stops the device 4575 * from generating interrupts and stops the device driver's timers for the 4576 * device. 4577 **/ 4578 static void 4579 lpfc_stop_port_s4(struct lpfc_hba *phba) 4580 { 4581 /* Reset some HBA SLI4 setup states */ 4582 lpfc_stop_hba_timers(phba); 4583 if (phba->pport) 4584 phba->pport->work_port_events = 0; 4585 phba->sli4_hba.intr_enable = 0; 4586 } 4587 4588 /** 4589 * lpfc_stop_port - Wrapper function for stopping hba port 4590 * @phba: Pointer to HBA context object. 4591 * 4592 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 4593 * the API jump table function pointer from the lpfc_hba struct. 4594 **/ 4595 void 4596 lpfc_stop_port(struct lpfc_hba *phba) 4597 { 4598 phba->lpfc_stop_port(phba); 4599 4600 if (phba->wq) 4601 flush_workqueue(phba->wq); 4602 } 4603 4604 /** 4605 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 4606 * @phba: Pointer to hba for which this call is being executed. 4607 * 4608 * This routine starts the timer waiting for the FCF rediscovery to complete. 4609 **/ 4610 void 4611 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 4612 { 4613 unsigned long fcf_redisc_wait_tmo = 4614 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 4615 /* Start fcf rediscovery wait period timer */ 4616 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 4617 spin_lock_irq(&phba->hbalock); 4618 /* Allow action to new fcf asynchronous event */ 4619 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 4620 /* Mark the FCF rediscovery pending state */ 4621 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 4622 spin_unlock_irq(&phba->hbalock); 4623 } 4624 4625 /** 4626 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 4627 * @t: pointer to the timer_list structure embedded in the lpfc_hba fcf field. 4628 * 4629 * This routine is invoked when the wait for FCF table rediscovery has 4630 * timed out. If new FCF records have been discovered during the wait 4631 * period, a new FCF event is added to the FCoE async event list and the 4632 * worker thread is woken up to process the event from the 4633 * worker thread context.
4634 **/ 4635 static void 4636 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4637 { 4638 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4639 4640 /* Don't send FCF rediscovery event if timer cancelled */ 4641 spin_lock_irq(&phba->hbalock); 4642 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 4643 spin_unlock_irq(&phba->hbalock); 4644 return; 4645 } 4646 /* Clear FCF rediscovery timer pending flag */ 4647 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 4648 /* FCF rediscovery event to worker thread */ 4649 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 4650 spin_unlock_irq(&phba->hbalock); 4651 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 4652 "2776 FCF rediscover quiescent timer expired\n"); 4653 /* wake up worker thread */ 4654 lpfc_worker_wake_up(phba); 4655 } 4656 4657 /** 4658 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 4659 * @phba: pointer to lpfc hba data structure. 4660 * @acqe_link: pointer to the async link completion queue entry. 4661 * 4662 * This routine is to parse the SLI4 link-attention link fault code. 4663 **/ 4664 static void 4665 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4666 struct lpfc_acqe_link *acqe_link) 4667 { 4668 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4669 case LPFC_ASYNC_LINK_FAULT_NONE: 4670 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4671 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4672 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 4673 break; 4674 default: 4675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4676 "0398 Unknown link fault code: x%x\n", 4677 bf_get(lpfc_acqe_link_fault, acqe_link)); 4678 break; 4679 } 4680 } 4681 4682 /** 4683 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 4684 * @phba: pointer to lpfc hba data structure. 4685 * @acqe_link: pointer to the async link completion queue entry. 4686 * 4687 * This routine is to parse the SLI4 link attention type and translate it 4688 * into the base driver's link attention type coding. 4689 * 4690 * Return: Link attention type in terms of base driver's coding. 4691 **/ 4692 static uint8_t 4693 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 4694 struct lpfc_acqe_link *acqe_link) 4695 { 4696 uint8_t att_type; 4697 4698 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 4699 case LPFC_ASYNC_LINK_STATUS_DOWN: 4700 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 4701 att_type = LPFC_ATT_LINK_DOWN; 4702 break; 4703 case LPFC_ASYNC_LINK_STATUS_UP: 4704 /* Ignore physical link up events - wait for logical link up */ 4705 att_type = LPFC_ATT_RESERVED; 4706 break; 4707 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 4708 att_type = LPFC_ATT_LINK_UP; 4709 break; 4710 default: 4711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4712 "0399 Invalid link attention type: x%x\n", 4713 bf_get(lpfc_acqe_link_status, acqe_link)); 4714 att_type = LPFC_ATT_RESERVED; 4715 break; 4716 } 4717 return att_type; 4718 } 4719 4720 /** 4721 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 4722 * @phba: pointer to lpfc hba data structure. 4723 * 4724 * This routine is to get an SLI3 FC port's link speed in Mbps. 4725 * 4726 * Return: link speed in terms of Mbps. 
4727 **/ 4728 uint32_t 4729 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 4730 { 4731 uint32_t link_speed; 4732 4733 if (!lpfc_is_link_up(phba)) 4734 return 0; 4735 4736 if (phba->sli_rev <= LPFC_SLI_REV3) { 4737 switch (phba->fc_linkspeed) { 4738 case LPFC_LINK_SPEED_1GHZ: 4739 link_speed = 1000; 4740 break; 4741 case LPFC_LINK_SPEED_2GHZ: 4742 link_speed = 2000; 4743 break; 4744 case LPFC_LINK_SPEED_4GHZ: 4745 link_speed = 4000; 4746 break; 4747 case LPFC_LINK_SPEED_8GHZ: 4748 link_speed = 8000; 4749 break; 4750 case LPFC_LINK_SPEED_10GHZ: 4751 link_speed = 10000; 4752 break; 4753 case LPFC_LINK_SPEED_16GHZ: 4754 link_speed = 16000; 4755 break; 4756 default: 4757 link_speed = 0; 4758 } 4759 } else { 4760 if (phba->sli4_hba.link_state.logical_speed) 4761 link_speed = 4762 phba->sli4_hba.link_state.logical_speed; 4763 else 4764 link_speed = phba->sli4_hba.link_state.speed; 4765 } 4766 return link_speed; 4767 } 4768 4769 /** 4770 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 4771 * @phba: pointer to lpfc hba data structure. 4772 * @evt_code: asynchronous event code. 4773 * @speed_code: asynchronous event link speed code. 4774 * 4775 * This routine is to parse the giving SLI4 async event link speed code into 4776 * value of Mbps for the link speed. 4777 * 4778 * Return: link speed in terms of Mbps. 4779 **/ 4780 static uint32_t 4781 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 4782 uint8_t speed_code) 4783 { 4784 uint32_t port_speed; 4785 4786 switch (evt_code) { 4787 case LPFC_TRAILER_CODE_LINK: 4788 switch (speed_code) { 4789 case LPFC_ASYNC_LINK_SPEED_ZERO: 4790 port_speed = 0; 4791 break; 4792 case LPFC_ASYNC_LINK_SPEED_10MBPS: 4793 port_speed = 10; 4794 break; 4795 case LPFC_ASYNC_LINK_SPEED_100MBPS: 4796 port_speed = 100; 4797 break; 4798 case LPFC_ASYNC_LINK_SPEED_1GBPS: 4799 port_speed = 1000; 4800 break; 4801 case LPFC_ASYNC_LINK_SPEED_10GBPS: 4802 port_speed = 10000; 4803 break; 4804 case LPFC_ASYNC_LINK_SPEED_20GBPS: 4805 port_speed = 20000; 4806 break; 4807 case LPFC_ASYNC_LINK_SPEED_25GBPS: 4808 port_speed = 25000; 4809 break; 4810 case LPFC_ASYNC_LINK_SPEED_40GBPS: 4811 port_speed = 40000; 4812 break; 4813 default: 4814 port_speed = 0; 4815 } 4816 break; 4817 case LPFC_TRAILER_CODE_FC: 4818 switch (speed_code) { 4819 case LPFC_FC_LA_SPEED_UNKNOWN: 4820 port_speed = 0; 4821 break; 4822 case LPFC_FC_LA_SPEED_1G: 4823 port_speed = 1000; 4824 break; 4825 case LPFC_FC_LA_SPEED_2G: 4826 port_speed = 2000; 4827 break; 4828 case LPFC_FC_LA_SPEED_4G: 4829 port_speed = 4000; 4830 break; 4831 case LPFC_FC_LA_SPEED_8G: 4832 port_speed = 8000; 4833 break; 4834 case LPFC_FC_LA_SPEED_10G: 4835 port_speed = 10000; 4836 break; 4837 case LPFC_FC_LA_SPEED_16G: 4838 port_speed = 16000; 4839 break; 4840 case LPFC_FC_LA_SPEED_32G: 4841 port_speed = 32000; 4842 break; 4843 case LPFC_FC_LA_SPEED_64G: 4844 port_speed = 64000; 4845 break; 4846 case LPFC_FC_LA_SPEED_128G: 4847 port_speed = 128000; 4848 break; 4849 default: 4850 port_speed = 0; 4851 } 4852 break; 4853 default: 4854 port_speed = 0; 4855 } 4856 return port_speed; 4857 } 4858 4859 /** 4860 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 4861 * @phba: pointer to lpfc hba data structure. 4862 * @acqe_link: pointer to the async link completion queue entry. 4863 * 4864 * This routine is to handle the SLI4 asynchronous FCoE link event. 
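 * In FC mode the READ_TOPOLOGY mailbox command is actually issued to the port; in FCoE mode the topology fields are filled in from the ACQE and the read_topology completion handler is invoked directly.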
4865 **/ 4866 static void 4867 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 4868 struct lpfc_acqe_link *acqe_link) 4869 { 4870 struct lpfc_dmabuf *mp; 4871 LPFC_MBOXQ_t *pmb; 4872 MAILBOX_t *mb; 4873 struct lpfc_mbx_read_top *la; 4874 uint8_t att_type; 4875 int rc; 4876 4877 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 4878 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 4879 return; 4880 phba->fcoe_eventtag = acqe_link->event_tag; 4881 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4882 if (!pmb) { 4883 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4884 "0395 The mboxq allocation failed\n"); 4885 return; 4886 } 4887 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4888 if (!mp) { 4889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4890 "0396 The lpfc_dmabuf allocation failed\n"); 4891 goto out_free_pmb; 4892 } 4893 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4894 if (!mp->virt) { 4895 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4896 "0397 The mbuf allocation failed\n"); 4897 goto out_free_dmabuf; 4898 } 4899 4900 /* Cleanup any outstanding ELS commands */ 4901 lpfc_els_flush_all_cmd(phba); 4902 4903 /* Block ELS IOCBs until we have done process link event */ 4904 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 4905 4906 /* Update link event statistics */ 4907 phba->sli.slistat.link_event++; 4908 4909 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4910 lpfc_read_topology(phba, pmb, mp); 4911 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4912 pmb->vport = phba->pport; 4913 4914 /* Keep the link status for extra SLI4 state machine reference */ 4915 phba->sli4_hba.link_state.speed = 4916 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 4917 bf_get(lpfc_acqe_link_speed, acqe_link)); 4918 phba->sli4_hba.link_state.duplex = 4919 bf_get(lpfc_acqe_link_duplex, acqe_link); 4920 phba->sli4_hba.link_state.status = 4921 bf_get(lpfc_acqe_link_status, acqe_link); 4922 phba->sli4_hba.link_state.type = 4923 bf_get(lpfc_acqe_link_type, acqe_link); 4924 phba->sli4_hba.link_state.number = 4925 bf_get(lpfc_acqe_link_number, acqe_link); 4926 phba->sli4_hba.link_state.fault = 4927 bf_get(lpfc_acqe_link_fault, acqe_link); 4928 phba->sli4_hba.link_state.logical_speed = 4929 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 4930 4931 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4932 "2900 Async FC/FCoE Link event - Speed:%dGBit " 4933 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 4934 "Logical speed:%dMbps Fault:%d\n", 4935 phba->sli4_hba.link_state.speed, 4936 phba->sli4_hba.link_state.topology, 4937 phba->sli4_hba.link_state.status, 4938 phba->sli4_hba.link_state.type, 4939 phba->sli4_hba.link_state.number, 4940 phba->sli4_hba.link_state.logical_speed, 4941 phba->sli4_hba.link_state.fault); 4942 /* 4943 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 4944 * topology info. Note: Optional for non FC-AL ports. 4945 */ 4946 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4947 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4948 if (rc == MBX_NOT_FINISHED) 4949 goto out_free_dmabuf; 4950 return; 4951 } 4952 /* 4953 * For FCoE Mode: fill in all the topology information we need and call 4954 * the READ_TOPOLOGY completion routine to continue without actually 4955 * sending the READ_TOPOLOGY mailbox command to the port. 
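 * The attention type and link speed parsed from the ACQE are copied into the mailbox so the completion handler sees the same layout as a real READ_TOPOLOGY.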
4956 */ 4957 /* Initialize completion status */ 4958 mb = &pmb->u.mb; 4959 mb->mbxStatus = MBX_SUCCESS; 4960 4961 /* Parse port fault information field */ 4962 lpfc_sli4_parse_latt_fault(phba, acqe_link); 4963 4964 /* Parse and translate link attention fields */ 4965 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 4966 la->eventTag = acqe_link->event_tag; 4967 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 4968 bf_set(lpfc_mbx_read_top_link_spd, la, 4969 (bf_get(lpfc_acqe_link_speed, acqe_link))); 4970 4971 /* Fake the following irrelevant fields */ 4972 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 4973 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 4974 bf_set(lpfc_mbx_read_top_il, la, 0); 4975 bf_set(lpfc_mbx_read_top_pb, la, 0); 4976 bf_set(lpfc_mbx_read_top_fa, la, 0); 4977 bf_set(lpfc_mbx_read_top_mm, la, 0); 4978 4979 /* Invoke the lpfc_handle_latt mailbox command callback function */ 4980 lpfc_mbx_cmpl_read_topology(phba, pmb); 4981 4982 return; 4983 4984 out_free_dmabuf: 4985 kfree(mp); 4986 out_free_pmb: 4987 mempool_free(pmb, phba->mbox_mem_pool); 4988 } 4989 4990 /** 4991 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 4992 * topology. 4993 * @phba: pointer to lpfc hba data structure. 4994 * @speed_code: asynchronous event link speed code. 4995 * 4996 * This routine is to parse the given SLI4 async event link speed code into 4997 * the corresponding value of Read topology link speed. 4998 * 4999 * 5000 * Return: link speed in terms of Read topology. 5001 **/ 5002 static uint8_t 5003 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5004 { 5005 uint8_t port_speed; 5006 5007 switch (speed_code) { 5008 case LPFC_FC_LA_SPEED_1G: 5009 port_speed = LPFC_LINK_SPEED_1GHZ; 5010 break; 5011 case LPFC_FC_LA_SPEED_2G: 5012 port_speed = LPFC_LINK_SPEED_2GHZ; 5013 break; 5014 case LPFC_FC_LA_SPEED_4G: 5015 port_speed = LPFC_LINK_SPEED_4GHZ; 5016 break; 5017 case LPFC_FC_LA_SPEED_8G: 5018 port_speed = LPFC_LINK_SPEED_8GHZ; 5019 break; 5020 case LPFC_FC_LA_SPEED_16G: 5021 port_speed = LPFC_LINK_SPEED_16GHZ; 5022 break; 5023 case LPFC_FC_LA_SPEED_32G: 5024 port_speed = LPFC_LINK_SPEED_32GHZ; 5025 break; 5026 case LPFC_FC_LA_SPEED_64G: 5027 port_speed = LPFC_LINK_SPEED_64GHZ; 5028 break; 5029 case LPFC_FC_LA_SPEED_128G: 5030 port_speed = LPFC_LINK_SPEED_128GHZ; 5031 break; 5032 case LPFC_FC_LA_SPEED_256G: 5033 port_speed = LPFC_LINK_SPEED_256GHZ; 5034 break; 5035 default: 5036 port_speed = 0; 5037 break; 5038 } 5039 5040 return port_speed; 5041 } 5042 5043 #define trunk_link_status(__idx)\ 5044 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 5045 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 5046 "Link up" : "Link down") : "NA" 5047 /* Did port __idx report an error */ 5048 #define trunk_port_fault(__idx)\ 5049 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 5050 (port_fault & (1 << __idx) ?
"YES" : "NO") : "NA" 5051 5052 static void 5053 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 5054 struct lpfc_acqe_fc_la *acqe_fc) 5055 { 5056 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 5057 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 5058 5059 phba->sli4_hba.link_state.speed = 5060 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5061 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5062 5063 phba->sli4_hba.link_state.logical_speed = 5064 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5065 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 5066 phba->fc_linkspeed = 5067 lpfc_async_link_speed_to_read_top( 5068 phba, 5069 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5070 5071 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 5072 phba->trunk_link.link0.state = 5073 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 5074 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5075 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 5076 } 5077 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 5078 phba->trunk_link.link1.state = 5079 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 5080 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5081 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 5082 } 5083 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 5084 phba->trunk_link.link2.state = 5085 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 5086 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5087 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 5088 } 5089 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 5090 phba->trunk_link.link3.state = 5091 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 5092 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 5093 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 5094 } 5095 5096 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5097 "2910 Async FC Trunking Event - Speed:%d\n" 5098 "\tLogical speed:%d " 5099 "port0: %s port1: %s port2: %s port3: %s\n", 5100 phba->sli4_hba.link_state.speed, 5101 phba->sli4_hba.link_state.logical_speed, 5102 trunk_link_status(0), trunk_link_status(1), 5103 trunk_link_status(2), trunk_link_status(3)); 5104 5105 if (port_fault) 5106 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5107 "3202 trunk error:0x%x (%s) seen on port0:%s " 5108 /* 5109 * SLI-4: We have only 0xA error codes 5110 * defined as of now. print an appropriate 5111 * message in case driver needs to be updated. 5112 */ 5113 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 5114 "UNDEFINED. update driver." : trunk_errmsg[err], 5115 trunk_port_fault(0), trunk_port_fault(1), 5116 trunk_port_fault(2), trunk_port_fault(3)); 5117 } 5118 5119 5120 /** 5121 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 5122 * @phba: pointer to lpfc hba data structure. 5123 * @acqe_fc: pointer to the async fc completion queue entry. 5124 * 5125 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 5126 * that the event was received and then issue a read_topology mailbox command so 5127 * that the rest of the driver will treat it the same as SLI3. 
5128 **/ 5129 static void 5130 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 5131 { 5132 struct lpfc_dmabuf *mp; 5133 LPFC_MBOXQ_t *pmb; 5134 MAILBOX_t *mb; 5135 struct lpfc_mbx_read_top *la; 5136 int rc; 5137 5138 if (bf_get(lpfc_trailer_type, acqe_fc) != 5139 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 5140 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5141 "2895 Non FC link Event detected.(%d)\n", 5142 bf_get(lpfc_trailer_type, acqe_fc)); 5143 return; 5144 } 5145 5146 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5147 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 5148 lpfc_update_trunk_link_status(phba, acqe_fc); 5149 return; 5150 } 5151 5152 /* Keep the link status for extra SLI4 state machine reference */ 5153 phba->sli4_hba.link_state.speed = 5154 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 5155 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 5156 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 5157 phba->sli4_hba.link_state.topology = 5158 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 5159 phba->sli4_hba.link_state.status = 5160 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 5161 phba->sli4_hba.link_state.type = 5162 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 5163 phba->sli4_hba.link_state.number = 5164 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 5165 phba->sli4_hba.link_state.fault = 5166 bf_get(lpfc_acqe_link_fault, acqe_fc); 5167 5168 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 5169 LPFC_FC_LA_TYPE_LINK_DOWN) 5170 phba->sli4_hba.link_state.logical_speed = 0; 5171 else if (!phba->sli4_hba.conf_trunk) 5172 phba->sli4_hba.link_state.logical_speed = 5173 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 5174 5175 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5176 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 5177 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 5178 "%dMbps Fault:%d\n", 5179 phba->sli4_hba.link_state.speed, 5180 phba->sli4_hba.link_state.topology, 5181 phba->sli4_hba.link_state.status, 5182 phba->sli4_hba.link_state.type, 5183 phba->sli4_hba.link_state.number, 5184 phba->sli4_hba.link_state.logical_speed, 5185 phba->sli4_hba.link_state.fault); 5186 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5187 if (!pmb) { 5188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5189 "2897 The mboxq allocation failed\n"); 5190 return; 5191 } 5192 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5193 if (!mp) { 5194 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5195 "2898 The lpfc_dmabuf allocation failed\n"); 5196 goto out_free_pmb; 5197 } 5198 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 5199 if (!mp->virt) { 5200 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5201 "2899 The mbuf allocation failed\n"); 5202 goto out_free_dmabuf; 5203 } 5204 5205 /* Cleanup any outstanding ELS commands */ 5206 lpfc_els_flush_all_cmd(phba); 5207 5208 /* Block ELS IOCBs until we have done process link event */ 5209 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5210 5211 /* Update link event statistics */ 5212 phba->sli.slistat.link_event++; 5213 5214 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5215 lpfc_read_topology(phba, pmb, mp); 5216 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5217 pmb->vport = phba->pport; 5218 5219 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 5220 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 5221 5222 switch (phba->sli4_hba.link_state.status) { 5223 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 5224 phba->link_flag |= LS_MDS_LINK_DOWN; 5225 break; 5226 case 
LPFC_FC_LA_TYPE_MDS_LOOPBACK: 5227 phba->link_flag |= LS_MDS_LOOPBACK; 5228 break; 5229 default: 5230 break; 5231 } 5232 5233 /* Initialize completion status */ 5234 mb = &pmb->u.mb; 5235 mb->mbxStatus = MBX_SUCCESS; 5236 5237 /* Parse port fault information field */ 5238 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 5239 5240 /* Parse and translate link attention fields */ 5241 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 5242 la->eventTag = acqe_fc->event_tag; 5243 5244 if (phba->sli4_hba.link_state.status == 5245 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 5246 bf_set(lpfc_mbx_read_top_att_type, la, 5247 LPFC_FC_LA_TYPE_UNEXP_WWPN); 5248 } else { 5249 bf_set(lpfc_mbx_read_top_att_type, la, 5250 LPFC_FC_LA_TYPE_LINK_DOWN); 5251 } 5252 /* Invoke the mailbox command callback function */ 5253 lpfc_mbx_cmpl_read_topology(phba, pmb); 5254 5255 return; 5256 } 5257 5258 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5259 if (rc == MBX_NOT_FINISHED) 5260 goto out_free_dmabuf; 5261 return; 5262 5263 out_free_dmabuf: 5264 kfree(mp); 5265 out_free_pmb: 5266 mempool_free(pmb, phba->mbox_mem_pool); 5267 } 5268 5269 /** 5270 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 5271 * @phba: pointer to lpfc hba data structure. 5272 * @acqe_fc: pointer to the async SLI completion queue entry. 5273 * 5274 * This routine is to handle the SLI4 asynchronous SLI events. 5275 **/ 5276 static void 5277 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 5278 { 5279 char port_name; 5280 char message[128]; 5281 uint8_t status; 5282 uint8_t evt_type; 5283 uint8_t operational = 0; 5284 struct temp_event temp_event_data; 5285 struct lpfc_acqe_misconfigured_event *misconfigured; 5286 struct Scsi_Host *shost; 5287 struct lpfc_vport **vports; 5288 int rc, i; 5289 5290 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 5291 5292 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5293 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 5294 "x%08x SLI Event Type:%d\n", 5295 acqe_sli->event_data1, acqe_sli->event_data2, 5296 evt_type); 5297 5298 port_name = phba->Port[0]; 5299 if (port_name == 0x00) 5300 port_name = '?'; /* get port name is empty */ 5301 5302 switch (evt_type) { 5303 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 5304 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5305 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 5306 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5307 5308 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5309 "3190 Over Temperature:%d Celsius- Port Name %c\n", 5310 acqe_sli->event_data1, port_name); 5311 5312 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 5313 shost = lpfc_shost_from_vport(phba->pport); 5314 fc_host_post_vendor_event(shost, fc_get_event_number(), 5315 sizeof(temp_event_data), 5316 (char *)&temp_event_data, 5317 SCSI_NL_VID_TYPE_PCI 5318 | PCI_VENDOR_ID_EMULEX); 5319 break; 5320 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 5321 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 5322 temp_event_data.event_code = LPFC_NORMAL_TEMP; 5323 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 5324 5325 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5326 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 5327 acqe_sli->event_data1, port_name); 5328 5329 shost = lpfc_shost_from_vport(phba->pport); 5330 fc_host_post_vendor_event(shost, fc_get_event_number(), 5331 sizeof(temp_event_data), 5332 (char *)&temp_event_data, 5333 SCSI_NL_VID_TYPE_PCI 5334 | PCI_VENDOR_ID_EMULEX); 5335 break; 5336 case 
LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 5337 misconfigured = (struct lpfc_acqe_misconfigured_event *) 5338 &acqe_sli->event_data1; 5339 5340 /* fetch the status for this port */ 5341 switch (phba->sli4_hba.lnk_info.lnk_no) { 5342 case LPFC_LINK_NUMBER_0: 5343 status = bf_get(lpfc_sli_misconfigured_port0_state, 5344 &misconfigured->theEvent); 5345 operational = bf_get(lpfc_sli_misconfigured_port0_op, 5346 &misconfigured->theEvent); 5347 break; 5348 case LPFC_LINK_NUMBER_1: 5349 status = bf_get(lpfc_sli_misconfigured_port1_state, 5350 &misconfigured->theEvent); 5351 operational = bf_get(lpfc_sli_misconfigured_port1_op, 5352 &misconfigured->theEvent); 5353 break; 5354 case LPFC_LINK_NUMBER_2: 5355 status = bf_get(lpfc_sli_misconfigured_port2_state, 5356 &misconfigured->theEvent); 5357 operational = bf_get(lpfc_sli_misconfigured_port2_op, 5358 &misconfigured->theEvent); 5359 break; 5360 case LPFC_LINK_NUMBER_3: 5361 status = bf_get(lpfc_sli_misconfigured_port3_state, 5362 &misconfigured->theEvent); 5363 operational = bf_get(lpfc_sli_misconfigured_port3_op, 5364 &misconfigured->theEvent); 5365 break; 5366 default: 5367 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5368 "3296 " 5369 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 5370 "event: Invalid link %d", 5371 phba->sli4_hba.lnk_info.lnk_no); 5372 return; 5373 } 5374 5375 /* Skip if optic state unchanged */ 5376 if (phba->sli4_hba.lnk_info.optic_state == status) 5377 return; 5378 5379 switch (status) { 5380 case LPFC_SLI_EVENT_STATUS_VALID: 5381 sprintf(message, "Physical Link is functional"); 5382 break; 5383 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 5384 sprintf(message, "Optics faulted/incorrectly " 5385 "installed/not installed - Reseat optics, " 5386 "if issue not resolved, replace."); 5387 break; 5388 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 5389 sprintf(message, 5390 "Optics of two types installed - Remove one " 5391 "optic or install matching pair of optics."); 5392 break; 5393 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 5394 sprintf(message, "Incompatible optics - Replace with " 5395 "compatible optics for card to function."); 5396 break; 5397 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 5398 sprintf(message, "Unqualified optics - Replace with " 5399 "Avago optics for Warranty and Technical " 5400 "Support - Link is%s operational", 5401 (operational) ? " not" : ""); 5402 break; 5403 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 5404 sprintf(message, "Uncertified optics - Replace with " 5405 "Avago-certified optics to enable link " 5406 "operation - Link is%s operational", 5407 (operational) ? 
" not" : ""); 5408 break; 5409 default: 5410 /* firmware is reporting a status we don't know about */ 5411 sprintf(message, "Unknown event status x%02x", status); 5412 break; 5413 } 5414 5415 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 5416 rc = lpfc_sli4_read_config(phba); 5417 if (rc) { 5418 phba->lmt = 0; 5419 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5420 "3194 Unable to retrieve supported " 5421 "speeds, rc = 0x%x\n", rc); 5422 } 5423 vports = lpfc_create_vport_work_array(phba); 5424 if (vports != NULL) { 5425 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5426 i++) { 5427 shost = lpfc_shost_from_vport(vports[i]); 5428 lpfc_host_supported_speeds_set(shost); 5429 } 5430 } 5431 lpfc_destroy_vport_work_array(phba, vports); 5432 5433 phba->sli4_hba.lnk_info.optic_state = status; 5434 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5435 "3176 Port Name %c %s\n", port_name, message); 5436 break; 5437 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 5438 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5439 "3192 Remote DPort Test Initiated - " 5440 "Event Data1:x%08x Event Data2: x%08x\n", 5441 acqe_sli->event_data1, acqe_sli->event_data2); 5442 break; 5443 default: 5444 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5445 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 5446 "x%08x SLI Event Type:%d\n", 5447 acqe_sli->event_data1, acqe_sli->event_data2, 5448 evt_type); 5449 break; 5450 } 5451 } 5452 5453 /** 5454 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 5455 * @vport: pointer to vport data structure. 5456 * 5457 * This routine is to perform Clear Virtual Link (CVL) on a vport in 5458 * response to a CVL event. 5459 * 5460 * Return the pointer to the ndlp with the vport if successful, otherwise 5461 * return NULL. 5462 **/ 5463 static struct lpfc_nodelist * 5464 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 5465 { 5466 struct lpfc_nodelist *ndlp; 5467 struct Scsi_Host *shost; 5468 struct lpfc_hba *phba; 5469 5470 if (!vport) 5471 return NULL; 5472 phba = vport->phba; 5473 if (!phba) 5474 return NULL; 5475 ndlp = lpfc_findnode_did(vport, Fabric_DID); 5476 if (!ndlp) { 5477 /* Cannot find existing Fabric ndlp, so allocate a new one */ 5478 ndlp = lpfc_nlp_init(vport, Fabric_DID); 5479 if (!ndlp) 5480 return 0; 5481 /* Set the node type */ 5482 ndlp->nlp_type |= NLP_FABRIC; 5483 /* Put ndlp onto node list */ 5484 lpfc_enqueue_node(vport, ndlp); 5485 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 5486 /* re-setup ndlp without removing from node list */ 5487 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 5488 if (!ndlp) 5489 return 0; 5490 } 5491 if ((phba->pport->port_state < LPFC_FLOGI) && 5492 (phba->pport->port_state != LPFC_VPORT_FAILED)) 5493 return NULL; 5494 /* If virtual link is not yet instantiated ignore CVL */ 5495 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 5496 && (vport->port_state != LPFC_VPORT_FAILED)) 5497 return NULL; 5498 shost = lpfc_shost_from_vport(vport); 5499 if (!shost) 5500 return NULL; 5501 lpfc_linkdown_port(vport); 5502 lpfc_cleanup_pending_mbox(vport); 5503 spin_lock_irq(shost->host_lock); 5504 vport->fc_flag |= FC_VPORT_CVL_RCVD; 5505 spin_unlock_irq(shost->host_lock); 5506 5507 return ndlp; 5508 } 5509 5510 /** 5511 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 5512 * @vport: pointer to lpfc hba data structure. 5513 * 5514 * This routine is to perform Clear Virtual Link (CVL) on all vports in 5515 * response to a FCF dead event. 
5516 **/ 5517 static void 5518 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 5519 { 5520 struct lpfc_vport **vports; 5521 int i; 5522 5523 vports = lpfc_create_vport_work_array(phba); 5524 if (vports) 5525 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 5526 lpfc_sli4_perform_vport_cvl(vports[i]); 5527 lpfc_destroy_vport_work_array(phba, vports); 5528 } 5529 5530 /** 5531 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 5532 * @phba: pointer to lpfc hba data structure. 5533 * @acqe_fip: pointer to the async fcoe completion queue entry. 5534 * 5535 * This routine is to handle the SLI4 asynchronous fcoe event. 5536 **/ 5537 static void 5538 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 5539 struct lpfc_acqe_fip *acqe_fip) 5540 { 5541 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 5542 int rc; 5543 struct lpfc_vport *vport; 5544 struct lpfc_nodelist *ndlp; 5545 struct Scsi_Host *shost; 5546 int active_vlink_present; 5547 struct lpfc_vport **vports; 5548 int i; 5549 5550 phba->fc_eventTag = acqe_fip->event_tag; 5551 phba->fcoe_eventtag = acqe_fip->event_tag; 5552 switch (event_type) { 5553 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 5554 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 5555 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 5556 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5557 LOG_DISCOVERY, 5558 "2546 New FCF event, evt_tag:x%x, " 5559 "index:x%x\n", 5560 acqe_fip->event_tag, 5561 acqe_fip->index); 5562 else 5563 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 5564 LOG_DISCOVERY, 5565 "2788 FCF param modified event, " 5566 "evt_tag:x%x, index:x%x\n", 5567 acqe_fip->event_tag, 5568 acqe_fip->index); 5569 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5570 /* 5571 * During period of FCF discovery, read the FCF 5572 * table record indexed by the event to update 5573 * FCF roundrobin failover eligible FCF bmask. 5574 */ 5575 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5576 LOG_DISCOVERY, 5577 "2779 Read FCF (x%x) for updating " 5578 "roundrobin FCF failover bmask\n", 5579 acqe_fip->index); 5580 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 5581 } 5582 5583 /* If the FCF discovery is in progress, do nothing. */ 5584 spin_lock_irq(&phba->hbalock); 5585 if (phba->hba_flag & FCF_TS_INPROG) { 5586 spin_unlock_irq(&phba->hbalock); 5587 break; 5588 } 5589 /* If fast FCF failover rescan event is pending, do nothing */ 5590 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 5591 spin_unlock_irq(&phba->hbalock); 5592 break; 5593 } 5594 5595 /* If the FCF has been in discovered state, do nothing. 
*/ 5596 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 5597 spin_unlock_irq(&phba->hbalock); 5598 break; 5599 } 5600 spin_unlock_irq(&phba->hbalock); 5601 5602 /* Otherwise, scan the entire FCF table and re-discover SAN */ 5603 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5604 "2770 Start FCF table scan per async FCF " 5605 "event, evt_tag:x%x, index:x%x\n", 5606 acqe_fip->event_tag, acqe_fip->index); 5607 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 5608 LPFC_FCOE_FCF_GET_FIRST); 5609 if (rc) 5610 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5611 "2547 Issue FCF scan read FCF mailbox " 5612 "command failed (x%x)\n", rc); 5613 break; 5614 5615 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 5616 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5617 "2548 FCF Table full count 0x%x tag 0x%x\n", 5618 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 5619 acqe_fip->event_tag); 5620 break; 5621 5622 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 5623 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5624 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5625 "2549 FCF (x%x) disconnected from network, " 5626 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 5627 /* 5628 * If we are in the middle of FCF failover process, clear 5629 * the corresponding FCF bit in the roundrobin bitmap. 5630 */ 5631 spin_lock_irq(&phba->hbalock); 5632 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 5633 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 5634 spin_unlock_irq(&phba->hbalock); 5635 /* Update FLOGI FCF failover eligible FCF bmask */ 5636 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 5637 break; 5638 } 5639 spin_unlock_irq(&phba->hbalock); 5640 5641 /* If the event is not for currently used fcf do nothing */ 5642 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 5643 break; 5644 5645 /* 5646 * Otherwise, request the port to rediscover the entire FCF 5647 * table for a fast recovery from case that the current FCF 5648 * is no longer valid as we are not in the middle of FCF 5649 * failover process already. 5650 */ 5651 spin_lock_irq(&phba->hbalock); 5652 /* Mark the fast failover process in progress */ 5653 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 5654 spin_unlock_irq(&phba->hbalock); 5655 5656 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5657 "2771 Start FCF fast failover process due to " 5658 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 5659 "\n", acqe_fip->event_tag, acqe_fip->index); 5660 rc = lpfc_sli4_redisc_fcf_table(phba); 5661 if (rc) { 5662 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5663 LOG_DISCOVERY, 5664 "2772 Issue FCF rediscover mailbox " 5665 "command failed, fail through to FCF " 5666 "dead event\n"); 5667 spin_lock_irq(&phba->hbalock); 5668 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 5669 spin_unlock_irq(&phba->hbalock); 5670 /* 5671 * Last resort will fail over by treating this 5672 * as a link down to FCF registration. 5673 */ 5674 lpfc_sli4_fcf_dead_failthrough(phba); 5675 } else { 5676 /* Reset FCF roundrobin bmask for new discovery */ 5677 lpfc_sli4_clear_fcf_rr_bmask(phba); 5678 /* 5679 * Handling fast FCF failover to a DEAD FCF event is 5680 * considered equivalent to receiving CVL to all vports. 
5681 */ 5682 lpfc_sli4_perform_all_vport_cvl(phba); 5683 } 5684 break; 5685 case LPFC_FIP_EVENT_TYPE_CVL: 5686 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 5687 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5688 "2718 Clear Virtual Link Received for VPI 0x%x" 5689 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 5690 5691 vport = lpfc_find_vport_by_vpid(phba, 5692 acqe_fip->index); 5693 ndlp = lpfc_sli4_perform_vport_cvl(vport); 5694 if (!ndlp) 5695 break; 5696 active_vlink_present = 0; 5697 5698 vports = lpfc_create_vport_work_array(phba); 5699 if (vports) { 5700 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 5701 i++) { 5702 if ((!(vports[i]->fc_flag & 5703 FC_VPORT_CVL_RCVD)) && 5704 (vports[i]->port_state > LPFC_FDISC)) { 5705 active_vlink_present = 1; 5706 break; 5707 } 5708 } 5709 lpfc_destroy_vport_work_array(phba, vports); 5710 } 5711 5712 /* 5713 * Don't re-instantiate if vport is marked for deletion. 5714 * If we are here first then vport_delete is going to wait 5715 * for discovery to complete. 5716 */ 5717 if (!(vport->load_flag & FC_UNLOADING) && 5718 active_vlink_present) { 5719 /* 5720 * If there are other active VLinks present, 5721 * re-instantiate the Vlink using FDISC. 5722 */ 5723 mod_timer(&ndlp->nlp_delayfunc, 5724 jiffies + msecs_to_jiffies(1000)); 5725 shost = lpfc_shost_from_vport(vport); 5726 spin_lock_irq(shost->host_lock); 5727 ndlp->nlp_flag |= NLP_DELAY_TMO; 5728 spin_unlock_irq(shost->host_lock); 5729 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 5730 vport->port_state = LPFC_FDISC; 5731 } else { 5732 /* 5733 * Otherwise, we request port to rediscover 5734 * the entire FCF table for a fast recovery 5735 * from possible case that the current FCF 5736 * is no longer valid if we are not already 5737 * in the FCF failover process. 5738 */ 5739 spin_lock_irq(&phba->hbalock); 5740 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 5741 spin_unlock_irq(&phba->hbalock); 5742 break; 5743 } 5744 /* Mark the fast failover process in progress */ 5745 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 5746 spin_unlock_irq(&phba->hbalock); 5747 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 5748 LOG_DISCOVERY, 5749 "2773 Start FCF failover per CVL, " 5750 "evt_tag:x%x\n", acqe_fip->event_tag); 5751 rc = lpfc_sli4_redisc_fcf_table(phba); 5752 if (rc) { 5753 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5754 LOG_DISCOVERY, 5755 "2774 Issue FCF rediscover " 5756 "mailbox command failed, fail " 5757 "through to CVL event\n"); 5758 spin_lock_irq(&phba->hbalock); 5759 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5760 spin_unlock_irq(&phba->hbalock); 5761 /* 5762 * Last resort will be re-try on the 5763 * current registered FCF entry. 5764 */ 5765 lpfc_retry_pport_discovery(phba); 5766 } else 5767 /* 5768 * Reset FCF roundrobin bmask for new 5769 * discovery. 5770 */ 5771 lpfc_sli4_clear_fcf_rr_bmask(phba); 5772 } 5773 break; 5774 default: 5775 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5776 "0288 Unknown FCoE event type 0x%x event tag " 5777 "0x%x\n", event_type, acqe_fip->event_tag); 5778 break; 5779 } 5780 } 5781 5782 /** 5783 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 5784 * @phba: pointer to lpfc hba data structure. 5785 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 5786 * 5787 * This routine is to handle the SLI4 asynchronous dcbx event. 
5788 **/ 5789 static void 5790 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 5791 struct lpfc_acqe_dcbx *acqe_dcbx) 5792 { 5793 phba->fc_eventTag = acqe_dcbx->event_tag; 5794 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5795 "0290 The SLI4 DCBX asynchronous event is not " 5796 "handled yet\n"); 5797 } 5798 5799 /** 5800 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 5801 * @phba: pointer to lpfc hba data structure. 5802 * @acqe_grp5: pointer to the async grp5 completion queue entry. 5803 * 5804 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 5805 * is an asynchronous notification of a logical link speed change. The Port 5806 * reports the logical link speed in units of 10Mbps. 5807 **/ 5808 static void 5809 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 5810 struct lpfc_acqe_grp5 *acqe_grp5) 5811 { 5812 uint16_t prev_ll_spd; 5813 5814 phba->fc_eventTag = acqe_grp5->event_tag; 5815 phba->fcoe_eventtag = acqe_grp5->event_tag; 5816 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 5817 phba->sli4_hba.link_state.logical_speed = 5818 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 5819 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5820 "2789 GRP5 Async Event: Updating logical link speed " 5821 "from %dMbps to %dMbps\n", prev_ll_spd, 5822 phba->sli4_hba.link_state.logical_speed); 5823 } 5824 5825 /** 5826 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 5827 * @phba: pointer to lpfc hba data structure. 5828 * 5829 * This routine is invoked by the worker thread to process all the pending 5830 * SLI4 asynchronous events. 5831 **/ 5832 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 5833 { 5834 struct lpfc_cq_event *cq_event; 5835 5836 /* First, declare the async event has been handled */ 5837 spin_lock_irq(&phba->hbalock); 5838 phba->hba_flag &= ~ASYNC_EVENT; 5839 spin_unlock_irq(&phba->hbalock); 5840 /* Now, handle all the async events */ 5841 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 5842 /* Get the first event from the head of the event queue */ 5843 spin_lock_irq(&phba->hbalock); 5844 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 5845 cq_event, struct lpfc_cq_event, list); 5846 spin_unlock_irq(&phba->hbalock); 5847 /* Process the asynchronous event */ 5848 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 5849 case LPFC_TRAILER_CODE_LINK: 5850 lpfc_sli4_async_link_evt(phba, 5851 &cq_event->cqe.acqe_link); 5852 break; 5853 case LPFC_TRAILER_CODE_FCOE: 5854 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 5855 break; 5856 case LPFC_TRAILER_CODE_DCBX: 5857 lpfc_sli4_async_dcbx_evt(phba, 5858 &cq_event->cqe.acqe_dcbx); 5859 break; 5860 case LPFC_TRAILER_CODE_GRP5: 5861 lpfc_sli4_async_grp5_evt(phba, 5862 &cq_event->cqe.acqe_grp5); 5863 break; 5864 case LPFC_TRAILER_CODE_FC: 5865 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 5866 break; 5867 case LPFC_TRAILER_CODE_SLI: 5868 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 5869 break; 5870 default: 5871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5872 "1804 Invalid asynchronous event code: " 5873 "x%x\n", bf_get(lpfc_trailer_code, 5874 &cq_event->cqe.mcqe_cmpl)); 5875 break; 5876 } 5877 /* Free the completion event processed to the free pool */ 5878 lpfc_sli4_cq_event_release(phba, cq_event); 5879 } 5880 } 5881 5882 /** 5883 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 5884 * @phba: pointer to lpfc hba data structure. 
5885 * 5886 * This routine is invoked by the worker thread to process FCF table 5887 * rediscovery pending completion event. 5888 **/ 5889 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 5890 { 5891 int rc; 5892 5893 spin_lock_irq(&phba->hbalock); 5894 /* Clear FCF rediscovery timeout event */ 5895 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 5896 /* Clear driver fast failover FCF record flag */ 5897 phba->fcf.failover_rec.flag = 0; 5898 /* Set state for FCF fast failover */ 5899 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 5900 spin_unlock_irq(&phba->hbalock); 5901 5902 /* Scan FCF table from the first entry to re-discover SAN */ 5903 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 5904 "2777 Start post-quiescent FCF table scan\n"); 5905 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5906 if (rc) 5907 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 5908 "2747 Issue FCF scan read FCF mailbox " 5909 "command failed 0x%x\n", rc); 5910 } 5911 5912 /** 5913 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 5914 * @phba: pointer to lpfc hba data structure. 5915 * @dev_grp: The HBA PCI-Device group number. 5916 * 5917 * This routine is invoked to set up the per HBA PCI-Device group function 5918 * API jump table entries. 5919 * 5920 * Return: 0 if success, otherwise -ENODEV 5921 **/ 5922 int 5923 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5924 { 5925 int rc; 5926 5927 /* Set up lpfc PCI-device group */ 5928 phba->pci_dev_grp = dev_grp; 5929 5930 /* The LPFC_PCI_DEV_OC uses SLI4 */ 5931 if (dev_grp == LPFC_PCI_DEV_OC) 5932 phba->sli_rev = LPFC_SLI_REV4; 5933 5934 /* Set up device INIT API function jump table */ 5935 rc = lpfc_init_api_table_setup(phba, dev_grp); 5936 if (rc) 5937 return -ENODEV; 5938 /* Set up SCSI API function jump table */ 5939 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 5940 if (rc) 5941 return -ENODEV; 5942 /* Set up SLI API function jump table */ 5943 rc = lpfc_sli_api_table_setup(phba, dev_grp); 5944 if (rc) 5945 return -ENODEV; 5946 /* Set up MBOX API function jump table */ 5947 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 5948 if (rc) 5949 return -ENODEV; 5950 5951 return 0; 5952 } 5953 5954 /** 5955 * lpfc_log_intr_mode - Log the active interrupt mode 5956 * @phba: pointer to lpfc hba data structure. 5957 * @intr_mode: active interrupt mode adopted. 5958 * 5959 * This routine it invoked to log the currently used active interrupt mode 5960 * to the device. 5961 **/ 5962 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 5963 { 5964 switch (intr_mode) { 5965 case 0: 5966 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5967 "0470 Enable INTx interrupt mode.\n"); 5968 break; 5969 case 1: 5970 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5971 "0481 Enabled MSI interrupt mode.\n"); 5972 break; 5973 case 2: 5974 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5975 "0480 Enabled MSI-X interrupt mode.\n"); 5976 break; 5977 default: 5978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5979 "0482 Illegal interrupt mode.\n"); 5980 break; 5981 } 5982 return; 5983 } 5984 5985 /** 5986 * lpfc_enable_pci_dev - Enable a generic PCI device. 5987 * @phba: pointer to lpfc hba data structure. 5988 * 5989 * This routine is invoked to enable the PCI device that is common to all 5990 * PCI devices. 
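 *
 * Illustrative pairing (a sketch under the assumption that a probe path
 * wraps this call, not an additional API): enabling here is undone later
 * with lpfc_disable_pci_dev(), e.g.
 *
 *	if (lpfc_enable_pci_dev(phba))
 *		return -ENODEV;
 *	...
 *	lpfc_disable_pci_dev(phba);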
5991 * 5992 * Return codes 5993 * 0 - successful 5994 * other values - error 5995 **/ 5996 static int 5997 lpfc_enable_pci_dev(struct lpfc_hba *phba) 5998 { 5999 struct pci_dev *pdev; 6000 6001 /* Obtain PCI device reference */ 6002 if (!phba->pcidev) 6003 goto out_error; 6004 else 6005 pdev = phba->pcidev; 6006 /* Enable PCI device */ 6007 if (pci_enable_device_mem(pdev)) 6008 goto out_error; 6009 /* Request PCI resource for the device */ 6010 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 6011 goto out_disable_device; 6012 /* Set up device as PCI master and save state for EEH */ 6013 pci_set_master(pdev); 6014 pci_try_set_mwi(pdev); 6015 pci_save_state(pdev); 6016 6017 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 6018 if (pci_is_pcie(pdev)) 6019 pdev->needs_freset = 1; 6020 6021 return 0; 6022 6023 out_disable_device: 6024 pci_disable_device(pdev); 6025 out_error: 6026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6027 "1401 Failed to enable pci device\n"); 6028 return -ENODEV; 6029 } 6030 6031 /** 6032 * lpfc_disable_pci_dev - Disable a generic PCI device. 6033 * @phba: pointer to lpfc hba data structure. 6034 * 6035 * This routine is invoked to disable the PCI device that is common to all 6036 * PCI devices. 6037 **/ 6038 static void 6039 lpfc_disable_pci_dev(struct lpfc_hba *phba) 6040 { 6041 struct pci_dev *pdev; 6042 6043 /* Obtain PCI device reference */ 6044 if (!phba->pcidev) 6045 return; 6046 else 6047 pdev = phba->pcidev; 6048 /* Release PCI resource and disable PCI device */ 6049 pci_release_mem_regions(pdev); 6050 pci_disable_device(pdev); 6051 6052 return; 6053 } 6054 6055 /** 6056 * lpfc_reset_hba - Reset a hba 6057 * @phba: pointer to lpfc hba data structure. 6058 * 6059 * This routine is invoked to reset a hba device. It brings the HBA 6060 * offline, performs a board restart, and then brings the board back 6061 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 6062 * the outstanding mailbox commands. 6063 **/ 6064 void 6065 lpfc_reset_hba(struct lpfc_hba *phba) 6066 { 6067 /* If resets are disabled then set error state and return. */ 6068 if (!phba->cfg_enable_hba_reset) { 6069 phba->link_state = LPFC_HBA_ERROR; 6070 return; 6071 } 6072 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 6073 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6074 else 6075 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 6076 lpfc_offline(phba); 6077 lpfc_sli_brdrestart(phba); 6078 lpfc_online(phba); 6079 lpfc_unblock_mgmt_io(phba); 6080 } 6081 6082 /** 6083 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 6084 * @phba: pointer to lpfc hba data structure. 6085 * 6086 * This function reads the PCI SR-IOV extended capability of the physical 6087 * function and returns the total number of virtual functions (TotalVFs) 6088 * supported by the adapter. A return value of 0 means the device does not 6089 * expose an SR-IOV capability, so no virtual functions can be enabled. The 6090 * value is used to bound requests made through pci_enable_sriov(). 
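 *
 * Illustrative use, mirroring how lpfc_sli_probe_sriov_nr_virtfn() below
 * bounds a requested VF count (sketch only):
 *
 *	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
 *	if (nr_vfn > max_nr_vfn)
 *		return -EINVAL;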
6091 **/ 6092 uint16_t 6093 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 6094 { 6095 struct pci_dev *pdev = phba->pcidev; 6096 uint16_t nr_virtfn; 6097 int pos; 6098 6099 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 6100 if (pos == 0) 6101 return 0; 6102 6103 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 6104 return nr_virtfn; 6105 } 6106 6107 /** 6108 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 6109 * @phba: pointer to lpfc hba data structure. 6110 * @nr_vfn: number of virtual functions to be enabled. 6111 * 6112 * This function enables the PCI SR-IOV virtual functions to a physical 6113 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 6114 * enable the number of virtual functions to the physical function. As 6115 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 6116 * API call is not considered an error condition for most devices. 6117 **/ 6118 int 6119 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 6120 { 6121 struct pci_dev *pdev = phba->pcidev; 6122 uint16_t max_nr_vfn; 6123 int rc; 6124 6125 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 6126 if (nr_vfn > max_nr_vfn) { 6127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6128 "3057 Requested vfs (%d) greater than " 6129 "supported vfs (%d)", nr_vfn, max_nr_vfn); 6130 return -EINVAL; 6131 } 6132 6133 rc = pci_enable_sriov(pdev, nr_vfn); 6134 if (rc) { 6135 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6136 "2806 Failed to enable sriov on this device " 6137 "with vfn number nr_vf:%d, rc:%d\n", 6138 nr_vfn, rc); 6139 } else 6140 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6141 "2807 Successful enable sriov on this device " 6142 "with vfn number nr_vf:%d\n", nr_vfn); 6143 return rc; 6144 } 6145 6146 /** 6147 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 6148 * @phba: pointer to lpfc hba data structure. 6149 * 6150 * This routine is invoked to set up the driver internal resources before the 6151 * device specific resource setup to support the HBA device it attached to. 6152 * 6153 * Return codes 6154 * 0 - successful 6155 * other values - error 6156 **/ 6157 static int 6158 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 6159 { 6160 struct lpfc_sli *psli = &phba->sli; 6161 6162 /* 6163 * Driver resources common to all SLI revisions 6164 */ 6165 atomic_set(&phba->fast_event_count, 0); 6166 spin_lock_init(&phba->hbalock); 6167 6168 /* Initialize ndlp management spinlock */ 6169 spin_lock_init(&phba->ndlp_lock); 6170 6171 /* Initialize port_list spinlock */ 6172 spin_lock_init(&phba->port_list_lock); 6173 INIT_LIST_HEAD(&phba->port_list); 6174 6175 INIT_LIST_HEAD(&phba->work_list); 6176 init_waitqueue_head(&phba->wait_4_mlo_m_q); 6177 6178 /* Initialize the wait queue head for the kernel thread */ 6179 init_waitqueue_head(&phba->work_waitq); 6180 6181 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6182 "1403 Protocols supported %s %s %s\n", 6183 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 6184 "SCSI" : " "), 6185 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 6186 "NVME" : " "), 6187 (phba->nvmet_support ? 
"NVMET" : " ")); 6188 6189 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 6190 spin_lock_init(&phba->scsi_buf_list_get_lock); 6191 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 6192 spin_lock_init(&phba->scsi_buf_list_put_lock); 6193 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 6194 6195 /* Initialize the fabric iocb list */ 6196 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6197 6198 /* Initialize list to save ELS buffers */ 6199 INIT_LIST_HEAD(&phba->elsbuf); 6200 6201 /* Initialize FCF connection rec list */ 6202 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 6203 6204 /* Initialize OAS configuration list */ 6205 spin_lock_init(&phba->devicelock); 6206 INIT_LIST_HEAD(&phba->luns); 6207 6208 /* MBOX heartbeat timer */ 6209 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 6210 /* Fabric block timer */ 6211 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 6212 /* EA polling mode timer */ 6213 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 6214 /* Heartbeat timer */ 6215 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 6216 6217 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 6218 6219 return 0; 6220 } 6221 6222 /** 6223 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 6224 * @phba: pointer to lpfc hba data structure. 6225 * 6226 * This routine is invoked to set up the driver internal resources specific to 6227 * support the SLI-3 HBA device it attached to. 6228 * 6229 * Return codes 6230 * 0 - successful 6231 * other values - error 6232 **/ 6233 static int 6234 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 6235 { 6236 int rc, entry_sz; 6237 6238 /* 6239 * Initialize timers used by driver 6240 */ 6241 6242 /* FCP polling mode timer */ 6243 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 6244 6245 /* Host attention work mask setup */ 6246 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6247 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6248 6249 /* Get all the module params for configuring this host */ 6250 lpfc_get_cfgparam(phba); 6251 /* Set up phase-1 common device driver resources */ 6252 6253 rc = lpfc_setup_driver_resource_phase1(phba); 6254 if (rc) 6255 return -ENODEV; 6256 6257 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 6258 phba->menlo_flag |= HBA_MENLO_SUPPORT; 6259 /* check for menlo minimum sg count */ 6260 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 6261 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 6262 } 6263 6264 if (!phba->sli.sli3_ring) 6265 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 6266 sizeof(struct lpfc_sli_ring), 6267 GFP_KERNEL); 6268 if (!phba->sli.sli3_ring) 6269 return -ENOMEM; 6270 6271 /* 6272 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 6273 * used to create the sg_dma_buf_pool must be dynamically calculated. 6274 */ 6275 6276 /* Initialize the host templates the configured values. */ 6277 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6278 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 6279 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 6280 6281 if (phba->sli_rev == LPFC_SLI_REV4) 6282 entry_sz = sizeof(struct sli4_sge); 6283 else 6284 entry_sz = sizeof(struct ulp_bde64); 6285 6286 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 6287 if (phba->cfg_enable_bg) { 6288 /* 6289 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 6290 * the FCP rsp, and a BDE for each. 
Since we have no control 6291 * over how many protection data segments the SCSI Layer 6292 * will hand us (ie: there could be one for every block 6293 * in the IO), we just allocate enough BDEs to accommodate 6294 * our max amount and we need to limit lpfc_sg_seg_cnt to 6295 * minimize the risk of running out. 6296 */ 6297 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6298 sizeof(struct fcp_rsp) + 6299 (LPFC_MAX_SG_SEG_CNT * entry_sz); 6300 6301 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 6302 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 6303 6304 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 6305 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 6306 } else { 6307 /* 6308 * The scsi_buf for a regular I/O will hold the FCP cmnd, 6309 * the FCP rsp, a BDE for each, and a BDE for up to 6310 * cfg_sg_seg_cnt data segments. 6311 */ 6312 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6313 sizeof(struct fcp_rsp) + 6314 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 6315 6316 /* Total BDEs in BPL for scsi_sg_list */ 6317 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 6318 } 6319 6320 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6321 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 6322 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6323 phba->cfg_total_seg_cnt); 6324 6325 phba->max_vpi = LPFC_MAX_VPI; 6326 /* This will be set to correct value after config_port mbox */ 6327 phba->max_vports = 0; 6328 6329 /* 6330 * Initialize the SLI Layer to run with lpfc HBAs. 6331 */ 6332 lpfc_sli_setup(phba); 6333 lpfc_sli_queue_init(phba); 6334 6335 /* Allocate device driver memory */ 6336 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 6337 return -ENOMEM; 6338 6339 /* 6340 * Enable sr-iov virtual functions if supported and configured 6341 * through the module parameter. 6342 */ 6343 if (phba->cfg_sriov_nr_virtfn > 0) { 6344 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6345 phba->cfg_sriov_nr_virtfn); 6346 if (rc) { 6347 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6348 "2808 Requested number of SR-IOV " 6349 "virtual functions (%d) is not " 6350 "supported\n", 6351 phba->cfg_sriov_nr_virtfn); 6352 phba->cfg_sriov_nr_virtfn = 0; 6353 } 6354 } 6355 6356 return 0; 6357 } 6358 6359 /** 6360 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 6361 * @phba: pointer to lpfc hba data structure. 6362 * 6363 * This routine is invoked to unset the driver internal resources set up 6364 * specific for supporting the SLI-3 HBA device it attached to. 6365 **/ 6366 static void 6367 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 6368 { 6369 /* Free device driver memory allocated */ 6370 lpfc_mem_free_all(phba); 6371 6372 return; 6373 } 6374 6375 /** 6376 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 6377 * @phba: pointer to lpfc hba data structure. 6378 * 6379 * This routine is invoked to set up the driver internal resources specific to 6380 * support the SLI-4 HBA device it attached to. 
6381 * 6382 * Return codes 6383 * 0 - successful 6384 * other values - error 6385 **/ 6386 static int 6387 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 6388 { 6389 LPFC_MBOXQ_t *mboxq; 6390 MAILBOX_t *mb; 6391 int rc, i, max_buf_size; 6392 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 6393 struct lpfc_mqe *mqe; 6394 int longs; 6395 int extra; 6396 uint64_t wwn; 6397 u32 if_type; 6398 u32 if_fam; 6399 6400 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6401 phba->sli4_hba.num_possible_cpu = num_possible_cpus(); 6402 phba->sli4_hba.curr_disp_cpu = 0; 6403 6404 /* Get all the module params for configuring this host */ 6405 lpfc_get_cfgparam(phba); 6406 6407 /* Set up phase-1 common device driver resources */ 6408 rc = lpfc_setup_driver_resource_phase1(phba); 6409 if (rc) 6410 return -ENODEV; 6411 6412 /* Before proceed, wait for POST done and device ready */ 6413 rc = lpfc_sli4_post_status_check(phba); 6414 if (rc) 6415 return -ENODEV; 6416 6417 /* 6418 * Initialize timers used by driver 6419 */ 6420 6421 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 6422 6423 /* FCF rediscover timer */ 6424 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 6425 6426 /* 6427 * Control structure for handling external multi-buffer mailbox 6428 * command pass-through. 6429 */ 6430 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 6431 sizeof(struct lpfc_mbox_ext_buf_ctx)); 6432 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 6433 6434 phba->max_vpi = LPFC_MAX_VPI; 6435 6436 /* This will be set to correct value after the read_config mbox */ 6437 phba->max_vports = 0; 6438 6439 /* Program the default value of vlan_id and fc_map */ 6440 phba->valid_vlan = 0; 6441 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 6442 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 6443 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 6444 6445 /* 6446 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 6447 * we will associate a new ring, for each EQ/CQ/WQ tuple. 6448 * The WQ create will allocate the ring. 6449 */ 6450 6451 /* 6452 * 1 for cmd, 1 for rsp, NVME adds an extra one 6453 * for boundary conditions in its max_sgl_segment template. 6454 */ 6455 extra = 2; 6456 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 6457 extra++; 6458 6459 /* 6460 * It doesn't matter what family our adapter is in, we are 6461 * limited to 2 Pages, 512 SGEs, for our SGL. 6462 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 6463 */ 6464 max_buf_size = (2 * SLI4_PAGE_SIZE); 6465 6466 /* 6467 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 6468 * used to create the sg_dma_buf_pool must be calculated. 6469 */ 6470 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 6471 /* 6472 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 6473 * the FCP rsp, and a SGE. Since we have no control 6474 * over how many protection segments the SCSI Layer 6475 * will hand us (ie: there could be one for every block 6476 * in the IO), just allocate enough SGEs to accommodate 6477 * our max amount and we need to limit lpfc_sg_seg_cnt 6478 * to minimize the risk of running out. 6479 */ 6480 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6481 sizeof(struct fcp_rsp) + max_buf_size; 6482 6483 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 6484 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 6485 6486 /* 6487 * If supporting DIF, reduce the seg count for scsi to 6488 * allow room for the DIF sges. 
6489 */ 6490 if (phba->cfg_enable_bg && 6491 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 6492 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 6493 else 6494 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6495 6496 } else { 6497 /* 6498 * The scsi_buf for a regular I/O holds the FCP cmnd, 6499 * the FCP rsp, a SGE for each, and a SGE for up to 6500 * cfg_sg_seg_cnt data segments. 6501 */ 6502 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 6503 sizeof(struct fcp_rsp) + 6504 ((phba->cfg_sg_seg_cnt + extra) * 6505 sizeof(struct sli4_sge)); 6506 6507 /* Total SGEs for scsi_sg_list */ 6508 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 6509 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 6510 6511 /* 6512 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 6513 * need to post 1 page for the SGL. 6514 */ 6515 } 6516 6517 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 6518 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6519 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 6520 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 6521 "6300 Reducing NVME sg segment " 6522 "cnt to %d\n", 6523 LPFC_MAX_NVME_SEG_CNT); 6524 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 6525 } else 6526 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 6527 } 6528 6529 /* Initialize the host templates with the updated values. */ 6530 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6531 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; 6532 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; 6533 6534 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 6535 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 6536 else 6537 phba->cfg_sg_dma_buf_size = 6538 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 6539 6540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6541 "9087 sg_seg_cnt:%d dmabuf_size:%d " 6542 "total:%d scsi:%d nvme:%d\n", 6543 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6544 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 6545 phba->cfg_nvme_seg_cnt); 6546 6547 /* Initialize buffer queue management fields */ 6548 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6549 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6550 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 6551 6552 /* 6553 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
6554 */ 6555 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 6556 /* Initialize the Abort scsi buffer list used by driver */ 6557 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 6558 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 6559 } 6560 6561 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6562 /* Initialize the Abort nvme buffer list used by driver */ 6563 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 6564 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 6565 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 6566 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 6567 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 6568 } 6569 6570 /* This abort list used by worker thread */ 6571 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 6572 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 6573 6574 /* 6575 * Initialize driver internal slow-path work queues 6576 */ 6577 6578 /* Driver internel slow-path CQ Event pool */ 6579 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 6580 /* Response IOCB work queue list */ 6581 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 6582 /* Asynchronous event CQ Event work queue list */ 6583 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 6584 /* Fast-path XRI aborted CQ Event work queue list */ 6585 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 6586 /* Slow-path XRI aborted CQ Event work queue list */ 6587 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 6588 /* Receive queue CQ Event work queue list */ 6589 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 6590 6591 /* Initialize extent block lists. */ 6592 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 6593 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 6594 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 6595 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 6596 6597 /* Initialize mboxq lists. If the early init routines fail 6598 * these lists need to be correctly initialized. 6599 */ 6600 INIT_LIST_HEAD(&phba->sli.mboxq); 6601 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 6602 6603 /* initialize optic_state to 0xFF */ 6604 phba->sli4_hba.lnk_info.optic_state = 0xff; 6605 6606 /* Allocate device driver memory */ 6607 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 6608 if (rc) 6609 return -ENOMEM; 6610 6611 /* IF Type 2 ports get initialized now. */ 6612 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 6613 LPFC_SLI_INTF_IF_TYPE_2) { 6614 rc = lpfc_pci_function_reset(phba); 6615 if (unlikely(rc)) { 6616 rc = -ENODEV; 6617 goto out_free_mem; 6618 } 6619 phba->temp_sensor_support = 1; 6620 } 6621 6622 /* Create the bootstrap mailbox command */ 6623 rc = lpfc_create_bootstrap_mbox(phba); 6624 if (unlikely(rc)) 6625 goto out_free_mem; 6626 6627 /* Set up the host's endian order with the device. */ 6628 rc = lpfc_setup_endian_order(phba); 6629 if (unlikely(rc)) 6630 goto out_free_bsmbx; 6631 6632 /* Set up the hba's configuration parameters. */ 6633 rc = lpfc_sli4_read_config(phba); 6634 if (unlikely(rc)) 6635 goto out_free_bsmbx; 6636 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 6637 if (unlikely(rc)) 6638 goto out_free_bsmbx; 6639 6640 /* IF Type 0 ports get initialized now. 
*/ 6641 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6642 LPFC_SLI_INTF_IF_TYPE_0) { 6643 rc = lpfc_pci_function_reset(phba); 6644 if (unlikely(rc)) 6645 goto out_free_bsmbx; 6646 } 6647 6648 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6649 GFP_KERNEL); 6650 if (!mboxq) { 6651 rc = -ENOMEM; 6652 goto out_free_bsmbx; 6653 } 6654 6655 /* Check for NVMET being configured */ 6656 phba->nvmet_support = 0; 6657 if (lpfc_enable_nvmet_cnt) { 6658 6659 /* First get WWN of HBA instance */ 6660 lpfc_read_nv(phba, mboxq); 6661 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6662 if (rc != MBX_SUCCESS) { 6663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6664 "6016 Mailbox failed , mbxCmd x%x " 6665 "READ_NV, mbxStatus x%x\n", 6666 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6667 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 6668 mempool_free(mboxq, phba->mbox_mem_pool); 6669 rc = -EIO; 6670 goto out_free_bsmbx; 6671 } 6672 mb = &mboxq->u.mb; 6673 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 6674 sizeof(uint64_t)); 6675 wwn = cpu_to_be64(wwn); 6676 phba->sli4_hba.wwnn.u.name = wwn; 6677 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 6678 sizeof(uint64_t)); 6679 /* wwn is WWPN of HBA instance */ 6680 wwn = cpu_to_be64(wwn); 6681 phba->sli4_hba.wwpn.u.name = wwn; 6682 6683 /* Check to see if it matches any module parameter */ 6684 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 6685 if (wwn == lpfc_enable_nvmet[i]) { 6686 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 6687 if (lpfc_nvmet_mem_alloc(phba)) 6688 break; 6689 6690 phba->nvmet_support = 1; /* a match */ 6691 6692 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6693 "6017 NVME Target %016llx\n", 6694 wwn); 6695 #else 6696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6697 "6021 Can't enable NVME Target." 6698 " NVME_TARGET_FC infrastructure" 6699 " is not in kernel\n"); 6700 #endif 6701 /* Not supported for NVMET */ 6702 phba->cfg_xri_rebalancing = 0; 6703 break; 6704 } 6705 } 6706 } 6707 6708 lpfc_nvme_mod_param_dep(phba); 6709 6710 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 6711 lpfc_supported_pages(mboxq); 6712 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6713 if (!rc) { 6714 mqe = &mboxq->u.mqe; 6715 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 6716 LPFC_MAX_SUPPORTED_PAGES); 6717 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 6718 switch (pn_page[i]) { 6719 case LPFC_SLI4_PARAMETERS: 6720 phba->sli4_hba.pc_sli4_params.supported = 1; 6721 break; 6722 default: 6723 break; 6724 } 6725 } 6726 /* Read the port's SLI4 Parameters capabilities if supported. */ 6727 if (phba->sli4_hba.pc_sli4_params.supported) 6728 rc = lpfc_pc_sli4_params_get(phba, mboxq); 6729 if (rc) { 6730 mempool_free(mboxq, phba->mbox_mem_pool); 6731 rc = -EIO; 6732 goto out_free_bsmbx; 6733 } 6734 } 6735 6736 /* 6737 * Get sli4 parameters that override parameters from Port capabilities. 6738 * If this call fails, it isn't critical unless the SLI4 parameters come 6739 * back in conflict. 
6740 */ 6741 rc = lpfc_get_sli4_parameters(phba, mboxq); 6742 if (rc) { 6743 if_type = bf_get(lpfc_sli_intf_if_type, 6744 &phba->sli4_hba.sli_intf); 6745 if_fam = bf_get(lpfc_sli_intf_sli_family, 6746 &phba->sli4_hba.sli_intf); 6747 if (phba->sli4_hba.extents_in_use && 6748 phba->sli4_hba.rpi_hdrs_in_use) { 6749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6750 "2999 Unsupported SLI4 Parameters " 6751 "Extents and RPI headers enabled.\n"); 6752 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6753 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 6754 mempool_free(mboxq, phba->mbox_mem_pool); 6755 rc = -EIO; 6756 goto out_free_bsmbx; 6757 } 6758 } 6759 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 6760 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 6761 mempool_free(mboxq, phba->mbox_mem_pool); 6762 rc = -EIO; 6763 goto out_free_bsmbx; 6764 } 6765 } 6766 6767 mempool_free(mboxq, phba->mbox_mem_pool); 6768 6769 /* Verify OAS is supported */ 6770 lpfc_sli4_oas_verify(phba); 6771 6772 /* Verify RAS support on adapter */ 6773 lpfc_sli4_ras_init(phba); 6774 6775 /* Verify all the SLI4 queues */ 6776 rc = lpfc_sli4_queue_verify(phba); 6777 if (rc) 6778 goto out_free_bsmbx; 6779 6780 /* Create driver internal CQE event pool */ 6781 rc = lpfc_sli4_cq_event_pool_create(phba); 6782 if (rc) 6783 goto out_free_bsmbx; 6784 6785 /* Initialize sgl lists per host */ 6786 lpfc_init_sgl_list(phba); 6787 6788 /* Allocate and initialize active sgl array */ 6789 rc = lpfc_init_active_sgl_array(phba); 6790 if (rc) { 6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6792 "1430 Failed to initialize sgl list.\n"); 6793 goto out_destroy_cq_event_pool; 6794 } 6795 rc = lpfc_sli4_init_rpi_hdrs(phba); 6796 if (rc) { 6797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6798 "1432 Failed to initialize rpi headers.\n"); 6799 goto out_free_active_sgl; 6800 } 6801 6802 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 6803 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 6804 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 6805 GFP_KERNEL); 6806 if (!phba->fcf.fcf_rr_bmask) { 6807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6808 "2759 Failed allocate memory for FCF round " 6809 "robin failover bmask\n"); 6810 rc = -ENOMEM; 6811 goto out_remove_rpi_hdrs; 6812 } 6813 6814 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 6815 sizeof(struct lpfc_hba_eq_hdl), 6816 GFP_KERNEL); 6817 if (!phba->sli4_hba.hba_eq_hdl) { 6818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6819 "2572 Failed allocate memory for " 6820 "fast-path per-EQ handle array\n"); 6821 rc = -ENOMEM; 6822 goto out_free_fcf_rr_bmask; 6823 } 6824 6825 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 6826 sizeof(struct lpfc_vector_map_info), 6827 GFP_KERNEL); 6828 if (!phba->sli4_hba.cpu_map) { 6829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6830 "3327 Failed allocate memory for msi-x " 6831 "interrupt vector mapping\n"); 6832 rc = -ENOMEM; 6833 goto out_free_hba_eq_hdl; 6834 } 6835 6836 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 6837 if (!phba->sli4_hba.eq_info) { 6838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6839 "3321 Failed allocation for per_cpu stats\n"); 6840 rc = -ENOMEM; 6841 goto out_free_hba_cpu_map; 6842 } 6843 /* 6844 * Enable sr-iov virtual functions if supported and configured 6845 * through the module parameter. 
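 * For example, assuming the usual lpfc_sriov_nr_virtfn module parameter
 * is what feeds cfg_sriov_nr_virtfn, loading with
 * "modprobe lpfc lpfc_sriov_nr_virtfn=4" would request four virtual
 * functions at this point.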
6846 */ 6847 if (phba->cfg_sriov_nr_virtfn > 0) { 6848 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 6849 phba->cfg_sriov_nr_virtfn); 6850 if (rc) { 6851 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6852 "3020 Requested number of SR-IOV " 6853 "virtual functions (%d) is not " 6854 "supported\n", 6855 phba->cfg_sriov_nr_virtfn); 6856 phba->cfg_sriov_nr_virtfn = 0; 6857 } 6858 } 6859 6860 return 0; 6861 6862 out_free_hba_cpu_map: 6863 kfree(phba->sli4_hba.cpu_map); 6864 out_free_hba_eq_hdl: 6865 kfree(phba->sli4_hba.hba_eq_hdl); 6866 out_free_fcf_rr_bmask: 6867 kfree(phba->fcf.fcf_rr_bmask); 6868 out_remove_rpi_hdrs: 6869 lpfc_sli4_remove_rpi_hdrs(phba); 6870 out_free_active_sgl: 6871 lpfc_free_active_sgl(phba); 6872 out_destroy_cq_event_pool: 6873 lpfc_sli4_cq_event_pool_destroy(phba); 6874 out_free_bsmbx: 6875 lpfc_destroy_bootstrap_mbox(phba); 6876 out_free_mem: 6877 lpfc_mem_free(phba); 6878 return rc; 6879 } 6880 6881 /** 6882 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 6883 * @phba: pointer to lpfc hba data structure. 6884 * 6885 * This routine is invoked to unset the driver internal resources set up 6886 * specific for supporting the SLI-4 HBA device it attached to. 6887 **/ 6888 static void 6889 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 6890 { 6891 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6892 6893 free_percpu(phba->sli4_hba.eq_info); 6894 6895 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 6896 kfree(phba->sli4_hba.cpu_map); 6897 phba->sli4_hba.num_possible_cpu = 0; 6898 phba->sli4_hba.num_present_cpu = 0; 6899 phba->sli4_hba.curr_disp_cpu = 0; 6900 6901 /* Free memory allocated for fast-path work queue handles */ 6902 kfree(phba->sli4_hba.hba_eq_hdl); 6903 6904 /* Free the allocated rpi headers. */ 6905 lpfc_sli4_remove_rpi_hdrs(phba); 6906 lpfc_sli4_remove_rpis(phba); 6907 6908 /* Free eligible FCF index bmask */ 6909 kfree(phba->fcf.fcf_rr_bmask); 6910 6911 /* Free the ELS sgl list */ 6912 lpfc_free_active_sgl(phba); 6913 lpfc_free_els_sgl_list(phba); 6914 lpfc_free_nvmet_sgl_list(phba); 6915 6916 /* Free the completion queue EQ event pool */ 6917 lpfc_sli4_cq_event_release_all(phba); 6918 lpfc_sli4_cq_event_pool_destroy(phba); 6919 6920 /* Release resource identifiers. */ 6921 lpfc_sli4_dealloc_resource_identifiers(phba); 6922 6923 /* Free the bsmbx region. */ 6924 lpfc_destroy_bootstrap_mbox(phba); 6925 6926 /* Free the SLI Layer memory with SLI4 HBAs */ 6927 lpfc_mem_free_all(phba); 6928 6929 /* Free the current connect table */ 6930 list_for_each_entry_safe(conn_entry, next_conn_entry, 6931 &phba->fcf_conn_rec_list, list) { 6932 list_del_init(&conn_entry->list); 6933 kfree(conn_entry); 6934 } 6935 6936 return; 6937 } 6938 6939 /** 6940 * lpfc_init_api_table_setup - Set up init api function jump table 6941 * @phba: The hba struct for which this call is being executed. 6942 * @dev_grp: The HBA PCI-Device group number. 6943 * 6944 * This routine sets up the device INIT interface API function jump table 6945 * in @phba struct. 6946 * 6947 * Returns: 0 - success, -ENODEV - failure. 
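 *
 * Once populated, the rest of the driver reaches the device-specific
 * handlers only through these pointers, e.g. (illustrative):
 *
 *	phba->lpfc_stop_port(phba);
 *	phba->lpfc_hba_down_post(phba);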
6948 **/ 6949 int 6950 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6951 { 6952 phba->lpfc_hba_init_link = lpfc_hba_init_link; 6953 phba->lpfc_hba_down_link = lpfc_hba_down_link; 6954 phba->lpfc_selective_reset = lpfc_selective_reset; 6955 switch (dev_grp) { 6956 case LPFC_PCI_DEV_LP: 6957 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 6958 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 6959 phba->lpfc_stop_port = lpfc_stop_port_s3; 6960 break; 6961 case LPFC_PCI_DEV_OC: 6962 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 6963 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 6964 phba->lpfc_stop_port = lpfc_stop_port_s4; 6965 break; 6966 default: 6967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6968 "1431 Invalid HBA PCI-device group: 0x%x\n", 6969 dev_grp); 6970 return -ENODEV; 6971 break; 6972 } 6973 return 0; 6974 } 6975 6976 /** 6977 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 6978 * @phba: pointer to lpfc hba data structure. 6979 * 6980 * This routine is invoked to set up the driver internal resources after the 6981 * device specific resource setup to support the HBA device it attached to. 6982 * 6983 * Return codes 6984 * 0 - successful 6985 * other values - error 6986 **/ 6987 static int 6988 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 6989 { 6990 int error; 6991 6992 /* Startup the kernel thread for this host adapter. */ 6993 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6994 "lpfc_worker_%d", phba->brd_no); 6995 if (IS_ERR(phba->worker_thread)) { 6996 error = PTR_ERR(phba->worker_thread); 6997 return error; 6998 } 6999 7000 /* The lpfc_wq workqueue for deferred irq use is only used for SLI4 */ 7001 if (phba->sli_rev == LPFC_SLI_REV4) 7002 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 7003 else 7004 phba->wq = NULL; 7005 7006 return 0; 7007 } 7008 7009 /** 7010 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 7011 * @phba: pointer to lpfc hba data structure. 7012 * 7013 * This routine is invoked to unset the driver internal resources set up after 7014 * the device specific resource setup for supporting the HBA device it 7015 * attached to. 7016 **/ 7017 static void 7018 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 7019 { 7020 if (phba->wq) { 7021 flush_workqueue(phba->wq); 7022 destroy_workqueue(phba->wq); 7023 phba->wq = NULL; 7024 } 7025 7026 /* Stop kernel worker thread */ 7027 if (phba->worker_thread) 7028 kthread_stop(phba->worker_thread); 7029 } 7030 7031 /** 7032 * lpfc_free_iocb_list - Free iocb list. 7033 * @phba: pointer to lpfc hba data structure. 7034 * 7035 * This routine is invoked to free the driver's IOCB list and memory. 7036 **/ 7037 void 7038 lpfc_free_iocb_list(struct lpfc_hba *phba) 7039 { 7040 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 7041 7042 spin_lock_irq(&phba->hbalock); 7043 list_for_each_entry_safe(iocbq_entry, iocbq_next, 7044 &phba->lpfc_iocb_list, list) { 7045 list_del(&iocbq_entry->list); 7046 kfree(iocbq_entry); 7047 phba->total_iocbq_bufs--; 7048 } 7049 spin_unlock_irq(&phba->hbalock); 7050 7051 return; 7052 } 7053 7054 /** 7055 * lpfc_init_iocb_list - Allocate and initialize iocb list. 7056 * @phba: pointer to lpfc hba data structure. 7057 * 7058 * This routine is invoked to allocate and initialize the driver's IOCB 7059 * list and set up the IOCB tag array accordingly. 
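 *
 * A typical caller sizes the list with the driver default, e.g.
 * (illustrative): lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);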
7060 * 7061 * Return codes 7062 * 0 - successful 7063 * other values - error 7064 **/ 7065 int 7066 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 7067 { 7068 struct lpfc_iocbq *iocbq_entry = NULL; 7069 uint16_t iotag; 7070 int i; 7071 7072 /* Initialize and populate the iocb list per host. */ 7073 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 7074 for (i = 0; i < iocb_count; i++) { 7075 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 7076 if (iocbq_entry == NULL) { 7077 printk(KERN_ERR "%s: only allocated %d iocbs of " 7078 "expected %d count. Unloading driver.\n", 7079 __func__, i, LPFC_IOCB_LIST_CNT); 7080 goto out_free_iocbq; 7081 } 7082 7083 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 7084 if (iotag == 0) { 7085 kfree(iocbq_entry); 7086 printk(KERN_ERR "%s: failed to allocate IOTAG. " 7087 "Unloading driver.\n", __func__); 7088 goto out_free_iocbq; 7089 } 7090 iocbq_entry->sli4_lxritag = NO_XRI; 7091 iocbq_entry->sli4_xritag = NO_XRI; 7092 7093 spin_lock_irq(&phba->hbalock); 7094 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 7095 phba->total_iocbq_bufs++; 7096 spin_unlock_irq(&phba->hbalock); 7097 } 7098 7099 return 0; 7100 7101 out_free_iocbq: 7102 lpfc_free_iocb_list(phba); 7103 7104 return -ENOMEM; 7105 } 7106 7107 /** 7108 * lpfc_free_sgl_list - Free a given sgl list. 7109 * @phba: pointer to lpfc hba data structure. 7110 * @sglq_list: pointer to the head of sgl list. 7111 * 7112 * This routine is invoked to free a give sgl list and memory. 7113 **/ 7114 void 7115 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 7116 { 7117 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7118 7119 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 7120 list_del(&sglq_entry->list); 7121 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 7122 kfree(sglq_entry); 7123 } 7124 } 7125 7126 /** 7127 * lpfc_free_els_sgl_list - Free els sgl list. 7128 * @phba: pointer to lpfc hba data structure. 7129 * 7130 * This routine is invoked to free the driver's els sgl list and memory. 7131 **/ 7132 static void 7133 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 7134 { 7135 LIST_HEAD(sglq_list); 7136 7137 /* Retrieve all els sgls from driver list */ 7138 spin_lock_irq(&phba->hbalock); 7139 spin_lock(&phba->sli4_hba.sgl_list_lock); 7140 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 7141 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7142 spin_unlock_irq(&phba->hbalock); 7143 7144 /* Now free the sgl list */ 7145 lpfc_free_sgl_list(phba, &sglq_list); 7146 } 7147 7148 /** 7149 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 7150 * @phba: pointer to lpfc hba data structure. 7151 * 7152 * This routine is invoked to free the driver's nvmet sgl list and memory. 
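 *
 * Illustrative call site (teardown order in
 * lpfc_sli4_driver_resource_unset()):
 *
 *	lpfc_free_els_sgl_list(phba);
 *	lpfc_free_nvmet_sgl_list(phba);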
7153 **/ 7154 static void 7155 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 7156 { 7157 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7158 LIST_HEAD(sglq_list); 7159 7160 /* Retrieve all nvmet sgls from driver list */ 7161 spin_lock_irq(&phba->hbalock); 7162 spin_lock(&phba->sli4_hba.sgl_list_lock); 7163 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 7164 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7165 spin_unlock_irq(&phba->hbalock); 7166 7167 /* Now free the sgl list */ 7168 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 7169 list_del(&sglq_entry->list); 7170 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 7171 kfree(sglq_entry); 7172 } 7173 7174 /* Update the nvmet_xri_cnt to reflect no current sgls. 7175 * The next initialization cycle sets the count and allocates 7176 * the sgls over again. 7177 */ 7178 phba->sli4_hba.nvmet_xri_cnt = 0; 7179 } 7180 7181 /** 7182 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 7183 * @phba: pointer to lpfc hba data structure. 7184 * 7185 * This routine is invoked to allocate the driver's active sgl memory. 7186 * This array will hold the sglq_entry's for active IOs. 7187 **/ 7188 static int 7189 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 7190 { 7191 int size; 7192 size = sizeof(struct lpfc_sglq *); 7193 size *= phba->sli4_hba.max_cfg_param.max_xri; 7194 7195 phba->sli4_hba.lpfc_sglq_active_list = 7196 kzalloc(size, GFP_KERNEL); 7197 if (!phba->sli4_hba.lpfc_sglq_active_list) 7198 return -ENOMEM; 7199 return 0; 7200 } 7201 7202 /** 7203 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 7204 * @phba: pointer to lpfc hba data structure. 7205 * 7206 * This routine is invoked to walk through the array of active sglq entries 7207 * and free all of the resources. 7208 * This is just a place holder for now. 7209 **/ 7210 static void 7211 lpfc_free_active_sgl(struct lpfc_hba *phba) 7212 { 7213 kfree(phba->sli4_hba.lpfc_sglq_active_list); 7214 } 7215 7216 /** 7217 * lpfc_init_sgl_list - Allocate and initialize sgl list. 7218 * @phba: pointer to lpfc hba data structure. 7219 * 7220 * This routine is invoked to allocate and initizlize the driver's sgl 7221 * list and set up the sgl xritag tag array accordingly. 7222 * 7223 **/ 7224 static void 7225 lpfc_init_sgl_list(struct lpfc_hba *phba) 7226 { 7227 /* Initialize and populate the sglq list per host/VF. */ 7228 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 7229 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 7230 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 7231 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 7232 7233 /* els xri-sgl book keeping */ 7234 phba->sli4_hba.els_xri_cnt = 0; 7235 7236 /* nvme xri-buffer book keeping */ 7237 phba->sli4_hba.io_xri_cnt = 0; 7238 } 7239 7240 /** 7241 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 7242 * @phba: pointer to lpfc hba data structure. 7243 * 7244 * This routine is invoked to post rpi header templates to the 7245 * port for those SLI4 ports that do not support extents. This routine 7246 * posts a PAGE_SIZE memory region to the port to hold up to 7247 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 7248 * and should be called only when interrupts are disabled. 7249 * 7250 * Return codes 7251 * 0 - successful 7252 * -ERROR - otherwise. 
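 * Note: ports that do not use rpi headers return 0 without posting anything,
 * and ports that manage rpis through resource extents return -EIO.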
7253 **/ 7254 int 7255 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 7256 { 7257 int rc = 0; 7258 struct lpfc_rpi_hdr *rpi_hdr; 7259 7260 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 7261 if (!phba->sli4_hba.rpi_hdrs_in_use) 7262 return rc; 7263 if (phba->sli4_hba.extents_in_use) 7264 return -EIO; 7265 7266 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 7267 if (!rpi_hdr) { 7268 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7269 "0391 Error during rpi post operation\n"); 7270 lpfc_sli4_remove_rpis(phba); 7271 rc = -ENODEV; 7272 } 7273 7274 return rc; 7275 } 7276 7277 /** 7278 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 7279 * @phba: pointer to lpfc hba data structure. 7280 * 7281 * This routine is invoked to allocate a single 4KB memory region to 7282 * support rpis and stores them in the phba. This single region 7283 * provides support for up to 64 rpis. The region is used globally 7284 * by the device. 7285 * 7286 * Returns: 7287 * A valid rpi hdr on success. 7288 * A NULL pointer on any failure. 7289 **/ 7290 struct lpfc_rpi_hdr * 7291 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 7292 { 7293 uint16_t rpi_limit, curr_rpi_range; 7294 struct lpfc_dmabuf *dmabuf; 7295 struct lpfc_rpi_hdr *rpi_hdr; 7296 7297 /* 7298 * If the SLI4 port supports extents, posting the rpi header isn't 7299 * required. Set the expected maximum count and let the actual value 7300 * get set when extents are fully allocated. 7301 */ 7302 if (!phba->sli4_hba.rpi_hdrs_in_use) 7303 return NULL; 7304 if (phba->sli4_hba.extents_in_use) 7305 return NULL; 7306 7307 /* The limit on the logical index is just the max_rpi count. */ 7308 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 7309 7310 spin_lock_irq(&phba->hbalock); 7311 /* 7312 * Establish the starting RPI in this header block. The starting 7313 * rpi is normalized to a zero base because the physical rpi is 7314 * port based. 7315 */ 7316 curr_rpi_range = phba->sli4_hba.next_rpi; 7317 spin_unlock_irq(&phba->hbalock); 7318 7319 /* Reached full RPI range */ 7320 if (curr_rpi_range == rpi_limit) 7321 return NULL; 7322 7323 /* 7324 * First allocate the protocol header region for the port. The 7325 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 7326 */ 7327 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 7328 if (!dmabuf) 7329 return NULL; 7330 7331 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 7332 LPFC_HDR_TEMPLATE_SIZE, 7333 &dmabuf->phys, GFP_KERNEL); 7334 if (!dmabuf->virt) { 7335 rpi_hdr = NULL; 7336 goto err_free_dmabuf; 7337 } 7338 7339 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 7340 rpi_hdr = NULL; 7341 goto err_free_coherent; 7342 } 7343 7344 /* Save the rpi header data for cleanup later. */ 7345 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 7346 if (!rpi_hdr) 7347 goto err_free_coherent; 7348 7349 rpi_hdr->dmabuf = dmabuf; 7350 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 7351 rpi_hdr->page_count = 1; 7352 spin_lock_irq(&phba->hbalock); 7353 7354 /* The rpi_hdr stores the logical index only. 
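 * start_rpi records where this block begins and next_rpi records where the
 * following block would begin (the current next_rpi plus LPFC_RPI_HDR_COUNT).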
*/ 7355 rpi_hdr->start_rpi = curr_rpi_range; 7356 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 7357 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 7358 7359 spin_unlock_irq(&phba->hbalock); 7360 return rpi_hdr; 7361 7362 err_free_coherent: 7363 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 7364 dmabuf->virt, dmabuf->phys); 7365 err_free_dmabuf: 7366 kfree(dmabuf); 7367 return NULL; 7368 } 7369 7370 /** 7371 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 7372 * @phba: pointer to lpfc hba data structure. 7373 * 7374 * This routine is invoked to remove all memory resources allocated 7375 * to support rpis for SLI4 ports not supporting extents. This routine 7376 * presumes the caller has released all rpis consumed by fabric or port 7377 * logins and is prepared to have the header pages removed. 7378 **/ 7379 void 7380 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 7381 { 7382 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 7383 7384 if (!phba->sli4_hba.rpi_hdrs_in_use) 7385 goto exit; 7386 7387 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 7388 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 7389 list_del(&rpi_hdr->list); 7390 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 7391 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 7392 kfree(rpi_hdr->dmabuf); 7393 kfree(rpi_hdr); 7394 } 7395 exit: 7396 /* There are no rpis available to the port now. */ 7397 phba->sli4_hba.next_rpi = 0; 7398 } 7399 7400 /** 7401 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 7402 * @pdev: pointer to pci device data structure. 7403 * 7404 * This routine is invoked to allocate the driver hba data structure for an 7405 * HBA device. If the allocation is successful, the phba reference to the 7406 * PCI device data structure is set. 7407 * 7408 * Return codes 7409 * pointer to @phba - successful 7410 * NULL - error 7411 **/ 7412 static struct lpfc_hba * 7413 lpfc_hba_alloc(struct pci_dev *pdev) 7414 { 7415 struct lpfc_hba *phba; 7416 7417 /* Allocate memory for HBA structure */ 7418 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 7419 if (!phba) { 7420 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 7421 return NULL; 7422 } 7423 7424 /* Set reference to PCI device in HBA structure */ 7425 phba->pcidev = pdev; 7426 7427 /* Assign an unused board number */ 7428 phba->brd_no = lpfc_get_instance(); 7429 if (phba->brd_no < 0) { 7430 kfree(phba); 7431 return NULL; 7432 } 7433 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 7434 7435 spin_lock_init(&phba->ct_ev_lock); 7436 INIT_LIST_HEAD(&phba->ct_ev_waiters); 7437 7438 return phba; 7439 } 7440 7441 /** 7442 * lpfc_hba_free - Free driver hba data structure with a device. 7443 * @phba: pointer to lpfc hba data structure. 7444 * 7445 * This routine is invoked to free the driver hba data structure with an 7446 * HBA device. 7447 **/ 7448 static void 7449 lpfc_hba_free(struct lpfc_hba *phba) 7450 { 7451 if (phba->sli_rev == LPFC_SLI_REV4) 7452 kfree(phba->sli4_hba.hdwq); 7453 7454 /* Release the driver assigned board number */ 7455 idr_remove(&lpfc_hba_index, phba->brd_no); 7456 7457 /* Free memory allocated with sli3 rings */ 7458 kfree(phba->sli.sli3_ring); 7459 phba->sli.sli3_ring = NULL; 7460 7461 kfree(phba); 7462 return; 7463 } 7464 7465 /** 7466 * lpfc_create_shost - Create hba physical port with associated scsi host. 7467 * @phba: pointer to lpfc hba data structure. 
7468 * 7469 * This routine is invoked to create HBA physical port and associate a SCSI 7470 * host with it. 7471 * 7472 * Return codes 7473 * 0 - successful 7474 * other values - error 7475 **/ 7476 static int 7477 lpfc_create_shost(struct lpfc_hba *phba) 7478 { 7479 struct lpfc_vport *vport; 7480 struct Scsi_Host *shost; 7481 7482 /* Initialize HBA FC structure */ 7483 phba->fc_edtov = FF_DEF_EDTOV; 7484 phba->fc_ratov = FF_DEF_RATOV; 7485 phba->fc_altov = FF_DEF_ALTOV; 7486 phba->fc_arbtov = FF_DEF_ARBTOV; 7487 7488 atomic_set(&phba->sdev_cnt, 0); 7489 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 7490 if (!vport) 7491 return -ENODEV; 7492 7493 shost = lpfc_shost_from_vport(vport); 7494 phba->pport = vport; 7495 7496 if (phba->nvmet_support) { 7497 /* Only 1 vport (pport) will support NVME target */ 7498 if (phba->txrdy_payload_pool == NULL) { 7499 phba->txrdy_payload_pool = dma_pool_create( 7500 "txrdy_pool", &phba->pcidev->dev, 7501 TXRDY_PAYLOAD_LEN, 16, 0); 7502 if (phba->txrdy_payload_pool) { 7503 phba->targetport = NULL; 7504 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 7505 lpfc_printf_log(phba, KERN_INFO, 7506 LOG_INIT | LOG_NVME_DISC, 7507 "6076 NVME Target Found\n"); 7508 } 7509 } 7510 } 7511 7512 lpfc_debugfs_initialize(vport); 7513 /* Put reference to SCSI host to driver's device private data */ 7514 pci_set_drvdata(phba->pcidev, shost); 7515 7516 /* 7517 * At this point we are fully registered with PSA. In addition, 7518 * any initial discovery should be completed. 7519 */ 7520 vport->load_flag |= FC_ALLOW_FDMI; 7521 if (phba->cfg_enable_SmartSAN || 7522 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 7523 7524 /* Setup appropriate attribute masks */ 7525 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 7526 if (phba->cfg_enable_SmartSAN) 7527 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 7528 else 7529 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 7530 } 7531 return 0; 7532 } 7533 7534 /** 7535 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 7536 * @phba: pointer to lpfc hba data structure. 7537 * 7538 * This routine is invoked to destroy HBA physical port and the associated 7539 * SCSI host. 7540 **/ 7541 static void 7542 lpfc_destroy_shost(struct lpfc_hba *phba) 7543 { 7544 struct lpfc_vport *vport = phba->pport; 7545 7546 /* Destroy physical port that associated with the SCSI host */ 7547 destroy_port(vport); 7548 7549 return; 7550 } 7551 7552 /** 7553 * lpfc_setup_bg - Setup Block guard structures and debug areas. 7554 * @phba: pointer to lpfc hba data structure. 7555 * @shost: the shost to be used to detect Block guard settings. 7556 * 7557 * This routine sets up the local Block guard protocol settings for @shost. 7558 * This routine also allocates memory for debugging bg buffers. 
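 * Only DIF Type 1 and DIX Type 0/1 protection with IP-checksum or CRC guards
 * are passed through to the SCSI midlayer; any other bits configured in
 * cfg_prot_mask and cfg_prot_guard are masked off before scsi_host_set_prot()
 * and scsi_host_set_guard() are called.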
7559 **/ 7560 static void 7561 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 7562 { 7563 uint32_t old_mask; 7564 uint32_t old_guard; 7565 7566 int pagecnt = 10; 7567 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7568 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7569 "1478 Registering BlockGuard with the " 7570 "SCSI layer\n"); 7571 7572 old_mask = phba->cfg_prot_mask; 7573 old_guard = phba->cfg_prot_guard; 7574 7575 /* Only allow supported values */ 7576 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 7577 SHOST_DIX_TYPE0_PROTECTION | 7578 SHOST_DIX_TYPE1_PROTECTION); 7579 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 7580 SHOST_DIX_GUARD_CRC); 7581 7582 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 7583 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 7584 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 7585 7586 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7587 if ((old_mask != phba->cfg_prot_mask) || 7588 (old_guard != phba->cfg_prot_guard)) 7589 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7590 "1475 Registering BlockGuard with the " 7591 "SCSI layer: mask %d guard %d\n", 7592 phba->cfg_prot_mask, 7593 phba->cfg_prot_guard); 7594 7595 scsi_host_set_prot(shost, phba->cfg_prot_mask); 7596 scsi_host_set_guard(shost, phba->cfg_prot_guard); 7597 } else 7598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7599 "1479 Not Registering BlockGuard with the SCSI " 7600 "layer, Bad protection parameters: %d %d\n", 7601 old_mask, old_guard); 7602 } 7603 7604 if (!_dump_buf_data) { 7605 while (pagecnt) { 7606 spin_lock_init(&_dump_buf_lock); 7607 _dump_buf_data = 7608 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7609 if (_dump_buf_data) { 7610 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7611 "9043 BLKGRD: allocated %d pages for " 7612 "_dump_buf_data at 0x%p\n", 7613 (1 << pagecnt), _dump_buf_data); 7614 _dump_buf_data_order = pagecnt; 7615 memset(_dump_buf_data, 0, 7616 ((1 << PAGE_SHIFT) << pagecnt)); 7617 break; 7618 } else 7619 --pagecnt; 7620 } 7621 if (!_dump_buf_data_order) 7622 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7623 "9044 BLKGRD: ERROR unable to allocate " 7624 "memory for hexdump\n"); 7625 } else 7626 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7627 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 7628 "\n", _dump_buf_data); 7629 if (!_dump_buf_dif) { 7630 while (pagecnt) { 7631 _dump_buf_dif = 7632 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 7633 if (_dump_buf_dif) { 7634 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7635 "9046 BLKGRD: allocated %d pages for " 7636 "_dump_buf_dif at 0x%p\n", 7637 (1 << pagecnt), _dump_buf_dif); 7638 _dump_buf_dif_order = pagecnt; 7639 memset(_dump_buf_dif, 0, 7640 ((1 << PAGE_SHIFT) << pagecnt)); 7641 break; 7642 } else 7643 --pagecnt; 7644 } 7645 if (!_dump_buf_dif_order) 7646 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7647 "9047 BLKGRD: ERROR unable to allocate " 7648 "memory for hexdump\n"); 7649 } else 7650 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 7651 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 7652 _dump_buf_dif); 7653 } 7654 7655 /** 7656 * lpfc_post_init_setup - Perform necessary device post initialization setup. 7657 * @phba: pointer to lpfc hba data structure. 7658 * 7659 * This routine is invoked to perform all the necessary post initialization 7660 * setup for the device. 
7661 **/ 7662 static void 7663 lpfc_post_init_setup(struct lpfc_hba *phba) 7664 { 7665 struct Scsi_Host *shost; 7666 struct lpfc_adapter_event_header adapter_event; 7667 7668 /* Get the default values for Model Name and Description */ 7669 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 7670 7671 /* 7672 * hba setup may have changed the hba_queue_depth so we need to 7673 * adjust the value of can_queue. 7674 */ 7675 shost = pci_get_drvdata(phba->pcidev); 7676 shost->can_queue = phba->cfg_hba_queue_depth - 10; 7677 7678 lpfc_host_attrib_init(shost); 7679 7680 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7681 spin_lock_irq(shost->host_lock); 7682 lpfc_poll_start_timer(phba); 7683 spin_unlock_irq(shost->host_lock); 7684 } 7685 7686 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7687 "0428 Perform SCSI scan\n"); 7688 /* Send board arrival event to upper layer */ 7689 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 7690 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 7691 fc_host_post_vendor_event(shost, fc_get_event_number(), 7692 sizeof(adapter_event), 7693 (char *) &adapter_event, 7694 LPFC_NL_VENDOR_ID); 7695 return; 7696 } 7697 7698 /** 7699 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 7700 * @phba: pointer to lpfc hba data structure. 7701 * 7702 * This routine is invoked to set up the PCI device memory space for device 7703 * with SLI-3 interface spec. 7704 * 7705 * Return codes 7706 * 0 - successful 7707 * other values - error 7708 **/ 7709 static int 7710 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 7711 { 7712 struct pci_dev *pdev = phba->pcidev; 7713 unsigned long bar0map_len, bar2map_len; 7714 int i, hbq_count; 7715 void *ptr; 7716 int error; 7717 7718 if (!pdev) 7719 return -ENODEV; 7720 7721 /* Set the device DMA mask size */ 7722 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7723 if (error) 7724 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7725 if (error) 7726 return error; 7727 error = -ENODEV; 7728 7729 /* Get the bus address of Bar0 and Bar2 and the number of bytes 7730 * required by each mapping. 7731 */ 7732 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7733 bar0map_len = pci_resource_len(pdev, 0); 7734 7735 phba->pci_bar2_map = pci_resource_start(pdev, 2); 7736 bar2map_len = pci_resource_len(pdev, 2); 7737 7738 /* Map HBA SLIM to a kernel virtual address. */ 7739 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 7740 if (!phba->slim_memmap_p) { 7741 dev_printk(KERN_ERR, &pdev->dev, 7742 "ioremap failed for SLIM memory.\n"); 7743 goto out; 7744 } 7745 7746 /* Map HBA Control Registers to a kernel virtual address. 
*/ 7747 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 7748 if (!phba->ctrl_regs_memmap_p) { 7749 dev_printk(KERN_ERR, &pdev->dev, 7750 "ioremap failed for HBA control registers.\n"); 7751 goto out_iounmap_slim; 7752 } 7753 7754 /* Allocate memory for SLI-2 structures */ 7755 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7756 &phba->slim2p.phys, GFP_KERNEL); 7757 if (!phba->slim2p.virt) 7758 goto out_iounmap; 7759 7760 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 7761 phba->mbox_ext = (phba->slim2p.virt + 7762 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 7763 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 7764 phba->IOCBs = (phba->slim2p.virt + 7765 offsetof(struct lpfc_sli2_slim, IOCBs)); 7766 7767 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 7768 lpfc_sli_hbq_size(), 7769 &phba->hbqslimp.phys, 7770 GFP_KERNEL); 7771 if (!phba->hbqslimp.virt) 7772 goto out_free_slim; 7773 7774 hbq_count = lpfc_sli_hbq_count(); 7775 ptr = phba->hbqslimp.virt; 7776 for (i = 0; i < hbq_count; ++i) { 7777 phba->hbqs[i].hbq_virt = ptr; 7778 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 7779 ptr += (lpfc_hbq_defs[i]->entry_count * 7780 sizeof(struct lpfc_hbq_entry)); 7781 } 7782 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 7783 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 7784 7785 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 7786 7787 phba->MBslimaddr = phba->slim_memmap_p; 7788 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 7789 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 7790 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 7791 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 7792 7793 return 0; 7794 7795 out_free_slim: 7796 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7797 phba->slim2p.virt, phba->slim2p.phys); 7798 out_iounmap: 7799 iounmap(phba->ctrl_regs_memmap_p); 7800 out_iounmap_slim: 7801 iounmap(phba->slim_memmap_p); 7802 out: 7803 return error; 7804 } 7805 7806 /** 7807 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 7808 * @phba: pointer to lpfc hba data structure. 7809 * 7810 * This routine is invoked to unset the PCI device memory space for device 7811 * with SLI-3 interface spec. 7812 **/ 7813 static void 7814 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 7815 { 7816 struct pci_dev *pdev; 7817 7818 /* Obtain PCI device reference */ 7819 if (!phba->pcidev) 7820 return; 7821 else 7822 pdev = phba->pcidev; 7823 7824 /* Free coherent DMA memory allocated */ 7825 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7826 phba->hbqslimp.virt, phba->hbqslimp.phys); 7827 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7828 phba->slim2p.virt, phba->slim2p.phys); 7829 7830 /* I/O memory unmap */ 7831 iounmap(phba->ctrl_regs_memmap_p); 7832 iounmap(phba->slim_memmap_p); 7833 7834 return; 7835 } 7836 7837 /** 7838 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 7839 * @phba: pointer to lpfc hba data structure. 7840 * 7841 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 7842 * done and check status. 7843 * 7844 * Return 0 if successful, otherwise -ENODEV. 
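 * The port semaphore register is polled for up to 30 seconds (3000 reads,
 * 10 ms apart) waiting for LPFC_POST_STAGE_PORT_READY, after which if_type
 * specific checks for unrecoverable errors decide the final return value.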
7845 **/ 7846 int 7847 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 7848 { 7849 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 7850 struct lpfc_register reg_data; 7851 int i, port_error = 0; 7852 uint32_t if_type; 7853 7854 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 7855 memset(&reg_data, 0, sizeof(reg_data)); 7856 if (!phba->sli4_hba.PSMPHRregaddr) 7857 return -ENODEV; 7858
7859 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 7860 for (i = 0; i < 3000; i++) { 7861 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 7862 &portsmphr_reg.word0) || 7863 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 7864 /* Port has a fatal POST error, break out */ 7865 port_error = -ENODEV; 7866 break; 7867 } 7868 if (LPFC_POST_STAGE_PORT_READY == 7869 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 7870 break; 7871 msleep(10); 7872 } 7873
7874 /* 7875 * If there was a port error during POST, then don't proceed with 7876 * other register reads as the data may not be valid. Just exit. 7877 */ 7878 if (port_error) { 7879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7880 "1408 Port Failed POST - portsmphr=0x%x, " 7881 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 7882 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 7883 portsmphr_reg.word0, 7884 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 7885 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 7886 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 7887 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 7888 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 7889 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 7890 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 7891 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 7892 } else { 7893 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7894 "2534 Device Info: SLIFamily=0x%x, " 7895 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 7896 "SLIHint_2=0x%x, FT=0x%x\n", 7897 bf_get(lpfc_sli_intf_sli_family, 7898 &phba->sli4_hba.sli_intf), 7899 bf_get(lpfc_sli_intf_slirev, 7900 &phba->sli4_hba.sli_intf), 7901 bf_get(lpfc_sli_intf_if_type, 7902 &phba->sli4_hba.sli_intf), 7903 bf_get(lpfc_sli_intf_sli_hint1, 7904 &phba->sli4_hba.sli_intf), 7905 bf_get(lpfc_sli_intf_sli_hint2, 7906 &phba->sli4_hba.sli_intf), 7907 bf_get(lpfc_sli_intf_func_type, 7908 &phba->sli4_hba.sli_intf)); 7909 /* 7910 * Check for other Port errors during the initialization 7911 * process. Fail the load if the port did not come up 7912 * correctly. 7913 */ 7914 if_type = bf_get(lpfc_sli_intf_if_type, 7915 &phba->sli4_hba.sli_intf); 7916 switch (if_type) { 7917 case LPFC_SLI_INTF_IF_TYPE_0: 7918 phba->sli4_hba.ue_mask_lo = 7919 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 7920 phba->sli4_hba.ue_mask_hi = 7921 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 7922 uerrlo_reg.word0 = 7923 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 7924 uerrhi_reg.word0 = 7925 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 7926 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 7927 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 7928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7929 "1422 Unrecoverable Error " 7930 "Detected during POST " 7931 "uerr_lo_reg=0x%x, " 7932 "uerr_hi_reg=0x%x, " 7933 "ue_mask_lo_reg=0x%x, " 7934 "ue_mask_hi_reg=0x%x\n", 7935 uerrlo_reg.word0, 7936 uerrhi_reg.word0, 7937 phba->sli4_hba.ue_mask_lo, 7938 phba->sli4_hba.ue_mask_hi); 7939 port_error = -ENODEV; 7940 } 7941 break; 7942 case LPFC_SLI_INTF_IF_TYPE_2: 7943 case LPFC_SLI_INTF_IF_TYPE_6: 7944 /* Final checks. The port status should be clean.
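 * An error indication without the restart-needed (rn) bit is treated as
 * fatal; ERR1/ERR2 are captured into work_status[] for the log message.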
*/ 7945 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 7946 &reg_data.word0) || 7947 (bf_get(lpfc_sliport_status_err, &reg_data) && 7948 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 7949 phba->work_status[0] = 7950 readl(phba->sli4_hba.u.if_type2. 7951 ERR1regaddr); 7952 phba->work_status[1] = 7953 readl(phba->sli4_hba.u.if_type2. 7954 ERR2regaddr); 7955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7956 "2888 Unrecoverable port error " 7957 "following POST: port status reg " 7958 "0x%x, port_smphr reg 0x%x, " 7959 "error 1=0x%x, error 2=0x%x\n", 7960 reg_data.word0, 7961 portsmphr_reg.word0, 7962 phba->work_status[0], 7963 phba->work_status[1]); 7964 port_error = -ENODEV; 7965 } 7966 break; 7967 case LPFC_SLI_INTF_IF_TYPE_1: 7968 default: 7969 break; 7970 } 7971 } 7972 return port_error; 7973 } 7974
7975 /** 7976 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 7977 * @phba: pointer to lpfc hba data structure. 7978 * @if_type: The SLI4 interface type getting configured. 7979 * 7980 * This routine is invoked to set up SLI4 BAR0 PCI config space register 7981 * memory map. 7982 **/ 7983 static void 7984 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 7985 { 7986 switch (if_type) { 7987 case LPFC_SLI_INTF_IF_TYPE_0: 7988 phba->sli4_hba.u.if_type0.UERRLOregaddr = 7989 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 7990 phba->sli4_hba.u.if_type0.UERRHIregaddr = 7991 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 7992 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 7993 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 7994 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 7995 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 7996 phba->sli4_hba.SLIINTFregaddr = 7997 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 7998 break; 7999 case LPFC_SLI_INTF_IF_TYPE_2: 8000 phba->sli4_hba.u.if_type2.EQDregaddr = 8001 phba->sli4_hba.conf_regs_memmap_p + 8002 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8003 phba->sli4_hba.u.if_type2.ERR1regaddr = 8004 phba->sli4_hba.conf_regs_memmap_p + 8005 LPFC_CTL_PORT_ER1_OFFSET; 8006 phba->sli4_hba.u.if_type2.ERR2regaddr = 8007 phba->sli4_hba.conf_regs_memmap_p + 8008 LPFC_CTL_PORT_ER2_OFFSET; 8009 phba->sli4_hba.u.if_type2.CTRLregaddr = 8010 phba->sli4_hba.conf_regs_memmap_p + 8011 LPFC_CTL_PORT_CTL_OFFSET; 8012 phba->sli4_hba.u.if_type2.STATUSregaddr = 8013 phba->sli4_hba.conf_regs_memmap_p + 8014 LPFC_CTL_PORT_STA_OFFSET; 8015 phba->sli4_hba.SLIINTFregaddr = 8016 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 8017 phba->sli4_hba.PSMPHRregaddr = 8018 phba->sli4_hba.conf_regs_memmap_p + 8019 LPFC_CTL_PORT_SEM_OFFSET; 8020 phba->sli4_hba.RQDBregaddr = 8021 phba->sli4_hba.conf_regs_memmap_p + 8022 LPFC_ULP0_RQ_DOORBELL; 8023 phba->sli4_hba.WQDBregaddr = 8024 phba->sli4_hba.conf_regs_memmap_p + 8025 LPFC_ULP0_WQ_DOORBELL; 8026 phba->sli4_hba.CQDBregaddr = 8027 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 8028 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8029 phba->sli4_hba.MQDBregaddr = 8030 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 8031 phba->sli4_hba.BMBXregaddr = 8032 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8033 break; 8034 case LPFC_SLI_INTF_IF_TYPE_6: 8035 phba->sli4_hba.u.if_type2.EQDregaddr = 8036 phba->sli4_hba.conf_regs_memmap_p + 8037 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 8038 phba->sli4_hba.u.if_type2.ERR1regaddr = 8039 phba->sli4_hba.conf_regs_memmap_p + 8040 LPFC_CTL_PORT_ER1_OFFSET; 8041 phba->sli4_hba.u.if_type2.ERR2regaddr = 8042 phba->sli4_hba.conf_regs_memmap_p
+ 8043 LPFC_CTL_PORT_ER2_OFFSET; 8044 phba->sli4_hba.u.if_type2.CTRLregaddr = 8045 phba->sli4_hba.conf_regs_memmap_p + 8046 LPFC_CTL_PORT_CTL_OFFSET; 8047 phba->sli4_hba.u.if_type2.STATUSregaddr = 8048 phba->sli4_hba.conf_regs_memmap_p + 8049 LPFC_CTL_PORT_STA_OFFSET; 8050 phba->sli4_hba.PSMPHRregaddr = 8051 phba->sli4_hba.conf_regs_memmap_p + 8052 LPFC_CTL_PORT_SEM_OFFSET; 8053 phba->sli4_hba.BMBXregaddr = 8054 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 8055 break; 8056 case LPFC_SLI_INTF_IF_TYPE_1: 8057 default: 8058 dev_printk(KERN_ERR, &phba->pcidev->dev, 8059 "FATAL - unsupported SLI4 interface type - %d\n", 8060 if_type); 8061 break; 8062 } 8063 } 8064
8065 /** 8066 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 8067 * @phba: pointer to lpfc hba data structure. * @if_type: The SLI4 interface type getting configured. 8068 * 8069 * This routine is invoked to set up SLI4 BAR1 register memory map. 8070 **/ 8071 static void 8072 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 8073 { 8074 switch (if_type) { 8075 case LPFC_SLI_INTF_IF_TYPE_0: 8076 phba->sli4_hba.PSMPHRregaddr = 8077 phba->sli4_hba.ctrl_regs_memmap_p + 8078 LPFC_SLIPORT_IF0_SMPHR; 8079 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8080 LPFC_HST_ISR0; 8081 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8082 LPFC_HST_IMR0; 8083 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 8084 LPFC_HST_ISCR0; 8085 break; 8086 case LPFC_SLI_INTF_IF_TYPE_6: 8087 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8088 LPFC_IF6_RQ_DOORBELL; 8089 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8090 LPFC_IF6_WQ_DOORBELL; 8091 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8092 LPFC_IF6_CQ_DOORBELL; 8093 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8094 LPFC_IF6_EQ_DOORBELL; 8095 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p + 8096 LPFC_IF6_MQ_DOORBELL; 8097 break; 8098 case LPFC_SLI_INTF_IF_TYPE_2: 8099 case LPFC_SLI_INTF_IF_TYPE_1: 8100 default: 8101 dev_err(&phba->pcidev->dev, 8102 "FATAL - unsupported SLI4 interface type - %d\n", 8103 if_type); 8104 break; 8105 } 8106 } 8107
8108 /** 8109 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 8110 * @phba: pointer to lpfc hba data structure. 8111 * @vf: virtual function number 8112 * 8113 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 8114 * based on the given virtual function number, @vf. 8115 * 8116 * Return 0 if successful, otherwise -ENODEV.
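 * Note: each virtual function owns an LPFC_VFR_PAGE_SIZE window within the
 * doorbell BAR, so every address below follows the pattern
 *   drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + <per-queue doorbell offset>.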
8117 **/ 8118 static int 8119 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 8120 { 8121 if (vf > LPFC_VIR_FUNC_MAX) 8122 return -ENODEV; 8123
8124 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8125 vf * LPFC_VFR_PAGE_SIZE + 8126 LPFC_ULP0_RQ_DOORBELL); 8127 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8128 vf * LPFC_VFR_PAGE_SIZE + 8129 LPFC_ULP0_WQ_DOORBELL); 8130 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8131 vf * LPFC_VFR_PAGE_SIZE + 8132 LPFC_EQCQ_DOORBELL); 8133 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 8134 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8135 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 8136 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 8137 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 8138 return 0; 8139 } 8140
8141 /** 8142 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 8143 * @phba: pointer to lpfc hba data structure. 8144 * 8145 * This routine is invoked to create the bootstrap mailbox 8146 * region consistent with the SLI-4 interface spec. This 8147 * routine allocates all memory necessary to communicate 8148 * mailbox commands to the port and sets up all alignment 8149 * needs. No locks are expected to be held when calling 8150 * this routine. 8151 * 8152 * Return codes 8153 * 0 - successful 8154 * -ENOMEM - could not allocate memory. 8155 **/ 8156 static int 8157 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 8158 { 8159 uint32_t bmbx_size; 8160 struct lpfc_dmabuf *dmabuf; 8161 struct dma_address *dma_address; 8162 uint32_t pa_addr; 8163 uint64_t phys_addr; 8164
8165 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8166 if (!dmabuf) 8167 return -ENOMEM; 8168
8169 /* 8170 * The bootstrap mailbox region is comprised of 2 parts 8171 * plus an alignment restriction of 16 bytes. 8172 */ 8173 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 8174 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 8175 &dmabuf->phys, GFP_KERNEL); 8176 if (!dmabuf->virt) { 8177 kfree(dmabuf); 8178 return -ENOMEM; 8179 } 8180
8181 /* 8182 * Initialize the bootstrap mailbox pointers now so that the register 8183 * operations are simple later. The mailbox dma address is required 8184 * to be 16-byte aligned. Also align the virtual memory as each 8185 * mailbox is copied into the bmbx mailbox region before issuing the 8186 * command to the port. 8187 */ 8188 phba->sli4_hba.bmbx.dmabuf = dmabuf; 8189 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 8190
8191 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 8192 LPFC_ALIGN_16_BYTE); 8193 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 8194 LPFC_ALIGN_16_BYTE); 8195
8196 /* 8197 * Set the high and low physical addresses now. The SLI4 alignment 8198 * requirement is 16 bytes and the mailbox is posted to the port 8199 * as two 30-bit addresses. The other data is a bit marking whether 8200 * the 30-bit address is the high or low address. 8201 * Upcast bmbx aphys to 64bits so shift instruction compiles 8202 * clean on 32 bit machines.
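 * The resulting words mirror the two statements below: addr_hi carries
 * physical address bits 63:34 in its upper 30 bits and addr_lo carries bits
 * 33:4, with the low-order bits of each word holding the
 * LPFC_BMBX_BIT1_ADDR_HI/LO marker.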
8203 */ 8204 dma_address = &phba->sli4_hba.bmbx.dma_address; 8205 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 8206 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 8207 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 8208 LPFC_BMBX_BIT1_ADDR_HI); 8209
8210 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 8211 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 8212 LPFC_BMBX_BIT1_ADDR_LO); 8213 return 0; 8214 } 8215
8216 /** 8217 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 8218 * @phba: pointer to lpfc hba data structure. 8219 * 8220 * This routine is invoked to teardown the bootstrap mailbox 8221 * region and release all host resources. This routine requires 8222 * the caller to ensure all mailbox commands have been recovered, no 8223 * additional mailbox commands are sent, and interrupts are disabled 8224 * before calling this routine. 8225 * 8226 **/ 8227 static void 8228 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 8229 { 8230 dma_free_coherent(&phba->pcidev->dev, 8231 phba->sli4_hba.bmbx.bmbx_size, 8232 phba->sli4_hba.bmbx.dmabuf->virt, 8233 phba->sli4_hba.bmbx.dmabuf->phys); 8234
8235 kfree(phba->sli4_hba.bmbx.dmabuf); 8236 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 8237 } 8238
8239 /** 8240 * lpfc_sli4_read_config - Get the config parameters. 8241 * @phba: pointer to lpfc hba data structure. 8242 * 8243 * This routine is invoked to read the configuration parameters from the HBA. 8244 * The configuration parameters are used to set the base and maximum values 8245 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource 8246 * allocation for the port. 8247 * 8248 * Return codes 8249 * 0 - successful 8250 * -ENOMEM - No available memory 8251 * -EIO - The mailbox failed to complete successfully.
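 * Note: besides capturing the raw resource counts, this routine also clamps
 * cfg_irq_chann, cfg_hdw_queue and cfg_hba_queue_depth to what the reported
 * EQ/CQ/WQ and XRI limits can support, and latches any forced link speed.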
8252 **/ 8253 int 8254 lpfc_sli4_read_config(struct lpfc_hba *phba) 8255 { 8256 LPFC_MBOXQ_t *pmb; 8257 struct lpfc_mbx_read_config *rd_config; 8258 union lpfc_sli4_cfg_shdr *shdr; 8259 uint32_t shdr_status, shdr_add_status; 8260 struct lpfc_mbx_get_func_cfg *get_func_cfg; 8261 struct lpfc_rsrc_desc_fcfcoe *desc; 8262 char *pdesc_0; 8263 uint16_t forced_link_speed; 8264 uint32_t if_type, qmin; 8265 int length, i, rc = 0, rc2; 8266 8267 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8268 if (!pmb) { 8269 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8270 "2011 Unable to allocate memory for issuing " 8271 "SLI_CONFIG_SPECIAL mailbox command\n"); 8272 return -ENOMEM; 8273 } 8274 8275 lpfc_read_config(phba, pmb); 8276 8277 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8278 if (rc != MBX_SUCCESS) { 8279 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8280 "2012 Mailbox failed , mbxCmd x%x " 8281 "READ_CONFIG, mbxStatus x%x\n", 8282 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8283 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8284 rc = -EIO; 8285 } else { 8286 rd_config = &pmb->u.mqe.un.rd_config; 8287 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 8288 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 8289 phba->sli4_hba.lnk_info.lnk_tp = 8290 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 8291 phba->sli4_hba.lnk_info.lnk_no = 8292 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 8293 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8294 "3081 lnk_type:%d, lnk_numb:%d\n", 8295 phba->sli4_hba.lnk_info.lnk_tp, 8296 phba->sli4_hba.lnk_info.lnk_no); 8297 } else 8298 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8299 "3082 Mailbox (x%x) returned ldv:x0\n", 8300 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 8301 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 8302 phba->bbcredit_support = 1; 8303 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 8304 } 8305 8306 phba->sli4_hba.conf_trunk = 8307 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 8308 phba->sli4_hba.extents_in_use = 8309 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8310 phba->sli4_hba.max_cfg_param.max_xri = 8311 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8312 phba->sli4_hba.max_cfg_param.xri_base = 8313 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8314 phba->sli4_hba.max_cfg_param.max_vpi = 8315 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 8316 /* Limit the max we support */ 8317 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 8318 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 8319 phba->sli4_hba.max_cfg_param.vpi_base = 8320 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 8321 phba->sli4_hba.max_cfg_param.max_rpi = 8322 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 8323 phba->sli4_hba.max_cfg_param.rpi_base = 8324 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 8325 phba->sli4_hba.max_cfg_param.max_vfi = 8326 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 8327 phba->sli4_hba.max_cfg_param.vfi_base = 8328 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 8329 phba->sli4_hba.max_cfg_param.max_fcfi = 8330 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 8331 phba->sli4_hba.max_cfg_param.max_eq = 8332 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 8333 phba->sli4_hba.max_cfg_param.max_rq = 8334 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 8335 phba->sli4_hba.max_cfg_param.max_wq = 8336 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 8337 phba->sli4_hba.max_cfg_param.max_cq = 8338 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 8339 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 8340 phba->sli4_hba.next_xri = 
phba->sli4_hba.max_cfg_param.xri_base; 8341 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 8342 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 8343 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 8344 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 8345 phba->max_vports = phba->max_vpi; 8346 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8347 "2003 cfg params Extents? %d " 8348 "XRI(B:%d M:%d), " 8349 "VPI(B:%d M:%d) " 8350 "VFI(B:%d M:%d) " 8351 "RPI(B:%d M:%d) " 8352 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", 8353 phba->sli4_hba.extents_in_use, 8354 phba->sli4_hba.max_cfg_param.xri_base, 8355 phba->sli4_hba.max_cfg_param.max_xri, 8356 phba->sli4_hba.max_cfg_param.vpi_base, 8357 phba->sli4_hba.max_cfg_param.max_vpi, 8358 phba->sli4_hba.max_cfg_param.vfi_base, 8359 phba->sli4_hba.max_cfg_param.max_vfi, 8360 phba->sli4_hba.max_cfg_param.rpi_base, 8361 phba->sli4_hba.max_cfg_param.max_rpi, 8362 phba->sli4_hba.max_cfg_param.max_fcfi, 8363 phba->sli4_hba.max_cfg_param.max_eq, 8364 phba->sli4_hba.max_cfg_param.max_cq, 8365 phba->sli4_hba.max_cfg_param.max_wq, 8366 phba->sli4_hba.max_cfg_param.max_rq); 8367 8368 /* 8369 * Calculate queue resources based on how 8370 * many WQ/CQ/EQs are available. 8371 */ 8372 qmin = phba->sli4_hba.max_cfg_param.max_wq; 8373 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 8374 qmin = phba->sli4_hba.max_cfg_param.max_cq; 8375 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 8376 qmin = phba->sli4_hba.max_cfg_param.max_eq; 8377 /* 8378 * Whats left after this can go toward NVME / FCP. 8379 * The minus 4 accounts for ELS, NVME LS, MBOX 8380 * plus one extra. When configured for 8381 * NVMET, FCP io channel WQs are not created. 8382 */ 8383 qmin -= 4; 8384 8385 /* If NVME is configured, double the number of CQ/WQs needed */ 8386 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 8387 !phba->nvmet_support) 8388 qmin /= 2; 8389 8390 /* Check to see if there is enough for NVME */ 8391 if ((phba->cfg_irq_chann > qmin) || 8392 (phba->cfg_hdw_queue > qmin)) { 8393 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8394 "2005 Reducing Queues: " 8395 "WQ %d CQ %d EQ %d: min %d: " 8396 "IRQ %d HDWQ %d\n", 8397 phba->sli4_hba.max_cfg_param.max_wq, 8398 phba->sli4_hba.max_cfg_param.max_cq, 8399 phba->sli4_hba.max_cfg_param.max_eq, 8400 qmin, phba->cfg_irq_chann, 8401 phba->cfg_hdw_queue); 8402 8403 if (phba->cfg_irq_chann > qmin) 8404 phba->cfg_irq_chann = qmin; 8405 if (phba->cfg_hdw_queue > qmin) 8406 phba->cfg_hdw_queue = qmin; 8407 } 8408 } 8409 8410 if (rc) 8411 goto read_cfg_out; 8412 8413 /* Update link speed if forced link speed is supported */ 8414 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8415 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 8416 forced_link_speed = 8417 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 8418 if (forced_link_speed) { 8419 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 8420 8421 switch (forced_link_speed) { 8422 case LINK_SPEED_1G: 8423 phba->cfg_link_speed = 8424 LPFC_USER_LINK_SPEED_1G; 8425 break; 8426 case LINK_SPEED_2G: 8427 phba->cfg_link_speed = 8428 LPFC_USER_LINK_SPEED_2G; 8429 break; 8430 case LINK_SPEED_4G: 8431 phba->cfg_link_speed = 8432 LPFC_USER_LINK_SPEED_4G; 8433 break; 8434 case LINK_SPEED_8G: 8435 phba->cfg_link_speed = 8436 LPFC_USER_LINK_SPEED_8G; 8437 break; 8438 case LINK_SPEED_10G: 8439 phba->cfg_link_speed = 8440 LPFC_USER_LINK_SPEED_10G; 8441 break; 8442 case LINK_SPEED_16G: 8443 phba->cfg_link_speed = 8444 LPFC_USER_LINK_SPEED_16G; 8445 break; 8446 case LINK_SPEED_32G: 8447 
phba->cfg_link_speed = 8448 LPFC_USER_LINK_SPEED_32G; 8449 break; 8450 case LINK_SPEED_64G: 8451 phba->cfg_link_speed = 8452 LPFC_USER_LINK_SPEED_64G; 8453 break; 8454 case 0xffff: 8455 phba->cfg_link_speed = 8456 LPFC_USER_LINK_SPEED_AUTO; 8457 break; 8458 default: 8459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8460 "0047 Unrecognized link " 8461 "speed : %d\n", 8462 forced_link_speed); 8463 phba->cfg_link_speed = 8464 LPFC_USER_LINK_SPEED_AUTO; 8465 } 8466 } 8467 } 8468 8469 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 8470 length = phba->sli4_hba.max_cfg_param.max_xri - 8471 lpfc_sli4_get_els_iocb_cnt(phba); 8472 if (phba->cfg_hba_queue_depth > length) { 8473 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8474 "3361 HBA queue depth changed from %d to %d\n", 8475 phba->cfg_hba_queue_depth, length); 8476 phba->cfg_hba_queue_depth = length; 8477 } 8478 8479 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 8480 LPFC_SLI_INTF_IF_TYPE_2) 8481 goto read_cfg_out; 8482 8483 /* get the pf# and vf# for SLI4 if_type 2 port */ 8484 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 8485 sizeof(struct lpfc_sli4_cfg_mhdr)); 8486 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 8487 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 8488 length, LPFC_SLI4_MBX_EMBED); 8489 8490 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8491 shdr = (union lpfc_sli4_cfg_shdr *) 8492 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 8493 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8494 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 8495 if (rc2 || shdr_status || shdr_add_status) { 8496 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8497 "3026 Mailbox failed , mbxCmd x%x " 8498 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 8499 bf_get(lpfc_mqe_command, &pmb->u.mqe), 8500 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 8501 goto read_cfg_out; 8502 } 8503 8504 /* search for fc_fcoe resrouce descriptor */ 8505 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 8506 8507 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 8508 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 8509 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 8510 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 8511 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 8512 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 8513 goto read_cfg_out; 8514 8515 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 8516 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 8517 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 8518 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 8519 phba->sli4_hba.iov.pf_number = 8520 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 8521 phba->sli4_hba.iov.vf_number = 8522 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 8523 break; 8524 } 8525 } 8526 8527 if (i < LPFC_RSRC_DESC_MAX_NUM) 8528 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8529 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 8530 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 8531 phba->sli4_hba.iov.vf_number); 8532 else 8533 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8534 "3028 GET_FUNCTION_CONFIG: failed to find " 8535 "Resource Descriptor:x%x\n", 8536 LPFC_RSRC_DESC_TYPE_FCFCOE); 8537 8538 read_cfg_out: 8539 mempool_free(pmb, phba->mbox_mem_pool); 8540 return rc; 8541 } 8542 8543 /** 8544 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 8545 * @phba: pointer to lpfc hba data structure. 8546 * 8547 * This routine is invoked to setup the port-side endian order when 8548 * the port if_type is 0. This routine has no function for other 8549 * if_types. 
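 * The first two words of a SLI_CONFIG_SPECIAL mailbox are loaded with the
 * HOST_ENDIAN_LOW_WORD0/HOST_ENDIAN_HIGH_WORD1 signature values and issued in
 * polled mode so the port can infer the host byte order.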
8550 * 8551 * Return codes 8552 * 0 - successful 8553 * -ENOMEM - No available memory 8554 * -EIO - The mailbox failed to complete successfully. 8555 **/ 8556 static int 8557 lpfc_setup_endian_order(struct lpfc_hba *phba) 8558 { 8559 LPFC_MBOXQ_t *mboxq; 8560 uint32_t if_type, rc = 0; 8561 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 8562 HOST_ENDIAN_HIGH_WORD1}; 8563 8564 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8565 switch (if_type) { 8566 case LPFC_SLI_INTF_IF_TYPE_0: 8567 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8568 GFP_KERNEL); 8569 if (!mboxq) { 8570 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8571 "0492 Unable to allocate memory for " 8572 "issuing SLI_CONFIG_SPECIAL mailbox " 8573 "command\n"); 8574 return -ENOMEM; 8575 } 8576 8577 /* 8578 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 8579 * two words to contain special data values and no other data. 8580 */ 8581 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 8582 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 8583 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8584 if (rc != MBX_SUCCESS) { 8585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8586 "0493 SLI_CONFIG_SPECIAL mailbox " 8587 "failed with status x%x\n", 8588 rc); 8589 rc = -EIO; 8590 } 8591 mempool_free(mboxq, phba->mbox_mem_pool); 8592 break; 8593 case LPFC_SLI_INTF_IF_TYPE_6: 8594 case LPFC_SLI_INTF_IF_TYPE_2: 8595 case LPFC_SLI_INTF_IF_TYPE_1: 8596 default: 8597 break; 8598 } 8599 return rc; 8600 } 8601 8602 /** 8603 * lpfc_sli4_queue_verify - Verify and update EQ counts 8604 * @phba: pointer to lpfc hba data structure. 8605 * 8606 * This routine is invoked to check the user settable queue counts for EQs. 8607 * After this routine is called the counts will be set to valid values that 8608 * adhere to the constraints of the system's interrupt vectors and the port's 8609 * queue resources. 
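 * For NVMET configurations cfg_nvmet_mrq is clamped to the IRQ channel count
 * and to LPFC_NVMET_MRQ_MAX before the EQ/CQ entry sizes and counts are seeded
 * with their defaults.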
8610 * 8611 * Return codes 8612 * 0 - successful 8613 * -ENOMEM - No available memory 8614 **/ 8615 static int 8616 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 8617 { 8618 /* 8619 * Sanity check for configured queue parameters against the run-time 8620 * device parameters 8621 */ 8622 8623 if (phba->nvmet_support) { 8624 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq) 8625 phba->cfg_nvmet_mrq = phba->cfg_irq_chann; 8626 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 8627 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 8628 } 8629 8630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8631 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 8632 phba->cfg_hdw_queue, phba->cfg_irq_chann, 8633 phba->cfg_nvmet_mrq); 8634 8635 /* Get EQ depth from module parameter, fake the default for now */ 8636 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8637 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8638 8639 /* Get CQ depth from module parameter, fake the default for now */ 8640 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8641 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8642 return 0; 8643 } 8644 8645 static int 8646 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) 8647 { 8648 struct lpfc_queue *qdesc; 8649 int cpu; 8650 8651 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); 8652 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8653 phba->sli4_hba.cq_esize, 8654 LPFC_CQE_EXP_COUNT, cpu); 8655 if (!qdesc) { 8656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8657 "0508 Failed allocate fast-path NVME CQ (%d)\n", 8658 wqidx); 8659 return 1; 8660 } 8661 qdesc->qe_valid = 1; 8662 qdesc->hdwq = wqidx; 8663 qdesc->chann = cpu; 8664 phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc; 8665 8666 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8667 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT, 8668 cpu); 8669 if (!qdesc) { 8670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8671 "0509 Failed allocate fast-path NVME WQ (%d)\n", 8672 wqidx); 8673 return 1; 8674 } 8675 qdesc->hdwq = wqidx; 8676 qdesc->chann = wqidx; 8677 phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc; 8678 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8679 return 0; 8680 } 8681 8682 static int 8683 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) 8684 { 8685 struct lpfc_queue *qdesc; 8686 uint32_t wqesize; 8687 int cpu; 8688 8689 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); 8690 /* Create Fast Path FCP CQs */ 8691 if (phba->enab_exp_wqcq_pages) 8692 /* Increase the CQ size when WQEs contain an embedded cdb */ 8693 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8694 phba->sli4_hba.cq_esize, 8695 LPFC_CQE_EXP_COUNT, cpu); 8696 8697 else 8698 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8699 phba->sli4_hba.cq_esize, 8700 phba->sli4_hba.cq_ecount, cpu); 8701 if (!qdesc) { 8702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8703 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); 8704 return 1; 8705 } 8706 qdesc->qe_valid = 1; 8707 qdesc->hdwq = wqidx; 8708 qdesc->chann = cpu; 8709 phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc; 8710 8711 /* Create Fast Path FCP WQs */ 8712 if (phba->enab_exp_wqcq_pages) { 8713 /* Increase the WQ size when WQEs contain an embedded cdb */ 8714 wqesize = (phba->fcp_embed_io) ? 
8715 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8716 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8717 wqesize, 8718 LPFC_WQE_EXP_COUNT, cpu); 8719 } else 8720 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8721 phba->sli4_hba.wq_esize, 8722 phba->sli4_hba.wq_ecount, cpu); 8723 8724 if (!qdesc) { 8725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8726 "0503 Failed allocate fast-path FCP WQ (%d)\n", 8727 wqidx); 8728 return 1; 8729 } 8730 qdesc->hdwq = wqidx; 8731 qdesc->chann = wqidx; 8732 phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc; 8733 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8734 return 0; 8735 } 8736 8737 /** 8738 * lpfc_sli4_queue_create - Create all the SLI4 queues 8739 * @phba: pointer to lpfc hba data structure. 8740 * 8741 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 8742 * operation. For each SLI4 queue type, the parameters such as queue entry 8743 * count (queue depth) shall be taken from the module parameter. For now, 8744 * we just use some constant number as place holder. 8745 * 8746 * Return codes 8747 * 0 - successful 8748 * -ENOMEM - No availble memory 8749 * -EIO - The mailbox failed to complete successfully. 8750 **/ 8751 int 8752 lpfc_sli4_queue_create(struct lpfc_hba *phba) 8753 { 8754 struct lpfc_queue *qdesc; 8755 int idx, cpu, eqcpu; 8756 struct lpfc_sli4_hdw_queue *qp; 8757 struct lpfc_vector_map_info *cpup; 8758 struct lpfc_vector_map_info *eqcpup; 8759 struct lpfc_eq_intr_info *eqi; 8760 8761 /* 8762 * Create HBA Record arrays. 8763 * Both NVME and FCP will share that same vectors / EQs 8764 */ 8765 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 8766 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 8767 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 8768 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 8769 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 8770 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 8771 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 8772 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 8773 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 8774 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 8775 8776 if (!phba->sli4_hba.hdwq) { 8777 phba->sli4_hba.hdwq = kcalloc( 8778 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 8779 GFP_KERNEL); 8780 if (!phba->sli4_hba.hdwq) { 8781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8782 "6427 Failed allocate memory for " 8783 "fast-path Hardware Queue array\n"); 8784 goto out_error; 8785 } 8786 /* Prepare hardware queues to take IO buffers */ 8787 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8788 qp = &phba->sli4_hba.hdwq[idx]; 8789 spin_lock_init(&qp->io_buf_list_get_lock); 8790 spin_lock_init(&qp->io_buf_list_put_lock); 8791 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 8792 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 8793 qp->get_io_bufs = 0; 8794 qp->put_io_bufs = 0; 8795 qp->total_io_bufs = 0; 8796 spin_lock_init(&qp->abts_scsi_buf_list_lock); 8797 INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list); 8798 qp->abts_scsi_io_bufs = 0; 8799 spin_lock_init(&qp->abts_nvme_buf_list_lock); 8800 INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list); 8801 qp->abts_nvme_io_bufs = 0; 8802 } 8803 } 8804 8805 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8806 if (phba->nvmet_support) { 8807 phba->sli4_hba.nvmet_cqset = kcalloc( 8808 phba->cfg_nvmet_mrq, 8809 sizeof(struct lpfc_queue *), 8810 GFP_KERNEL); 8811 if (!phba->sli4_hba.nvmet_cqset) { 8812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8813 "3121 Fail allocate memory for " 8814 "fast-path CQ set array\n"); 8815 goto out_error; 
8816 } 8817 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 8818 phba->cfg_nvmet_mrq, 8819 sizeof(struct lpfc_queue *), 8820 GFP_KERNEL); 8821 if (!phba->sli4_hba.nvmet_mrq_hdr) { 8822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8823 "3122 Fail allocate memory for " 8824 "fast-path RQ set hdr array\n"); 8825 goto out_error; 8826 } 8827 phba->sli4_hba.nvmet_mrq_data = kcalloc( 8828 phba->cfg_nvmet_mrq, 8829 sizeof(struct lpfc_queue *), 8830 GFP_KERNEL); 8831 if (!phba->sli4_hba.nvmet_mrq_data) { 8832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8833 "3124 Fail allocate memory for " 8834 "fast-path RQ set data array\n"); 8835 goto out_error; 8836 } 8837 } 8838 } 8839 8840 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 8841 8842 /* Create HBA Event Queues (EQs) */ 8843 for_each_present_cpu(cpu) { 8844 /* We only want to create 1 EQ per vector, even though 8845 * multiple CPUs might be using that vector. so only 8846 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 8847 */ 8848 cpup = &phba->sli4_hba.cpu_map[cpu]; 8849 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 8850 continue; 8851 8852 /* Get a ptr to the Hardware Queue associated with this CPU */ 8853 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8854 8855 /* Allocate an EQ */ 8856 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8857 phba->sli4_hba.eq_esize, 8858 phba->sli4_hba.eq_ecount, cpu); 8859 if (!qdesc) { 8860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8861 "0497 Failed allocate EQ (%d)\n", 8862 cpup->hdwq); 8863 goto out_error; 8864 } 8865 qdesc->qe_valid = 1; 8866 qdesc->hdwq = cpup->hdwq; 8867 qdesc->chann = cpu; /* First CPU this EQ is affinitised to */ 8868 qdesc->last_cpu = qdesc->chann; 8869 8870 /* Save the allocated EQ in the Hardware Queue */ 8871 qp->hba_eq = qdesc; 8872 8873 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 8874 list_add(&qdesc->cpu_list, &eqi->list); 8875 } 8876 8877 /* Now we need to populate the other Hardware Queues, that share 8878 * an IRQ vector, with the associated EQ ptr. 
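 * CPUs that were not flagged LPFC_CPU_FIRST_IRQ did not allocate an EQ in the
 * loop above; they locate the CPU that owns their EQ with
 * lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ) and reuse that
 * hardware queue's hba_eq pointer.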
8879 */ 8880 for_each_present_cpu(cpu) { 8881 cpup = &phba->sli4_hba.cpu_map[cpu]; 8882 8883 /* Check for EQ already allocated in previous loop */ 8884 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 8885 continue; 8886 8887 /* Check for multiple CPUs per hdwq */ 8888 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 8889 if (qp->hba_eq) 8890 continue; 8891 8892 /* We need to share an EQ for this hdwq */ 8893 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 8894 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 8895 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 8896 } 8897 8898 /* Allocate SCSI SLI4 CQ/WQs */ 8899 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8900 if (lpfc_alloc_fcp_wq_cq(phba, idx)) 8901 goto out_error; 8902 } 8903 8904 /* Allocate NVME SLI4 CQ/WQs */ 8905 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8906 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8907 if (lpfc_alloc_nvme_wq_cq(phba, idx)) 8908 goto out_error; 8909 } 8910 8911 if (phba->nvmet_support) { 8912 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8913 cpu = lpfc_find_cpu_handle(phba, idx, 8914 LPFC_FIND_BY_HDWQ); 8915 qdesc = lpfc_sli4_queue_alloc( 8916 phba, 8917 LPFC_DEFAULT_PAGE_SIZE, 8918 phba->sli4_hba.cq_esize, 8919 phba->sli4_hba.cq_ecount, 8920 cpu); 8921 if (!qdesc) { 8922 lpfc_printf_log( 8923 phba, KERN_ERR, LOG_INIT, 8924 "3142 Failed allocate NVME " 8925 "CQ Set (%d)\n", idx); 8926 goto out_error; 8927 } 8928 qdesc->qe_valid = 1; 8929 qdesc->hdwq = idx; 8930 qdesc->chann = cpu; 8931 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 8932 } 8933 } 8934 } 8935 8936 /* 8937 * Create Slow Path Completion Queues (CQs) 8938 */ 8939 8940 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 8941 /* Create slow-path Mailbox Command Complete Queue */ 8942 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8943 phba->sli4_hba.cq_esize, 8944 phba->sli4_hba.cq_ecount, cpu); 8945 if (!qdesc) { 8946 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8947 "0500 Failed allocate slow-path mailbox CQ\n"); 8948 goto out_error; 8949 } 8950 qdesc->qe_valid = 1; 8951 phba->sli4_hba.mbx_cq = qdesc; 8952 8953 /* Create slow-path ELS Complete Queue */ 8954 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8955 phba->sli4_hba.cq_esize, 8956 phba->sli4_hba.cq_ecount, cpu); 8957 if (!qdesc) { 8958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8959 "0501 Failed allocate slow-path ELS CQ\n"); 8960 goto out_error; 8961 } 8962 qdesc->qe_valid = 1; 8963 qdesc->chann = 0; 8964 phba->sli4_hba.els_cq = qdesc; 8965 8966 8967 /* 8968 * Create Slow Path Work Queues (WQs) 8969 */ 8970 8971 /* Create Mailbox Command Queue */ 8972 8973 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8974 phba->sli4_hba.mq_esize, 8975 phba->sli4_hba.mq_ecount, cpu); 8976 if (!qdesc) { 8977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8978 "0505 Failed allocate slow-path MQ\n"); 8979 goto out_error; 8980 } 8981 qdesc->chann = 0; 8982 phba->sli4_hba.mbx_wq = qdesc; 8983 8984 /* 8985 * Create ELS Work Queues 8986 */ 8987 8988 /* Create slow-path ELS Work Queue */ 8989 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 8990 phba->sli4_hba.wq_esize, 8991 phba->sli4_hba.wq_ecount, cpu); 8992 if (!qdesc) { 8993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8994 "0504 Failed allocate slow-path ELS WQ\n"); 8995 goto out_error; 8996 } 8997 qdesc->chann = 0; 8998 phba->sli4_hba.els_wq = qdesc; 8999 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9000 9001 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9002 /* Create NVME LS 
Complete Queue */ 9003 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9004 phba->sli4_hba.cq_esize, 9005 phba->sli4_hba.cq_ecount, cpu); 9006 if (!qdesc) { 9007 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9008 "6079 Failed allocate NVME LS CQ\n"); 9009 goto out_error; 9010 } 9011 qdesc->chann = 0; 9012 qdesc->qe_valid = 1; 9013 phba->sli4_hba.nvmels_cq = qdesc; 9014 9015 /* Create NVME LS Work Queue */ 9016 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9017 phba->sli4_hba.wq_esize, 9018 phba->sli4_hba.wq_ecount, cpu); 9019 if (!qdesc) { 9020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9021 "6080 Failed allocate NVME LS WQ\n"); 9022 goto out_error; 9023 } 9024 qdesc->chann = 0; 9025 phba->sli4_hba.nvmels_wq = qdesc; 9026 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 9027 } 9028 9029 /* 9030 * Create Receive Queue (RQ) 9031 */ 9032 9033 /* Create Receive Queue for header */ 9034 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9035 phba->sli4_hba.rq_esize, 9036 phba->sli4_hba.rq_ecount, cpu); 9037 if (!qdesc) { 9038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9039 "0506 Failed allocate receive HRQ\n"); 9040 goto out_error; 9041 } 9042 phba->sli4_hba.hdr_rq = qdesc; 9043 9044 /* Create Receive Queue for data */ 9045 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 9046 phba->sli4_hba.rq_esize, 9047 phba->sli4_hba.rq_ecount, cpu); 9048 if (!qdesc) { 9049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9050 "0507 Failed allocate receive DRQ\n"); 9051 goto out_error; 9052 } 9053 phba->sli4_hba.dat_rq = qdesc; 9054 9055 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 9056 phba->nvmet_support) { 9057 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 9058 cpu = lpfc_find_cpu_handle(phba, idx, 9059 LPFC_FIND_BY_HDWQ); 9060 /* Create NVMET Receive Queue for header */ 9061 qdesc = lpfc_sli4_queue_alloc(phba, 9062 LPFC_DEFAULT_PAGE_SIZE, 9063 phba->sli4_hba.rq_esize, 9064 LPFC_NVMET_RQE_DEF_COUNT, 9065 cpu); 9066 if (!qdesc) { 9067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9068 "3146 Failed allocate " 9069 "receive HRQ\n"); 9070 goto out_error; 9071 } 9072 qdesc->hdwq = idx; 9073 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 9074 9075 /* Only needed for header of RQ pair */ 9076 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 9077 GFP_KERNEL, 9078 cpu_to_node(cpu)); 9079 if (qdesc->rqbp == NULL) { 9080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9081 "6131 Failed allocate " 9082 "Header RQBP\n"); 9083 goto out_error; 9084 } 9085 9086 /* Put list in known state in case driver load fails. 
*/ 9087 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 9088 9089 /* Create NVMET Receive Queue for data */ 9090 qdesc = lpfc_sli4_queue_alloc(phba, 9091 LPFC_DEFAULT_PAGE_SIZE, 9092 phba->sli4_hba.rq_esize, 9093 LPFC_NVMET_RQE_DEF_COUNT, 9094 cpu); 9095 if (!qdesc) { 9096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9097 "3156 Failed allocate " 9098 "receive DRQ\n"); 9099 goto out_error; 9100 } 9101 qdesc->hdwq = idx; 9102 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 9103 } 9104 } 9105 9106 #if defined(BUILD_NVME) 9107 /* Clear NVME stats */ 9108 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9109 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9110 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 9111 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 9112 } 9113 } 9114 #endif 9115 9116 /* Clear SCSI stats */ 9117 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 9118 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9119 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 9120 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 9121 } 9122 } 9123 9124 return 0; 9125 9126 out_error: 9127 lpfc_sli4_queue_destroy(phba); 9128 return -ENOMEM; 9129 } 9130 9131 static inline void 9132 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 9133 { 9134 if (*qp != NULL) { 9135 lpfc_sli4_queue_free(*qp); 9136 *qp = NULL; 9137 } 9138 } 9139 9140 static inline void 9141 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 9142 { 9143 int idx; 9144 9145 if (*qs == NULL) 9146 return; 9147 9148 for (idx = 0; idx < max; idx++) 9149 __lpfc_sli4_release_queue(&(*qs)[idx]); 9150 9151 kfree(*qs); 9152 *qs = NULL; 9153 } 9154 9155 static inline void 9156 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 9157 { 9158 struct lpfc_sli4_hdw_queue *hdwq; 9159 struct lpfc_queue *eq; 9160 uint32_t idx; 9161 9162 hdwq = phba->sli4_hba.hdwq; 9163 9164 /* Loop thru all Hardware Queues */ 9165 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 9166 /* Free the CQ/WQ corresponding to the Hardware Queue */ 9167 lpfc_sli4_queue_free(hdwq[idx].fcp_cq); 9168 lpfc_sli4_queue_free(hdwq[idx].nvme_cq); 9169 lpfc_sli4_queue_free(hdwq[idx].fcp_wq); 9170 lpfc_sli4_queue_free(hdwq[idx].nvme_wq); 9171 hdwq[idx].hba_eq = NULL; 9172 hdwq[idx].fcp_cq = NULL; 9173 hdwq[idx].nvme_cq = NULL; 9174 hdwq[idx].fcp_wq = NULL; 9175 hdwq[idx].nvme_wq = NULL; 9176 } 9177 /* Loop thru all IRQ vectors */ 9178 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 9179 /* Free the EQ corresponding to the IRQ vector */ 9180 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 9181 lpfc_sli4_queue_free(eq); 9182 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 9183 } 9184 } 9185 9186 /** 9187 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 9188 * @phba: pointer to lpfc hba data structure. 9189 * 9190 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 9191 * operation. 9192 * 9193 * Return codes 9194 * 0 - successful 9195 * -ENOMEM - No available memory 9196 * -EIO - The mailbox failed to complete successfully. 9197 **/ 9198 void 9199 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 9200 { 9201 /* 9202 * Set FREE_INIT before beginning to free the queues. 9203 * Wait until the users of queues to acknowledge to 9204 * release queues by clearing FREE_WAIT. 
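 * In other words, queue users are expected to flag activity with
 * LPFC_QUEUE_FREE_WAIT; this routine sets LPFC_QUEUE_FREE_INIT and then
 * polls under hbalock, sleeping 20ms per pass, until FREE_WAIT clears
 * before any queue is torn down.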
9205 */ 9206 spin_lock_irq(&phba->hbalock); 9207 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 9208 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 9209 spin_unlock_irq(&phba->hbalock); 9210 msleep(20); 9211 spin_lock_irq(&phba->hbalock); 9212 } 9213 spin_unlock_irq(&phba->hbalock); 9214 9215 /* Release HBA eqs */ 9216 if (phba->sli4_hba.hdwq) 9217 lpfc_sli4_release_hdwq(phba); 9218 9219 if (phba->nvmet_support) { 9220 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 9221 phba->cfg_nvmet_mrq); 9222 9223 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 9224 phba->cfg_nvmet_mrq); 9225 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 9226 phba->cfg_nvmet_mrq); 9227 } 9228 9229 /* Release mailbox command work queue */ 9230 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 9231 9232 /* Release ELS work queue */ 9233 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 9234 9235 /* Release ELS work queue */ 9236 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 9237 9238 /* Release unsolicited receive queue */ 9239 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 9240 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 9241 9242 /* Release ELS complete queue */ 9243 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 9244 9245 /* Release NVME LS complete queue */ 9246 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 9247 9248 /* Release mailbox command complete queue */ 9249 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 9250 9251 /* Everything on this list has been freed */ 9252 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 9253 9254 /* Done with freeing the queues */ 9255 spin_lock_irq(&phba->hbalock); 9256 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 9257 spin_unlock_irq(&phba->hbalock); 9258 } 9259 9260 int 9261 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 9262 { 9263 struct lpfc_rqb *rqbp; 9264 struct lpfc_dmabuf *h_buf; 9265 struct rqb_dmabuf *rqb_buffer; 9266 9267 rqbp = rq->rqbp; 9268 while (!list_empty(&rqbp->rqb_buffer_list)) { 9269 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 9270 struct lpfc_dmabuf, list); 9271 9272 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 9273 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 9274 rqbp->buffer_count--; 9275 } 9276 return 1; 9277 } 9278 9279 static int 9280 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 9281 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 9282 int qidx, uint32_t qtype) 9283 { 9284 struct lpfc_sli_ring *pring; 9285 int rc; 9286 9287 if (!eq || !cq || !wq) { 9288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9289 "6085 Fast-path %s (%d) not allocated\n", 9290 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 9291 return -ENOMEM; 9292 } 9293 9294 /* create the Cq first */ 9295 rc = lpfc_cq_create(phba, cq, eq, 9296 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 9297 if (rc) { 9298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9299 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 9300 qidx, (uint32_t)rc); 9301 return rc; 9302 } 9303 9304 if (qtype != LPFC_MBOX) { 9305 /* Setup cq_map for fast lookup */ 9306 if (cq_map) 9307 *cq_map = cq->queue_id; 9308 9309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9310 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 9311 qidx, cq->queue_id, qidx, eq->queue_id); 9312 9313 /* create the wq */ 9314 rc = lpfc_wq_create(phba, wq, cq, qtype); 9315 if (rc) { 9316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9317 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 9318 qidx, (uint32_t)rc); 9319 /* no need to tear down cq - caller will do so */ 9320 return rc; 9321 } 9322 9323 /* Bind this CQ/WQ to the NVME ring */ 9324 pring = wq->pring; 9325 pring->sli.sli4.wqp = (void *)wq; 9326 cq->pring = pring; 9327 9328 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9329 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 9330 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 9331 } else { 9332 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 9333 if (rc) { 9334 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9335 "0539 Failed setup of slow-path MQ: " 9336 "rc = 0x%x\n", rc); 9337 /* no need to tear down cq - caller will do so */ 9338 return rc; 9339 } 9340 9341 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9342 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 9343 phba->sli4_hba.mbx_wq->queue_id, 9344 phba->sli4_hba.mbx_cq->queue_id); 9345 } 9346 9347 return 0; 9348 } 9349 9350 /** 9351 * lpfc_setup_cq_lookup - Setup the CQ lookup table 9352 * @phba: pointer to lpfc hba data structure. 9353 * 9354 * This routine will populate the cq_lookup table by all 9355 * available CQ queue_id's. 9356 **/ 9357 static void 9358 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 9359 { 9360 struct lpfc_queue *eq, *childq; 9361 int qidx; 9362 9363 memset(phba->sli4_hba.cq_lookup, 0, 9364 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 9365 /* Loop thru all IRQ vectors */ 9366 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9367 /* Get the EQ corresponding to the IRQ vector */ 9368 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9369 if (!eq) 9370 continue; 9371 /* Loop through all CQs associated with that EQ */ 9372 list_for_each_entry(childq, &eq->child_list, list) { 9373 if (childq->queue_id > phba->sli4_hba.cq_max) 9374 continue; 9375 if ((childq->subtype == LPFC_FCP) || 9376 (childq->subtype == LPFC_NVME)) 9377 phba->sli4_hba.cq_lookup[childq->queue_id] = 9378 childq; 9379 } 9380 } 9381 } 9382 9383 /** 9384 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 9385 * @phba: pointer to lpfc hba data structure. 9386 * 9387 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 9388 * operation. 9389 * 9390 * Return codes 9391 * 0 - successful 9392 * -ENOMEM - No available memory 9393 * -EIO - The mailbox failed to complete successfully. 
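 *
 * Rough lifecycle of the SLI4 queues as used in this file (a sketch of
 * the pairing, not a formal contract):
 *
 *   lpfc_sli4_queue_create(phba)   -- allocate queue structures and DMA memory
 *   lpfc_sli4_queue_setup(phba)    -- post the EQs/CQs/WQs/RQs to the port
 *   ...
 *   lpfc_sli4_queue_unset(phba)    -- tear the queues down on the port
 *   lpfc_sli4_queue_destroy(phba)  -- free the queue structures and DMA memory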
9394 **/ 9395 int 9396 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 9397 { 9398 uint32_t shdr_status, shdr_add_status; 9399 union lpfc_sli4_cfg_shdr *shdr; 9400 struct lpfc_vector_map_info *cpup; 9401 struct lpfc_sli4_hdw_queue *qp; 9402 LPFC_MBOXQ_t *mboxq; 9403 int qidx, cpu; 9404 uint32_t length, usdelay; 9405 int rc = -ENOMEM; 9406 9407 /* Check for dual-ULP support */ 9408 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9409 if (!mboxq) { 9410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9411 "3249 Unable to allocate memory for " 9412 "QUERY_FW_CFG mailbox command\n"); 9413 return -ENOMEM; 9414 } 9415 length = (sizeof(struct lpfc_mbx_query_fw_config) - 9416 sizeof(struct lpfc_sli4_cfg_mhdr)); 9417 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9418 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 9419 length, LPFC_SLI4_MBX_EMBED); 9420 9421 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9422 9423 shdr = (union lpfc_sli4_cfg_shdr *) 9424 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 9425 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9426 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9427 if (shdr_status || shdr_add_status || rc) { 9428 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9429 "3250 QUERY_FW_CFG mailbox failed with status " 9430 "x%x add_status x%x, mbx status x%x\n", 9431 shdr_status, shdr_add_status, rc); 9432 if (rc != MBX_TIMEOUT) 9433 mempool_free(mboxq, phba->mbox_mem_pool); 9434 rc = -ENXIO; 9435 goto out_error; 9436 } 9437 9438 phba->sli4_hba.fw_func_mode = 9439 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 9440 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 9441 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 9442 phba->sli4_hba.physical_port = 9443 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 9444 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9445 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 9446 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 9447 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 9448 9449 if (rc != MBX_TIMEOUT) 9450 mempool_free(mboxq, phba->mbox_mem_pool); 9451 9452 /* 9453 * Set up HBA Event Queues (EQs) 9454 */ 9455 qp = phba->sli4_hba.hdwq; 9456 9457 /* Set up HBA event queue */ 9458 if (!qp) { 9459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9460 "3147 Fast-path EQs not allocated\n"); 9461 rc = -ENOMEM; 9462 goto out_error; 9463 } 9464 9465 /* Loop thru all IRQ vectors */ 9466 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9467 /* Create HBA Event Queues (EQs) in order */ 9468 for_each_present_cpu(cpu) { 9469 cpup = &phba->sli4_hba.cpu_map[cpu]; 9470 9471 /* Look for the CPU thats using that vector with 9472 * LPFC_CPU_FIRST_IRQ set. 
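 * (lpfc_sli4_queue_create() allocated only one EQ per IRQ vector, on
 * the LPFC_CPU_FIRST_IRQ CPU, so skipping the other CPUs here avoids
 * setting up the same EQ more than once.)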
9473 */ 9474 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 9475 continue; 9476 if (qidx != cpup->eq) 9477 continue; 9478 9479 /* Create an EQ for that vector */ 9480 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 9481 phba->cfg_fcp_imax); 9482 if (rc) { 9483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9484 "0523 Failed setup of fast-path" 9485 " EQ (%d), rc = 0x%x\n", 9486 cpup->eq, (uint32_t)rc); 9487 goto out_destroy; 9488 } 9489 9490 /* Save the EQ for that vector in the hba_eq_hdl */ 9491 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 9492 qp[cpup->hdwq].hba_eq; 9493 9494 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9495 "2584 HBA EQ setup: queue[%d]-id=%d\n", 9496 cpup->eq, 9497 qp[cpup->hdwq].hba_eq->queue_id); 9498 } 9499 } 9500 9501 /* Loop thru all Hardware Queues */ 9502 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9503 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9504 cpu = lpfc_find_cpu_handle(phba, qidx, 9505 LPFC_FIND_BY_HDWQ); 9506 cpup = &phba->sli4_hba.cpu_map[cpu]; 9507 9508 /* Create the CQ/WQ corresponding to the 9509 * Hardware Queue 9510 */ 9511 rc = lpfc_create_wq_cq(phba, 9512 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 9513 qp[qidx].nvme_cq, 9514 qp[qidx].nvme_wq, 9515 &phba->sli4_hba.hdwq[qidx].nvme_cq_map, 9516 qidx, LPFC_NVME); 9517 if (rc) { 9518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9519 "6123 Failed to setup fastpath " 9520 "NVME WQ/CQ (%d), rc = 0x%x\n", 9521 qidx, (uint32_t)rc); 9522 goto out_destroy; 9523 } 9524 } 9525 } 9526 9527 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9528 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 9529 cpup = &phba->sli4_hba.cpu_map[cpu]; 9530 9531 /* Create the CQ/WQ corresponding to the Hardware Queue */ 9532 rc = lpfc_create_wq_cq(phba, 9533 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 9534 qp[qidx].fcp_cq, 9535 qp[qidx].fcp_wq, 9536 &phba->sli4_hba.hdwq[qidx].fcp_cq_map, 9537 qidx, LPFC_FCP); 9538 if (rc) { 9539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9540 "0535 Failed to setup fastpath " 9541 "FCP WQ/CQ (%d), rc = 0x%x\n", 9542 qidx, (uint32_t)rc); 9543 goto out_destroy; 9544 } 9545 } 9546 9547 /* 9548 * Set up Slow Path Complete Queues (CQs) 9549 */ 9550 9551 /* Set up slow-path MBOX CQ/MQ */ 9552 9553 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 9554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9555 "0528 %s not allocated\n", 9556 phba->sli4_hba.mbx_cq ? 
9557 "Mailbox WQ" : "Mailbox CQ"); 9558 rc = -ENOMEM; 9559 goto out_destroy; 9560 } 9561 9562 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9563 phba->sli4_hba.mbx_cq, 9564 phba->sli4_hba.mbx_wq, 9565 NULL, 0, LPFC_MBOX); 9566 if (rc) { 9567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9568 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 9569 (uint32_t)rc); 9570 goto out_destroy; 9571 } 9572 if (phba->nvmet_support) { 9573 if (!phba->sli4_hba.nvmet_cqset) { 9574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9575 "3165 Fast-path NVME CQ Set " 9576 "array not allocated\n"); 9577 rc = -ENOMEM; 9578 goto out_destroy; 9579 } 9580 if (phba->cfg_nvmet_mrq > 1) { 9581 rc = lpfc_cq_create_set(phba, 9582 phba->sli4_hba.nvmet_cqset, 9583 qp, 9584 LPFC_WCQ, LPFC_NVMET); 9585 if (rc) { 9586 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9587 "3164 Failed setup of NVME CQ " 9588 "Set, rc = 0x%x\n", 9589 (uint32_t)rc); 9590 goto out_destroy; 9591 } 9592 } else { 9593 /* Set up NVMET Receive Complete Queue */ 9594 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 9595 qp[0].hba_eq, 9596 LPFC_WCQ, LPFC_NVMET); 9597 if (rc) { 9598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9599 "6089 Failed setup NVMET CQ: " 9600 "rc = 0x%x\n", (uint32_t)rc); 9601 goto out_destroy; 9602 } 9603 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 9604 9605 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9606 "6090 NVMET CQ setup: cq-id=%d, " 9607 "parent eq-id=%d\n", 9608 phba->sli4_hba.nvmet_cqset[0]->queue_id, 9609 qp[0].hba_eq->queue_id); 9610 } 9611 } 9612 9613 /* Set up slow-path ELS WQ/CQ */ 9614 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 9615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9616 "0530 ELS %s not allocated\n", 9617 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 9618 rc = -ENOMEM; 9619 goto out_destroy; 9620 } 9621 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9622 phba->sli4_hba.els_cq, 9623 phba->sli4_hba.els_wq, 9624 NULL, 0, LPFC_ELS); 9625 if (rc) { 9626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9627 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 9628 (uint32_t)rc); 9629 goto out_destroy; 9630 } 9631 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9632 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 9633 phba->sli4_hba.els_wq->queue_id, 9634 phba->sli4_hba.els_cq->queue_id); 9635 9636 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 9637 /* Set up NVME LS Complete Queue */ 9638 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 9639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9640 "6091 LS %s not allocated\n", 9641 phba->sli4_hba.nvmels_cq ? 
"WQ" : "CQ"); 9642 rc = -ENOMEM; 9643 goto out_destroy; 9644 } 9645 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 9646 phba->sli4_hba.nvmels_cq, 9647 phba->sli4_hba.nvmels_wq, 9648 NULL, 0, LPFC_NVME_LS); 9649 if (rc) { 9650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9651 "0526 Failed setup of NVVME LS WQ/CQ: " 9652 "rc = 0x%x\n", (uint32_t)rc); 9653 goto out_destroy; 9654 } 9655 9656 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9657 "6096 ELS WQ setup: wq-id=%d, " 9658 "parent cq-id=%d\n", 9659 phba->sli4_hba.nvmels_wq->queue_id, 9660 phba->sli4_hba.nvmels_cq->queue_id); 9661 } 9662 9663 /* 9664 * Create NVMET Receive Queue (RQ) 9665 */ 9666 if (phba->nvmet_support) { 9667 if ((!phba->sli4_hba.nvmet_cqset) || 9668 (!phba->sli4_hba.nvmet_mrq_hdr) || 9669 (!phba->sli4_hba.nvmet_mrq_data)) { 9670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9671 "6130 MRQ CQ Queues not " 9672 "allocated\n"); 9673 rc = -ENOMEM; 9674 goto out_destroy; 9675 } 9676 if (phba->cfg_nvmet_mrq > 1) { 9677 rc = lpfc_mrq_create(phba, 9678 phba->sli4_hba.nvmet_mrq_hdr, 9679 phba->sli4_hba.nvmet_mrq_data, 9680 phba->sli4_hba.nvmet_cqset, 9681 LPFC_NVMET); 9682 if (rc) { 9683 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9684 "6098 Failed setup of NVMET " 9685 "MRQ: rc = 0x%x\n", 9686 (uint32_t)rc); 9687 goto out_destroy; 9688 } 9689 9690 } else { 9691 rc = lpfc_rq_create(phba, 9692 phba->sli4_hba.nvmet_mrq_hdr[0], 9693 phba->sli4_hba.nvmet_mrq_data[0], 9694 phba->sli4_hba.nvmet_cqset[0], 9695 LPFC_NVMET); 9696 if (rc) { 9697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9698 "6057 Failed setup of NVMET " 9699 "Receive Queue: rc = 0x%x\n", 9700 (uint32_t)rc); 9701 goto out_destroy; 9702 } 9703 9704 lpfc_printf_log( 9705 phba, KERN_INFO, LOG_INIT, 9706 "6099 NVMET RQ setup: hdr-rq-id=%d, " 9707 "dat-rq-id=%d parent cq-id=%d\n", 9708 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 9709 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 9710 phba->sli4_hba.nvmet_cqset[0]->queue_id); 9711 9712 } 9713 } 9714 9715 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 9716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9717 "0540 Receive Queue not allocated\n"); 9718 rc = -ENOMEM; 9719 goto out_destroy; 9720 } 9721 9722 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 9723 phba->sli4_hba.els_cq, LPFC_USOL); 9724 if (rc) { 9725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9726 "0541 Failed setup of Receive Queue: " 9727 "rc = 0x%x\n", (uint32_t)rc); 9728 goto out_destroy; 9729 } 9730 9731 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9732 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 9733 "parent cq-id=%d\n", 9734 phba->sli4_hba.hdr_rq->queue_id, 9735 phba->sli4_hba.dat_rq->queue_id, 9736 phba->sli4_hba.els_cq->queue_id); 9737 9738 if (phba->cfg_fcp_imax) 9739 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 9740 else 9741 usdelay = 0; 9742 9743 for (qidx = 0; qidx < phba->cfg_irq_chann; 9744 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 9745 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 9746 usdelay); 9747 9748 if (phba->sli4_hba.cq_max) { 9749 kfree(phba->sli4_hba.cq_lookup); 9750 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 9751 sizeof(struct lpfc_queue *), GFP_KERNEL); 9752 if (!phba->sli4_hba.cq_lookup) { 9753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9754 "0549 Failed setup of CQ Lookup table: " 9755 "size 0x%x\n", phba->sli4_hba.cq_max); 9756 rc = -ENOMEM; 9757 goto out_destroy; 9758 } 9759 lpfc_setup_cq_lookup(phba); 9760 } 9761 return 0; 9762 9763 out_destroy: 9764 lpfc_sli4_queue_unset(phba); 9765 
out_error: 9766 return rc; 9767 } 9768 9769 /** 9770 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 9771 * @phba: pointer to lpfc hba data structure. 9772 * 9773 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 9774 * operation. 9775 * 9776 * Return codes 9777 * 0 - successful 9778 * -ENOMEM - No available memory 9779 * -EIO - The mailbox failed to complete successfully. 9780 **/ 9781 void 9782 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 9783 { 9784 struct lpfc_sli4_hdw_queue *qp; 9785 struct lpfc_queue *eq; 9786 int qidx; 9787 9788 /* Unset mailbox command work queue */ 9789 if (phba->sli4_hba.mbx_wq) 9790 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 9791 9792 /* Unset NVME LS work queue */ 9793 if (phba->sli4_hba.nvmels_wq) 9794 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 9795 9796 /* Unset ELS work queue */ 9797 if (phba->sli4_hba.els_wq) 9798 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 9799 9800 /* Unset unsolicited receive queue */ 9801 if (phba->sli4_hba.hdr_rq) 9802 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 9803 phba->sli4_hba.dat_rq); 9804 9805 /* Unset mailbox command complete queue */ 9806 if (phba->sli4_hba.mbx_cq) 9807 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 9808 9809 /* Unset ELS complete queue */ 9810 if (phba->sli4_hba.els_cq) 9811 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 9812 9813 /* Unset NVME LS complete queue */ 9814 if (phba->sli4_hba.nvmels_cq) 9815 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 9816 9817 if (phba->nvmet_support) { 9818 /* Unset NVMET MRQ queue */ 9819 if (phba->sli4_hba.nvmet_mrq_hdr) { 9820 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9821 lpfc_rq_destroy( 9822 phba, 9823 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9824 phba->sli4_hba.nvmet_mrq_data[qidx]); 9825 } 9826 9827 /* Unset NVMET CQ Set complete queue */ 9828 if (phba->sli4_hba.nvmet_cqset) { 9829 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9830 lpfc_cq_destroy( 9831 phba, phba->sli4_hba.nvmet_cqset[qidx]); 9832 } 9833 } 9834 9835 /* Unset fast-path SLI4 queues */ 9836 if (phba->sli4_hba.hdwq) { 9837 /* Loop thru all Hardware Queues */ 9838 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 9839 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 9840 qp = &phba->sli4_hba.hdwq[qidx]; 9841 lpfc_wq_destroy(phba, qp->fcp_wq); 9842 lpfc_wq_destroy(phba, qp->nvme_wq); 9843 lpfc_cq_destroy(phba, qp->fcp_cq); 9844 lpfc_cq_destroy(phba, qp->nvme_cq); 9845 } 9846 /* Loop thru all IRQ vectors */ 9847 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 9848 /* Destroy the EQ corresponding to the IRQ vector */ 9849 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 9850 lpfc_eq_destroy(phba, eq); 9851 } 9852 } 9853 9854 kfree(phba->sli4_hba.cq_lookup); 9855 phba->sli4_hba.cq_lookup = NULL; 9856 phba->sli4_hba.cq_max = 0; 9857 } 9858 9859 /** 9860 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 9861 * @phba: pointer to lpfc hba data structure. 9862 * 9863 * This routine is invoked to allocate and set up a pool of completion queue 9864 * events. The body of the completion queue event is a completion queue entry 9865 * CQE. For now, this pool is used for the interrupt service routine to queue 9866 * the following HBA completion queue events for the worker thread to process: 9867 * - Mailbox asynchronous events 9868 * - Receive queue completion unsolicited events 9869 * Later, this can be used for all the slow-path events. 
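 * The pool is sized at four times the slow-path CQ entry count
 * (4 * cq_ecount); see the allocation loop below.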
9870 * 9871 * Return codes 9872 * 0 - successful 9873 * -ENOMEM - No available memory 9874 **/ 9875 static int 9876 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 9877 { 9878 struct lpfc_cq_event *cq_event; 9879 int i; 9880 9881 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 9882 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 9883 if (!cq_event) 9884 goto out_pool_create_fail; 9885 list_add_tail(&cq_event->list, 9886 &phba->sli4_hba.sp_cqe_event_pool); 9887 } 9888 return 0; 9889 9890 out_pool_create_fail: 9891 lpfc_sli4_cq_event_pool_destroy(phba); 9892 return -ENOMEM; 9893 } 9894 9895 /** 9896 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 9897 * @phba: pointer to lpfc hba data structure. 9898 * 9899 * This routine is invoked to free the pool of completion queue events at 9900 * driver unload time. Note that, it is the responsibility of the driver 9901 * cleanup routine to free all the outstanding completion-queue events 9902 * allocated from this pool back into the pool before invoking this routine 9903 * to destroy the pool. 9904 **/ 9905 static void 9906 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 9907 { 9908 struct lpfc_cq_event *cq_event, *next_cq_event; 9909 9910 list_for_each_entry_safe(cq_event, next_cq_event, 9911 &phba->sli4_hba.sp_cqe_event_pool, list) { 9912 list_del(&cq_event->list); 9913 kfree(cq_event); 9914 } 9915 } 9916 9917 /** 9918 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9919 * @phba: pointer to lpfc hba data structure. 9920 * 9921 * This routine is the lock free version of the API invoked to allocate a 9922 * completion-queue event from the free pool. 9923 * 9924 * Return: Pointer to the newly allocated completion-queue event if successful 9925 * NULL otherwise. 9926 **/ 9927 struct lpfc_cq_event * 9928 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9929 { 9930 struct lpfc_cq_event *cq_event = NULL; 9931 9932 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 9933 struct lpfc_cq_event, list); 9934 return cq_event; 9935 } 9936 9937 /** 9938 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 9939 * @phba: pointer to lpfc hba data structure. 9940 * 9941 * This routine is the lock version of the API invoked to allocate a 9942 * completion-queue event from the free pool. 9943 * 9944 * Return: Pointer to the newly allocated completion-queue event if successful 9945 * NULL otherwise. 9946 **/ 9947 struct lpfc_cq_event * 9948 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 9949 { 9950 struct lpfc_cq_event *cq_event; 9951 unsigned long iflags; 9952 9953 spin_lock_irqsave(&phba->hbalock, iflags); 9954 cq_event = __lpfc_sli4_cq_event_alloc(phba); 9955 spin_unlock_irqrestore(&phba->hbalock, iflags); 9956 return cq_event; 9957 } 9958 9959 /** 9960 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 9961 * @phba: pointer to lpfc hba data structure. 9962 * @cq_event: pointer to the completion queue event to be freed. 9963 * 9964 * This routine is the lock free version of the API invoked to release a 9965 * completion-queue event back into the free pool. 9966 **/ 9967 void 9968 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 9969 struct lpfc_cq_event *cq_event) 9970 { 9971 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 9972 } 9973 9974 /** 9975 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 9976 * @phba: pointer to lpfc hba data structure. 
9977 * @cq_event: pointer to the completion queue event to be freed.
9978 *
9979 * This routine is the lock version of the API invoked to release a
9980 * completion-queue event back into the free pool.
9981 **/
9982 void
9983 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9984 struct lpfc_cq_event *cq_event)
9985 {
9986 unsigned long iflags;
9987 spin_lock_irqsave(&phba->hbalock, iflags);
9988 __lpfc_sli4_cq_event_release(phba, cq_event);
9989 spin_unlock_irqrestore(&phba->hbalock, iflags);
9990 }
9991
9992 /**
9993 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
9994 * @phba: pointer to lpfc hba data structure.
9995 *
9996 * This routine frees all the pending completion-queue events back
9997 * into the free pool for a device reset.
9998 **/
9999 static void
10000 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10001 {
10002 LIST_HEAD(cqelist);
10003 struct lpfc_cq_event *cqe;
10004 unsigned long iflags;
10005
10006 /* Retrieve all the pending WCQEs from pending WCQE lists */
10007 spin_lock_irqsave(&phba->hbalock, iflags);
10008 /* Pending FCP XRI abort events */
10009 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10010 &cqelist);
10011 /* Pending ELS XRI abort events */
10012 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10013 &cqelist);
10014 /* Pending async events */
10015 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10016 &cqelist);
10017 spin_unlock_irqrestore(&phba->hbalock, iflags);
10018
10019 while (!list_empty(&cqelist)) {
10020 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
10021 lpfc_sli4_cq_event_release(phba, cqe);
10022 }
10023 }
10024
10025 /**
10026 * lpfc_pci_function_reset - Reset PCI function.
10027 * @phba: pointer to lpfc hba data structure.
10028 *
10029 * This routine is invoked to request a PCI function reset. It destroys
10030 * all resources assigned to the PCI function that originates this request.
10031 *
10032 * Return codes
10033 * 0 - successful
10034 * -ENOMEM - No available memory
10035 * -EIO - The mailbox failed to complete successfully.
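 * -ENODEV - The port is not ready or the reset failed.
 *
 * Behavior by interface type, as implemented below: if_type 0 issues the
 * SLI_FUNCTION_RESET mailbox command; if_type 2 and 6 poll the SLIPORT
 * status register for RDY (up to roughly 30 seconds in 20ms steps), write
 * INIT_PORT to the control register, and then poll again for the port to
 * come ready.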
10036 **/ 10037 int 10038 lpfc_pci_function_reset(struct lpfc_hba *phba) 10039 { 10040 LPFC_MBOXQ_t *mboxq; 10041 uint32_t rc = 0, if_type; 10042 uint32_t shdr_status, shdr_add_status; 10043 uint32_t rdy_chk; 10044 uint32_t port_reset = 0; 10045 union lpfc_sli4_cfg_shdr *shdr; 10046 struct lpfc_register reg_data; 10047 uint16_t devid; 10048 10049 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10050 switch (if_type) { 10051 case LPFC_SLI_INTF_IF_TYPE_0: 10052 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10053 GFP_KERNEL); 10054 if (!mboxq) { 10055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10056 "0494 Unable to allocate memory for " 10057 "issuing SLI_FUNCTION_RESET mailbox " 10058 "command\n"); 10059 return -ENOMEM; 10060 } 10061 10062 /* Setup PCI function reset mailbox-ioctl command */ 10063 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 10064 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 10065 LPFC_SLI4_MBX_EMBED); 10066 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10067 shdr = (union lpfc_sli4_cfg_shdr *) 10068 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 10069 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10070 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 10071 &shdr->response); 10072 if (rc != MBX_TIMEOUT) 10073 mempool_free(mboxq, phba->mbox_mem_pool); 10074 if (shdr_status || shdr_add_status || rc) { 10075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10076 "0495 SLI_FUNCTION_RESET mailbox " 10077 "failed with status x%x add_status x%x," 10078 " mbx status x%x\n", 10079 shdr_status, shdr_add_status, rc); 10080 rc = -ENXIO; 10081 } 10082 break; 10083 case LPFC_SLI_INTF_IF_TYPE_2: 10084 case LPFC_SLI_INTF_IF_TYPE_6: 10085 wait: 10086 /* 10087 * Poll the Port Status Register and wait for RDY for 10088 * up to 30 seconds. If the port doesn't respond, treat 10089 * it as an error. 10090 */ 10091 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 10092 if (lpfc_readl(phba->sli4_hba.u.if_type2. 10093 STATUSregaddr, ®_data.word0)) { 10094 rc = -ENODEV; 10095 goto out; 10096 } 10097 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 10098 break; 10099 msleep(20); 10100 } 10101 10102 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 10103 phba->work_status[0] = readl( 10104 phba->sli4_hba.u.if_type2.ERR1regaddr); 10105 phba->work_status[1] = readl( 10106 phba->sli4_hba.u.if_type2.ERR2regaddr); 10107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10108 "2890 Port not ready, port status reg " 10109 "0x%x error 1=0x%x, error 2=0x%x\n", 10110 reg_data.word0, 10111 phba->work_status[0], 10112 phba->work_status[1]); 10113 rc = -ENODEV; 10114 goto out; 10115 } 10116 10117 if (!port_reset) { 10118 /* 10119 * Reset the port now 10120 */ 10121 reg_data.word0 = 0; 10122 bf_set(lpfc_sliport_ctrl_end, ®_data, 10123 LPFC_SLIPORT_LITTLE_ENDIAN); 10124 bf_set(lpfc_sliport_ctrl_ip, ®_data, 10125 LPFC_SLIPORT_INIT_PORT); 10126 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 10127 CTRLregaddr); 10128 /* flush */ 10129 pci_read_config_word(phba->pcidev, 10130 PCI_DEVICE_ID, &devid); 10131 10132 port_reset = 1; 10133 msleep(20); 10134 goto wait; 10135 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 10136 rc = -ENODEV; 10137 goto out; 10138 } 10139 break; 10140 10141 case LPFC_SLI_INTF_IF_TYPE_1: 10142 default: 10143 break; 10144 } 10145 10146 out: 10147 /* Catch the not-ready port failure after a port reset. 
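 * Any failure here is reported as -ENODEV, along with the logged hint
 * to retry the reset through board_mode.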
*/ 10148 if (rc) { 10149 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10150 "3317 HBA not functional: IP Reset Failed " 10151 "try: echo fw_reset > board_mode\n"); 10152 rc = -ENODEV; 10153 } 10154 10155 return rc; 10156 } 10157 10158 /** 10159 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 10160 * @phba: pointer to lpfc hba data structure. 10161 * 10162 * This routine is invoked to set up the PCI device memory space for device 10163 * with SLI-4 interface spec. 10164 * 10165 * Return codes 10166 * 0 - successful 10167 * other values - error 10168 **/ 10169 static int 10170 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 10171 { 10172 struct pci_dev *pdev = phba->pcidev; 10173 unsigned long bar0map_len, bar1map_len, bar2map_len; 10174 int error; 10175 uint32_t if_type; 10176 10177 if (!pdev) 10178 return -ENODEV; 10179 10180 /* Set the device DMA mask size */ 10181 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10182 if (error) 10183 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10184 if (error) 10185 return error; 10186 10187 /* 10188 * The BARs and register set definitions and offset locations are 10189 * dependent on the if_type. 10190 */ 10191 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10192 &phba->sli4_hba.sli_intf.word0)) { 10193 return -ENODEV; 10194 } 10195 10196 /* There is no SLI3 failback for SLI4 devices. */ 10197 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 10198 LPFC_SLI_INTF_VALID) { 10199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10200 "2894 SLI_INTF reg contents invalid " 10201 "sli_intf reg 0x%x\n", 10202 phba->sli4_hba.sli_intf.word0); 10203 return -ENODEV; 10204 } 10205 10206 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10207 /* 10208 * Get the bus address of SLI4 device Bar regions and the 10209 * number of bytes required by each mapping. The mapping of the 10210 * particular PCI BARs regions is dependent on the type of 10211 * SLI4 device. 
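 * As mapped below: the config registers come from PCI_64BIT_BAR0 (with
 * a legacy fallback for if_type < 2); for if_type 0, PCI_64BIT_BAR2
 * maps the control registers and PCI_64BIT_BAR4 the doorbells; for
 * if_type 6, PCI_64BIT_BAR2 maps the doorbells and PCI_64BIT_BAR4 the
 * DPP registers.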
10212 */ 10213 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 10214 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 10215 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 10216 10217 /* 10218 * Map SLI4 PCI Config Space Register base to a kernel virtual 10219 * addr 10220 */ 10221 phba->sli4_hba.conf_regs_memmap_p = 10222 ioremap(phba->pci_bar0_map, bar0map_len); 10223 if (!phba->sli4_hba.conf_regs_memmap_p) { 10224 dev_printk(KERN_ERR, &pdev->dev, 10225 "ioremap failed for SLI4 PCI config " 10226 "registers.\n"); 10227 return -ENODEV; 10228 } 10229 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10230 /* Set up BAR0 PCI config space register memory map */ 10231 lpfc_sli4_bar0_register_memmap(phba, if_type); 10232 } else { 10233 phba->pci_bar0_map = pci_resource_start(pdev, 1); 10234 bar0map_len = pci_resource_len(pdev, 1); 10235 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10236 dev_printk(KERN_ERR, &pdev->dev, 10237 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10238 return -ENODEV; 10239 } 10240 phba->sli4_hba.conf_regs_memmap_p = 10241 ioremap(phba->pci_bar0_map, bar0map_len); 10242 if (!phba->sli4_hba.conf_regs_memmap_p) { 10243 dev_printk(KERN_ERR, &pdev->dev, 10244 "ioremap failed for SLI4 PCI config " 10245 "registers.\n"); 10246 return -ENODEV; 10247 } 10248 lpfc_sli4_bar0_register_memmap(phba, if_type); 10249 } 10250 10251 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10252 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 10253 /* 10254 * Map SLI4 if type 0 HBA Control Register base to a 10255 * kernel virtual address and setup the registers. 10256 */ 10257 phba->pci_bar1_map = pci_resource_start(pdev, 10258 PCI_64BIT_BAR2); 10259 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10260 phba->sli4_hba.ctrl_regs_memmap_p = 10261 ioremap(phba->pci_bar1_map, 10262 bar1map_len); 10263 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 10264 dev_err(&pdev->dev, 10265 "ioremap failed for SLI4 HBA " 10266 "control registers.\n"); 10267 error = -ENOMEM; 10268 goto out_iounmap_conf; 10269 } 10270 phba->pci_bar2_memmap_p = 10271 phba->sli4_hba.ctrl_regs_memmap_p; 10272 lpfc_sli4_bar1_register_memmap(phba, if_type); 10273 } else { 10274 error = -ENOMEM; 10275 goto out_iounmap_conf; 10276 } 10277 } 10278 10279 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 10280 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 10281 /* 10282 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 10283 * virtual address and setup the registers. 10284 */ 10285 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 10286 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 10287 phba->sli4_hba.drbl_regs_memmap_p = 10288 ioremap(phba->pci_bar1_map, bar1map_len); 10289 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10290 dev_err(&pdev->dev, 10291 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10292 error = -ENOMEM; 10293 goto out_iounmap_conf; 10294 } 10295 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10296 lpfc_sli4_bar1_register_memmap(phba, if_type); 10297 } 10298 10299 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 10300 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10301 /* 10302 * Map SLI4 if type 0 HBA Doorbell Register base to 10303 * a kernel virtual address and setup the registers. 
10304 */ 10305 phba->pci_bar2_map = pci_resource_start(pdev, 10306 PCI_64BIT_BAR4); 10307 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10308 phba->sli4_hba.drbl_regs_memmap_p = 10309 ioremap(phba->pci_bar2_map, 10310 bar2map_len); 10311 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10312 dev_err(&pdev->dev, 10313 "ioremap failed for SLI4 HBA" 10314 " doorbell registers.\n"); 10315 error = -ENOMEM; 10316 goto out_iounmap_ctrl; 10317 } 10318 phba->pci_bar4_memmap_p = 10319 phba->sli4_hba.drbl_regs_memmap_p; 10320 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 10321 if (error) 10322 goto out_iounmap_all; 10323 } else { 10324 error = -ENOMEM; 10325 goto out_iounmap_all; 10326 } 10327 } 10328 10329 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 10330 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 10331 /* 10332 * Map SLI4 if type 6 HBA DPP Register base to a kernel 10333 * virtual address and setup the registers. 10334 */ 10335 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 10336 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 10337 phba->sli4_hba.dpp_regs_memmap_p = 10338 ioremap(phba->pci_bar2_map, bar2map_len); 10339 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10340 dev_err(&pdev->dev, 10341 "ioremap failed for SLI4 HBA dpp registers.\n"); 10342 error = -ENOMEM; 10343 goto out_iounmap_ctrl; 10344 } 10345 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10346 } 10347 10348 /* Set up the EQ/CQ register handeling functions now */ 10349 switch (if_type) { 10350 case LPFC_SLI_INTF_IF_TYPE_0: 10351 case LPFC_SLI_INTF_IF_TYPE_2: 10352 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 10353 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 10354 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 10355 break; 10356 case LPFC_SLI_INTF_IF_TYPE_6: 10357 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 10358 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 10359 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 10360 break; 10361 default: 10362 break; 10363 } 10364 10365 return 0; 10366 10367 out_iounmap_all: 10368 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10369 out_iounmap_ctrl: 10370 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10371 out_iounmap_conf: 10372 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10373 10374 return error; 10375 } 10376 10377 /** 10378 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 10379 * @phba: pointer to lpfc hba data structure. 10380 * 10381 * This routine is invoked to unset the PCI device memory space for device 10382 * with SLI-4 interface spec. 
10383 **/ 10384 static void 10385 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 10386 { 10387 uint32_t if_type; 10388 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10389 10390 switch (if_type) { 10391 case LPFC_SLI_INTF_IF_TYPE_0: 10392 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10393 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10394 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10395 break; 10396 case LPFC_SLI_INTF_IF_TYPE_2: 10397 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10398 break; 10399 case LPFC_SLI_INTF_IF_TYPE_6: 10400 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 10401 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10402 break; 10403 case LPFC_SLI_INTF_IF_TYPE_1: 10404 default: 10405 dev_printk(KERN_ERR, &phba->pcidev->dev, 10406 "FATAL - unsupported SLI4 interface type - %d\n", 10407 if_type); 10408 break; 10409 } 10410 } 10411 10412 /** 10413 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 10414 * @phba: pointer to lpfc hba data structure. 10415 * 10416 * This routine is invoked to enable the MSI-X interrupt vectors to device 10417 * with SLI-3 interface specs. 10418 * 10419 * Return codes 10420 * 0 - successful 10421 * other values - error 10422 **/ 10423 static int 10424 lpfc_sli_enable_msix(struct lpfc_hba *phba) 10425 { 10426 int rc; 10427 LPFC_MBOXQ_t *pmb; 10428 10429 /* Set up MSI-X multi-message vectors */ 10430 rc = pci_alloc_irq_vectors(phba->pcidev, 10431 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 10432 if (rc < 0) { 10433 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10434 "0420 PCI enable MSI-X failed (%d)\n", rc); 10435 goto vec_fail_out; 10436 } 10437 10438 /* 10439 * Assign MSI-X vectors to interrupt handlers 10440 */ 10441 10442 /* vector-0 is associated to slow-path handler */ 10443 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 10444 &lpfc_sli_sp_intr_handler, 0, 10445 LPFC_SP_DRIVER_HANDLER_NAME, phba); 10446 if (rc) { 10447 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10448 "0421 MSI-X slow-path request_irq failed " 10449 "(%d)\n", rc); 10450 goto msi_fail_out; 10451 } 10452 10453 /* vector-1 is associated to fast-path handler */ 10454 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 10455 &lpfc_sli_fp_intr_handler, 0, 10456 LPFC_FP_DRIVER_HANDLER_NAME, phba); 10457 10458 if (rc) { 10459 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10460 "0429 MSI-X fast-path request_irq failed " 10461 "(%d)\n", rc); 10462 goto irq_fail_out; 10463 } 10464 10465 /* 10466 * Configure HBA MSI-X attention conditions to messages 10467 */ 10468 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10469 10470 if (!pmb) { 10471 rc = -ENOMEM; 10472 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10473 "0474 Unable to allocate memory for issuing " 10474 "MBOX_CONFIG_MSI command\n"); 10475 goto mem_fail_out; 10476 } 10477 rc = lpfc_config_msi(phba, pmb); 10478 if (rc) 10479 goto mbx_fail_out; 10480 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10481 if (rc != MBX_SUCCESS) { 10482 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 10483 "0351 Config MSI mailbox command failed, " 10484 "mbxCmd x%x, mbxStatus x%x\n", 10485 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 10486 goto mbx_fail_out; 10487 } 10488 10489 /* Free memory allocated for mailbox command */ 10490 mempool_free(pmb, phba->mbox_mem_pool); 10491 return rc; 10492 10493 mbx_fail_out: 10494 /* Free memory allocated for mailbox command */ 10495 mempool_free(pmb, phba->mbox_mem_pool); 10496 10497 mem_fail_out: 10498 /* free the irq already requested */ 10499 
free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10500
10501 irq_fail_out:
10502 /* free the irq already requested */
10503 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10504
10505 msi_fail_out:
10506 /* Unconfigure MSI-X capability structure */
10507 pci_free_irq_vectors(phba->pcidev);
10508
10509 vec_fail_out:
10510 return rc;
10511 }
10512
10513 /**
10514 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10515 * @phba: pointer to lpfc hba data structure.
10516 *
10517 * This routine is invoked to enable the MSI interrupt mode on a device with
10518 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
10519 * enable the MSI vector. The device driver is responsible for calling
10520 * request_irq() to register the MSI vector with an interrupt handler, which
10521 * is done in this function.
10522 *
10523 * Return codes
10524 * 0 - successful
10525 * other values - error
10526 */
10527 static int
10528 lpfc_sli_enable_msi(struct lpfc_hba *phba)
10529 {
10530 int rc;
10531
10532 rc = pci_enable_msi(phba->pcidev);
10533 if (!rc)
10534 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10535 "0462 PCI enable MSI mode success.\n");
10536 else {
10537 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10538 "0471 PCI enable MSI mode failed (%d)\n", rc);
10539 return rc;
10540 }
10541
10542 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10543 0, LPFC_DRIVER_NAME, phba);
10544 if (rc) {
10545 pci_disable_msi(phba->pcidev);
10546 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10547 "0478 MSI request_irq failed (%d)\n", rc);
10548 }
10549 return rc;
10550 }
10551
10552 /**
10553 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10554 * @phba: pointer to lpfc hba data structure.
10555 *
10556 * This routine is invoked to enable the device interrupt and associate the
10557 * driver's interrupt handler(s) with the interrupt vector(s) for a device
10558 * with SLI-3 interface spec. Depending on the interrupt mode configured for
10559 * the driver, the driver will try to fall back from the configured interrupt
10560 * mode to an interrupt mode which is supported by the platform, kernel, and
10561 * device, in the order of:
10562 * MSI-X -> MSI -> IRQ.
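 * (cfg_mode 2 requests MSI-X, cfg_mode 1 allows MSI, and INTx is the final
 * fallback; the return value encodes the mode that actually came up:
 * 2 = MSI-X, 1 = MSI, 0 = INTx, or LPFC_INTR_ERROR if none could be enabled.)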
10563 * 10564 * Return codes 10565 * 0 - successful 10566 * other values - error 10567 **/ 10568 static uint32_t 10569 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 10570 { 10571 uint32_t intr_mode = LPFC_INTR_ERROR; 10572 int retval; 10573 10574 if (cfg_mode == 2) { 10575 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 10576 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 10577 if (!retval) { 10578 /* Now, try to enable MSI-X interrupt mode */ 10579 retval = lpfc_sli_enable_msix(phba); 10580 if (!retval) { 10581 /* Indicate initialization to MSI-X mode */ 10582 phba->intr_type = MSIX; 10583 intr_mode = 2; 10584 } 10585 } 10586 } 10587 10588 /* Fallback to MSI if MSI-X initialization failed */ 10589 if (cfg_mode >= 1 && phba->intr_type == NONE) { 10590 retval = lpfc_sli_enable_msi(phba); 10591 if (!retval) { 10592 /* Indicate initialization to MSI mode */ 10593 phba->intr_type = MSI; 10594 intr_mode = 1; 10595 } 10596 } 10597 10598 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 10599 if (phba->intr_type == NONE) { 10600 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 10601 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 10602 if (!retval) { 10603 /* Indicate initialization to INTx mode */ 10604 phba->intr_type = INTx; 10605 intr_mode = 0; 10606 } 10607 } 10608 return intr_mode; 10609 } 10610 10611 /** 10612 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 10613 * @phba: pointer to lpfc hba data structure. 10614 * 10615 * This routine is invoked to disable device interrupt and disassociate the 10616 * driver's interrupt handler(s) from interrupt vector(s) to device with 10617 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 10618 * release the interrupt vector(s) for the message signaled interrupt. 10619 **/ 10620 static void 10621 lpfc_sli_disable_intr(struct lpfc_hba *phba) 10622 { 10623 int nr_irqs, i; 10624 10625 if (phba->intr_type == MSIX) 10626 nr_irqs = LPFC_MSIX_VECTORS; 10627 else 10628 nr_irqs = 1; 10629 10630 for (i = 0; i < nr_irqs; i++) 10631 free_irq(pci_irq_vector(phba->pcidev, i), phba); 10632 pci_free_irq_vectors(phba->pcidev); 10633 10634 /* Reset interrupt management states */ 10635 phba->intr_type = NONE; 10636 phba->sli.slistat.sli_intr = 0; 10637 } 10638 10639 /** 10640 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 10641 * @phba: pointer to lpfc hba data structure. 10642 * @id: EQ vector index or Hardware Queue index 10643 * @match: LPFC_FIND_BY_EQ = match by EQ 10644 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 10645 * Return the CPU that matches the selection criteria 10646 */ 10647 static uint16_t 10648 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 10649 { 10650 struct lpfc_vector_map_info *cpup; 10651 int cpu; 10652 10653 /* Loop through all CPUs */ 10654 for_each_present_cpu(cpu) { 10655 cpup = &phba->sli4_hba.cpu_map[cpu]; 10656 10657 /* If we are matching by EQ, there may be multiple CPUs using 10658 * using the same vector, so select the one with 10659 * LPFC_CPU_FIRST_IRQ set. 
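 * When matching by hardware queue (LPFC_FIND_BY_HDWQ), the first CPU
 * mapped to that hdwq is returned; if nothing matches either way, CPU 0
 * is returned as a fallback.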
10660 */ 10661 if ((match == LPFC_FIND_BY_EQ) && 10662 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 10663 (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && 10664 (cpup->eq == id)) 10665 return cpu; 10666 10667 /* If matching by HDWQ, select the first CPU that matches */ 10668 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 10669 return cpu; 10670 } 10671 return 0; 10672 } 10673 10674 #ifdef CONFIG_X86 10675 /** 10676 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 10677 * @phba: pointer to lpfc hba data structure. 10678 * @cpu: CPU map index 10679 * @phys_id: CPU package physical id 10680 * @core_id: CPU core id 10681 */ 10682 static int 10683 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 10684 uint16_t phys_id, uint16_t core_id) 10685 { 10686 struct lpfc_vector_map_info *cpup; 10687 int idx; 10688 10689 for_each_present_cpu(idx) { 10690 cpup = &phba->sli4_hba.cpu_map[idx]; 10691 /* Does the cpup match the one we are looking for */ 10692 if ((cpup->phys_id == phys_id) && 10693 (cpup->core_id == core_id) && 10694 (cpu != idx)) 10695 return 1; 10696 } 10697 return 0; 10698 } 10699 #endif 10700 10701 /** 10702 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 10703 * @phba: pointer to lpfc hba data structure. 10704 * @vectors: number of msix vectors allocated. 10705 * 10706 * The routine will figure out the CPU affinity assignment for every 10707 * MSI-X vector allocated for the HBA. 10708 * In addition, the CPU to IO channel mapping will be calculated 10709 * and the phba->sli4_hba.cpu_map array will reflect this. 10710 */ 10711 static void 10712 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 10713 { 10714 int i, cpu, idx, new_cpu, start_cpu, first_cpu; 10715 int max_phys_id, min_phys_id; 10716 int max_core_id, min_core_id; 10717 struct lpfc_vector_map_info *cpup; 10718 struct lpfc_vector_map_info *new_cpup; 10719 const struct cpumask *maskp; 10720 #ifdef CONFIG_X86 10721 struct cpuinfo_x86 *cpuinfo; 10722 #endif 10723 10724 /* Init cpu_map array */ 10725 for_each_possible_cpu(cpu) { 10726 cpup = &phba->sli4_hba.cpu_map[cpu]; 10727 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 10728 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 10729 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 10730 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 10731 cpup->irq = LPFC_VECTOR_MAP_EMPTY; 10732 cpup->flag = 0; 10733 } 10734 10735 max_phys_id = 0; 10736 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 10737 max_core_id = 0; 10738 min_core_id = LPFC_VECTOR_MAP_EMPTY; 10739 10740 /* Update CPU map with physical id and core id of each CPU */ 10741 for_each_present_cpu(cpu) { 10742 cpup = &phba->sli4_hba.cpu_map[cpu]; 10743 #ifdef CONFIG_X86 10744 cpuinfo = &cpu_data(cpu); 10745 cpup->phys_id = cpuinfo->phys_proc_id; 10746 cpup->core_id = cpuinfo->cpu_core_id; 10747 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 10748 cpup->flag |= LPFC_CPU_MAP_HYPER; 10749 #else 10750 /* No distinction between CPUs for other platforms */ 10751 cpup->phys_id = 0; 10752 cpup->core_id = cpu; 10753 #endif 10754 10755 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10756 "3328 CPU physid %d coreid %d\n", 10757 cpup->phys_id, cpup->core_id); 10758 10759 if (cpup->phys_id > max_phys_id) 10760 max_phys_id = cpup->phys_id; 10761 if (cpup->phys_id < min_phys_id) 10762 min_phys_id = cpup->phys_id; 10763 10764 if (cpup->core_id > max_core_id) 10765 max_core_id = cpup->core_id; 10766 if (cpup->core_id < min_core_id) 10767 min_core_id = cpup->core_id; 10768 } 10769 10770 for_each_possible_cpu(i) { 10771 struct lpfc_eq_intr_info *eqi = 10772 
per_cpu_ptr(phba->sli4_hba.eq_info, i); 10773 10774 INIT_LIST_HEAD(&eqi->list); 10775 eqi->icnt = 0; 10776 } 10777 10778 /* This loop sets up all CPUs that are affinitized with a 10779 * irq vector assigned to the driver. All affinitized CPUs 10780 * will get a link to that vectors IRQ and EQ. 10781 * 10782 * NULL affinity mask handling: 10783 * If irq count is greater than one, log an error message. 10784 * If the null mask is received for the first irq, find the 10785 * first present cpu, and assign the eq index to ensure at 10786 * least one EQ is assigned. 10787 */ 10788 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10789 /* Get a CPU mask for all CPUs affinitized to this vector */ 10790 maskp = pci_irq_get_affinity(phba->pcidev, idx); 10791 if (!maskp) { 10792 if (phba->cfg_irq_chann > 1) 10793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10794 "3329 No affinity mask found " 10795 "for vector %d (%d)\n", 10796 idx, phba->cfg_irq_chann); 10797 if (!idx) { 10798 cpu = cpumask_first(cpu_present_mask); 10799 cpup = &phba->sli4_hba.cpu_map[cpu]; 10800 cpup->eq = idx; 10801 cpup->irq = pci_irq_vector(phba->pcidev, idx); 10802 cpup->flag |= LPFC_CPU_FIRST_IRQ; 10803 } 10804 break; 10805 } 10806 10807 i = 0; 10808 /* Loop through all CPUs associated with vector idx */ 10809 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 10810 /* Set the EQ index and IRQ for that vector */ 10811 cpup = &phba->sli4_hba.cpu_map[cpu]; 10812 cpup->eq = idx; 10813 cpup->irq = pci_irq_vector(phba->pcidev, idx); 10814 10815 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10816 "3336 Set Affinity: CPU %d " 10817 "irq %d eq %d\n", 10818 cpu, cpup->irq, cpup->eq); 10819 10820 /* If this is the first CPU thats assigned to this 10821 * vector, set LPFC_CPU_FIRST_IRQ. 10822 */ 10823 if (!i) 10824 cpup->flag |= LPFC_CPU_FIRST_IRQ; 10825 i++; 10826 } 10827 } 10828 10829 /* After looking at each irq vector assigned to this pcidev, its 10830 * possible to see that not ALL CPUs have been accounted for. 10831 * Next we will set any unassigned (unaffinitized) cpu map 10832 * entries to a IRQ on the same phys_id. 10833 */ 10834 first_cpu = cpumask_first(cpu_present_mask); 10835 start_cpu = first_cpu; 10836 10837 for_each_present_cpu(cpu) { 10838 cpup = &phba->sli4_hba.cpu_map[cpu]; 10839 10840 /* Is this CPU entry unassigned */ 10841 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 10842 /* Mark CPU as IRQ not assigned by the kernel */ 10843 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 10844 10845 /* If so, find a new_cpup thats on the the SAME 10846 * phys_id as cpup. start_cpu will start where we 10847 * left off so all unassigned entries don't get assgined 10848 * the IRQ of the first entry. 10849 */ 10850 new_cpu = start_cpu; 10851 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10852 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10853 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 10854 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) && 10855 (new_cpup->phys_id == cpup->phys_id)) 10856 goto found_same; 10857 new_cpu = cpumask_next( 10858 new_cpu, cpu_present_mask); 10859 if (new_cpu == nr_cpumask_bits) 10860 new_cpu = first_cpu; 10861 } 10862 /* At this point, we leave the CPU as unassigned */ 10863 continue; 10864 found_same: 10865 /* We found a matching phys_id, so copy the IRQ info */ 10866 cpup->eq = new_cpup->eq; 10867 cpup->irq = new_cpup->irq; 10868 10869 /* Bump start_cpu to the next slot to minmize the 10870 * chance of having multiple unassigned CPU entries 10871 * selecting the same IRQ. 
10872 */ 10873 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10874 if (start_cpu == nr_cpumask_bits) 10875 start_cpu = first_cpu; 10876 10877 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10878 "3337 Set Affinity: CPU %d " 10879 "irq %d from id %d same " 10880 "phys_id (%d)\n", 10881 cpu, cpup->irq, new_cpu, cpup->phys_id); 10882 } 10883 } 10884 10885 /* Set any unassigned cpu map entries to a IRQ on any phys_id */ 10886 start_cpu = first_cpu; 10887 10888 for_each_present_cpu(cpu) { 10889 cpup = &phba->sli4_hba.cpu_map[cpu]; 10890 10891 /* Is this entry unassigned */ 10892 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 10893 /* Mark it as IRQ not assigned by the kernel */ 10894 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 10895 10896 /* If so, find a new_cpup thats on ANY phys_id 10897 * as the cpup. start_cpu will start where we 10898 * left off so all unassigned entries don't get 10899 * assigned the IRQ of the first entry. 10900 */ 10901 new_cpu = start_cpu; 10902 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10903 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10904 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 10905 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY)) 10906 goto found_any; 10907 new_cpu = cpumask_next( 10908 new_cpu, cpu_present_mask); 10909 if (new_cpu == nr_cpumask_bits) 10910 new_cpu = first_cpu; 10911 } 10912 /* We should never leave an entry unassigned */ 10913 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10914 "3339 Set Affinity: CPU %d " 10915 "irq %d UNASSIGNED\n", 10916 cpup->hdwq, cpup->irq); 10917 continue; 10918 found_any: 10919 /* We found an available entry, copy the IRQ info */ 10920 cpup->eq = new_cpup->eq; 10921 cpup->irq = new_cpup->irq; 10922 10923 /* Bump start_cpu to the next slot to minmize the 10924 * chance of having multiple unassigned CPU entries 10925 * selecting the same IRQ. 10926 */ 10927 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10928 if (start_cpu == nr_cpumask_bits) 10929 start_cpu = first_cpu; 10930 10931 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10932 "3338 Set Affinity: CPU %d " 10933 "irq %d from id %d (%d/%d)\n", 10934 cpu, cpup->irq, new_cpu, 10935 new_cpup->phys_id, new_cpup->core_id); 10936 } 10937 } 10938 10939 /* Finally we need to associate a hdwq with each cpu_map entry 10940 * This will be 1 to 1 - hdwq to cpu, unless there are less 10941 * hardware queues then CPUs. For that case we will just round-robin 10942 * the available hardware queues as they get assigned to CPUs. 10943 */ 10944 idx = 0; 10945 start_cpu = 0; 10946 for_each_present_cpu(cpu) { 10947 cpup = &phba->sli4_hba.cpu_map[cpu]; 10948 if (idx >= phba->cfg_hdw_queue) { 10949 /* We need to reuse a Hardware Queue for another CPU, 10950 * so be smart about it and pick one that has its 10951 * IRQ/EQ mapped to the same phys_id (CPU package). 10952 * and core_id. 10953 */ 10954 new_cpu = start_cpu; 10955 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10956 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10957 if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) && 10958 (new_cpup->phys_id == cpup->phys_id) && 10959 (new_cpup->core_id == cpup->core_id)) 10960 goto found_hdwq; 10961 new_cpu = cpumask_next( 10962 new_cpu, cpu_present_mask); 10963 if (new_cpu == nr_cpumask_bits) 10964 new_cpu = first_cpu; 10965 } 10966 10967 /* If we can't match both phys_id and core_id, 10968 * settle for just a phys_id match. 
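 * If even a phys_id match cannot be found, the code below falls back
 * to a plain round-robin over cfg_hdw_queue.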
10969 */ 10970 new_cpu = start_cpu; 10971 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 10972 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 10973 if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) && 10974 (new_cpup->phys_id == cpup->phys_id)) 10975 goto found_hdwq; 10976 new_cpu = cpumask_next( 10977 new_cpu, cpu_present_mask); 10978 if (new_cpu == nr_cpumask_bits) 10979 new_cpu = first_cpu; 10980 } 10981 10982 /* Otherwise just round robin on cfg_hdw_queue */ 10983 cpup->hdwq = idx % phba->cfg_hdw_queue; 10984 goto logit; 10985 found_hdwq: 10986 /* We found an available entry, copy the IRQ info */ 10987 start_cpu = cpumask_next(new_cpu, cpu_present_mask); 10988 if (start_cpu == nr_cpumask_bits) 10989 start_cpu = first_cpu; 10990 cpup->hdwq = new_cpup->hdwq; 10991 } else { 10992 /* 1 to 1, CPU to hdwq */ 10993 cpup->hdwq = idx; 10994 } 10995 logit: 10996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10997 "3335 Set Affinity: CPU %d (phys %d core %d): " 10998 "hdwq %d eq %d irq %d flg x%x\n", 10999 cpu, cpup->phys_id, cpup->core_id, 11000 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag); 11001 idx++; 11002 } 11003 11004 /* The cpu_map array will be used later during initialization 11005 * when EQ / CQ / WQs are allocated and configured. 11006 */ 11007 return; 11008 } 11009 11010 /** 11011 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 11012 * @phba: pointer to lpfc hba data structure. 11013 * 11014 * This routine is invoked to enable the MSI-X interrupt vectors to device 11015 * with SLI-4 interface spec. 11016 * 11017 * Return codes 11018 * 0 - successful 11019 * other values - error 11020 **/ 11021 static int 11022 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 11023 { 11024 int vectors, rc, index; 11025 char *name; 11026 11027 /* Set up MSI-X multi-message vectors */ 11028 vectors = phba->cfg_irq_chann; 11029 11030 rc = pci_alloc_irq_vectors(phba->pcidev, 11031 1, 11032 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 11033 if (rc < 0) { 11034 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11035 "0484 PCI enable MSI-X failed (%d)\n", rc); 11036 goto vec_fail_out; 11037 } 11038 vectors = rc; 11039 11040 /* Assign MSI-X vectors to interrupt handlers */ 11041 for (index = 0; index < vectors; index++) { 11042 name = phba->sli4_hba.hba_eq_hdl[index].handler_name; 11043 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 11044 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 11045 LPFC_DRIVER_HANDLER_NAME"%d", index); 11046 11047 phba->sli4_hba.hba_eq_hdl[index].idx = index; 11048 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 11049 rc = request_irq(pci_irq_vector(phba->pcidev, index), 11050 &lpfc_sli4_hba_intr_handler, 0, 11051 name, 11052 &phba->sli4_hba.hba_eq_hdl[index]); 11053 if (rc) { 11054 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11055 "0486 MSI-X fast-path (%d) " 11056 "request_irq failed (%d)\n", index, rc); 11057 goto cfg_fail_out; 11058 } 11059 } 11060 11061 if (vectors != phba->cfg_irq_chann) { 11062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11063 "3238 Reducing IO channels to match number of " 11064 "MSI-X vectors, requested %d got %d\n", 11065 phba->cfg_irq_chann, vectors); 11066 if (phba->cfg_irq_chann > vectors) 11067 phba->cfg_irq_chann = vectors; 11068 if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors)) 11069 phba->cfg_nvmet_mrq = vectors; 11070 } 11071 11072 return rc; 11073 11074 cfg_fail_out: 11075 /* free the irq already requested */ 11076 for (--index; index >= 0; index--) 11077 free_irq(pci_irq_vector(phba->pcidev, index), 11078 &phba->sli4_hba.hba_eq_hdl[index]); 
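	/* Only vectors below the failing index were successfully requested;
	 * all of them have been released at this point.
	 */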
11079 11080 /* Unconfigure MSI-X capability structure */ 11081 pci_free_irq_vectors(phba->pcidev); 11082 11083 vec_fail_out: 11084 return rc; 11085 } 11086 11087 /** 11088 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 11089 * @phba: pointer to lpfc hba data structure. 11090 * 11091 * This routine is invoked to enable the MSI interrupt mode to device with 11092 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 11093 * to enable the MSI vector. The device driver is responsible for calling 11094 * request_irq() to register the MSI vector with an interrupt handler, 11095 * which is done in this function. 11096 * 11097 * Return codes 11098 * 0 - successful 11099 * other values - error 11100 **/ 11101 static int 11102 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 11103 { 11104 int rc, index; 11105 11106 rc = pci_enable_msi(phba->pcidev); 11107 if (!rc) 11108 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11109 "0487 PCI enable MSI mode success.\n"); 11110 else { 11111 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11112 "0488 PCI enable MSI mode failed (%d)\n", rc); 11113 return rc; 11114 } 11115 11116 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11117 0, LPFC_DRIVER_NAME, phba); 11118 if (rc) { 11119 pci_disable_msi(phba->pcidev); 11120 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11121 "0490 MSI request_irq failed (%d)\n", rc); 11122 return rc; 11123 } 11124 11125 for (index = 0; index < phba->cfg_irq_chann; index++) { 11126 phba->sli4_hba.hba_eq_hdl[index].idx = index; 11127 phba->sli4_hba.hba_eq_hdl[index].phba = phba; 11128 } 11129 11130 return 0; 11131 } 11132 11133 /** 11134 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 11135 * @phba: pointer to lpfc hba data structure. 11136 * 11137 * This routine is invoked to enable device interrupt and associate driver's 11138 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 11139 * interface spec. Depending on the interrupt mode configured for the driver, 11140 * the driver will try to fall back from the configured interrupt mode to an 11141 * interrupt mode which is supported by the platform, kernel, and device in 11142 * the order of: 11143 * MSI-X -> MSI -> IRQ.
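 * (cfg_mode 2 attempts MSI-X first, cfg_mode 1 starts with MSI, and
 * INTx is the final fallback when both fail.)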
11144 * 11145 * Return codes 11146 * 0 - successful 11147 * other values - error 11148 **/ 11149 static uint32_t 11150 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 11151 { 11152 uint32_t intr_mode = LPFC_INTR_ERROR; 11153 int retval, idx; 11154 11155 if (cfg_mode == 2) { 11156 /* Preparation before conf_msi mbox cmd */ 11157 retval = 0; 11158 if (!retval) { 11159 /* Now, try to enable MSI-X interrupt mode */ 11160 retval = lpfc_sli4_enable_msix(phba); 11161 if (!retval) { 11162 /* Indicate initialization to MSI-X mode */ 11163 phba->intr_type = MSIX; 11164 intr_mode = 2; 11165 } 11166 } 11167 } 11168 11169 /* Fallback to MSI if MSI-X initialization failed */ 11170 if (cfg_mode >= 1 && phba->intr_type == NONE) { 11171 retval = lpfc_sli4_enable_msi(phba); 11172 if (!retval) { 11173 /* Indicate initialization to MSI mode */ 11174 phba->intr_type = MSI; 11175 intr_mode = 1; 11176 } 11177 } 11178 11179 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 11180 if (phba->intr_type == NONE) { 11181 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 11182 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 11183 if (!retval) { 11184 struct lpfc_hba_eq_hdl *eqhdl; 11185 11186 /* Indicate initialization to INTx mode */ 11187 phba->intr_type = INTx; 11188 intr_mode = 0; 11189 11190 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 11191 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; 11192 eqhdl->idx = idx; 11193 eqhdl->phba = phba; 11194 } 11195 } 11196 } 11197 return intr_mode; 11198 } 11199 11200 /** 11201 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 11202 * @phba: pointer to lpfc hba data structure. 11203 * 11204 * This routine is invoked to disable device interrupt and disassociate 11205 * the driver's interrupt handler(s) from interrupt vector(s) to device 11206 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 11207 * will release the interrupt vector(s) for the message signaled interrupt. 11208 **/ 11209 static void 11210 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 11211 { 11212 /* Disable the currently initialized interrupt mode */ 11213 if (phba->intr_type == MSIX) { 11214 int index; 11215 11216 /* Free up MSI-X multi-message vectors */ 11217 for (index = 0; index < phba->cfg_irq_chann; index++) { 11218 irq_set_affinity_hint( 11219 pci_irq_vector(phba->pcidev, index), 11220 NULL); 11221 free_irq(pci_irq_vector(phba->pcidev, index), 11222 &phba->sli4_hba.hba_eq_hdl[index]); 11223 } 11224 } else { 11225 free_irq(phba->pcidev->irq, phba); 11226 } 11227 11228 pci_free_irq_vectors(phba->pcidev); 11229 11230 /* Reset interrupt management states */ 11231 phba->intr_type = NONE; 11232 phba->sli.slistat.sli_intr = 0; 11233 } 11234 11235 /** 11236 * lpfc_unset_hba - Unset SLI3 hba device initialization 11237 * @phba: pointer to lpfc hba data structure. 11238 * 11239 * This routine is invoked to unset the HBA device initialization steps to 11240 * a device with SLI-3 interface spec. 
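 * It is used on the lpfc_pci_probe_one_s3() error path to undo the
 * setup performed while bringing the HBA up.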
11241 **/ 11242 static void 11243 lpfc_unset_hba(struct lpfc_hba *phba) 11244 { 11245 struct lpfc_vport *vport = phba->pport; 11246 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 11247 11248 spin_lock_irq(shost->host_lock); 11249 vport->load_flag |= FC_UNLOADING; 11250 spin_unlock_irq(shost->host_lock); 11251 11252 kfree(phba->vpi_bmask); 11253 kfree(phba->vpi_ids); 11254 11255 lpfc_stop_hba_timers(phba); 11256 11257 phba->pport->work_port_events = 0; 11258 11259 lpfc_sli_hba_down(phba); 11260 11261 lpfc_sli_brdrestart(phba); 11262 11263 lpfc_sli_disable_intr(phba); 11264 11265 return; 11266 } 11267 11268 /** 11269 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 11270 * @phba: Pointer to HBA context object. 11271 * 11272 * This function is called in the SLI4 code path to wait for completion 11273 * of device's XRIs exchange busy. It will check the XRI exchange busy 11274 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 11275 * that, it will check the XRI exchange busy on outstanding FCP and ELS 11276 * I/Os every 30 seconds, log error message, and wait forever. Only when 11277 * all XRI exchange busy complete, the driver unload shall proceed with 11278 * invoking the function reset ioctl mailbox command to the CNA and the 11279 * the rest of the driver unload resource release. 11280 **/ 11281 static void 11282 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 11283 { 11284 struct lpfc_sli4_hdw_queue *qp; 11285 int idx, ccnt, fcnt; 11286 int wait_time = 0; 11287 int io_xri_cmpl = 1; 11288 int nvmet_xri_cmpl = 1; 11289 int fcp_xri_cmpl = 1; 11290 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11291 11292 /* Driver just aborted IOs during the hba_unset process. Pause 11293 * here to give the HBA time to complete the IO and get entries 11294 * into the abts lists. 11295 */ 11296 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 11297 11298 /* Wait for NVME pending IO to flush back to transport. 
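 * This only applies when the NVME FC4 type is configured on the port.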
*/ 11299 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 11300 lpfc_nvme_wait_for_io_drain(phba); 11301 11302 ccnt = 0; 11303 fcnt = 0; 11304 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11305 qp = &phba->sli4_hba.hdwq[idx]; 11306 fcp_xri_cmpl = list_empty( 11307 &qp->lpfc_abts_scsi_buf_list); 11308 if (!fcp_xri_cmpl) /* if list is NOT empty */ 11309 fcnt++; 11310 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11311 io_xri_cmpl = list_empty( 11312 &qp->lpfc_abts_nvme_buf_list); 11313 if (!io_xri_cmpl) /* if list is NOT empty */ 11314 ccnt++; 11315 } 11316 } 11317 if (ccnt) 11318 io_xri_cmpl = 0; 11319 if (fcnt) 11320 fcp_xri_cmpl = 0; 11321 11322 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11323 nvmet_xri_cmpl = 11324 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11325 } 11326 11327 while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl || 11328 !nvmet_xri_cmpl) { 11329 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 11330 if (!nvmet_xri_cmpl) 11331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11332 "6424 NVMET XRI exchange busy " 11333 "wait time: %d seconds.\n", 11334 wait_time/1000); 11335 if (!io_xri_cmpl) 11336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11337 "6100 NVME XRI exchange busy " 11338 "wait time: %d seconds.\n", 11339 wait_time/1000); 11340 if (!fcp_xri_cmpl) 11341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11342 "2877 FCP XRI exchange busy " 11343 "wait time: %d seconds.\n", 11344 wait_time/1000); 11345 if (!els_xri_cmpl) 11346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11347 "2878 ELS XRI exchange busy " 11348 "wait time: %d seconds.\n", 11349 wait_time/1000); 11350 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 11351 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 11352 } else { 11353 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 11354 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 11355 } 11356 11357 ccnt = 0; 11358 fcnt = 0; 11359 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 11360 qp = &phba->sli4_hba.hdwq[idx]; 11361 fcp_xri_cmpl = list_empty( 11362 &qp->lpfc_abts_scsi_buf_list); 11363 if (!fcp_xri_cmpl) /* if list is NOT empty */ 11364 fcnt++; 11365 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11366 io_xri_cmpl = list_empty( 11367 &qp->lpfc_abts_nvme_buf_list); 11368 if (!io_xri_cmpl) /* if list is NOT empty */ 11369 ccnt++; 11370 } 11371 } 11372 if (ccnt) 11373 io_xri_cmpl = 0; 11374 if (fcnt) 11375 fcp_xri_cmpl = 0; 11376 11377 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11378 nvmet_xri_cmpl = list_empty( 11379 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 11380 } 11381 els_xri_cmpl = 11382 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 11383 11384 } 11385 } 11386 11387 /** 11388 * lpfc_sli4_hba_unset - Unset the fcoe hba 11389 * @phba: Pointer to HBA context object. 11390 * 11391 * This function is called in the SLI4 code path to reset the HBA's FCoE 11392 * function. The caller is not required to hold any lock. This routine 11393 * issues PCI function reset mailbox command to reset the FCoE function. 11394 * At the end of the function, it calls lpfc_hba_down_post function to 11395 * free any pending commands. 11396 **/ 11397 static void 11398 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 11399 { 11400 int wait_cnt = 0; 11401 LPFC_MBOXQ_t *mboxq; 11402 struct pci_dev *pdev = phba->pcidev; 11403 11404 lpfc_stop_hba_timers(phba); 11405 if (phba->pport) 11406 phba->sli4_hba.intr_enable = 0; 11407 11408 /* 11409 * Gracefully wait out the potential current outstanding asynchronous 11410 * mailbox command. 
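 * The wait is bounded: the code below polls every 10ms up to
 * LPFC_ACTIVE_MBOX_WAIT_CNT times and then forcefully completes the
 * command.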
11411 */ 11412 11413 /* First, block any pending async mailbox command from posted */ 11414 spin_lock_irq(&phba->hbalock); 11415 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11416 spin_unlock_irq(&phba->hbalock); 11417 /* Now, trying to wait it out if we can */ 11418 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11419 msleep(10); 11420 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 11421 break; 11422 } 11423 /* Forcefully release the outstanding mailbox command if timed out */ 11424 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 11425 spin_lock_irq(&phba->hbalock); 11426 mboxq = phba->sli.mbox_active; 11427 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 11428 __lpfc_mbox_cmpl_put(phba, mboxq); 11429 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11430 phba->sli.mbox_active = NULL; 11431 spin_unlock_irq(&phba->hbalock); 11432 } 11433 11434 /* Abort all iocbs associated with the hba */ 11435 lpfc_sli_hba_iocb_abort(phba); 11436 11437 /* Wait for completion of device XRI exchange busy */ 11438 lpfc_sli4_xri_exchange_busy_wait(phba); 11439 11440 /* Disable PCI subsystem interrupt */ 11441 lpfc_sli4_disable_intr(phba); 11442 11443 /* Disable SR-IOV if enabled */ 11444 if (phba->cfg_sriov_nr_virtfn) 11445 pci_disable_sriov(pdev); 11446 11447 /* Stop kthread signal shall trigger work_done one more time */ 11448 kthread_stop(phba->worker_thread); 11449 11450 /* Disable FW logging to host memory */ 11451 lpfc_ras_stop_fwlog(phba); 11452 11453 /* Unset the queues shared with the hardware then release all 11454 * allocated resources. 11455 */ 11456 lpfc_sli4_queue_unset(phba); 11457 lpfc_sli4_queue_destroy(phba); 11458 11459 /* Reset SLI4 HBA FCoE function */ 11460 lpfc_pci_function_reset(phba); 11461 11462 /* Free RAS DMA memory */ 11463 if (phba->ras_fwlog.ras_enabled) 11464 lpfc_sli4_ras_dma_free(phba); 11465 11466 /* Stop the SLI4 device port */ 11467 if (phba->pport) 11468 phba->pport->work_port_events = 0; 11469 } 11470 11471 /** 11472 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 11473 * @phba: Pointer to HBA context object. 11474 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11475 * 11476 * This function is called in the SLI4 code path to read the port's 11477 * sli4 capabilities. 11478 * 11479 * This function may be be called from any context that can block-wait 11480 * for the completion. The expectation is that this routine is called 11481 * typically from probe_one or from the online routine. 
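 * Returns 0 on success and 1 if the mailbox command fails.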
11482 **/ 11483 int 11484 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11485 { 11486 int rc; 11487 struct lpfc_mqe *mqe; 11488 struct lpfc_pc_sli4_params *sli4_params; 11489 uint32_t mbox_tmo; 11490 11491 rc = 0; 11492 mqe = &mboxq->u.mqe; 11493 11494 /* Read the port's SLI4 Parameters port capabilities */ 11495 lpfc_pc_sli4_params(mboxq); 11496 if (!phba->sli4_hba.intr_enable) 11497 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11498 else { 11499 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11500 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11501 } 11502 11503 if (unlikely(rc)) 11504 return 1; 11505 11506 sli4_params = &phba->sli4_hba.pc_sli4_params; 11507 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 11508 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 11509 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 11510 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 11511 &mqe->un.sli4_params); 11512 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 11513 &mqe->un.sli4_params); 11514 sli4_params->proto_types = mqe->un.sli4_params.word3; 11515 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 11516 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 11517 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 11518 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 11519 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 11520 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 11521 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 11522 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 11523 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 11524 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 11525 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 11526 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 11527 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 11528 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 11529 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 11530 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 11531 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 11532 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 11533 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 11534 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 11535 11536 /* Make sure that sge_supp_len can be handled by the driver */ 11537 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11538 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11539 11540 return rc; 11541 } 11542 11543 /** 11544 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 11545 * @phba: Pointer to HBA context object. 11546 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 11547 * 11548 * This function is called in the SLI4 code path to read the port's 11549 * sli4 capabilities. 11550 * 11551 * This function may be be called from any context that can block-wait 11552 * for the completion. The expectation is that this routine is called 11553 * typically from probe_one or from the online routine. 
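 * Returns 0 on success, the mailbox command status on failure, or
 * -ENODEV when no usable FC4 type (FCP or NVME) remains after the
 * capability checks.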
11554 **/ 11555 int 11556 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 11557 { 11558 int rc; 11559 struct lpfc_mqe *mqe = &mboxq->u.mqe; 11560 struct lpfc_pc_sli4_params *sli4_params; 11561 uint32_t mbox_tmo; 11562 int length; 11563 bool exp_wqcq_pages = true; 11564 struct lpfc_sli4_parameters *mbx_sli4_parameters; 11565 11566 /* 11567 * By default, the driver assumes the SLI4 port requires RPI 11568 * header postings. The SLI4_PARAM response will correct this 11569 * assumption. 11570 */ 11571 phba->sli4_hba.rpi_hdrs_in_use = 1; 11572 11573 /* Read the port's SLI4 Config Parameters */ 11574 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 11575 sizeof(struct lpfc_sli4_cfg_mhdr)); 11576 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11577 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 11578 length, LPFC_SLI4_MBX_EMBED); 11579 if (!phba->sli4_hba.intr_enable) 11580 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11581 else { 11582 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 11583 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11584 } 11585 if (unlikely(rc)) 11586 return rc; 11587 sli4_params = &phba->sli4_hba.pc_sli4_params; 11588 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 11589 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 11590 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 11591 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 11592 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 11593 mbx_sli4_parameters); 11594 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 11595 mbx_sli4_parameters); 11596 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 11597 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 11598 else 11599 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 11600 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 11601 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 11602 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 11603 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 11604 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 11605 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 11606 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 11607 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 11608 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 11609 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 11610 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 11611 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 11612 mbx_sli4_parameters); 11613 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 11614 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 11615 mbx_sli4_parameters); 11616 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 11617 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 11618 11619 /* Check for firmware nvme support */ 11620 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 11621 bf_get(cfg_xib, mbx_sli4_parameters)); 11622 11623 if (rc) { 11624 /* Save this to indicate the Firmware supports NVME */ 11625 sli4_params->nvme = 1; 11626 11627 /* Firmware NVME support, check driver FC4 NVME support */ 11628 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 11629 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 11630 "6133 Disabling NVME support: " 11631 "FC4 type not supported: x%x\n", 11632 phba->cfg_enable_fc4_type); 11633 goto fcponly; 11634 } 11635 } else { 
11636 /* No firmware NVME support, check driver FC4 NVME support */ 11637 sli4_params->nvme = 0; 11638 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 11640 "6101 Disabling NVME support: Not " 11641 "supported by firmware (%d %d) x%x\n", 11642 bf_get(cfg_nvme, mbx_sli4_parameters), 11643 bf_get(cfg_xib, mbx_sli4_parameters), 11644 phba->cfg_enable_fc4_type); 11645 fcponly: 11646 phba->nvme_support = 0; 11647 phba->nvmet_support = 0; 11648 phba->cfg_nvmet_mrq = 0; 11649 11650 /* If no FC4 type support, move to just SCSI support */ 11651 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 11652 return -ENODEV; 11653 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 11654 } 11655 } 11656 11657 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ 11658 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 11659 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) 11660 phba->cfg_enable_pbde = 0; 11661 11662 /* 11663 * To support Suppress Response feature we must satisfy 3 conditions. 11664 * lpfc_suppress_rsp module parameter must be set (default). 11665 * In SLI4-Parameters Descriptor: 11666 * Extended Inline Buffers (XIB) must be supported. 11667 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 11668 * (double negative). 11669 */ 11670 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 11671 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 11672 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 11673 else 11674 phba->cfg_suppress_rsp = 0; 11675 11676 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 11677 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 11678 11679 /* Make sure that sge_supp_len can be handled by the driver */ 11680 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 11681 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 11682 11683 /* 11684 * Check whether the adapter supports an embedded copy of the 11685 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 11686 * to use this option, 128-byte WQEs must be used. 11687 */ 11688 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 11689 phba->fcp_embed_io = 1; 11690 else 11691 phba->fcp_embed_io = 0; 11692 11693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 11694 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 11695 bf_get(cfg_xib, mbx_sli4_parameters), 11696 phba->cfg_enable_pbde, 11697 phba->fcp_embed_io, phba->nvme_support, 11698 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 11699 11700 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 11701 LPFC_SLI_INTF_IF_TYPE_2) && 11702 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 11703 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 11704 exp_wqcq_pages = false; 11705 11706 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 11707 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 11708 exp_wqcq_pages && 11709 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 11710 phba->enab_exp_wqcq_pages = 1; 11711 else 11712 phba->enab_exp_wqcq_pages = 0; 11713 /* 11714 * Check if the SLI port supports MDS Diagnostics 11715 */ 11716 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 11717 phba->mds_diags_support = 1; 11718 else 11719 phba->mds_diags_support = 0; 11720 11721 return 0; 11722 } 11723 11724 /** 11725 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 
11726 * @pdev: pointer to PCI device 11727 * @pid: pointer to PCI device identifier 11728 * 11729 * This routine is to be called to attach a device with SLI-3 interface spec 11730 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 11731 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 11732 * information of the device and driver to see if the driver state that it can 11733 * support this kind of device. If the match is successful, the driver core 11734 * invokes this routine. If this routine determines it can claim the HBA, it 11735 * does all the initialization that it needs to do to handle the HBA properly. 11736 * 11737 * Return code 11738 * 0 - driver can claim the device 11739 * negative value - driver can not claim the device 11740 **/ 11741 static int 11742 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 11743 { 11744 struct lpfc_hba *phba; 11745 struct lpfc_vport *vport = NULL; 11746 struct Scsi_Host *shost = NULL; 11747 int error; 11748 uint32_t cfg_mode, intr_mode; 11749 11750 /* Allocate memory for HBA structure */ 11751 phba = lpfc_hba_alloc(pdev); 11752 if (!phba) 11753 return -ENOMEM; 11754 11755 /* Perform generic PCI device enabling operation */ 11756 error = lpfc_enable_pci_dev(phba); 11757 if (error) 11758 goto out_free_phba; 11759 11760 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 11761 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 11762 if (error) 11763 goto out_disable_pci_dev; 11764 11765 /* Set up SLI-3 specific device PCI memory space */ 11766 error = lpfc_sli_pci_mem_setup(phba); 11767 if (error) { 11768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11769 "1402 Failed to set up pci memory space.\n"); 11770 goto out_disable_pci_dev; 11771 } 11772 11773 /* Set up SLI-3 specific device driver resources */ 11774 error = lpfc_sli_driver_resource_setup(phba); 11775 if (error) { 11776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11777 "1404 Failed to set up driver resource.\n"); 11778 goto out_unset_pci_mem_s3; 11779 } 11780 11781 /* Initialize and populate the iocb list per host */ 11782 11783 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 11784 if (error) { 11785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11786 "1405 Failed to initialize iocb list.\n"); 11787 goto out_unset_driver_resource_s3; 11788 } 11789 11790 /* Set up common device driver resources */ 11791 error = lpfc_setup_driver_resource_phase2(phba); 11792 if (error) { 11793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11794 "1406 Failed to set up driver resource.\n"); 11795 goto out_free_iocb_list; 11796 } 11797 11798 /* Get the default values for Model Name and Description */ 11799 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 11800 11801 /* Create SCSI host to the physical port */ 11802 error = lpfc_create_shost(phba); 11803 if (error) { 11804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11805 "1407 Failed to create scsi host.\n"); 11806 goto out_unset_driver_resource; 11807 } 11808 11809 /* Configure sysfs attributes */ 11810 vport = phba->pport; 11811 error = lpfc_alloc_sysfs_attr(vport); 11812 if (error) { 11813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11814 "1476 Failed to allocate sysfs attr\n"); 11815 goto out_destroy_shost; 11816 } 11817 11818 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 11819 /* Now, trying to enable interrupt and bring up the device */ 11820 cfg_mode = phba->cfg_use_msi; 11821 while (true) { 11822 /* Put device to a known state 
before enabling interrupt */ 11823 lpfc_stop_port(phba); 11824 /* Configure and enable interrupt */ 11825 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 11826 if (intr_mode == LPFC_INTR_ERROR) { 11827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11828 "0431 Failed to enable interrupt.\n"); 11829 error = -ENODEV; 11830 goto out_free_sysfs_attr; 11831 } 11832 /* SLI-3 HBA setup */ 11833 if (lpfc_sli_hba_setup(phba)) { 11834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11835 "1477 Failed to set up hba\n"); 11836 error = -ENODEV; 11837 goto out_remove_device; 11838 } 11839 11840 /* Wait 50ms for the interrupts of previous mailbox commands */ 11841 msleep(50); 11842 /* Check active interrupts on message signaled interrupts */ 11843 if (intr_mode == 0 || 11844 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 11845 /* Log the current active interrupt mode */ 11846 phba->intr_mode = intr_mode; 11847 lpfc_log_intr_mode(phba, intr_mode); 11848 break; 11849 } else { 11850 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11851 "0447 Configure interrupt mode (%d) " 11852 "failed active interrupt test.\n", 11853 intr_mode); 11854 /* Disable the current interrupt mode */ 11855 lpfc_sli_disable_intr(phba); 11856 /* Try next level of interrupt mode */ 11857 cfg_mode = --intr_mode; 11858 } 11859 } 11860 11861 /* Perform post initialization setup */ 11862 lpfc_post_init_setup(phba); 11863 11864 /* Check if there are static vports to be created. */ 11865 lpfc_create_static_vport(phba); 11866 11867 return 0; 11868 11869 out_remove_device: 11870 lpfc_unset_hba(phba); 11871 out_free_sysfs_attr: 11872 lpfc_free_sysfs_attr(vport); 11873 out_destroy_shost: 11874 lpfc_destroy_shost(phba); 11875 out_unset_driver_resource: 11876 lpfc_unset_driver_resource_phase2(phba); 11877 out_free_iocb_list: 11878 lpfc_free_iocb_list(phba); 11879 out_unset_driver_resource_s3: 11880 lpfc_sli_driver_resource_unset(phba); 11881 out_unset_pci_mem_s3: 11882 lpfc_sli_pci_mem_unset(phba); 11883 out_disable_pci_dev: 11884 lpfc_disable_pci_dev(phba); 11885 if (shost) 11886 scsi_host_put(shost); 11887 out_free_phba: 11888 lpfc_hba_free(phba); 11889 return error; 11890 } 11891 11892 /** 11893 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 11894 * @pdev: pointer to PCI device 11895 * 11896 * This routine is to be called to disattach a device with SLI-3 interface 11897 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 11898 * removed from PCI bus, it performs all the necessary cleanup for the HBA 11899 * device to be removed from the PCI subsystem properly. 
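 * All vports are terminated, the FC and SCSI hosts are removed, and
 * every IRQ, DMA, and PCI resource acquired during probe is released.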
11900 **/ 11901 static void 11902 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 11903 { 11904 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11905 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 11906 struct lpfc_vport **vports; 11907 struct lpfc_hba *phba = vport->phba; 11908 int i; 11909 11910 spin_lock_irq(&phba->hbalock); 11911 vport->load_flag |= FC_UNLOADING; 11912 spin_unlock_irq(&phba->hbalock); 11913 11914 lpfc_free_sysfs_attr(vport); 11915 11916 /* Release all the vports against this physical port */ 11917 vports = lpfc_create_vport_work_array(phba); 11918 if (vports != NULL) 11919 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 11920 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 11921 continue; 11922 fc_vport_terminate(vports[i]->fc_vport); 11923 } 11924 lpfc_destroy_vport_work_array(phba, vports); 11925 11926 /* Remove FC host and then SCSI host with the physical port */ 11927 fc_remove_host(shost); 11928 scsi_remove_host(shost); 11929 11930 lpfc_cleanup(vport); 11931 11932 /* 11933 * Bring down the SLI Layer. This step disable all interrupts, 11934 * clears the rings, discards all mailbox commands, and resets 11935 * the HBA. 11936 */ 11937 11938 /* HBA interrupt will be disabled after this call */ 11939 lpfc_sli_hba_down(phba); 11940 /* Stop kthread signal shall trigger work_done one more time */ 11941 kthread_stop(phba->worker_thread); 11942 /* Final cleanup of txcmplq and reset the HBA */ 11943 lpfc_sli_brdrestart(phba); 11944 11945 kfree(phba->vpi_bmask); 11946 kfree(phba->vpi_ids); 11947 11948 lpfc_stop_hba_timers(phba); 11949 spin_lock_irq(&phba->port_list_lock); 11950 list_del_init(&vport->listentry); 11951 spin_unlock_irq(&phba->port_list_lock); 11952 11953 lpfc_debugfs_terminate(vport); 11954 11955 /* Disable SR-IOV if enabled */ 11956 if (phba->cfg_sriov_nr_virtfn) 11957 pci_disable_sriov(pdev); 11958 11959 /* Disable interrupt */ 11960 lpfc_sli_disable_intr(phba); 11961 11962 scsi_host_put(shost); 11963 11964 /* 11965 * Call scsi_free before mem_free since scsi bufs are released to their 11966 * corresponding pools here. 11967 */ 11968 lpfc_scsi_free(phba); 11969 lpfc_free_iocb_list(phba); 11970 11971 lpfc_mem_free_all(phba); 11972 11973 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 11974 phba->hbqslimp.virt, phba->hbqslimp.phys); 11975 11976 /* Free resources associated with SLI2 interface */ 11977 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 11978 phba->slim2p.virt, phba->slim2p.phys); 11979 11980 /* unmap adapter SLIM and Control Registers */ 11981 iounmap(phba->ctrl_regs_memmap_p); 11982 iounmap(phba->slim_memmap_p); 11983 11984 lpfc_hba_free(phba); 11985 11986 pci_release_mem_regions(pdev); 11987 pci_disable_device(pdev); 11988 } 11989 11990 /** 11991 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 11992 * @pdev: pointer to PCI device 11993 * @msg: power management message 11994 * 11995 * This routine is to be called from the kernel's PCI subsystem to support 11996 * system Power Management (PM) to device with SLI-3 interface spec. When 11997 * PM invokes this method, it quiesces the device by stopping the driver's 11998 * worker thread for the device, turning off device's interrupt and DMA, 11999 * and bring the device offline. 
Note that as the driver implements the 12000 * minimum PM requirements to a power-aware driver's PM support for the 12001 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 12002 * to the suspend() method call will be treated as SUSPEND and the driver will 12003 * fully reinitialize its device during resume() method call, the driver will 12004 * set device to PCI_D3hot state in PCI config space instead of setting it 12005 * according to the @msg provided by the PM. 12006 * 12007 * Return code 12008 * 0 - driver suspended the device 12009 * Error otherwise 12010 **/ 12011 static int 12012 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 12013 { 12014 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12015 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12016 12017 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12018 "0473 PCI device Power Management suspend.\n"); 12019 12020 /* Bring down the device */ 12021 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12022 lpfc_offline(phba); 12023 kthread_stop(phba->worker_thread); 12024 12025 /* Disable interrupt from device */ 12026 lpfc_sli_disable_intr(phba); 12027 12028 /* Save device state to PCI config space */ 12029 pci_save_state(pdev); 12030 pci_set_power_state(pdev, PCI_D3hot); 12031 12032 return 0; 12033 } 12034 12035 /** 12036 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 12037 * @pdev: pointer to PCI device 12038 * 12039 * This routine is to be called from the kernel's PCI subsystem to support 12040 * system Power Management (PM) to device with SLI-3 interface spec. When PM 12041 * invokes this method, it restores the device's PCI config space state and 12042 * fully reinitializes the device and brings it online. Note that as the 12043 * driver implements the minimum PM requirements to a power-aware driver's 12044 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 12045 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 12046 * driver will fully reinitialize its device during resume() method call, 12047 * the device will be set to PCI_D0 directly in PCI config space before 12048 * restoring the state. 12049 * 12050 * Return code 12051 * 0 - driver suspended the device 12052 * Error otherwise 12053 **/ 12054 static int 12055 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 12056 { 12057 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12058 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12059 uint32_t intr_mode; 12060 int error; 12061 12062 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12063 "0452 PCI device Power Management resume.\n"); 12064 12065 /* Restore device state from PCI config space */ 12066 pci_set_power_state(pdev, PCI_D0); 12067 pci_restore_state(pdev); 12068 12069 /* 12070 * As the new kernel behavior of pci_restore_state() API call clears 12071 * device saved_state flag, need to save the restored state again. 12072 */ 12073 pci_save_state(pdev); 12074 12075 if (pdev->is_busmaster) 12076 pci_set_master(pdev); 12077 12078 /* Startup the kernel thread for this host adapter. 
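 * The worker thread was stopped in lpfc_pci_suspend_one_s3() and must
 * be recreated before the port can be brought back online.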
*/ 12079 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12080 "lpfc_worker_%d", phba->brd_no); 12081 if (IS_ERR(phba->worker_thread)) { 12082 error = PTR_ERR(phba->worker_thread); 12083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12084 "0434 PM resume failed to start worker " 12085 "thread: error=x%x.\n", error); 12086 return error; 12087 } 12088 12089 /* Configure and enable interrupt */ 12090 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12091 if (intr_mode == LPFC_INTR_ERROR) { 12092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12093 "0430 PM resume Failed to enable interrupt\n"); 12094 return -EIO; 12095 } else 12096 phba->intr_mode = intr_mode; 12097 12098 /* Restart HBA and bring it online */ 12099 lpfc_sli_brdrestart(phba); 12100 lpfc_online(phba); 12101 12102 /* Log the current active interrupt mode */ 12103 lpfc_log_intr_mode(phba, phba->intr_mode); 12104 12105 return 0; 12106 } 12107 12108 /** 12109 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 12110 * @phba: pointer to lpfc hba data structure. 12111 * 12112 * This routine is called to prepare the SLI3 device for PCI slot recover. It 12113 * aborts all the outstanding SCSI I/Os to the pci device. 12114 **/ 12115 static void 12116 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 12117 { 12118 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12119 "2723 PCI channel I/O abort preparing for recovery\n"); 12120 12121 /* 12122 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12123 * and let the SCSI mid-layer to retry them to recover. 12124 */ 12125 lpfc_sli_abort_fcp_rings(phba); 12126 } 12127 12128 /** 12129 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 12130 * @phba: pointer to lpfc hba data structure. 12131 * 12132 * This routine is called to prepare the SLI3 device for PCI slot reset. It 12133 * disables the device interrupt and pci device, and aborts the internal FCP 12134 * pending I/Os. 12135 **/ 12136 static void 12137 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 12138 { 12139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12140 "2710 PCI channel disable preparing for reset\n"); 12141 12142 /* Block any management I/Os to the device */ 12143 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 12144 12145 /* Block all SCSI devices' I/Os on the host */ 12146 lpfc_scsi_dev_block(phba); 12147 12148 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12149 lpfc_sli_flush_fcp_rings(phba); 12150 12151 /* stop all timers */ 12152 lpfc_stop_hba_timers(phba); 12153 12154 /* Disable interrupt and pci device */ 12155 lpfc_sli_disable_intr(phba); 12156 pci_disable_device(phba->pcidev); 12157 } 12158 12159 /** 12160 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 12161 * @phba: pointer to lpfc hba data structure. 12162 * 12163 * This routine is called to prepare the SLI3 device for PCI slot permanently 12164 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12165 * pending I/Os. 
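 * Unlike the slot-reset preparation path, the interrupt and PCI device
 * are left untouched here because the slot is being permanently disabled.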
12166 **/ 12167 static void 12168 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12169 { 12170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12171 "2711 PCI channel permanent disable for failure\n"); 12172 /* Block all SCSI devices' I/Os on the host */ 12173 lpfc_scsi_dev_block(phba); 12174 12175 /* stop all timers */ 12176 lpfc_stop_hba_timers(phba); 12177 12178 /* Clean up all driver's outstanding SCSI I/Os */ 12179 lpfc_sli_flush_fcp_rings(phba); 12180 } 12181 12182 /** 12183 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 12184 * @pdev: pointer to PCI device. 12185 * @state: the current PCI connection state. 12186 * 12187 * This routine is called from the PCI subsystem for I/O error handling to 12188 * device with SLI-3 interface spec. This function is called by the PCI 12189 * subsystem after a PCI bus error affecting this device has been detected. 12190 * When this function is invoked, it will need to stop all the I/Os and 12191 * interrupt(s) to the device. Once that is done, it will return 12192 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 12193 * as desired. 12194 * 12195 * Return codes 12196 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 12197 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 12198 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12199 **/ 12200 static pci_ers_result_t 12201 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 12202 { 12203 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12204 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12205 12206 switch (state) { 12207 case pci_channel_io_normal: 12208 /* Non-fatal error, prepare for recovery */ 12209 lpfc_sli_prep_dev_for_recover(phba); 12210 return PCI_ERS_RESULT_CAN_RECOVER; 12211 case pci_channel_io_frozen: 12212 /* Fatal error, prepare for slot reset */ 12213 lpfc_sli_prep_dev_for_reset(phba); 12214 return PCI_ERS_RESULT_NEED_RESET; 12215 case pci_channel_io_perm_failure: 12216 /* Permanent failure, prepare for device down */ 12217 lpfc_sli_prep_dev_for_perm_failure(phba); 12218 return PCI_ERS_RESULT_DISCONNECT; 12219 default: 12220 /* Unknown state, prepare and request slot reset */ 12221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12222 "0472 Unknown PCI error state: x%x\n", state); 12223 lpfc_sli_prep_dev_for_reset(phba); 12224 return PCI_ERS_RESULT_NEED_RESET; 12225 } 12226 } 12227 12228 /** 12229 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 12230 * @pdev: pointer to PCI device. 12231 * 12232 * This routine is called from the PCI subsystem for error handling to 12233 * device with SLI-3 interface spec. This is called after PCI bus has been 12234 * reset to restart the PCI card from scratch, as if from a cold-boot. 12235 * During the PCI subsystem error recovery, after driver returns 12236 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 12237 * recovery and then call this routine before calling the .resume method 12238 * to recover the device. This function will initialize the HBA device, 12239 * enable the interrupt, but it will just put the HBA to offline state 12240 * without passing any I/O traffic. 
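 * The subsequent .resume callback, lpfc_io_resume_s3(), brings the port
 * back online once PCI error recovery completes.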
12241 * 12242 * Return codes 12243 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 12244 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 12245 */ 12246 static pci_ers_result_t 12247 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 12248 { 12249 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12250 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12251 struct lpfc_sli *psli = &phba->sli; 12252 uint32_t intr_mode; 12253 12254 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 12255 if (pci_enable_device_mem(pdev)) { 12256 printk(KERN_ERR "lpfc: Cannot re-enable " 12257 "PCI device after reset.\n"); 12258 return PCI_ERS_RESULT_DISCONNECT; 12259 } 12260 12261 pci_restore_state(pdev); 12262 12263 /* 12264 * As the new kernel behavior of pci_restore_state() API call clears 12265 * device saved_state flag, need to save the restored state again. 12266 */ 12267 pci_save_state(pdev); 12268 12269 if (pdev->is_busmaster) 12270 pci_set_master(pdev); 12271 12272 spin_lock_irq(&phba->hbalock); 12273 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 12274 spin_unlock_irq(&phba->hbalock); 12275 12276 /* Configure and enable interrupt */ 12277 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 12278 if (intr_mode == LPFC_INTR_ERROR) { 12279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12280 "0427 Cannot re-enable interrupt after " 12281 "slot reset.\n"); 12282 return PCI_ERS_RESULT_DISCONNECT; 12283 } else 12284 phba->intr_mode = intr_mode; 12285 12286 /* Take device offline, it will perform cleanup */ 12287 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 12288 lpfc_offline(phba); 12289 lpfc_sli_brdrestart(phba); 12290 12291 /* Log the current active interrupt mode */ 12292 lpfc_log_intr_mode(phba, phba->intr_mode); 12293 12294 return PCI_ERS_RESULT_RECOVERED; 12295 } 12296 12297 /** 12298 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 12299 * @pdev: pointer to PCI device 12300 * 12301 * This routine is called from the PCI subsystem for error handling to device 12302 * with SLI-3 interface spec. It is called when kernel error recovery tells 12303 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 12304 * error recovery. After this call, traffic can start to flow from this device 12305 * again. 12306 */ 12307 static void 12308 lpfc_io_resume_s3(struct pci_dev *pdev) 12309 { 12310 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12311 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 12312 12313 /* Bring device online, it will be no-op for non-fatal error resume */ 12314 lpfc_online(phba); 12315 } 12316 12317 /** 12318 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 12319 * @phba: pointer to lpfc hba data structure. 12320 * 12321 * returns the number of ELS/CT IOCBs to reserve 12322 **/ 12323 int 12324 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 12325 { 12326 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 12327 12328 if (phba->sli_rev == LPFC_SLI_REV4) { 12329 if (max_xri <= 100) 12330 return 10; 12331 else if (max_xri <= 256) 12332 return 25; 12333 else if (max_xri <= 512) 12334 return 50; 12335 else if (max_xri <= 1024) 12336 return 100; 12337 else if (max_xri <= 1536) 12338 return 150; 12339 else if (max_xri <= 2048) 12340 return 200; 12341 else 12342 return 250; 12343 } else 12344 return 0; 12345 } 12346 12347 /** 12348 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 12349 * @phba: pointer to lpfc hba data structure. 
12350 * 12351 * returns the number of ELS/CT + NVMET IOCBs to reserve 12352 **/ 12353 int 12354 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 12355 { 12356 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 12357 12358 if (phba->nvmet_support) 12359 max_xri += LPFC_NVMET_BUF_POST; 12360 return max_xri; 12361 } 12362 12363 12364 static void 12365 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 12366 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 12367 const struct firmware *fw) 12368 { 12369 if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) || 12370 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && 12371 magic_number != MAGIC_NUMER_G6) || 12372 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && 12373 magic_number != MAGIC_NUMER_G7)) 12374 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12375 "3030 This firmware version is not supported on " 12376 "this HBA model. Device:%x Magic:%x Type:%x " 12377 "ID:%x Size %d %zd\n", 12378 phba->pcidev->device, magic_number, ftype, fid, 12379 fsize, fw->size); 12380 else 12381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12382 "3022 FW Download failed. Device:%x Magic:%x Type:%x " 12383 "ID:%x Size %d %zd\n", 12384 phba->pcidev->device, magic_number, ftype, fid, 12385 fsize, fw->size); 12386 } 12387 12388 12389 /** 12390 * lpfc_write_firmware - attempt to write a firmware image to the port 12391 * @fw: pointer to firmware image returned from request_firmware. 12392 * @phba: pointer to lpfc hba data structure. 12393 * 12394 **/ 12395 static void 12396 lpfc_write_firmware(const struct firmware *fw, void *context) 12397 { 12398 struct lpfc_hba *phba = (struct lpfc_hba *)context; 12399 char fwrev[FW_REV_STR_SIZE]; 12400 struct lpfc_grp_hdr *image; 12401 struct list_head dma_buffer_list; 12402 int i, rc = 0; 12403 struct lpfc_dmabuf *dmabuf, *next; 12404 uint32_t offset = 0, temp_offset = 0; 12405 uint32_t magic_number, ftype, fid, fsize; 12406 12407 /* It can be null in no-wait mode, sanity check */ 12408 if (!fw) { 12409 rc = -ENXIO; 12410 goto out; 12411 } 12412 image = (struct lpfc_grp_hdr *)fw->data; 12413 12414 magic_number = be32_to_cpu(image->magic_number); 12415 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 12416 fid = bf_get_be32(lpfc_grp_hdr_id, image); 12417 fsize = be32_to_cpu(image->size); 12418 12419 INIT_LIST_HEAD(&dma_buffer_list); 12420 lpfc_decode_firmware_rev(phba, fwrev, 1); 12421 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 12422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12423 "3023 Updating Firmware, Current Version:%s " 12424 "New Version:%s\n", 12425 fwrev, image->revision); 12426 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 12427 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 12428 GFP_KERNEL); 12429 if (!dmabuf) { 12430 rc = -ENOMEM; 12431 goto release_out; 12432 } 12433 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12434 SLI4_PAGE_SIZE, 12435 &dmabuf->phys, 12436 GFP_KERNEL); 12437 if (!dmabuf->virt) { 12438 kfree(dmabuf); 12439 rc = -ENOMEM; 12440 goto release_out; 12441 } 12442 list_add_tail(&dmabuf->list, &dma_buffer_list); 12443 } 12444 while (offset < fw->size) { 12445 temp_offset = offset; 12446 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 12447 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 12448 memcpy(dmabuf->virt, 12449 fw->data + temp_offset, 12450 fw->size - temp_offset); 12451 temp_offset = fw->size; 12452 break; 12453 } 12454 memcpy(dmabuf->virt, fw->data + temp_offset, 12455 SLI4_PAGE_SIZE); 12456 temp_offset += SLI4_PAGE_SIZE; 
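			/* Keep staging the image one SLI4 page per DMA
			 * buffer; the whole chunk is handed to
			 * lpfc_wr_object() below.
			 */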
12457 }
12458 rc = lpfc_wr_object(phba, &dma_buffer_list,
12459 (fw->size - offset), &offset);
12460 if (rc) {
12461 lpfc_log_write_firmware_error(phba, offset,
12462 magic_number, ftype, fid, fsize, fw);
12463 goto release_out;
12464 }
12465 }
12466 rc = offset;
12467 } else
12468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12469 "3029 Skipped Firmware update, Current "
12470 "Version:%s New Version:%s\n",
12471 fwrev, image->revision);
12472
12473 release_out:
12474 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
12475 list_del(&dmabuf->list);
12476 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
12477 dmabuf->virt, dmabuf->phys);
12478 kfree(dmabuf);
12479 }
12480 release_firmware(fw);
12481 out:
12482 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12483 "3024 Firmware update done: %d.\n", rc);
12484 return;
12485 }
12486
12487 /**
12488 * lpfc_sli4_request_firmware_update - Request Linux generic firmware upgrade
12489 * @phba: pointer to lpfc hba data structure.
* @fw_upgrade: INT_FW_UPGRADE for an asynchronous (no-wait) upgrade request,
* or RUN_FW_UPGRADE to perform the upgrade synchronously.
12490 *
12491 * This routine is called to perform a Linux generic firmware upgrade on
12492 * devices that support this feature.
12493 **/
12494 int
12495 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
12496 {
12497 char file_name[ELX_MODEL_NAME_SIZE];
12498 int ret;
12499 const struct firmware *fw;
12500
12501 /* Only supported on SLI4 interface type 2 for now */
12502 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
12503 LPFC_SLI_INTF_IF_TYPE_2)
12504 return -EPERM;
12505
12506 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
12507
12508 if (fw_upgrade == INT_FW_UPGRADE) {
12509 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
12510 file_name, &phba->pcidev->dev,
12511 GFP_KERNEL, (void *)phba,
12512 lpfc_write_firmware);
12513 } else if (fw_upgrade == RUN_FW_UPGRADE) {
12514 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
12515 if (!ret)
12516 lpfc_write_firmware(fw, (void *)phba);
12517 } else {
12518 ret = -EINVAL;
12519 }
12520
12521 return ret;
12522 }
12523
12524 /**
12525 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
12526 * @pdev: pointer to PCI device
12527 * @pid: pointer to PCI device identifier
12528 *
12529 * This routine is called from the kernel's PCI subsystem to device with
12530 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
12531 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
12532 * information of the device and driver to see if the driver states that it
12533 * can support this kind of device. If the match is successful, the driver
12534 * core invokes this routine. If this routine determines it can claim the HBA,
12535 * it does all the initialization that it needs to do to handle the HBA
12536 * properly.
12537 * 12538 * Return code 12539 * 0 - driver can claim the device 12540 * negative value - driver can not claim the device 12541 **/ 12542 static int 12543 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 12544 { 12545 struct lpfc_hba *phba; 12546 struct lpfc_vport *vport = NULL; 12547 struct Scsi_Host *shost = NULL; 12548 int error; 12549 uint32_t cfg_mode, intr_mode; 12550 12551 /* Allocate memory for HBA structure */ 12552 phba = lpfc_hba_alloc(pdev); 12553 if (!phba) 12554 return -ENOMEM; 12555 12556 /* Perform generic PCI device enabling operation */ 12557 error = lpfc_enable_pci_dev(phba); 12558 if (error) 12559 goto out_free_phba; 12560 12561 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 12562 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 12563 if (error) 12564 goto out_disable_pci_dev; 12565 12566 /* Set up SLI-4 specific device PCI memory space */ 12567 error = lpfc_sli4_pci_mem_setup(phba); 12568 if (error) { 12569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12570 "1410 Failed to set up pci memory space.\n"); 12571 goto out_disable_pci_dev; 12572 } 12573 12574 /* Set up SLI-4 Specific device driver resources */ 12575 error = lpfc_sli4_driver_resource_setup(phba); 12576 if (error) { 12577 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12578 "1412 Failed to set up driver resource.\n"); 12579 goto out_unset_pci_mem_s4; 12580 } 12581 12582 INIT_LIST_HEAD(&phba->active_rrq_list); 12583 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 12584 12585 /* Set up common device driver resources */ 12586 error = lpfc_setup_driver_resource_phase2(phba); 12587 if (error) { 12588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12589 "1414 Failed to set up driver resource.\n"); 12590 goto out_unset_driver_resource_s4; 12591 } 12592 12593 /* Get the default values for Model Name and Description */ 12594 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 12595 12596 /* Now, trying to enable interrupt and bring up the device */ 12597 cfg_mode = phba->cfg_use_msi; 12598 12599 /* Put device to a known state before enabling interrupt */ 12600 phba->pport = NULL; 12601 lpfc_stop_port(phba); 12602 12603 /* Configure and enable interrupt */ 12604 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 12605 if (intr_mode == LPFC_INTR_ERROR) { 12606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12607 "0426 Failed to enable interrupt.\n"); 12608 error = -ENODEV; 12609 goto out_unset_driver_resource; 12610 } 12611 /* Default to single EQ for non-MSI-X */ 12612 if (phba->intr_type != MSIX) { 12613 phba->cfg_irq_chann = 1; 12614 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12615 if (phba->nvmet_support) 12616 phba->cfg_nvmet_mrq = 1; 12617 } 12618 } 12619 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 12620 12621 /* Create SCSI host to the physical port */ 12622 error = lpfc_create_shost(phba); 12623 if (error) { 12624 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12625 "1415 Failed to create scsi host.\n"); 12626 goto out_disable_intr; 12627 } 12628 vport = phba->pport; 12629 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 12630 12631 /* Configure sysfs attributes */ 12632 error = lpfc_alloc_sysfs_attr(vport); 12633 if (error) { 12634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12635 "1416 Failed to allocate sysfs attr\n"); 12636 goto out_destroy_shost; 12637 } 12638 12639 /* Set up SLI-4 HBA */ 12640 if (lpfc_sli4_hba_setup(phba)) { 12641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12642 "1421 Failed to set up hba\n"); 12643 error = 
-ENODEV; 12644 goto out_free_sysfs_attr; 12645 } 12646 12647 /* Log the current active interrupt mode */ 12648 phba->intr_mode = intr_mode; 12649 lpfc_log_intr_mode(phba, intr_mode); 12650 12651 /* Perform post initialization setup */ 12652 lpfc_post_init_setup(phba); 12653 12654 /* NVME support in FW earlier in the driver load corrects the 12655 * FC4 type making a check for nvme_support unnecessary. 12656 */ 12657 if (phba->nvmet_support == 0) { 12658 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 12659 /* Create NVME binding with nvme_fc_transport. This 12660 * ensures the vport is initialized. If the localport 12661 * create fails, it should not unload the driver to 12662 * support field issues. 12663 */ 12664 error = lpfc_nvme_create_localport(vport); 12665 if (error) { 12666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12667 "6004 NVME registration " 12668 "failed, error x%x\n", 12669 error); 12670 } 12671 } 12672 } 12673 12674 /* check for firmware upgrade or downgrade */ 12675 if (phba->cfg_request_firmware_upgrade) 12676 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 12677 12678 /* Check if there are static vports to be created. */ 12679 lpfc_create_static_vport(phba); 12680 12681 /* Enable RAS FW log support */ 12682 lpfc_sli4_ras_setup(phba); 12683 12684 return 0; 12685 12686 out_free_sysfs_attr: 12687 lpfc_free_sysfs_attr(vport); 12688 out_destroy_shost: 12689 lpfc_destroy_shost(phba); 12690 out_disable_intr: 12691 lpfc_sli4_disable_intr(phba); 12692 out_unset_driver_resource: 12693 lpfc_unset_driver_resource_phase2(phba); 12694 out_unset_driver_resource_s4: 12695 lpfc_sli4_driver_resource_unset(phba); 12696 out_unset_pci_mem_s4: 12697 lpfc_sli4_pci_mem_unset(phba); 12698 out_disable_pci_dev: 12699 lpfc_disable_pci_dev(phba); 12700 if (shost) 12701 scsi_host_put(shost); 12702 out_free_phba: 12703 lpfc_hba_free(phba); 12704 return error; 12705 } 12706 12707 /** 12708 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 12709 * @pdev: pointer to PCI device 12710 * 12711 * This routine is called from the kernel's PCI subsystem to device with 12712 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 12713 * removed from PCI bus, it performs all the necessary cleanup for the HBA 12714 * device to be removed from the PCI subsystem properly. 12715 **/ 12716 static void 12717 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 12718 { 12719 struct Scsi_Host *shost = pci_get_drvdata(pdev); 12720 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 12721 struct lpfc_vport **vports; 12722 struct lpfc_hba *phba = vport->phba; 12723 int i; 12724 12725 /* Mark the device unloading flag */ 12726 spin_lock_irq(&phba->hbalock); 12727 vport->load_flag |= FC_UNLOADING; 12728 spin_unlock_irq(&phba->hbalock); 12729 12730 /* Free the HBA sysfs attributes */ 12731 lpfc_free_sysfs_attr(vport); 12732 12733 /* Release all the vports against this physical port */ 12734 vports = lpfc_create_vport_work_array(phba); 12735 if (vports != NULL) 12736 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 12737 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 12738 continue; 12739 fc_vport_terminate(vports[i]->fc_vport); 12740 } 12741 lpfc_destroy_vport_work_array(phba, vports); 12742 12743 /* Remove FC host and then SCSI host with the physical port */ 12744 fc_remove_host(shost); 12745 scsi_remove_host(shost); 12746 12747 /* Perform ndlp cleanup on the physical port. 
The nvme and nvmet 12748 * localports are destroyed after to cleanup all transport memory. 12749 */ 12750 lpfc_cleanup(vport); 12751 lpfc_nvmet_destroy_targetport(phba); 12752 lpfc_nvme_destroy_localport(vport); 12753 12754 /* De-allocate multi-XRI pools */ 12755 if (phba->cfg_xri_rebalancing) 12756 lpfc_destroy_multixri_pools(phba); 12757 12758 /* 12759 * Bring down the SLI Layer. This step disables all interrupts, 12760 * clears the rings, discards all mailbox commands, and resets 12761 * the HBA FCoE function. 12762 */ 12763 lpfc_debugfs_terminate(vport); 12764 12765 lpfc_stop_hba_timers(phba); 12766 spin_lock_irq(&phba->port_list_lock); 12767 list_del_init(&vport->listentry); 12768 spin_unlock_irq(&phba->port_list_lock); 12769 12770 /* Perform scsi free before driver resource_unset since scsi 12771 * buffers are released to their corresponding pools here. 12772 */ 12773 lpfc_io_free(phba); 12774 lpfc_free_iocb_list(phba); 12775 lpfc_sli4_hba_unset(phba); 12776 12777 lpfc_unset_driver_resource_phase2(phba); 12778 lpfc_sli4_driver_resource_unset(phba); 12779 12780 /* Unmap adapter Control and Doorbell registers */ 12781 lpfc_sli4_pci_mem_unset(phba); 12782 12783 /* Release PCI resources and disable device's PCI function */ 12784 scsi_host_put(shost); 12785 lpfc_disable_pci_dev(phba); 12786 12787 /* Finally, free the driver's device data structure */ 12788 lpfc_hba_free(phba); 12789 12790 return; 12791 } 12792 12793 /** 12794 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 12795 * @pdev: pointer to PCI device 12796 * @msg: power management message 12797 * 12798 * This routine is called from the kernel's PCI subsystem to support system 12799 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 12800 * this method, it quiesces the device by stopping the driver's worker 12801 * thread for the device, turning off device's interrupt and DMA, and bring 12802 * the device offline. Note that as the driver implements the minimum PM 12803 * requirements to a power-aware driver's PM support for suspend/resume -- all 12804 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 12805 * method call will be treated as SUSPEND and the driver will fully 12806 * reinitialize its device during resume() method call, the driver will set 12807 * device to PCI_D3hot state in PCI config space instead of setting it 12808 * according to the @msg provided by the PM. 
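 * Concretely, the suspend path below brings the port offline, stops the
 * worker thread, disables the SLI-4 interrupts and destroys the queues, then
 * saves the PCI state and places the device in PCI_D3hot.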
12809 *
12810 * Return code
12811 * 0 - driver suspended the device
12812 * Error otherwise
12813 **/
12814 static int
12815 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
12816 {
12817 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12818 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12819
12820 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12821 "2843 PCI device Power Management suspend.\n");
12822
12823 /* Bring down the device */
12824 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12825 lpfc_offline(phba);
12826 kthread_stop(phba->worker_thread);
12827
12828 /* Disable interrupt from device */
12829 lpfc_sli4_disable_intr(phba);
12830 lpfc_sli4_queue_destroy(phba);
12831
12832 /* Save device state to PCI config space */
12833 pci_save_state(pdev);
12834 pci_set_power_state(pdev, PCI_D3hot);
12835
12836 return 0;
12837 }
12838
12839 /**
12840 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
12841 * @pdev: pointer to PCI device
12842 *
12843 * This routine is called from the kernel's PCI subsystem to support system
12844 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
12845 * this method, it restores the device's PCI config space state and fully
12846 * reinitializes the device and brings it online. Note that as the driver
12847 * implements the minimum PM requirements to a power-aware driver's PM for
12848 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
12849 * to the suspend() method call will be treated as SUSPEND and the driver
12850 * will fully reinitialize its device during resume() method call, the device
12851 * will be set to PCI_D0 directly in PCI config space before restoring the
12852 * state.
12853 *
12854 * Return code
12855 * 0 - driver resumed the device
12856 * Error otherwise
12857 **/
12858 static int
12859 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
12860 {
12861 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12862 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12863 uint32_t intr_mode;
12864 int error;
12865
12866 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12867 "0292 PCI device Power Management resume.\n");
12868
12869 /* Restore device state from PCI config space */
12870 pci_set_power_state(pdev, PCI_D0);
12871 pci_restore_state(pdev);
12872
12873 /*
12874 * As the new kernel behavior of pci_restore_state() API call clears
12875 * device saved_state flag, need to save the restored state again.
12876 */
12877 pci_save_state(pdev);
12878
12879 if (pdev->is_busmaster)
12880 pci_set_master(pdev);
12881
12882 /* Startup the kernel thread for this host adapter.
*/ 12883 phba->worker_thread = kthread_run(lpfc_do_work, phba, 12884 "lpfc_worker_%d", phba->brd_no); 12885 if (IS_ERR(phba->worker_thread)) { 12886 error = PTR_ERR(phba->worker_thread); 12887 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12888 "0293 PM resume failed to start worker " 12889 "thread: error=x%x.\n", error); 12890 return error; 12891 } 12892 12893 /* Configure and enable interrupt */ 12894 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 12895 if (intr_mode == LPFC_INTR_ERROR) { 12896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12897 "0294 PM resume Failed to enable interrupt\n"); 12898 return -EIO; 12899 } else 12900 phba->intr_mode = intr_mode; 12901 12902 /* Restart HBA and bring it online */ 12903 lpfc_sli_brdrestart(phba); 12904 lpfc_online(phba); 12905 12906 /* Log the current active interrupt mode */ 12907 lpfc_log_intr_mode(phba, phba->intr_mode); 12908 12909 return 0; 12910 } 12911 12912 /** 12913 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 12914 * @phba: pointer to lpfc hba data structure. 12915 * 12916 * This routine is called to prepare the SLI4 device for PCI slot recover. It 12917 * aborts all the outstanding SCSI I/Os to the pci device. 12918 **/ 12919 static void 12920 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 12921 { 12922 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12923 "2828 PCI channel I/O abort preparing for recovery\n"); 12924 /* 12925 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 12926 * and let the SCSI mid-layer to retry them to recover. 12927 */ 12928 lpfc_sli_abort_fcp_rings(phba); 12929 } 12930 12931 /** 12932 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 12933 * @phba: pointer to lpfc hba data structure. 12934 * 12935 * This routine is called to prepare the SLI4 device for PCI slot reset. It 12936 * disables the device interrupt and pci device, and aborts the internal FCP 12937 * pending I/Os. 12938 **/ 12939 static void 12940 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 12941 { 12942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12943 "2826 PCI channel disable preparing for reset\n"); 12944 12945 /* Block any management I/Os to the device */ 12946 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 12947 12948 /* Block all SCSI devices' I/Os on the host */ 12949 lpfc_scsi_dev_block(phba); 12950 12951 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 12952 lpfc_sli_flush_fcp_rings(phba); 12953 12954 /* Flush the outstanding NVME IOs if fc4 type enabled. */ 12955 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12956 lpfc_sli_flush_nvme_rings(phba); 12957 12958 /* stop all timers */ 12959 lpfc_stop_hba_timers(phba); 12960 12961 /* Disable interrupt and pci device */ 12962 lpfc_sli4_disable_intr(phba); 12963 lpfc_sli4_queue_destroy(phba); 12964 pci_disable_device(phba->pcidev); 12965 } 12966 12967 /** 12968 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 12969 * @phba: pointer to lpfc hba data structure. 12970 * 12971 * This routine is called to prepare the SLI4 device for PCI slot permanently 12972 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 12973 * pending I/Os. 
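 * Unlike lpfc_sli4_prep_dev_for_reset(), it leaves the interrupt and the PCI
 * device itself enabled, since the slot is about to be permanently disabled;
 * outstanding NVME I/Os are flushed as well when the NVME FC4 type is enabled.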
12974 **/ 12975 static void 12976 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 12977 { 12978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12979 "2827 PCI channel permanent disable for failure\n"); 12980 12981 /* Block all SCSI devices' I/Os on the host */ 12982 lpfc_scsi_dev_block(phba); 12983 12984 /* stop all timers */ 12985 lpfc_stop_hba_timers(phba); 12986 12987 /* Clean up all driver's outstanding SCSI I/Os */ 12988 lpfc_sli_flush_fcp_rings(phba); 12989 12990 /* Flush the outstanding NVME IOs if fc4 type enabled. */ 12991 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 12992 lpfc_sli_flush_nvme_rings(phba); 12993 } 12994 12995 /** 12996 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 12997 * @pdev: pointer to PCI device. 12998 * @state: the current PCI connection state. 12999 * 13000 * This routine is called from the PCI subsystem for error handling to device 13001 * with SLI-4 interface spec. This function is called by the PCI subsystem 13002 * after a PCI bus error affecting this device has been detected. When this 13003 * function is invoked, it will need to stop all the I/Os and interrupt(s) 13004 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 13005 * for the PCI subsystem to perform proper recovery as desired. 13006 * 13007 * Return codes 13008 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 13009 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13010 **/ 13011 static pci_ers_result_t 13012 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 13013 { 13014 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13015 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13016 13017 switch (state) { 13018 case pci_channel_io_normal: 13019 /* Non-fatal error, prepare for recovery */ 13020 lpfc_sli4_prep_dev_for_recover(phba); 13021 return PCI_ERS_RESULT_CAN_RECOVER; 13022 case pci_channel_io_frozen: 13023 /* Fatal error, prepare for slot reset */ 13024 lpfc_sli4_prep_dev_for_reset(phba); 13025 return PCI_ERS_RESULT_NEED_RESET; 13026 case pci_channel_io_perm_failure: 13027 /* Permanent failure, prepare for device down */ 13028 lpfc_sli4_prep_dev_for_perm_failure(phba); 13029 return PCI_ERS_RESULT_DISCONNECT; 13030 default: 13031 /* Unknown state, prepare and request slot reset */ 13032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13033 "2825 Unknown PCI error state: x%x\n", state); 13034 lpfc_sli4_prep_dev_for_reset(phba); 13035 return PCI_ERS_RESULT_NEED_RESET; 13036 } 13037 } 13038 13039 /** 13040 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 13041 * @pdev: pointer to PCI device. 13042 * 13043 * This routine is called from the PCI subsystem for error handling to device 13044 * with SLI-4 interface spec. It is called after PCI bus has been reset to 13045 * restart the PCI card from scratch, as if from a cold-boot. During the 13046 * PCI subsystem error recovery, after the driver returns 13047 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 13048 * recovery and then call this routine before calling the .resume method to 13049 * recover the device. This function will initialize the HBA device, enable 13050 * the interrupt, but it will just put the HBA to offline state without 13051 * passing any I/O traffic. 
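 * As noted in lpfc_io_resume_s4(), the actual port restart is deferred to the
 * io resume phase, because the function reset is issued through a mailbox
 * command that requires DMA to be enabled again first.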
13052 * 13053 * Return codes 13054 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13055 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13056 */ 13057 static pci_ers_result_t 13058 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 13059 { 13060 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13061 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13062 struct lpfc_sli *psli = &phba->sli; 13063 uint32_t intr_mode; 13064 13065 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 13066 if (pci_enable_device_mem(pdev)) { 13067 printk(KERN_ERR "lpfc: Cannot re-enable " 13068 "PCI device after reset.\n"); 13069 return PCI_ERS_RESULT_DISCONNECT; 13070 } 13071 13072 pci_restore_state(pdev); 13073 13074 /* 13075 * As the new kernel behavior of pci_restore_state() API call clears 13076 * device saved_state flag, need to save the restored state again. 13077 */ 13078 pci_save_state(pdev); 13079 13080 if (pdev->is_busmaster) 13081 pci_set_master(pdev); 13082 13083 spin_lock_irq(&phba->hbalock); 13084 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 13085 spin_unlock_irq(&phba->hbalock); 13086 13087 /* Configure and enable interrupt */ 13088 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 13089 if (intr_mode == LPFC_INTR_ERROR) { 13090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13091 "2824 Cannot re-enable interrupt after " 13092 "slot reset.\n"); 13093 return PCI_ERS_RESULT_DISCONNECT; 13094 } else 13095 phba->intr_mode = intr_mode; 13096 13097 /* Log the current active interrupt mode */ 13098 lpfc_log_intr_mode(phba, phba->intr_mode); 13099 13100 return PCI_ERS_RESULT_RECOVERED; 13101 } 13102 13103 /** 13104 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 13105 * @pdev: pointer to PCI device 13106 * 13107 * This routine is called from the PCI subsystem for error handling to device 13108 * with SLI-4 interface spec. It is called when kernel error recovery tells 13109 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 13110 * error recovery. After this call, traffic can start to flow from this device 13111 * again. 13112 **/ 13113 static void 13114 lpfc_io_resume_s4(struct pci_dev *pdev) 13115 { 13116 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13117 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13118 13119 /* 13120 * In case of slot reset, as function reset is performed through 13121 * mailbox command which needs DMA to be enabled, this operation 13122 * has to be moved to the io resume phase. Taking device offline 13123 * will perform the necessary cleanup. 13124 */ 13125 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 13126 /* Perform device reset */ 13127 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 13128 lpfc_offline(phba); 13129 lpfc_sli_brdrestart(phba); 13130 /* Bring the device back online */ 13131 lpfc_online(phba); 13132 } 13133 } 13134 13135 /** 13136 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 13137 * @pdev: pointer to PCI device 13138 * @pid: pointer to PCI device identifier 13139 * 13140 * This routine is to be registered to the kernel's PCI subsystem. When an 13141 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 13142 * at PCI device-specific information of the device and driver to see if the 13143 * driver state that it can support this kind of device. If the match is 13144 * successful, the driver core invokes this routine. 
This routine dispatches 13145 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 13146 * do all the initialization that it needs to do to handle the HBA device 13147 * properly. 13148 * 13149 * Return code 13150 * 0 - driver can claim the device 13151 * negative value - driver can not claim the device 13152 **/ 13153 static int 13154 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 13155 { 13156 int rc; 13157 struct lpfc_sli_intf intf; 13158 13159 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 13160 return -ENODEV; 13161 13162 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 13163 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 13164 rc = lpfc_pci_probe_one_s4(pdev, pid); 13165 else 13166 rc = lpfc_pci_probe_one_s3(pdev, pid); 13167 13168 return rc; 13169 } 13170 13171 /** 13172 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 13173 * @pdev: pointer to PCI device 13174 * 13175 * This routine is to be registered to the kernel's PCI subsystem. When an 13176 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 13177 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 13178 * remove routine, which will perform all the necessary cleanup for the 13179 * device to be removed from the PCI subsystem properly. 13180 **/ 13181 static void 13182 lpfc_pci_remove_one(struct pci_dev *pdev) 13183 { 13184 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13185 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13186 13187 switch (phba->pci_dev_grp) { 13188 case LPFC_PCI_DEV_LP: 13189 lpfc_pci_remove_one_s3(pdev); 13190 break; 13191 case LPFC_PCI_DEV_OC: 13192 lpfc_pci_remove_one_s4(pdev); 13193 break; 13194 default: 13195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13196 "1424 Invalid PCI device group: 0x%x\n", 13197 phba->pci_dev_grp); 13198 break; 13199 } 13200 return; 13201 } 13202 13203 /** 13204 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 13205 * @pdev: pointer to PCI device 13206 * @msg: power management message 13207 * 13208 * This routine is to be registered to the kernel's PCI subsystem to support 13209 * system Power Management (PM). When PM invokes this method, it dispatches 13210 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 13211 * suspend the device. 13212 * 13213 * Return code 13214 * 0 - driver suspended the device 13215 * Error otherwise 13216 **/ 13217 static int 13218 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 13219 { 13220 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13221 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13222 int rc = -ENODEV; 13223 13224 switch (phba->pci_dev_grp) { 13225 case LPFC_PCI_DEV_LP: 13226 rc = lpfc_pci_suspend_one_s3(pdev, msg); 13227 break; 13228 case LPFC_PCI_DEV_OC: 13229 rc = lpfc_pci_suspend_one_s4(pdev, msg); 13230 break; 13231 default: 13232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13233 "1425 Invalid PCI device group: 0x%x\n", 13234 phba->pci_dev_grp); 13235 break; 13236 } 13237 return rc; 13238 } 13239 13240 /** 13241 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 13242 * @pdev: pointer to PCI device 13243 * 13244 * This routine is to be registered to the kernel's PCI subsystem to support 13245 * system Power Management (PM). 
When PM invokes this method, it dispatches
13246 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
13247 * resume the device.
13248 *
13249 * Return code
13250 * 0 - driver resumed the device
13251 * Error otherwise
13252 **/
13253 static int
13254 lpfc_pci_resume_one(struct pci_dev *pdev)
13255 {
13256 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13257 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13258 int rc = -ENODEV;
13259
13260 switch (phba->pci_dev_grp) {
13261 case LPFC_PCI_DEV_LP:
13262 rc = lpfc_pci_resume_one_s3(pdev);
13263 break;
13264 case LPFC_PCI_DEV_OC:
13265 rc = lpfc_pci_resume_one_s4(pdev);
13266 break;
13267 default:
13268 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13269 "1426 Invalid PCI device group: 0x%x\n",
13270 phba->pci_dev_grp);
13271 break;
13272 }
13273 return rc;
13274 }
13275
13276 /**
13277 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13278 * @pdev: pointer to PCI device.
13279 * @state: the current PCI connection state.
13280 *
13281 * This routine is registered to the PCI subsystem for error handling. This
13282 * function is called by the PCI subsystem after a PCI bus error affecting
13283 * this device has been detected. When this routine is invoked, it dispatches
13284 * the action to the proper SLI-3 or SLI-4 device error detected handling
13285 * routine, which will perform the proper error detected operation.
13286 *
13287 * Return codes
13288 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13289 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13290 **/
13291 static pci_ers_result_t
13292 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13293 {
13294 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13295 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13296 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13297
13298 switch (phba->pci_dev_grp) {
13299 case LPFC_PCI_DEV_LP:
13300 rc = lpfc_io_error_detected_s3(pdev, state);
13301 break;
13302 case LPFC_PCI_DEV_OC:
13303 rc = lpfc_io_error_detected_s4(pdev, state);
13304 break;
13305 default:
13306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13307 "1427 Invalid PCI device group: 0x%x\n",
13308 phba->pci_dev_grp);
13309 break;
13310 }
13311 return rc;
13312 }
13313
13314 /**
13315 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
13316 * @pdev: pointer to PCI device.
13317 *
13318 * This routine is registered to the PCI subsystem for error handling. This
13319 * function is called after PCI bus has been reset to restart the PCI card
13320 * from scratch, as if from a cold-boot. When this routine is invoked, it
13321 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13322 * routine, which will perform the proper device reset.
13323 * 13324 * Return codes 13325 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 13326 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 13327 **/ 13328 static pci_ers_result_t 13329 lpfc_io_slot_reset(struct pci_dev *pdev) 13330 { 13331 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13332 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13333 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 13334 13335 switch (phba->pci_dev_grp) { 13336 case LPFC_PCI_DEV_LP: 13337 rc = lpfc_io_slot_reset_s3(pdev); 13338 break; 13339 case LPFC_PCI_DEV_OC: 13340 rc = lpfc_io_slot_reset_s4(pdev); 13341 break; 13342 default: 13343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13344 "1428 Invalid PCI device group: 0x%x\n", 13345 phba->pci_dev_grp); 13346 break; 13347 } 13348 return rc; 13349 } 13350 13351 /** 13352 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 13353 * @pdev: pointer to PCI device 13354 * 13355 * This routine is registered to the PCI subsystem for error handling. It 13356 * is called when kernel error recovery tells the lpfc driver that it is 13357 * OK to resume normal PCI operation after PCI bus error recovery. When 13358 * this routine is invoked, it dispatches the action to the proper SLI-3 13359 * or SLI-4 device io_resume routine, which will resume the device operation. 13360 **/ 13361 static void 13362 lpfc_io_resume(struct pci_dev *pdev) 13363 { 13364 struct Scsi_Host *shost = pci_get_drvdata(pdev); 13365 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 13366 13367 switch (phba->pci_dev_grp) { 13368 case LPFC_PCI_DEV_LP: 13369 lpfc_io_resume_s3(pdev); 13370 break; 13371 case LPFC_PCI_DEV_OC: 13372 lpfc_io_resume_s4(pdev); 13373 break; 13374 default: 13375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13376 "1429 Invalid PCI device group: 0x%x\n", 13377 phba->pci_dev_grp); 13378 break; 13379 } 13380 return; 13381 } 13382 13383 /** 13384 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 13385 * @phba: pointer to lpfc hba data structure. 13386 * 13387 * This routine checks to see if OAS is supported for this adapter. If 13388 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 13389 * the enable oas flag is cleared and the pool created for OAS device data 13390 * is destroyed. 13391 * 13392 **/ 13393 static void 13394 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 13395 { 13396 13397 if (!phba->cfg_EnableXLane) 13398 return; 13399 13400 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 13401 phba->cfg_fof = 1; 13402 } else { 13403 phba->cfg_fof = 0; 13404 if (phba->device_data_mem_pool) 13405 mempool_destroy(phba->device_data_mem_pool); 13406 phba->device_data_mem_pool = NULL; 13407 } 13408 13409 return; 13410 } 13411 13412 /** 13413 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 13414 * @phba: pointer to lpfc hba data structure. 13415 * 13416 * This routine checks to see if RAS is supported by the adapter. Check the 13417 * function through which RAS support enablement is to be done. 
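 * For example (per the switch below): RAS firmware logging is treated as
 * hardware-supported only for Lancer G6/G7 device IDs, and is enabled only
 * when cfg_ras_fwlog_func matches this adapter's PCI function number and
 * cfg_ras_fwlog_buffsize is nonzero.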
13418 **/ 13419 void 13420 lpfc_sli4_ras_init(struct lpfc_hba *phba) 13421 { 13422 switch (phba->pcidev->device) { 13423 case PCI_DEVICE_ID_LANCER_G6_FC: 13424 case PCI_DEVICE_ID_LANCER_G7_FC: 13425 phba->ras_fwlog.ras_hwsupport = true; 13426 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 13427 phba->cfg_ras_fwlog_buffsize) 13428 phba->ras_fwlog.ras_enabled = true; 13429 else 13430 phba->ras_fwlog.ras_enabled = false; 13431 break; 13432 default: 13433 phba->ras_fwlog.ras_hwsupport = false; 13434 } 13435 } 13436 13437 13438 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 13439 13440 static const struct pci_error_handlers lpfc_err_handler = { 13441 .error_detected = lpfc_io_error_detected, 13442 .slot_reset = lpfc_io_slot_reset, 13443 .resume = lpfc_io_resume, 13444 }; 13445 13446 static struct pci_driver lpfc_driver = { 13447 .name = LPFC_DRIVER_NAME, 13448 .id_table = lpfc_id_table, 13449 .probe = lpfc_pci_probe_one, 13450 .remove = lpfc_pci_remove_one, 13451 .shutdown = lpfc_pci_remove_one, 13452 .suspend = lpfc_pci_suspend_one, 13453 .resume = lpfc_pci_resume_one, 13454 .err_handler = &lpfc_err_handler, 13455 }; 13456 13457 static const struct file_operations lpfc_mgmt_fop = { 13458 .owner = THIS_MODULE, 13459 }; 13460 13461 static struct miscdevice lpfc_mgmt_dev = { 13462 .minor = MISC_DYNAMIC_MINOR, 13463 .name = "lpfcmgmt", 13464 .fops = &lpfc_mgmt_fop, 13465 }; 13466 13467 /** 13468 * lpfc_init - lpfc module initialization routine 13469 * 13470 * This routine is to be invoked when the lpfc module is loaded into the 13471 * kernel. The special kernel macro module_init() is used to indicate the 13472 * role of this routine to the kernel as lpfc module entry point. 13473 * 13474 * Return codes 13475 * 0 - successful 13476 * -ENOMEM - FC attach transport failed 13477 * all others - failed 13478 */ 13479 static int __init 13480 lpfc_init(void) 13481 { 13482 int error = 0; 13483 13484 printk(LPFC_MODULE_DESC "\n"); 13485 printk(LPFC_COPYRIGHT "\n"); 13486 13487 error = misc_register(&lpfc_mgmt_dev); 13488 if (error) 13489 printk(KERN_ERR "Could not register lpfcmgmt device, " 13490 "misc_register returned with status %d", error); 13491 13492 lpfc_transport_functions.vport_create = lpfc_vport_create; 13493 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 13494 lpfc_transport_template = 13495 fc_attach_transport(&lpfc_transport_functions); 13496 if (lpfc_transport_template == NULL) 13497 return -ENOMEM; 13498 lpfc_vport_transport_template = 13499 fc_attach_transport(&lpfc_vport_transport_functions); 13500 if (lpfc_vport_transport_template == NULL) { 13501 fc_release_transport(lpfc_transport_template); 13502 return -ENOMEM; 13503 } 13504 lpfc_nvme_cmd_template(); 13505 lpfc_nvmet_cmd_template(); 13506 13507 /* Initialize in case vector mapping is needed */ 13508 lpfc_present_cpu = num_present_cpus(); 13509 13510 error = pci_register_driver(&lpfc_driver); 13511 if (error) { 13512 fc_release_transport(lpfc_transport_template); 13513 fc_release_transport(lpfc_vport_transport_template); 13514 } 13515 13516 return error; 13517 } 13518 13519 /** 13520 * lpfc_exit - lpfc module removal routine 13521 * 13522 * This routine is invoked when the lpfc module is removed from the kernel. 13523 * The special kernel macro module_exit() is used to indicate the role of 13524 * this routine to the kernel as lpfc module exit point. 
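 * Cleanup runs roughly in the reverse order of lpfc_init(): the lpfcmgmt misc
 * device is deregistered, the PCI driver is unregistered, both FC transport
 * templates are released, any BlockGuard dump buffers are freed, and the HBA
 * index IDR is destroyed.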
13525 */ 13526 static void __exit 13527 lpfc_exit(void) 13528 { 13529 misc_deregister(&lpfc_mgmt_dev); 13530 pci_unregister_driver(&lpfc_driver); 13531 fc_release_transport(lpfc_transport_template); 13532 fc_release_transport(lpfc_vport_transport_template); 13533 if (_dump_buf_data) { 13534 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 13535 "_dump_buf_data at 0x%p\n", 13536 (1L << _dump_buf_data_order), _dump_buf_data); 13537 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 13538 } 13539 13540 if (_dump_buf_dif) { 13541 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 13542 "_dump_buf_dif at 0x%p\n", 13543 (1L << _dump_buf_dif_order), _dump_buf_dif); 13544 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 13545 } 13546 idr_destroy(&lpfc_hba_index); 13547 } 13548 13549 module_init(lpfc_init); 13550 module_exit(lpfc_exit); 13551 MODULE_LICENSE("GPL"); 13552 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 13553 MODULE_AUTHOR("Broadcom"); 13554 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 13555