1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/delay.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/idr.h> 28 #include <linux/interrupt.h> 29 #include <linux/module.h> 30 #include <linux/kthread.h> 31 #include <linux/pci.h> 32 #include <linux/spinlock.h> 33 #include <linux/sched/clock.h> 34 #include <linux/ctype.h> 35 #include <linux/slab.h> 36 #include <linux/firmware.h> 37 #include <linux/miscdevice.h> 38 #include <linux/percpu.h> 39 #include <linux/irq.h> 40 #include <linux/bitops.h> 41 #include <linux/crash_dump.h> 42 #include <linux/cpu.h> 43 #include <linux/cpuhotplug.h> 44 45 #include <scsi/scsi.h> 46 #include <scsi/scsi_device.h> 47 #include <scsi/scsi_host.h> 48 #include <scsi/scsi_transport_fc.h> 49 #include <scsi/scsi_tcq.h> 50 #include <scsi/fc/fc_fs.h> 51 52 #include "lpfc_hw4.h" 53 #include "lpfc_hw.h" 54 #include "lpfc_sli.h" 55 #include "lpfc_sli4.h" 56 #include "lpfc_nl.h" 57 #include "lpfc_disc.h" 58 #include "lpfc.h" 59 #include "lpfc_scsi.h" 60 #include "lpfc_nvme.h" 61 #include "lpfc_logmsg.h" 62 #include "lpfc_crtn.h" 63 #include "lpfc_vport.h" 64 #include "lpfc_version.h" 65 #include "lpfc_ids.h" 66 67 static enum cpuhp_state lpfc_cpuhp_state; 68 /* Used when mapping IRQ vectors in a driver centric manner */ 69 static uint32_t lpfc_present_cpu; 70 static bool lpfc_pldv_detect; 71 72 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 73 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); 74 static void lpfc_cpuhp_add(struct lpfc_hba *phba); 75 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 76 static int lpfc_post_rcv_buf(struct lpfc_hba *); 77 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 78 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 79 static int lpfc_setup_endian_order(struct lpfc_hba *); 80 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 81 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 82 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *); 83 static void lpfc_init_sgl_list(struct lpfc_hba *); 84 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 85 static void lpfc_free_active_sgl(struct lpfc_hba *); 86 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 87 static int 
lpfc_hba_down_post_s4(struct lpfc_hba *phba); 88 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 89 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 90 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 91 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 92 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 93 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 94 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); 95 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); 96 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); 97 static void lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba); 98 static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba); 99 100 static struct scsi_transport_template *lpfc_transport_template = NULL; 101 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 102 static DEFINE_IDR(lpfc_hba_index); 103 #define LPFC_NVMET_BUF_POST 254 104 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); 105 static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts); 106 107 /** 108 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 109 * @phba: pointer to lpfc hba data structure. 110 * 111 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 112 * mailbox command. It retrieves the revision information from the HBA and 113 * collects the Vital Product Data (VPD) about the HBA for preparing the 114 * configuration of the HBA. 115 * 116 * Return codes: 117 * 0 - success. 118 * -ERESTART - requests the SLI layer to reset the HBA and try again. 119 * Any other value - indicates an error. 120 **/ 121 int 122 lpfc_config_port_prep(struct lpfc_hba *phba) 123 { 124 lpfc_vpd_t *vp = &phba->vpd; 125 int i = 0, rc; 126 LPFC_MBOXQ_t *pmb; 127 MAILBOX_t *mb; 128 char *lpfc_vpd_data = NULL; 129 uint16_t offset = 0; 130 static char licensed[56] = 131 "key unlock for use with gnu public licensed code only\0"; 132 static int init_key = 1; 133 134 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 135 if (!pmb) { 136 phba->link_state = LPFC_HBA_ERROR; 137 return -ENOMEM; 138 } 139 140 mb = &pmb->u.mb; 141 phba->link_state = LPFC_INIT_MBX_CMDS; 142 143 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 144 if (init_key) { 145 uint32_t *ptext = (uint32_t *) licensed; 146 147 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 148 *ptext = cpu_to_be32(*ptext); 149 init_key = 0; 150 } 151 152 lpfc_read_nv(phba, pmb); 153 memset((char*)mb->un.varRDnvp.rsvd3, 0, 154 sizeof (mb->un.varRDnvp.rsvd3)); 155 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 156 sizeof (licensed)); 157 158 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 159 160 if (rc != MBX_SUCCESS) { 161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 162 "0324 Config Port initialization " 163 "error, mbxCmd x%x READ_NVPARM, " 164 "mbxStatus x%x\n", 165 mb->mbxCommand, mb->mbxStatus); 166 mempool_free(pmb, phba->mbox_mem_pool); 167 return -ERESTART; 168 } 169 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 170 sizeof(phba->wwnn)); 171 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 172 sizeof(phba->wwpn)); 173 } 174 175 /* 176 * Clear all option bits except LPFC_SLI3_BG_ENABLED, 177 * which was already set in lpfc_get_cfgparam() 178 */ 179 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED; 180 181 /* Setup and issue mailbox READ REV command */ 182 lpfc_read_rev(phba, pmb); 183 rc = 
lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);

out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event notification on the device. If the mailbox
 * command returns successfully, the adapter's temperature sensor support
 * flag is set to 1; otherwise, it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_update_vport_wwn - Update the fc_nodename and fc_portname of a vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Copy the WWNN/WWPN from the service parameters when the vport's names are
 * not yet set; otherwise push the driver's names back into fc_sparam.
 *
 * Return codes
 *   None.
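 *
 * After this routine runs, lpfc_config_port_post() publishes the resulting
 * names to the FC transport; a condensed excerpt of that code, for reference
 * only:
 *
 *	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
 *	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);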
349 **/ 350 void 351 lpfc_update_vport_wwn(struct lpfc_vport *vport) 352 { 353 struct lpfc_hba *phba = vport->phba; 354 355 /* 356 * If the name is empty or there exists a soft name 357 * then copy the service params name, otherwise use the fc name 358 */ 359 if (vport->fc_nodename.u.wwn[0] == 0) 360 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 361 sizeof(struct lpfc_name)); 362 else 363 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 364 sizeof(struct lpfc_name)); 365 366 /* 367 * If the port name has changed, then set the Param changes flag 368 * to unreg the login 369 */ 370 if (vport->fc_portname.u.wwn[0] != 0 && 371 memcmp(&vport->fc_portname, &vport->fc_sparam.portName, 372 sizeof(struct lpfc_name))) { 373 vport->vport_flag |= FAWWPN_PARAM_CHG; 374 375 if (phba->sli_rev == LPFC_SLI_REV4 && 376 vport->port_type == LPFC_PHYSICAL_PORT && 377 phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) { 378 if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)) 379 phba->sli4_hba.fawwpn_flag &= 380 ~LPFC_FAWWPN_FABRIC; 381 lpfc_printf_log(phba, KERN_INFO, 382 LOG_SLI | LOG_DISCOVERY | LOG_ELS, 383 "2701 FA-PWWN change WWPN from %llx to " 384 "%llx: vflag x%x fawwpn_flag x%x\n", 385 wwn_to_u64(vport->fc_portname.u.wwn), 386 wwn_to_u64 387 (vport->fc_sparam.portName.u.wwn), 388 vport->vport_flag, 389 phba->sli4_hba.fawwpn_flag); 390 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 391 sizeof(struct lpfc_name)); 392 } 393 } 394 395 if (vport->fc_portname.u.wwn[0] == 0) 396 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 397 sizeof(struct lpfc_name)); 398 else 399 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 400 sizeof(struct lpfc_name)); 401 } 402 403 /** 404 * lpfc_config_port_post - Perform lpfc initialization after config port 405 * @phba: pointer to lpfc hba data structure. 406 * 407 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 408 * command call. It performs all internal resource and state setups on the 409 * port: post IOCB buffers, enable appropriate host interrupt attentions, 410 * ELS ring timers, etc. 411 * 412 * Return codes 413 * 0 - success. 414 * Any other value - error. 415 **/ 416 int 417 lpfc_config_port_post(struct lpfc_hba *phba) 418 { 419 struct lpfc_vport *vport = phba->pport; 420 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 421 LPFC_MBOXQ_t *pmb; 422 MAILBOX_t *mb; 423 struct lpfc_dmabuf *mp; 424 struct lpfc_sli *psli = &phba->sli; 425 uint32_t status, timeout; 426 int i, j; 427 int rc; 428 429 spin_lock_irq(&phba->hbalock); 430 /* 431 * If the Config port completed correctly the HBA is not 432 * over heated any more. 433 */ 434 if (phba->over_temp_state == HBA_OVER_TEMP) 435 phba->over_temp_state = HBA_NORMAL_TEMP; 436 spin_unlock_irq(&phba->hbalock); 437 438 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 439 if (!pmb) { 440 phba->link_state = LPFC_HBA_ERROR; 441 return -ENOMEM; 442 } 443 mb = &pmb->u.mb; 444 445 /* Get login parameters for NID. 
*/ 446 rc = lpfc_read_sparam(phba, pmb, 0); 447 if (rc) { 448 mempool_free(pmb, phba->mbox_mem_pool); 449 return -ENOMEM; 450 } 451 452 pmb->vport = vport; 453 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 454 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 455 "0448 Adapter failed init, mbxCmd x%x " 456 "READ_SPARM mbxStatus x%x\n", 457 mb->mbxCommand, mb->mbxStatus); 458 phba->link_state = LPFC_HBA_ERROR; 459 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 460 return -EIO; 461 } 462 463 mp = pmb->ctx_buf; 464 465 /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no 466 * longer needed. Prevent unintended ctx_buf access as the mbox is 467 * reused. 468 */ 469 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 470 lpfc_mbuf_free(phba, mp->virt, mp->phys); 471 kfree(mp); 472 pmb->ctx_buf = NULL; 473 lpfc_update_vport_wwn(vport); 474 475 /* Update the fc_host data structures with new wwn. */ 476 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 477 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 478 fc_host_max_npiv_vports(shost) = phba->max_vpi; 479 480 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 481 /* This should be consolidated into parse_vpd ? - mr */ 482 if (phba->SerialNumber[0] == 0) { 483 uint8_t *outptr; 484 485 outptr = &vport->fc_nodename.u.s.IEEE[0]; 486 for (i = 0; i < 12; i++) { 487 status = *outptr++; 488 j = ((status & 0xf0) >> 4); 489 if (j <= 9) 490 phba->SerialNumber[i] = 491 (char)((uint8_t) 0x30 + (uint8_t) j); 492 else 493 phba->SerialNumber[i] = 494 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 495 i++; 496 j = (status & 0xf); 497 if (j <= 9) 498 phba->SerialNumber[i] = 499 (char)((uint8_t) 0x30 + (uint8_t) j); 500 else 501 phba->SerialNumber[i] = 502 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 503 } 504 } 505 506 lpfc_read_config(phba, pmb); 507 pmb->vport = vport; 508 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 509 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 510 "0453 Adapter failed to init, mbxCmd x%x " 511 "READ_CONFIG, mbxStatus x%x\n", 512 mb->mbxCommand, mb->mbxStatus); 513 phba->link_state = LPFC_HBA_ERROR; 514 mempool_free( pmb, phba->mbox_mem_pool); 515 return -EIO; 516 } 517 518 /* Check if the port is disabled */ 519 lpfc_sli_read_link_ste(phba); 520 521 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 522 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) { 523 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 524 "3359 HBA queue depth changed from %d to %d\n", 525 phba->cfg_hba_queue_depth, 526 mb->un.varRdConfig.max_xri); 527 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri; 528 } 529 530 phba->lmt = mb->un.varRdConfig.lmt; 531 532 /* Get the default values for Model Name and Description */ 533 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 534 535 phba->link_state = LPFC_LINK_DOWN; 536 537 /* Only process IOCBs on ELS ring till hba_state is READY */ 538 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr) 539 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT; 540 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr) 541 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT; 542 543 /* Post receive buffers for desired rings */ 544 if (phba->sli_rev != 3) 545 lpfc_post_rcv_buf(phba); 546 547 /* 548 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 549 */ 550 if (phba->intr_type == MSIX) { 551 rc = lpfc_config_msi(phba, pmb); 552 if (rc) { 553 
mempool_free(pmb, phba->mbox_mem_pool); 554 return -EIO; 555 } 556 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 557 if (rc != MBX_SUCCESS) { 558 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 559 "0352 Config MSI mailbox command " 560 "failed, mbxCmd x%x, mbxStatus x%x\n", 561 pmb->u.mb.mbxCommand, 562 pmb->u.mb.mbxStatus); 563 mempool_free(pmb, phba->mbox_mem_pool); 564 return -EIO; 565 } 566 } 567 568 spin_lock_irq(&phba->hbalock); 569 /* Initialize ERATT handling flag */ 570 phba->hba_flag &= ~HBA_ERATT_HANDLED; 571 572 /* Enable appropriate host interrupts */ 573 if (lpfc_readl(phba->HCregaddr, &status)) { 574 spin_unlock_irq(&phba->hbalock); 575 return -EIO; 576 } 577 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 578 if (psli->num_rings > 0) 579 status |= HC_R0INT_ENA; 580 if (psli->num_rings > 1) 581 status |= HC_R1INT_ENA; 582 if (psli->num_rings > 2) 583 status |= HC_R2INT_ENA; 584 if (psli->num_rings > 3) 585 status |= HC_R3INT_ENA; 586 587 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 588 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 589 status &= ~(HC_R0INT_ENA); 590 591 writel(status, phba->HCregaddr); 592 readl(phba->HCregaddr); /* flush */ 593 spin_unlock_irq(&phba->hbalock); 594 595 /* Set up ring-0 (ELS) timer */ 596 timeout = phba->fc_ratov * 2; 597 mod_timer(&vport->els_tmofunc, 598 jiffies + msecs_to_jiffies(1000 * timeout)); 599 /* Set up heart beat (HB) timer */ 600 mod_timer(&phba->hb_tmofunc, 601 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 602 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 603 phba->last_completion_time = jiffies; 604 /* Set up error attention (ERATT) polling timer */ 605 mod_timer(&phba->eratt_poll, 606 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 607 608 if (phba->hba_flag & LINK_DISABLED) { 609 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 610 "2598 Adapter Link is disabled.\n"); 611 lpfc_down_link(phba, pmb); 612 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 613 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 614 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 616 "2599 Adapter failed to issue DOWN_LINK" 617 " mbox command rc 0x%x\n", rc); 618 619 mempool_free(pmb, phba->mbox_mem_pool); 620 return -EIO; 621 } 622 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 623 mempool_free(pmb, phba->mbox_mem_pool); 624 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 625 if (rc) 626 return rc; 627 } 628 /* MBOX buffer will be freed in mbox compl */ 629 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 630 if (!pmb) { 631 phba->link_state = LPFC_HBA_ERROR; 632 return -ENOMEM; 633 } 634 635 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 636 pmb->mbox_cmpl = lpfc_config_async_cmpl; 637 pmb->vport = phba->pport; 638 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 639 640 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 641 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 642 "0456 Adapter failed to issue " 643 "ASYNCEVT_ENABLE mbox status x%x\n", 644 rc); 645 mempool_free(pmb, phba->mbox_mem_pool); 646 } 647 648 /* Get Option rom version */ 649 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 650 if (!pmb) { 651 phba->link_state = LPFC_HBA_ERROR; 652 return -ENOMEM; 653 } 654 655 lpfc_dump_wakeup_param(phba, pmb); 656 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 657 pmb->vport = phba->pport; 658 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 659 660 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 661 lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, 662 "0435 Adapter failed " 663 "to get Option ROM version status x%x\n", rc); 664 mempool_free(pmb, phba->mbox_mem_pool); 665 } 666 667 return 0; 668 } 669 670 /** 671 * lpfc_sli4_refresh_params - update driver copy of params. 672 * @phba: Pointer to HBA context object. 673 * 674 * This is called to refresh driver copy of dynamic fields from the 675 * common_get_sli4_parameters descriptor. 676 **/ 677 int 678 lpfc_sli4_refresh_params(struct lpfc_hba *phba) 679 { 680 LPFC_MBOXQ_t *mboxq; 681 struct lpfc_mqe *mqe; 682 struct lpfc_sli4_parameters *mbx_sli4_parameters; 683 int length, rc; 684 685 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 686 if (!mboxq) 687 return -ENOMEM; 688 689 mqe = &mboxq->u.mqe; 690 /* Read the port's SLI4 Config Parameters */ 691 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 692 sizeof(struct lpfc_sli4_cfg_mhdr)); 693 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 694 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 695 length, LPFC_SLI4_MBX_EMBED); 696 697 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 698 if (unlikely(rc)) { 699 mempool_free(mboxq, phba->mbox_mem_pool); 700 return rc; 701 } 702 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 703 phba->sli4_hba.pc_sli4_params.mi_cap = 704 bf_get(cfg_mi_ver, mbx_sli4_parameters); 705 706 /* Are we forcing MI off via module parameter? */ 707 if (phba->cfg_enable_mi) 708 phba->sli4_hba.pc_sli4_params.mi_ver = 709 bf_get(cfg_mi_ver, mbx_sli4_parameters); 710 else 711 phba->sli4_hba.pc_sli4_params.mi_ver = 0; 712 713 phba->sli4_hba.pc_sli4_params.cmf = 714 bf_get(cfg_cmf, mbx_sli4_parameters); 715 phba->sli4_hba.pc_sli4_params.pls = 716 bf_get(cfg_pvl, mbx_sli4_parameters); 717 718 mempool_free(mboxq, phba->mbox_mem_pool); 719 return rc; 720 } 721 722 /** 723 * lpfc_hba_init_link - Initialize the FC link 724 * @phba: pointer to lpfc hba data structure. 725 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 726 * 727 * This routine will issue the INIT_LINK mailbox command call. 728 * It is available to other drivers through the lpfc_hba data 729 * structure for use as a delayed link up mechanism with the 730 * module parameter lpfc_suppress_link_up. 731 * 732 * Return code 733 * 0 - success 734 * Any other value - error 735 **/ 736 static int 737 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 738 { 739 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 740 } 741 742 /** 743 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 744 * @phba: pointer to lpfc hba data structure. 745 * @fc_topology: desired fc topology. 746 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 747 * 748 * This routine will issue the INIT_LINK mailbox command call. 749 * It is available to other drivers through the lpfc_hba data 750 * structure for use as a delayed link up mechanism with the 751 * module parameter lpfc_suppress_link_up. 
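 *
 * A minimal call sketch, mirroring the lpfc_hba_init_link() wrapper above;
 * the speed check shown is a simplified form of the cfg_link_speed versus
 * phba->lmt validation performed in the body below:
 *
 *	if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
 *	    !(phba->lmt & LMT_16Gb))
 *		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
 *	rc = lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology,
 *					    MBX_NOWAIT);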
752 * 753 * Return code 754 * 0 - success 755 * Any other value - error 756 **/ 757 int 758 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 759 uint32_t flag) 760 { 761 struct lpfc_vport *vport = phba->pport; 762 LPFC_MBOXQ_t *pmb; 763 MAILBOX_t *mb; 764 int rc; 765 766 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 767 if (!pmb) { 768 phba->link_state = LPFC_HBA_ERROR; 769 return -ENOMEM; 770 } 771 mb = &pmb->u.mb; 772 pmb->vport = vport; 773 774 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 775 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 776 !(phba->lmt & LMT_1Gb)) || 777 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 778 !(phba->lmt & LMT_2Gb)) || 779 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 780 !(phba->lmt & LMT_4Gb)) || 781 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 782 !(phba->lmt & LMT_8Gb)) || 783 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 784 !(phba->lmt & LMT_10Gb)) || 785 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 786 !(phba->lmt & LMT_16Gb)) || 787 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 788 !(phba->lmt & LMT_32Gb)) || 789 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) && 790 !(phba->lmt & LMT_64Gb))) { 791 /* Reset link speed to auto */ 792 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 793 "1302 Invalid speed for this board:%d " 794 "Reset link speed to auto.\n", 795 phba->cfg_link_speed); 796 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 797 } 798 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 799 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 800 if (phba->sli_rev < LPFC_SLI_REV4) 801 lpfc_set_loopback_flag(phba); 802 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 803 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 804 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 805 "0498 Adapter failed to init, mbxCmd x%x " 806 "INIT_LINK, mbxStatus x%x\n", 807 mb->mbxCommand, mb->mbxStatus); 808 if (phba->sli_rev <= LPFC_SLI_REV3) { 809 /* Clear all interrupt enable conditions */ 810 writel(0, phba->HCregaddr); 811 readl(phba->HCregaddr); /* flush */ 812 /* Clear all pending interrupts */ 813 writel(0xffffffff, phba->HAregaddr); 814 readl(phba->HAregaddr); /* flush */ 815 } 816 phba->link_state = LPFC_HBA_ERROR; 817 if (rc != MBX_BUSY || flag == MBX_POLL) 818 mempool_free(pmb, phba->mbox_mem_pool); 819 return -EIO; 820 } 821 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 822 if (flag == MBX_POLL) 823 mempool_free(pmb, phba->mbox_mem_pool); 824 825 return 0; 826 } 827 828 /** 829 * lpfc_hba_down_link - this routine downs the FC link 830 * @phba: pointer to lpfc hba data structure. 831 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 832 * 833 * This routine will issue the DOWN_LINK mailbox command call. 834 * It is available to other drivers through the lpfc_hba data 835 * structure for use to stop the link. 
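 *
 * The same DOWN_LINK pattern is open-coded in lpfc_config_port_post() when
 * the LINK_DISABLED flag is set; a condensed sketch of the common sequence:
 *
 *	lpfc_down_link(phba, pmb);
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 *	if (rc != MBX_SUCCESS && rc != MBX_BUSY)
 *		return -EIO;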
836 * 837 * Return code 838 * 0 - success 839 * Any other value - error 840 **/ 841 static int 842 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 843 { 844 LPFC_MBOXQ_t *pmb; 845 int rc; 846 847 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 848 if (!pmb) { 849 phba->link_state = LPFC_HBA_ERROR; 850 return -ENOMEM; 851 } 852 853 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 854 "0491 Adapter Link is disabled.\n"); 855 lpfc_down_link(phba, pmb); 856 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 857 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 858 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 859 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 860 "2522 Adapter failed to issue DOWN_LINK" 861 " mbox command rc 0x%x\n", rc); 862 863 mempool_free(pmb, phba->mbox_mem_pool); 864 return -EIO; 865 } 866 if (flag == MBX_POLL) 867 mempool_free(pmb, phba->mbox_mem_pool); 868 869 return 0; 870 } 871 872 /** 873 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 874 * @phba: pointer to lpfc HBA data structure. 875 * 876 * This routine will do LPFC uninitialization before the HBA is reset when 877 * bringing down the SLI Layer. 878 * 879 * Return codes 880 * 0 - success. 881 * Any other value - error. 882 **/ 883 int 884 lpfc_hba_down_prep(struct lpfc_hba *phba) 885 { 886 struct lpfc_vport **vports; 887 int i; 888 889 if (phba->sli_rev <= LPFC_SLI_REV3) { 890 /* Disable interrupts */ 891 writel(0, phba->HCregaddr); 892 readl(phba->HCregaddr); /* flush */ 893 } 894 895 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) 896 lpfc_cleanup_discovery_resources(phba->pport); 897 else { 898 vports = lpfc_create_vport_work_array(phba); 899 if (vports != NULL) 900 for (i = 0; i <= phba->max_vports && 901 vports[i] != NULL; i++) 902 lpfc_cleanup_discovery_resources(vports[i]); 903 lpfc_destroy_vport_work_array(phba, vports); 904 } 905 return 0; 906 } 907 908 /** 909 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 910 * rspiocb which got deferred 911 * 912 * @phba: pointer to lpfc HBA data structure. 913 * 914 * This routine will cleanup completed slow path events after HBA is reset 915 * when bringing down the SLI Layer. 916 * 917 * 918 * Return codes 919 * void. 920 **/ 921 static void 922 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 923 { 924 struct lpfc_iocbq *rspiocbq; 925 struct hbq_dmabuf *dmabuf; 926 struct lpfc_cq_event *cq_event; 927 928 spin_lock_irq(&phba->hbalock); 929 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 930 spin_unlock_irq(&phba->hbalock); 931 932 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 933 /* Get the response iocb from the head of work queue */ 934 spin_lock_irq(&phba->hbalock); 935 list_remove_head(&phba->sli4_hba.sp_queue_event, 936 cq_event, struct lpfc_cq_event, list); 937 spin_unlock_irq(&phba->hbalock); 938 939 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 940 case CQE_CODE_COMPL_WQE: 941 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 942 cq_event); 943 lpfc_sli_release_iocbq(phba, rspiocbq); 944 break; 945 case CQE_CODE_RECEIVE: 946 case CQE_CODE_RECEIVE_V1: 947 dmabuf = container_of(cq_event, struct hbq_dmabuf, 948 cq_event); 949 lpfc_in_buf_free(phba, &dmabuf->dbuf); 950 } 951 } 952 } 953 954 /** 955 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 956 * @phba: pointer to lpfc HBA data structure. 957 * 958 * This routine will cleanup posted ELS buffers after the HBA is reset 959 * when bringing down the SLI Layer. 960 * 961 * 962 * Return codes 963 * void. 
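 *
 * The non-HBQ path below uses the usual splice-under-lock pattern so the
 * preposted buffers can be freed without holding hbalock; condensed from the
 * body that follows:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&pring->postbufq, &buflist);
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	list_for_each_entry_safe(mp, next_mp, &buflist, list) {
 *		list_del(&mp->list);
 *		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 *		kfree(mp);
 *	}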
964 **/ 965 static void 966 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 967 { 968 struct lpfc_sli *psli = &phba->sli; 969 struct lpfc_sli_ring *pring; 970 struct lpfc_dmabuf *mp, *next_mp; 971 LIST_HEAD(buflist); 972 int count; 973 974 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 975 lpfc_sli_hbqbuf_free_all(phba); 976 else { 977 /* Cleanup preposted buffers on the ELS ring */ 978 pring = &psli->sli3_ring[LPFC_ELS_RING]; 979 spin_lock_irq(&phba->hbalock); 980 list_splice_init(&pring->postbufq, &buflist); 981 spin_unlock_irq(&phba->hbalock); 982 983 count = 0; 984 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 985 list_del(&mp->list); 986 count++; 987 lpfc_mbuf_free(phba, mp->virt, mp->phys); 988 kfree(mp); 989 } 990 991 spin_lock_irq(&phba->hbalock); 992 pring->postbufq_cnt -= count; 993 spin_unlock_irq(&phba->hbalock); 994 } 995 } 996 997 /** 998 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 999 * @phba: pointer to lpfc HBA data structure. 1000 * 1001 * This routine will cleanup the txcmplq after the HBA is reset when bringing 1002 * down the SLI Layer. 1003 * 1004 * Return codes 1005 * void 1006 **/ 1007 static void 1008 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 1009 { 1010 struct lpfc_sli *psli = &phba->sli; 1011 struct lpfc_queue *qp = NULL; 1012 struct lpfc_sli_ring *pring; 1013 LIST_HEAD(completions); 1014 int i; 1015 struct lpfc_iocbq *piocb, *next_iocb; 1016 1017 if (phba->sli_rev != LPFC_SLI_REV4) { 1018 for (i = 0; i < psli->num_rings; i++) { 1019 pring = &psli->sli3_ring[i]; 1020 spin_lock_irq(&phba->hbalock); 1021 /* At this point in time the HBA is either reset or DOA 1022 * Nothing should be on txcmplq as it will 1023 * NEVER complete. 1024 */ 1025 list_splice_init(&pring->txcmplq, &completions); 1026 pring->txcmplq_cnt = 0; 1027 spin_unlock_irq(&phba->hbalock); 1028 1029 lpfc_sli_abort_iocb_ring(phba, pring); 1030 } 1031 /* Cancel all the IOCBs from the completions list */ 1032 lpfc_sli_cancel_iocbs(phba, &completions, 1033 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1034 return; 1035 } 1036 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 1037 pring = qp->pring; 1038 if (!pring) 1039 continue; 1040 spin_lock_irq(&pring->ring_lock); 1041 list_for_each_entry_safe(piocb, next_iocb, 1042 &pring->txcmplq, list) 1043 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; 1044 list_splice_init(&pring->txcmplq, &completions); 1045 pring->txcmplq_cnt = 0; 1046 spin_unlock_irq(&pring->ring_lock); 1047 lpfc_sli_abort_iocb_ring(phba, pring); 1048 } 1049 /* Cancel all the IOCBs from the completions list */ 1050 lpfc_sli_cancel_iocbs(phba, &completions, 1051 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 1052 } 1053 1054 /** 1055 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 1056 * @phba: pointer to lpfc HBA data structure. 1057 * 1058 * This routine will do uninitialization after the HBA is reset when bring 1059 * down the SLI Layer. 1060 * 1061 * Return codes 1062 * 0 - success. 1063 * Any other value - error. 1064 **/ 1065 static int 1066 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 1067 { 1068 lpfc_hba_free_post_buf(phba); 1069 lpfc_hba_clean_txcmplq(phba); 1070 return 0; 1071 } 1072 1073 /** 1074 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 1075 * @phba: pointer to lpfc HBA data structure. 1076 * 1077 * This routine will do uninitialization after the HBA is reset when bring 1078 * down the SLI Layer. 1079 * 1080 * Return codes 1081 * 0 - success. 1082 * Any other value - error. 
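 *
 * For each hardware queue, the aborted I/O buffers are moved back onto the
 * put list and the accounting counters are folded in; a condensed sketch of
 * the body below (locking and per-buffer cleanup omitted):
 *
 *	list_splice_init(&qp->lpfc_abts_io_buf_list, &aborts);
 *	list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
 *	qp->put_io_bufs += qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs;
 *	qp->abts_scsi_io_bufs = 0;
 *	qp->abts_nvme_io_bufs = 0;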
1083 **/ 1084 static int 1085 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 1086 { 1087 struct lpfc_io_buf *psb, *psb_next; 1088 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next; 1089 struct lpfc_sli4_hdw_queue *qp; 1090 LIST_HEAD(aborts); 1091 LIST_HEAD(nvme_aborts); 1092 LIST_HEAD(nvmet_aborts); 1093 struct lpfc_sglq *sglq_entry = NULL; 1094 int cnt, idx; 1095 1096 1097 lpfc_sli_hbqbuf_free_all(phba); 1098 lpfc_hba_clean_txcmplq(phba); 1099 1100 /* At this point in time the HBA is either reset or DOA. Either 1101 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1102 * on the lpfc_els_sgl_list so that it can either be freed if the 1103 * driver is unloading or reposted if the driver is restarting 1104 * the port. 1105 */ 1106 1107 /* sgl_list_lock required because worker thread uses this 1108 * list. 1109 */ 1110 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 1111 list_for_each_entry(sglq_entry, 1112 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1113 sglq_entry->state = SGL_FREED; 1114 1115 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1116 &phba->sli4_hba.lpfc_els_sgl_list); 1117 1118 1119 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 1120 1121 /* abts_xxxx_buf_list_lock required because worker thread uses this 1122 * list. 1123 */ 1124 spin_lock_irq(&phba->hbalock); 1125 cnt = 0; 1126 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1127 qp = &phba->sli4_hba.hdwq[idx]; 1128 1129 spin_lock(&qp->abts_io_buf_list_lock); 1130 list_splice_init(&qp->lpfc_abts_io_buf_list, 1131 &aborts); 1132 1133 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1134 psb->pCmd = NULL; 1135 psb->status = IOSTAT_SUCCESS; 1136 cnt++; 1137 } 1138 spin_lock(&qp->io_buf_list_put_lock); 1139 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1140 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1141 qp->put_io_bufs += qp->abts_nvme_io_bufs; 1142 qp->abts_scsi_io_bufs = 0; 1143 qp->abts_nvme_io_bufs = 0; 1144 spin_unlock(&qp->io_buf_list_put_lock); 1145 spin_unlock(&qp->abts_io_buf_list_lock); 1146 } 1147 spin_unlock_irq(&phba->hbalock); 1148 1149 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1150 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1151 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1152 &nvmet_aborts); 1153 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock); 1154 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1155 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP); 1156 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1157 } 1158 } 1159 1160 lpfc_sli4_free_sp_events(phba); 1161 return cnt; 1162 } 1163 1164 /** 1165 * lpfc_hba_down_post - Wrapper func for hba down post routine 1166 * @phba: pointer to lpfc HBA data structure. 1167 * 1168 * This routine wraps the actual SLI3 or SLI4 routine for performing 1169 * uninitialization after the HBA is reset when bring down the SLI Layer. 1170 * 1171 * Return codes 1172 * 0 - success. 1173 * Any other value - error. 1174 **/ 1175 int 1176 lpfc_hba_down_post(struct lpfc_hba *phba) 1177 { 1178 return (*phba->lpfc_hba_down_post)(phba); 1179 } 1180 1181 /** 1182 * lpfc_hb_timeout - The HBA-timer timeout handler 1183 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1184 * 1185 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1186 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1187 * work-port-events bitmap and the worker thread is notified. 
This timeout 1188 * event will be used by the worker thread to invoke the actual timeout 1189 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1190 * be performed in the timeout handler and the HBA timeout event bit shall 1191 * be cleared by the worker thread after it has taken the event bitmap out. 1192 **/ 1193 static void 1194 lpfc_hb_timeout(struct timer_list *t) 1195 { 1196 struct lpfc_hba *phba; 1197 uint32_t tmo_posted; 1198 unsigned long iflag; 1199 1200 phba = from_timer(phba, t, hb_tmofunc); 1201 1202 /* Check for heart beat timeout conditions */ 1203 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1204 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1205 if (!tmo_posted) 1206 phba->pport->work_port_events |= WORKER_HB_TMO; 1207 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1208 1209 /* Tell the worker thread there is work to do */ 1210 if (!tmo_posted) 1211 lpfc_worker_wake_up(phba); 1212 return; 1213 } 1214 1215 /** 1216 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1217 * @t: timer context used to obtain the pointer to lpfc hba data structure. 1218 * 1219 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1220 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1221 * work-port-events bitmap and the worker thread is notified. This timeout 1222 * event will be used by the worker thread to invoke the actual timeout 1223 * handler routine, lpfc_rrq_handler. Any periodical operations will 1224 * be performed in the timeout handler and the RRQ timeout event bit shall 1225 * be cleared by the worker thread after it has taken the event bitmap out. 1226 **/ 1227 static void 1228 lpfc_rrq_timeout(struct timer_list *t) 1229 { 1230 struct lpfc_hba *phba; 1231 unsigned long iflag; 1232 1233 phba = from_timer(phba, t, rrq_tmr); 1234 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1235 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 1236 phba->hba_flag |= HBA_RRQ_ACTIVE; 1237 else 1238 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1239 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1240 1241 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 1242 lpfc_worker_wake_up(phba); 1243 } 1244 1245 /** 1246 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1247 * @phba: pointer to lpfc hba data structure. 1248 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1249 * 1250 * This is the callback function to the lpfc heart-beat mailbox command. 1251 * If configured, the lpfc driver issues the heart-beat mailbox command to 1252 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1253 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1254 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1255 * heart-beat outstanding state. Once the mailbox command comes back and 1256 * no error conditions detected, the heart-beat mailbox command timer is 1257 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1258 * state is cleared for the next heart-beat. If the timer expired with the 1259 * heart-beat outstanding state set, the driver will put the HBA offline. 
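 *
 * A condensed timeline of the normal case, using the current interval and
 * timeout values (5 and 30 seconds):
 *
 *	t+0s	timer fires, lpfc_issue_hb_mbox() sends MBX_HEARTBEAT and
 *		sets HBA_HBEAT_INP
 *	t+n	this completion runs, clears HBA_HBEAT_INP/HBA_HBEAT_TMO and
 *		rearms hb_tmofunc for LPFC_HB_MBOX_INTERVAL later
 *	t+30s	if the command never completed, the timeout handler sees
 *		HBA_HBEAT_INP still set and takes the HBA offline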
1260 **/ 1261 static void 1262 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1263 { 1264 unsigned long drvr_flag; 1265 1266 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1267 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 1268 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1269 1270 /* Check and reset heart-beat timer if necessary */ 1271 mempool_free(pmboxq, phba->mbox_mem_pool); 1272 if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) && 1273 !(phba->link_state == LPFC_HBA_ERROR) && 1274 !test_bit(FC_UNLOADING, &phba->pport->load_flag)) 1275 mod_timer(&phba->hb_tmofunc, 1276 jiffies + 1277 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1278 return; 1279 } 1280 1281 /* 1282 * lpfc_idle_stat_delay_work - idle_stat tracking 1283 * 1284 * This routine tracks per-eq idle_stat and determines polling decisions. 1285 * 1286 * Return codes: 1287 * None 1288 **/ 1289 static void 1290 lpfc_idle_stat_delay_work(struct work_struct *work) 1291 { 1292 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1293 struct lpfc_hba, 1294 idle_stat_delay_work); 1295 struct lpfc_queue *eq; 1296 struct lpfc_sli4_hdw_queue *hdwq; 1297 struct lpfc_idle_stat *idle_stat; 1298 u32 i, idle_percent; 1299 u64 wall, wall_idle, diff_wall, diff_idle, busy_time; 1300 1301 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) 1302 return; 1303 1304 if (phba->link_state == LPFC_HBA_ERROR || 1305 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) || 1306 phba->cmf_active_mode != LPFC_CFG_OFF) 1307 goto requeue; 1308 1309 for_each_present_cpu(i) { 1310 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; 1311 eq = hdwq->hba_eq; 1312 1313 /* Skip if we've already handled this eq's primary CPU */ 1314 if (eq->chann != i) 1315 continue; 1316 1317 idle_stat = &phba->sli4_hba.idle_stat[i]; 1318 1319 /* get_cpu_idle_time returns values as running counters. Thus, 1320 * to know the amount for this period, the prior counter values 1321 * need to be subtracted from the current counter values. 1322 * From there, the idle time stat can be calculated as a 1323 * percentage of 100 - the sum of the other consumption times. 
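		 *
		 * Worked example with made-up counter deltas: if the wall
		 * counter advanced by 1000 and the idle counter by 850 since
		 * the last pass, then
		 *
		 *	busy_time    = 1000 - 850 = 150
		 *	idle_percent = 100 - (100 * 150) / 1000 = 85
		 *
		 * and, per the check below, 85 is not less than 15, so the
		 * EQ is placed in LPFC_THREADED_IRQ mode.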
1324 */ 1325 wall_idle = get_cpu_idle_time(i, &wall, 1); 1326 diff_idle = wall_idle - idle_stat->prev_idle; 1327 diff_wall = wall - idle_stat->prev_wall; 1328 1329 if (diff_wall <= diff_idle) 1330 busy_time = 0; 1331 else 1332 busy_time = diff_wall - diff_idle; 1333 1334 idle_percent = div64_u64(100 * busy_time, diff_wall); 1335 idle_percent = 100 - idle_percent; 1336 1337 if (idle_percent < 15) 1338 eq->poll_mode = LPFC_QUEUE_WORK; 1339 else 1340 eq->poll_mode = LPFC_THREADED_IRQ; 1341 1342 idle_stat->prev_idle = wall_idle; 1343 idle_stat->prev_wall = wall; 1344 } 1345 1346 requeue: 1347 schedule_delayed_work(&phba->idle_stat_delay_work, 1348 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); 1349 } 1350 1351 static void 1352 lpfc_hb_eq_delay_work(struct work_struct *work) 1353 { 1354 struct lpfc_hba *phba = container_of(to_delayed_work(work), 1355 struct lpfc_hba, eq_delay_work); 1356 struct lpfc_eq_intr_info *eqi, *eqi_new; 1357 struct lpfc_queue *eq, *eq_next; 1358 unsigned char *ena_delay = NULL; 1359 uint32_t usdelay; 1360 int i; 1361 1362 if (!phba->cfg_auto_imax || 1363 test_bit(FC_UNLOADING, &phba->pport->load_flag)) 1364 return; 1365 1366 if (phba->link_state == LPFC_HBA_ERROR || 1367 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag)) 1368 goto requeue; 1369 1370 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), 1371 GFP_KERNEL); 1372 if (!ena_delay) 1373 goto requeue; 1374 1375 for (i = 0; i < phba->cfg_irq_chann; i++) { 1376 /* Get the EQ corresponding to the IRQ vector */ 1377 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1378 if (!eq) 1379 continue; 1380 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { 1381 eq->q_flag &= ~HBA_EQ_DELAY_CHK; 1382 ena_delay[eq->last_cpu] = 1; 1383 } 1384 } 1385 1386 for_each_present_cpu(i) { 1387 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1388 if (ena_delay[i]) { 1389 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; 1390 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) 1391 usdelay = LPFC_MAX_AUTO_EQ_DELAY; 1392 } else { 1393 usdelay = 0; 1394 } 1395 1396 eqi->icnt = 0; 1397 1398 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { 1399 if (unlikely(eq->last_cpu != i)) { 1400 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, 1401 eq->last_cpu); 1402 list_move_tail(&eq->cpu_list, &eqi_new->list); 1403 continue; 1404 } 1405 if (usdelay != eq->q_mode) 1406 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, 1407 usdelay); 1408 } 1409 } 1410 1411 kfree(ena_delay); 1412 1413 requeue: 1414 queue_delayed_work(phba->wq, &phba->eq_delay_work, 1415 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 1416 } 1417 1418 /** 1419 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution 1420 * @phba: pointer to lpfc hba data structure. 1421 * 1422 * For each heartbeat, this routine does some heuristic methods to adjust 1423 * XRI distribution. The goal is to fully utilize free XRIs. 1424 **/ 1425 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) 1426 { 1427 u32 i; 1428 u32 hwq_count; 1429 1430 hwq_count = phba->cfg_hdw_queue; 1431 for (i = 0; i < hwq_count; i++) { 1432 /* Adjust XRIs in private pool */ 1433 lpfc_adjust_pvt_pool_count(phba, i); 1434 1435 /* Adjust high watermark */ 1436 lpfc_adjust_high_watermark(phba, i); 1437 1438 #ifdef LPFC_MXP_STAT 1439 /* Snapshot pbl, pvt and busy count */ 1440 lpfc_snapshot_mxp(phba, i); 1441 #endif 1442 } 1443 } 1444 1445 /** 1446 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command 1447 * @phba: pointer to lpfc hba data structure. 
 *
 * If an HB mbox is not already in progress, this routine will allocate
 * an LPFC_MBOXQ_t, populate it with an MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer fires every 5 seconds. If the HBA_HBEAT_TMO flag is
 * set, the timeout routine will force an MBX_HEARTBEAT mbox command,
 * regardless of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue an MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler invoked by the worker
 * thread whenever the HBA timer has fired and an HBA-timeout event has been
 * posted. This handler performs any periodic operations needed for the
 * device. If such a periodic event has already been attended to, either in
 * the interrupt handler or by processing slow-ring or fast-ring events within
 * the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
 * resets the timer for the next timeout period. If the lpfc heart-beat
 * mailbox command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox command is issued and the timer is set
 * properly. Otherwise, if a heart-beat mailbox command has been outstanding,
 * the HBA shall be taken offline.
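 *
 * A condensed, pseudo-code view of the decision made on each timeout (the
 * real checks are in the body below):
 *
 *	if (cfg_enable_hba_heartbeat) {
 *		if (an I/O completed within the last interval)
 *			rearm the timer only;
 *		else if (HBA_HBEAT_INP)
 *			wait out the LPFC_HB_MBOX_TIMEOUT window;
 *		else
 *			lpfc_issue_hb_mbox(phba);
 *	} else if (HBA_HBEAT_TMO) {
 *		lpfc_issue_hb_mbox(phba);
 *	}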
1514 **/ 1515 void 1516 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1517 { 1518 struct lpfc_vport **vports; 1519 struct lpfc_dmabuf *buf_ptr; 1520 int retval = 0; 1521 int i, tmo; 1522 struct lpfc_sli *psli = &phba->sli; 1523 LIST_HEAD(completions); 1524 1525 if (phba->cfg_xri_rebalancing) { 1526 /* Multi-XRI pools handler */ 1527 lpfc_hb_mxp_handler(phba); 1528 } 1529 1530 vports = lpfc_create_vport_work_array(phba); 1531 if (vports != NULL) 1532 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1533 lpfc_rcv_seq_check_edtov(vports[i]); 1534 lpfc_fdmi_change_check(vports[i]); 1535 } 1536 lpfc_destroy_vport_work_array(phba, vports); 1537 1538 if (phba->link_state == LPFC_HBA_ERROR || 1539 test_bit(FC_UNLOADING, &phba->pport->load_flag) || 1540 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag)) 1541 return; 1542 1543 if (phba->elsbuf_cnt && 1544 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1545 spin_lock_irq(&phba->hbalock); 1546 list_splice_init(&phba->elsbuf, &completions); 1547 phba->elsbuf_cnt = 0; 1548 phba->elsbuf_prev_cnt = 0; 1549 spin_unlock_irq(&phba->hbalock); 1550 1551 while (!list_empty(&completions)) { 1552 list_remove_head(&completions, buf_ptr, 1553 struct lpfc_dmabuf, list); 1554 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1555 kfree(buf_ptr); 1556 } 1557 } 1558 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1559 1560 /* If there is no heart beat outstanding, issue a heartbeat command */ 1561 if (phba->cfg_enable_hba_heartbeat) { 1562 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ 1563 spin_lock_irq(&phba->pport->work_port_lock); 1564 if (time_after(phba->last_completion_time + 1565 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1566 jiffies)) { 1567 spin_unlock_irq(&phba->pport->work_port_lock); 1568 if (phba->hba_flag & HBA_HBEAT_INP) 1569 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1570 else 1571 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1572 goto out; 1573 } 1574 spin_unlock_irq(&phba->pport->work_port_lock); 1575 1576 /* Check if a MBX_HEARTBEAT is already in progress */ 1577 if (phba->hba_flag & HBA_HBEAT_INP) { 1578 /* 1579 * If heart beat timeout called with HBA_HBEAT_INP set 1580 * we need to give the hb mailbox cmd a chance to 1581 * complete or TMO. 
1582 */ 1583 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1584 "0459 Adapter heartbeat still outstanding: " 1585 "last compl time was %d ms.\n", 1586 jiffies_to_msecs(jiffies 1587 - phba->last_completion_time)); 1588 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1589 } else { 1590 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1591 (list_empty(&psli->mboxq))) { 1592 1593 retval = lpfc_issue_hb_mbox(phba); 1594 if (retval) { 1595 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1596 goto out; 1597 } 1598 phba->skipped_hb = 0; 1599 } else if (time_before_eq(phba->last_completion_time, 1600 phba->skipped_hb)) { 1601 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1602 "2857 Last completion time not " 1603 " updated in %d ms\n", 1604 jiffies_to_msecs(jiffies 1605 - phba->last_completion_time)); 1606 } else 1607 phba->skipped_hb = jiffies; 1608 1609 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1610 goto out; 1611 } 1612 } else { 1613 /* Check to see if we want to force a MBX_HEARTBEAT */ 1614 if (phba->hba_flag & HBA_HBEAT_TMO) { 1615 retval = lpfc_issue_hb_mbox(phba); 1616 if (retval) 1617 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1618 else 1619 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); 1620 goto out; 1621 } 1622 tmo = (1000 * LPFC_HB_MBOX_INTERVAL); 1623 } 1624 out: 1625 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo)); 1626 } 1627 1628 /** 1629 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1630 * @phba: pointer to lpfc hba data structure. 1631 * 1632 * This routine is called to bring the HBA offline when HBA hardware error 1633 * other than Port Error 6 has been detected. 1634 **/ 1635 static void 1636 lpfc_offline_eratt(struct lpfc_hba *phba) 1637 { 1638 struct lpfc_sli *psli = &phba->sli; 1639 1640 spin_lock_irq(&phba->hbalock); 1641 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1642 spin_unlock_irq(&phba->hbalock); 1643 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1644 1645 lpfc_offline(phba); 1646 lpfc_reset_barrier(phba); 1647 spin_lock_irq(&phba->hbalock); 1648 lpfc_sli_brdreset(phba); 1649 spin_unlock_irq(&phba->hbalock); 1650 lpfc_hba_down_post(phba); 1651 lpfc_sli_brdready(phba, HS_MBRDY); 1652 lpfc_unblock_mgmt_io(phba); 1653 phba->link_state = LPFC_HBA_ERROR; 1654 return; 1655 } 1656 1657 /** 1658 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1659 * @phba: pointer to lpfc hba data structure. 1660 * 1661 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1662 * other than Port Error 6 has been detected. 1663 **/ 1664 void 1665 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1666 { 1667 spin_lock_irq(&phba->hbalock); 1668 if (phba->link_state == LPFC_HBA_ERROR && 1669 test_bit(HBA_PCI_ERR, &phba->bit_flags)) { 1670 spin_unlock_irq(&phba->hbalock); 1671 return; 1672 } 1673 phba->link_state = LPFC_HBA_ERROR; 1674 spin_unlock_irq(&phba->hbalock); 1675 1676 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1677 lpfc_sli_flush_io_rings(phba); 1678 lpfc_offline(phba); 1679 lpfc_hba_down_post(phba); 1680 lpfc_unblock_mgmt_io(phba); 1681 } 1682 1683 /** 1684 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1685 * @phba: pointer to lpfc hba data structure. 1686 * 1687 * This routine is invoked to handle the deferred HBA hardware error 1688 * conditions. This type of error is indicated by HBA by setting ER1 1689 * and another ER bit in the host status register. The driver will 1690 * wait until the ER1 bit clears before handling the error condition. 
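 *
 * A condensed sketch of the ER1 wait performed below (100 ms polling, bailing
 * out if the register read fails or the driver is unloading):
 *
 *	while (phba->work_hs & HS_FFER1) {
 *		msleep(100);
 *		if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
 *		    test_bit(FC_UNLOADING, &phba->pport->load_flag))
 *			break;
 *	}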
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it has triggered an error attention. This could
	 * cause I/Os to be dropped by the firmware. Fail the I/Os on the
	 * txcmplq and let the SCSI layer retry them after the link is
	 * re-established.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
1799 */
1800 if (pci_channel_offline(phba->pcidev)) {
1801 spin_lock_irq(&phba->hbalock);
1802 phba->hba_flag &= ~DEFER_ERATT;
1803 spin_unlock_irq(&phba->hbalock);
1804 return;
1805 }
1806
1807 /* If resets are disabled then leave the HBA alone and return */
1808 if (!phba->cfg_enable_hba_reset)
1809 return;
1810
1811 /* Send an internal error event to mgmt application */
1812 lpfc_board_errevt_to_mgmt(phba);
1813
1814 if (phba->hba_flag & DEFER_ERATT)
1815 lpfc_handle_deferred_eratt(phba);
1816
1817 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1818 if (phba->work_hs & HS_FFER6)
1819 /* Re-establishing Link */
1820 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1821 "1301 Re-establishing Link "
1822 "Data: x%x x%x x%x\n",
1823 phba->work_hs, phba->work_status[0],
1824 phba->work_status[1]);
1825 if (phba->work_hs & HS_FFER8)
1826 /* Device Zeroization */
1827 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1828 "2861 Host Authentication device "
1829 "zeroization Data:x%x x%x x%x\n",
1830 phba->work_hs, phba->work_status[0],
1831 phba->work_status[1]);
1832
1833 spin_lock_irq(&phba->hbalock);
1834 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1835 spin_unlock_irq(&phba->hbalock);
1836
1837 /*
1838 * Firmware stops when it has triggered an erratt with HS_FFER6,
1839 * which can cause I/Os to be dropped by the firmware.
1840 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
1841 * retry them after re-establishing the link.
1842 */
1843 lpfc_sli_abort_fcp_rings(phba);
1844
1845 /*
1846 * There was a firmware error. Take the hba offline and then
1847 * attempt to restart it.
1848 */
1849 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1850 lpfc_offline(phba);
1851 lpfc_sli_brdrestart(phba);
1852 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1853 lpfc_unblock_mgmt_io(phba);
1854 return;
1855 }
1856 lpfc_unblock_mgmt_io(phba);
1857 } else if (phba->work_hs & HS_CRIT_TEMP) {
1858 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1859 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1860 temp_event_data.event_code = LPFC_CRIT_TEMP;
1861 temp_event_data.data = (uint32_t)temperature;
1862
1863 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1864 "0406 Adapter maximum temperature exceeded "
1865 "(%ld), taking this port offline "
1866 "Data: x%x x%x x%x\n",
1867 temperature, phba->work_hs,
1868 phba->work_status[0], phba->work_status[1]);
1869
1870 shost = lpfc_shost_from_vport(phba->pport);
1871 fc_host_post_vendor_event(shost, fc_get_event_number(),
1872 sizeof(temp_event_data),
1873 (char *) &temp_event_data,
1874 SCSI_NL_VID_TYPE_PCI
1875 | PCI_VENDOR_ID_EMULEX);
1876
1877 spin_lock_irq(&phba->hbalock);
1878 phba->over_temp_state = HBA_OVER_TEMP;
1879 spin_unlock_irq(&phba->hbalock);
1880 lpfc_offline_eratt(phba);
1881
1882 } else {
1883 /* The if clause above forces this code path when the status
1884 * failure is a value other than FFER6. Do not call the offline
1885 * routine twice. This is the adapter hardware error path.
1886 */ 1887 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1888 "0457 Adapter Hardware Error " 1889 "Data: x%x x%x x%x\n", 1890 phba->work_hs, 1891 phba->work_status[0], phba->work_status[1]); 1892 1893 event_data = FC_REG_DUMP_EVENT; 1894 shost = lpfc_shost_from_vport(vport); 1895 fc_host_post_vendor_event(shost, fc_get_event_number(), 1896 sizeof(event_data), (char *) &event_data, 1897 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1898 1899 lpfc_offline_eratt(phba); 1900 } 1901 return; 1902 } 1903 1904 /** 1905 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1906 * @phba: pointer to lpfc hba data structure. 1907 * @mbx_action: flag for mailbox shutdown action. 1908 * @en_rn_msg: send reset/port recovery message. 1909 * This routine is invoked to perform an SLI4 port PCI function reset in 1910 * response to port status register polling attention. It waits for port 1911 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1912 * During this process, interrupt vectors are freed and later requested 1913 * for handling possible port resource change. 1914 **/ 1915 static int 1916 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1917 bool en_rn_msg) 1918 { 1919 int rc; 1920 uint32_t intr_mode; 1921 LPFC_MBOXQ_t *mboxq; 1922 1923 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 1924 LPFC_SLI_INTF_IF_TYPE_2) { 1925 /* 1926 * On error status condition, driver need to wait for port 1927 * ready before performing reset. 1928 */ 1929 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1930 if (rc) 1931 return rc; 1932 } 1933 1934 /* need reset: attempt for port recovery */ 1935 if (en_rn_msg) 1936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1937 "2887 Reset Needed: Attempting Port " 1938 "Recovery...\n"); 1939 1940 /* If we are no wait, the HBA has been reset and is not 1941 * functional, thus we should clear 1942 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. 1943 */ 1944 if (mbx_action == LPFC_MBX_NO_WAIT) { 1945 spin_lock_irq(&phba->hbalock); 1946 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 1947 if (phba->sli.mbox_active) { 1948 mboxq = phba->sli.mbox_active; 1949 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 1950 __lpfc_mbox_cmpl_put(phba, mboxq); 1951 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1952 phba->sli.mbox_active = NULL; 1953 } 1954 spin_unlock_irq(&phba->hbalock); 1955 } 1956 1957 lpfc_offline_prep(phba, mbx_action); 1958 lpfc_sli_flush_io_rings(phba); 1959 lpfc_offline(phba); 1960 /* release interrupt for possible resource change */ 1961 lpfc_sli4_disable_intr(phba); 1962 rc = lpfc_sli_brdrestart(phba); 1963 if (rc) { 1964 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1965 "6309 Failed to restart board\n"); 1966 return rc; 1967 } 1968 /* request and enable interrupt */ 1969 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1970 if (intr_mode == LPFC_INTR_ERROR) { 1971 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1972 "3175 Failed to enable interrupt\n"); 1973 return -EIO; 1974 } 1975 phba->intr_mode = intr_mode; 1976 rc = lpfc_online(phba); 1977 if (rc == 0) 1978 lpfc_unblock_mgmt_io(phba); 1979 1980 return rc; 1981 } 1982 1983 /** 1984 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1985 * @phba: pointer to lpfc hba data structure. 1986 * 1987 * This routine is invoked to handle the SLI4 HBA hardware error attention 1988 * conditions. 
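* 
* Handling is keyed off the SLI interface type: for if_type 0 the
* routine checks the port semaphore register for a recoverable UE and,
* once the port reports ready, attempts recovery through
* lpfc_sli4_port_sta_fn_reset(); for if_type 2 and 6 it decodes the
* port status and error registers (over-temperature, firmware update,
* forced dump, provisioning) and likewise attempts a PCI function
* reset, falling back to lpfc_sli4_offline_eratt() when recovery fails.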
1989 **/ 1990 static void 1991 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1992 { 1993 struct lpfc_vport *vport = phba->pport; 1994 uint32_t event_data; 1995 struct Scsi_Host *shost; 1996 uint32_t if_type; 1997 struct lpfc_register portstat_reg = {0}; 1998 uint32_t reg_err1, reg_err2; 1999 uint32_t uerrlo_reg, uemasklo_reg; 2000 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 2001 bool en_rn_msg = true; 2002 struct temp_event temp_event_data; 2003 struct lpfc_register portsmphr_reg; 2004 int rc, i; 2005 2006 /* If the pci channel is offline, ignore possible errors, since 2007 * we cannot communicate with the pci card anyway. 2008 */ 2009 if (pci_channel_offline(phba->pcidev)) { 2010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2011 "3166 pci channel is offline\n"); 2012 lpfc_sli_flush_io_rings(phba); 2013 return; 2014 } 2015 2016 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 2017 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 2018 switch (if_type) { 2019 case LPFC_SLI_INTF_IF_TYPE_0: 2020 pci_rd_rc1 = lpfc_readl( 2021 phba->sli4_hba.u.if_type0.UERRLOregaddr, 2022 &uerrlo_reg); 2023 pci_rd_rc2 = lpfc_readl( 2024 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 2025 &uemasklo_reg); 2026 /* consider PCI bus read error as pci_channel_offline */ 2027 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 2028 return; 2029 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 2030 lpfc_sli4_offline_eratt(phba); 2031 return; 2032 } 2033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2034 "7623 Checking UE recoverable"); 2035 2036 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 2037 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2038 &portsmphr_reg.word0)) 2039 continue; 2040 2041 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 2042 &portsmphr_reg); 2043 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2044 LPFC_PORT_SEM_UE_RECOVERABLE) 2045 break; 2046 /*Sleep for 1Sec, before checking SEMAPHORE */ 2047 msleep(1000); 2048 } 2049 2050 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2051 "4827 smphr_port_status x%x : Waited %dSec", 2052 smphr_port_status, i); 2053 2054 /* Recoverable UE, reset the HBA device */ 2055 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 2056 LPFC_PORT_SEM_UE_RECOVERABLE) { 2057 for (i = 0; i < 20; i++) { 2058 msleep(1000); 2059 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 2060 &portsmphr_reg.word0) && 2061 (LPFC_POST_STAGE_PORT_READY == 2062 bf_get(lpfc_port_smphr_port_status, 2063 &portsmphr_reg))) { 2064 rc = lpfc_sli4_port_sta_fn_reset(phba, 2065 LPFC_MBX_NO_WAIT, en_rn_msg); 2066 if (rc == 0) 2067 return; 2068 lpfc_printf_log(phba, KERN_ERR, 2069 LOG_TRACE_EVENT, 2070 "4215 Failed to recover UE"); 2071 break; 2072 } 2073 } 2074 } 2075 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2076 "7624 Firmware not ready: Failing UE recovery," 2077 " waited %dSec", i); 2078 phba->link_state = LPFC_HBA_ERROR; 2079 break; 2080 2081 case LPFC_SLI_INTF_IF_TYPE_2: 2082 case LPFC_SLI_INTF_IF_TYPE_6: 2083 pci_rd_rc1 = lpfc_readl( 2084 phba->sli4_hba.u.if_type2.STATUSregaddr, 2085 &portstat_reg.word0); 2086 /* consider PCI bus read error as pci_channel_offline */ 2087 if (pci_rd_rc1 == -EIO) { 2088 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2089 "3151 PCI bus read access failure: x%x\n", 2090 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 2091 lpfc_sli4_offline_eratt(phba); 2092 return; 2093 } 2094 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 2095 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 2096 if 
(bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 2097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2098 "2889 Port Overtemperature event, " 2099 "taking port offline Data: x%x x%x\n", 2100 reg_err1, reg_err2); 2101 2102 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 2103 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 2104 temp_event_data.event_code = LPFC_CRIT_TEMP; 2105 temp_event_data.data = 0xFFFFFFFF; 2106 2107 shost = lpfc_shost_from_vport(phba->pport); 2108 fc_host_post_vendor_event(shost, fc_get_event_number(), 2109 sizeof(temp_event_data), 2110 (char *)&temp_event_data, 2111 SCSI_NL_VID_TYPE_PCI 2112 | PCI_VENDOR_ID_EMULEX); 2113 2114 spin_lock_irq(&phba->hbalock); 2115 phba->over_temp_state = HBA_OVER_TEMP; 2116 spin_unlock_irq(&phba->hbalock); 2117 lpfc_sli4_offline_eratt(phba); 2118 return; 2119 } 2120 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2121 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 2122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2123 "3143 Port Down: Firmware Update " 2124 "Detected\n"); 2125 en_rn_msg = false; 2126 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2127 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2128 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2129 "3144 Port Down: Debug Dump\n"); 2130 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2131 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 2132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2133 "3145 Port Down: Provisioning\n"); 2134 2135 /* If resets are disabled then leave the HBA alone and return */ 2136 if (!phba->cfg_enable_hba_reset) 2137 return; 2138 2139 /* Check port status register for function reset */ 2140 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 2141 en_rn_msg); 2142 if (rc == 0) { 2143 /* don't report event on forced debug dump */ 2144 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 2145 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 2146 return; 2147 else 2148 break; 2149 } 2150 /* fall through for not able to recover */ 2151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2152 "3152 Unrecoverable error\n"); 2153 lpfc_sli4_offline_eratt(phba); 2154 break; 2155 case LPFC_SLI_INTF_IF_TYPE_1: 2156 default: 2157 break; 2158 } 2159 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2160 "3123 Report dump event to upper layer\n"); 2161 /* Send an internal error event to mgmt application */ 2162 lpfc_board_errevt_to_mgmt(phba); 2163 2164 event_data = FC_REG_DUMP_EVENT; 2165 shost = lpfc_shost_from_vport(vport); 2166 fc_host_post_vendor_event(shost, fc_get_event_number(), 2167 sizeof(event_data), (char *) &event_data, 2168 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2169 } 2170 2171 /** 2172 * lpfc_handle_eratt - Wrapper func for handling hba error attention 2173 * @phba: pointer to lpfc HBA data structure. 2174 * 2175 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 2176 * routine from the API jump table function pointer from the lpfc_hba struct. 2177 * 2178 * Return codes 2179 * 0 - success. 2180 * Any other value - error. 2181 **/ 2182 void 2183 lpfc_handle_eratt(struct lpfc_hba *phba) 2184 { 2185 (*phba->lpfc_handle_eratt)(phba); 2186 } 2187 2188 /** 2189 * lpfc_handle_latt - The HBA link event handler 2190 * @phba: pointer to lpfc hba data structure. 2191 * 2192 * This routine is invoked from the worker thread to handle a HBA host 2193 * attention link event. SLI3 only. 
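* 
* The handler allocates a mailbox, issues READ_TOPOLOGY with
* lpfc_mbx_cmpl_read_topology() as the completion handler, blocks ELS
* IOCBs while the command is outstanding, and clears the link
* attention bit in the HA register. On failure it re-enables link
* attention interrupts, drops the link, and logs message 0300 with a
* code identifying the failing step.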
2194 **/ 2195 void 2196 lpfc_handle_latt(struct lpfc_hba *phba) 2197 { 2198 struct lpfc_vport *vport = phba->pport; 2199 struct lpfc_sli *psli = &phba->sli; 2200 LPFC_MBOXQ_t *pmb; 2201 volatile uint32_t control; 2202 int rc = 0; 2203 2204 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2205 if (!pmb) { 2206 rc = 1; 2207 goto lpfc_handle_latt_err_exit; 2208 } 2209 2210 rc = lpfc_mbox_rsrc_prep(phba, pmb); 2211 if (rc) { 2212 rc = 2; 2213 mempool_free(pmb, phba->mbox_mem_pool); 2214 goto lpfc_handle_latt_err_exit; 2215 } 2216 2217 /* Cleanup any outstanding ELS commands */ 2218 lpfc_els_flush_all_cmd(phba); 2219 psli->slistat.link_event++; 2220 lpfc_read_topology(phba, pmb, pmb->ctx_buf); 2221 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 2222 pmb->vport = vport; 2223 /* Block ELS IOCBs until we have processed this mbox command */ 2224 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 2225 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 2226 if (rc == MBX_NOT_FINISHED) { 2227 rc = 4; 2228 goto lpfc_handle_latt_free_mbuf; 2229 } 2230 2231 /* Clear Link Attention in HA REG */ 2232 spin_lock_irq(&phba->hbalock); 2233 writel(HA_LATT, phba->HAregaddr); 2234 readl(phba->HAregaddr); /* flush */ 2235 spin_unlock_irq(&phba->hbalock); 2236 2237 return; 2238 2239 lpfc_handle_latt_free_mbuf: 2240 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 2241 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 2242 lpfc_handle_latt_err_exit: 2243 /* Enable Link attention interrupts */ 2244 spin_lock_irq(&phba->hbalock); 2245 psli->sli_flag |= LPFC_PROCESS_LA; 2246 control = readl(phba->HCregaddr); 2247 control |= HC_LAINT_ENA; 2248 writel(control, phba->HCregaddr); 2249 readl(phba->HCregaddr); /* flush */ 2250 2251 /* Clear Link Attention in HA REG */ 2252 writel(HA_LATT, phba->HAregaddr); 2253 readl(phba->HAregaddr); /* flush */ 2254 spin_unlock_irq(&phba->hbalock); 2255 lpfc_linkdown(phba); 2256 phba->link_state = LPFC_HBA_ERROR; 2257 2258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2259 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 2260 2261 return; 2262 } 2263 2264 static void 2265 lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex) 2266 { 2267 int i, j; 2268 2269 while (length > 0) { 2270 /* Look for Serial Number */ 2271 if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) { 2272 *pindex += 2; 2273 i = vpd[*pindex]; 2274 *pindex += 1; 2275 j = 0; 2276 length -= (3+i); 2277 while (i--) { 2278 phba->SerialNumber[j++] = vpd[(*pindex)++]; 2279 if (j == 31) 2280 break; 2281 } 2282 phba->SerialNumber[j] = 0; 2283 continue; 2284 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) { 2285 phba->vpd_flag |= VPD_MODEL_DESC; 2286 *pindex += 2; 2287 i = vpd[*pindex]; 2288 *pindex += 1; 2289 j = 0; 2290 length -= (3+i); 2291 while (i--) { 2292 phba->ModelDesc[j++] = vpd[(*pindex)++]; 2293 if (j == 255) 2294 break; 2295 } 2296 phba->ModelDesc[j] = 0; 2297 continue; 2298 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) { 2299 phba->vpd_flag |= VPD_MODEL_NAME; 2300 *pindex += 2; 2301 i = vpd[*pindex]; 2302 *pindex += 1; 2303 j = 0; 2304 length -= (3+i); 2305 while (i--) { 2306 phba->ModelName[j++] = vpd[(*pindex)++]; 2307 if (j == 79) 2308 break; 2309 } 2310 phba->ModelName[j] = 0; 2311 continue; 2312 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) { 2313 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2314 *pindex += 2; 2315 i = vpd[*pindex]; 2316 *pindex += 1; 2317 j = 0; 2318 length -= (3+i); 2319 
while (i--) { 2320 phba->ProgramType[j++] = vpd[(*pindex)++]; 2321 if (j == 255) 2322 break; 2323 } 2324 phba->ProgramType[j] = 0; 2325 continue; 2326 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) { 2327 phba->vpd_flag |= VPD_PORT; 2328 *pindex += 2; 2329 i = vpd[*pindex]; 2330 *pindex += 1; 2331 j = 0; 2332 length -= (3 + i); 2333 while (i--) { 2334 if ((phba->sli_rev == LPFC_SLI_REV4) && 2335 (phba->sli4_hba.pport_name_sta == 2336 LPFC_SLI4_PPNAME_GET)) { 2337 j++; 2338 (*pindex)++; 2339 } else 2340 phba->Port[j++] = vpd[(*pindex)++]; 2341 if (j == 19) 2342 break; 2343 } 2344 if ((phba->sli_rev != LPFC_SLI_REV4) || 2345 (phba->sli4_hba.pport_name_sta == 2346 LPFC_SLI4_PPNAME_NON)) 2347 phba->Port[j] = 0; 2348 continue; 2349 } else { 2350 *pindex += 2; 2351 i = vpd[*pindex]; 2352 *pindex += 1; 2353 *pindex += i; 2354 length -= (3 + i); 2355 } 2356 } 2357 } 2358 2359 /** 2360 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 2361 * @phba: pointer to lpfc hba data structure. 2362 * @vpd: pointer to the vital product data. 2363 * @len: length of the vital product data in bytes. 2364 * 2365 * This routine parses the Vital Product Data (VPD). The VPD is treated as 2366 * an array of characters. In this routine, the ModelName, ProgramType, and 2367 * ModelDesc, etc. fields of the phba data structure will be populated. 2368 * 2369 * Return codes 2370 * 0 - pointer to the VPD passed in is NULL 2371 * 1 - success 2372 **/ 2373 int 2374 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 2375 { 2376 uint8_t lenlo, lenhi; 2377 int Length; 2378 int i; 2379 int finished = 0; 2380 int index = 0; 2381 2382 if (!vpd) 2383 return 0; 2384 2385 /* Vital Product */ 2386 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2387 "0455 Vital Product Data: x%x x%x x%x x%x\n", 2388 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 2389 (uint32_t) vpd[3]); 2390 while (!finished && (index < (len - 4))) { 2391 switch (vpd[index]) { 2392 case 0x82: 2393 case 0x91: 2394 index += 1; 2395 lenlo = vpd[index]; 2396 index += 1; 2397 lenhi = vpd[index]; 2398 index += 1; 2399 i = ((((unsigned short)lenhi) << 8) + lenlo); 2400 index += i; 2401 break; 2402 case 0x90: 2403 index += 1; 2404 lenlo = vpd[index]; 2405 index += 1; 2406 lenhi = vpd[index]; 2407 index += 1; 2408 Length = ((((unsigned short)lenhi) << 8) + lenlo); 2409 if (Length > len - index) 2410 Length = len - index; 2411 2412 lpfc_fill_vpd(phba, vpd, Length, &index); 2413 finished = 0; 2414 break; 2415 case 0x78: 2416 finished = 1; 2417 break; 2418 default: 2419 index ++; 2420 break; 2421 } 2422 } 2423 2424 return(1); 2425 } 2426 2427 /** 2428 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description 2429 * @phba: pointer to lpfc hba data structure. 2430 * @mdp: pointer to the data structure to hold the derived model name. 2431 * @descp: pointer to the data structure to hold the derived description. 2432 * 2433 * This routine retrieves HBA's description based on its registered PCI device 2434 * ID. The @descp passed into this function points to an array of 256 chars. It 2435 * shall be returned with the model name, maximum speed, and the host bus type. 2436 * The @mdp passed into this function points to an array of 80 chars. When the 2437 * function returns, the @mdp will be filled with the model name. 
2438 **/ 2439 static void 2440 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2441 { 2442 uint16_t sub_dev_id = phba->pcidev->subsystem_device; 2443 char *model = "<Unknown>"; 2444 int tbolt = 0; 2445 2446 switch (sub_dev_id) { 2447 case PCI_DEVICE_ID_CLRY_161E: 2448 model = "161E"; 2449 break; 2450 case PCI_DEVICE_ID_CLRY_162E: 2451 model = "162E"; 2452 break; 2453 case PCI_DEVICE_ID_CLRY_164E: 2454 model = "164E"; 2455 break; 2456 case PCI_DEVICE_ID_CLRY_161P: 2457 model = "161P"; 2458 break; 2459 case PCI_DEVICE_ID_CLRY_162P: 2460 model = "162P"; 2461 break; 2462 case PCI_DEVICE_ID_CLRY_164P: 2463 model = "164P"; 2464 break; 2465 case PCI_DEVICE_ID_CLRY_321E: 2466 model = "321E"; 2467 break; 2468 case PCI_DEVICE_ID_CLRY_322E: 2469 model = "322E"; 2470 break; 2471 case PCI_DEVICE_ID_CLRY_324E: 2472 model = "324E"; 2473 break; 2474 case PCI_DEVICE_ID_CLRY_321P: 2475 model = "321P"; 2476 break; 2477 case PCI_DEVICE_ID_CLRY_322P: 2478 model = "322P"; 2479 break; 2480 case PCI_DEVICE_ID_CLRY_324P: 2481 model = "324P"; 2482 break; 2483 case PCI_DEVICE_ID_TLFC_2XX2: 2484 model = "2XX2"; 2485 tbolt = 1; 2486 break; 2487 case PCI_DEVICE_ID_TLFC_3162: 2488 model = "3162"; 2489 tbolt = 1; 2490 break; 2491 case PCI_DEVICE_ID_TLFC_3322: 2492 model = "3322"; 2493 tbolt = 1; 2494 break; 2495 default: 2496 model = "Unknown"; 2497 break; 2498 } 2499 2500 if (mdp && mdp[0] == '\0') 2501 snprintf(mdp, 79, "%s", model); 2502 2503 if (descp && descp[0] == '\0') 2504 snprintf(descp, 255, 2505 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s", 2506 (tbolt) ? "ThunderLink FC " : "Celerity FC-", 2507 model, 2508 phba->Port); 2509 } 2510 2511 /** 2512 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2513 * @phba: pointer to lpfc hba data structure. 2514 * @mdp: pointer to the data structure to hold the derived model name. 2515 * @descp: pointer to the data structure to hold the derived description. 2516 * 2517 * This routine retrieves HBA's description based on its registered PCI device 2518 * ID. The @descp passed into this function points to an array of 256 chars. It 2519 * shall be returned with the model name, maximum speed, and the host bus type. 2520 * The @mdp passed into this function points to an array of 80 chars. When the 2521 * function returns, the @mdp will be filled with the model name. 
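* 
* If both @mdp and @descp are already populated, the routine returns
* without modifying them. ATTO-branded adapters are handled by
* lpfc_get_atto_model_desc(); for OneConnect (FCoE) adapters the
* description is formatted with the port number instead of the bus
* type and speed.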
2522 **/ 2523 static void 2524 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2525 { 2526 lpfc_vpd_t *vp; 2527 uint16_t dev_id = phba->pcidev->device; 2528 int max_speed; 2529 int GE = 0; 2530 int oneConnect = 0; /* default is not a oneConnect */ 2531 struct { 2532 char *name; 2533 char *bus; 2534 char *function; 2535 } m = {"<Unknown>", "", ""}; 2536 2537 if (mdp && mdp[0] != '\0' 2538 && descp && descp[0] != '\0') 2539 return; 2540 2541 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) { 2542 lpfc_get_atto_model_desc(phba, mdp, descp); 2543 return; 2544 } 2545 2546 if (phba->lmt & LMT_64Gb) 2547 max_speed = 64; 2548 else if (phba->lmt & LMT_32Gb) 2549 max_speed = 32; 2550 else if (phba->lmt & LMT_16Gb) 2551 max_speed = 16; 2552 else if (phba->lmt & LMT_10Gb) 2553 max_speed = 10; 2554 else if (phba->lmt & LMT_8Gb) 2555 max_speed = 8; 2556 else if (phba->lmt & LMT_4Gb) 2557 max_speed = 4; 2558 else if (phba->lmt & LMT_2Gb) 2559 max_speed = 2; 2560 else if (phba->lmt & LMT_1Gb) 2561 max_speed = 1; 2562 else 2563 max_speed = 0; 2564 2565 vp = &phba->vpd; 2566 2567 switch (dev_id) { 2568 case PCI_DEVICE_ID_FIREFLY: 2569 m = (typeof(m)){"LP6000", "PCI", 2570 "Obsolete, Unsupported Fibre Channel Adapter"}; 2571 break; 2572 case PCI_DEVICE_ID_SUPERFLY: 2573 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2574 m = (typeof(m)){"LP7000", "PCI", ""}; 2575 else 2576 m = (typeof(m)){"LP7000E", "PCI", ""}; 2577 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2578 break; 2579 case PCI_DEVICE_ID_DRAGONFLY: 2580 m = (typeof(m)){"LP8000", "PCI", 2581 "Obsolete, Unsupported Fibre Channel Adapter"}; 2582 break; 2583 case PCI_DEVICE_ID_CENTAUR: 2584 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2585 m = (typeof(m)){"LP9002", "PCI", ""}; 2586 else 2587 m = (typeof(m)){"LP9000", "PCI", ""}; 2588 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2589 break; 2590 case PCI_DEVICE_ID_RFLY: 2591 m = (typeof(m)){"LP952", "PCI", 2592 "Obsolete, Unsupported Fibre Channel Adapter"}; 2593 break; 2594 case PCI_DEVICE_ID_PEGASUS: 2595 m = (typeof(m)){"LP9802", "PCI-X", 2596 "Obsolete, Unsupported Fibre Channel Adapter"}; 2597 break; 2598 case PCI_DEVICE_ID_THOR: 2599 m = (typeof(m)){"LP10000", "PCI-X", 2600 "Obsolete, Unsupported Fibre Channel Adapter"}; 2601 break; 2602 case PCI_DEVICE_ID_VIPER: 2603 m = (typeof(m)){"LPX1000", "PCI-X", 2604 "Obsolete, Unsupported Fibre Channel Adapter"}; 2605 break; 2606 case PCI_DEVICE_ID_PFLY: 2607 m = (typeof(m)){"LP982", "PCI-X", 2608 "Obsolete, Unsupported Fibre Channel Adapter"}; 2609 break; 2610 case PCI_DEVICE_ID_TFLY: 2611 m = (typeof(m)){"LP1050", "PCI-X", 2612 "Obsolete, Unsupported Fibre Channel Adapter"}; 2613 break; 2614 case PCI_DEVICE_ID_HELIOS: 2615 m = (typeof(m)){"LP11000", "PCI-X2", 2616 "Obsolete, Unsupported Fibre Channel Adapter"}; 2617 break; 2618 case PCI_DEVICE_ID_HELIOS_SCSP: 2619 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2620 "Obsolete, Unsupported Fibre Channel Adapter"}; 2621 break; 2622 case PCI_DEVICE_ID_HELIOS_DCSP: 2623 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2624 "Obsolete, Unsupported Fibre Channel Adapter"}; 2625 break; 2626 case PCI_DEVICE_ID_NEPTUNE: 2627 m = (typeof(m)){"LPe1000", "PCIe", 2628 "Obsolete, Unsupported Fibre Channel Adapter"}; 2629 break; 2630 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2631 m = (typeof(m)){"LPe1000-SP", "PCIe", 2632 "Obsolete, Unsupported Fibre Channel Adapter"}; 2633 break; 2634 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2635 m = (typeof(m)){"LPe1002-SP", "PCIe", 2636 "Obsolete, 
Unsupported Fibre Channel Adapter"}; 2637 break; 2638 case PCI_DEVICE_ID_BMID: 2639 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2640 break; 2641 case PCI_DEVICE_ID_BSMB: 2642 m = (typeof(m)){"LP111", "PCI-X2", 2643 "Obsolete, Unsupported Fibre Channel Adapter"}; 2644 break; 2645 case PCI_DEVICE_ID_ZEPHYR: 2646 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2647 break; 2648 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2649 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2650 break; 2651 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2652 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2653 GE = 1; 2654 break; 2655 case PCI_DEVICE_ID_ZMID: 2656 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2657 break; 2658 case PCI_DEVICE_ID_ZSMB: 2659 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2660 break; 2661 case PCI_DEVICE_ID_LP101: 2662 m = (typeof(m)){"LP101", "PCI-X", 2663 "Obsolete, Unsupported Fibre Channel Adapter"}; 2664 break; 2665 case PCI_DEVICE_ID_LP10000S: 2666 m = (typeof(m)){"LP10000-S", "PCI", 2667 "Obsolete, Unsupported Fibre Channel Adapter"}; 2668 break; 2669 case PCI_DEVICE_ID_LP11000S: 2670 m = (typeof(m)){"LP11000-S", "PCI-X2", 2671 "Obsolete, Unsupported Fibre Channel Adapter"}; 2672 break; 2673 case PCI_DEVICE_ID_LPE11000S: 2674 m = (typeof(m)){"LPe11000-S", "PCIe", 2675 "Obsolete, Unsupported Fibre Channel Adapter"}; 2676 break; 2677 case PCI_DEVICE_ID_SAT: 2678 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2679 break; 2680 case PCI_DEVICE_ID_SAT_MID: 2681 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2682 break; 2683 case PCI_DEVICE_ID_SAT_SMB: 2684 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2685 break; 2686 case PCI_DEVICE_ID_SAT_DCSP: 2687 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2688 break; 2689 case PCI_DEVICE_ID_SAT_SCSP: 2690 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2691 break; 2692 case PCI_DEVICE_ID_SAT_S: 2693 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2694 break; 2695 case PCI_DEVICE_ID_PROTEUS_VF: 2696 m = (typeof(m)){"LPev12000", "PCIe IOV", 2697 "Obsolete, Unsupported Fibre Channel Adapter"}; 2698 break; 2699 case PCI_DEVICE_ID_PROTEUS_PF: 2700 m = (typeof(m)){"LPev12000", "PCIe IOV", 2701 "Obsolete, Unsupported Fibre Channel Adapter"}; 2702 break; 2703 case PCI_DEVICE_ID_PROTEUS_S: 2704 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2705 "Obsolete, Unsupported Fibre Channel Adapter"}; 2706 break; 2707 case PCI_DEVICE_ID_TIGERSHARK: 2708 oneConnect = 1; 2709 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2710 break; 2711 case PCI_DEVICE_ID_TOMCAT: 2712 oneConnect = 1; 2713 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2714 break; 2715 case PCI_DEVICE_ID_FALCON: 2716 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2717 "EmulexSecure Fibre"}; 2718 break; 2719 case PCI_DEVICE_ID_BALIUS: 2720 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2721 "Obsolete, Unsupported Fibre Channel Adapter"}; 2722 break; 2723 case PCI_DEVICE_ID_LANCER_FC: 2724 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2725 break; 2726 case PCI_DEVICE_ID_LANCER_FC_VF: 2727 m = (typeof(m)){"LPe16000", "PCIe", 2728 "Obsolete, Unsupported Fibre Channel Adapter"}; 2729 break; 2730 case PCI_DEVICE_ID_LANCER_FCOE: 2731 oneConnect = 1; 2732 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2733 break; 2734 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2735 oneConnect = 1; 2736 m = (typeof(m)){"OCe15100", "PCIe", 2737 "Obsolete, Unsupported FCoE"}; 2738 
break; 2739 case PCI_DEVICE_ID_LANCER_G6_FC: 2740 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2741 break; 2742 case PCI_DEVICE_ID_LANCER_G7_FC: 2743 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; 2744 break; 2745 case PCI_DEVICE_ID_LANCER_G7P_FC: 2746 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; 2747 break; 2748 case PCI_DEVICE_ID_SKYHAWK: 2749 case PCI_DEVICE_ID_SKYHAWK_VF: 2750 oneConnect = 1; 2751 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2752 break; 2753 default: 2754 m = (typeof(m)){"Unknown", "", ""}; 2755 break; 2756 } 2757 2758 if (mdp && mdp[0] == '\0') 2759 snprintf(mdp, 79,"%s", m.name); 2760 /* 2761 * oneConnect hba requires special processing, they are all initiators 2762 * and we put the port number on the end 2763 */ 2764 if (descp && descp[0] == '\0') { 2765 if (oneConnect) 2766 snprintf(descp, 255, 2767 "Emulex OneConnect %s, %s Initiator %s", 2768 m.name, m.function, 2769 phba->Port); 2770 else if (max_speed == 0) 2771 snprintf(descp, 255, 2772 "Emulex %s %s %s", 2773 m.name, m.bus, m.function); 2774 else 2775 snprintf(descp, 255, 2776 "Emulex %s %d%s %s %s", 2777 m.name, max_speed, (GE) ? "GE" : "Gb", 2778 m.bus, m.function); 2779 } 2780 } 2781 2782 /** 2783 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2784 * @phba: pointer to lpfc hba data structure. 2785 * @pring: pointer to a IOCB ring. 2786 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2787 * 2788 * This routine posts a given number of IOCBs with the associated DMA buffer 2789 * descriptors specified by the cnt argument to the given IOCB ring. 2790 * 2791 * Return codes 2792 * The number of IOCBs NOT able to be posted to the IOCB ring. 2793 **/ 2794 int 2795 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2796 { 2797 IOCB_t *icmd; 2798 struct lpfc_iocbq *iocb; 2799 struct lpfc_dmabuf *mp1, *mp2; 2800 2801 cnt += pring->missbufcnt; 2802 2803 /* While there are buffers to post */ 2804 while (cnt > 0) { 2805 /* Allocate buffer for command iocb */ 2806 iocb = lpfc_sli_get_iocbq(phba); 2807 if (iocb == NULL) { 2808 pring->missbufcnt = cnt; 2809 return cnt; 2810 } 2811 icmd = &iocb->iocb; 2812 2813 /* 2 buffers can be posted per command */ 2814 /* Allocate buffer to post */ 2815 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2816 if (mp1) 2817 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2818 if (!mp1 || !mp1->virt) { 2819 kfree(mp1); 2820 lpfc_sli_release_iocbq(phba, iocb); 2821 pring->missbufcnt = cnt; 2822 return cnt; 2823 } 2824 2825 INIT_LIST_HEAD(&mp1->list); 2826 /* Allocate buffer to post */ 2827 if (cnt > 1) { 2828 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2829 if (mp2) 2830 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2831 &mp2->phys); 2832 if (!mp2 || !mp2->virt) { 2833 kfree(mp2); 2834 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2835 kfree(mp1); 2836 lpfc_sli_release_iocbq(phba, iocb); 2837 pring->missbufcnt = cnt; 2838 return cnt; 2839 } 2840 2841 INIT_LIST_HEAD(&mp2->list); 2842 } else { 2843 mp2 = NULL; 2844 } 2845 2846 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2847 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2848 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2849 icmd->ulpBdeCount = 1; 2850 cnt--; 2851 if (mp2) { 2852 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2853 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2854 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2855 cnt--; 2856 icmd->ulpBdeCount = 
2;
2857 }
2858
2859 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2860 icmd->ulpLe = 1;
2861
2862 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2863 IOCB_ERROR) {
2864 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2865 kfree(mp1);
2866 cnt++;
2867 if (mp2) {
2868 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2869 kfree(mp2);
2870 cnt++;
2871 }
2872 lpfc_sli_release_iocbq(phba, iocb);
2873 pring->missbufcnt = cnt;
2874 return cnt;
2875 }
2876 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2877 if (mp2)
2878 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2879 }
2880 pring->missbufcnt = 0;
2881 return 0;
2882 }
2883
2884 /**
2885 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2886 * @phba: pointer to lpfc hba data structure.
2887 *
2888 * This routine posts initial receive IOCB buffers to the ELS ring. The
2889 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2890 * set to 64 IOCBs. SLI3 only.
2891 *
2892 * Return codes
2893 * 0 - success (currently always success)
2894 **/
2895 static int
2896 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2897 {
2898 struct lpfc_sli *psli = &phba->sli;
2899
2900 /* Ring 0, ELS / CT buffers */
2901 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2902 /* Ring 2 - FCP no buffers needed */
2903
2904 return 0;
2905 }
2906
2907 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2908
2909 /**
2910 * lpfc_sha_init - Set up initial array of hash table entries
2911 * @HashResultPointer: pointer to an array as hash table.
2912 *
2913 * This routine sets up the initial values in the array of hash table entries
2914 * for the LC HBAs.
2915 **/
2916 static void
2917 lpfc_sha_init(uint32_t * HashResultPointer)
2918 {
2919 HashResultPointer[0] = 0x67452301;
2920 HashResultPointer[1] = 0xEFCDAB89;
2921 HashResultPointer[2] = 0x98BADCFE;
2922 HashResultPointer[3] = 0x10325476;
2923 HashResultPointer[4] = 0xC3D2E1F0;
2924 }
2925
2926 /**
2927 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2928 * @HashResultPointer: pointer to an initial/result hash table.
2929 * @HashWorkingPointer: pointer to a working hash table.
2930 *
2931 * This routine iterates an initial hash table pointed to by @HashResultPointer
2932 * with the values from the working hash table pointed to by @HashWorkingPointer.
2933 * The results are put back into the initial hash table, returned through
2934 * @HashResultPointer as the result hash table.
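* 
* This implements the SHA-1 compression rounds: the first loop expands
* the 16-word working block to 80 words, and the second applies the 80
* rounds using the standard constants 0x5A827999, 0x6ED9EBA1,
* 0x8F1BBCDC and 0xCA62C1D6, adding the result back into the five
* state words.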
2935 **/ 2936 static void 2937 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2938 { 2939 int t; 2940 uint32_t TEMP; 2941 uint32_t A, B, C, D, E; 2942 t = 16; 2943 do { 2944 HashWorkingPointer[t] = 2945 S(1, 2946 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2947 8] ^ 2948 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2949 } while (++t <= 79); 2950 t = 0; 2951 A = HashResultPointer[0]; 2952 B = HashResultPointer[1]; 2953 C = HashResultPointer[2]; 2954 D = HashResultPointer[3]; 2955 E = HashResultPointer[4]; 2956 2957 do { 2958 if (t < 20) { 2959 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2960 } else if (t < 40) { 2961 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2962 } else if (t < 60) { 2963 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2964 } else { 2965 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2966 } 2967 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2968 E = D; 2969 D = C; 2970 C = S(30, B); 2971 B = A; 2972 A = TEMP; 2973 } while (++t <= 79); 2974 2975 HashResultPointer[0] += A; 2976 HashResultPointer[1] += B; 2977 HashResultPointer[2] += C; 2978 HashResultPointer[3] += D; 2979 HashResultPointer[4] += E; 2980 2981 } 2982 2983 /** 2984 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2985 * @RandomChallenge: pointer to the entry of host challenge random number array. 2986 * @HashWorking: pointer to the entry of the working hash array. 2987 * 2988 * This routine calculates the working hash array referred by @HashWorking 2989 * from the challenge random numbers associated with the host, referred by 2990 * @RandomChallenge. The result is put into the entry of the working hash 2991 * array and returned by reference through @HashWorking. 2992 **/ 2993 static void 2994 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2995 { 2996 *HashWorking = (*RandomChallenge ^ *HashWorking); 2997 } 2998 2999 /** 3000 * lpfc_hba_init - Perform special handling for LC HBA initialization 3001 * @phba: pointer to lpfc hba data structure. 3002 * @hbainit: pointer to an array of unsigned 32-bit integers. 3003 * 3004 * This routine performs the special handling for LC HBA initialization. 3005 **/ 3006 void 3007 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 3008 { 3009 int t; 3010 uint32_t *HashWorking; 3011 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 3012 3013 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 3014 if (!HashWorking) 3015 return; 3016 3017 HashWorking[0] = HashWorking[78] = *pwwnn++; 3018 HashWorking[1] = HashWorking[79] = *pwwnn; 3019 3020 for (t = 0; t < 7; t++) 3021 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 3022 3023 lpfc_sha_init(hbainit); 3024 lpfc_sha_iterate(hbainit, HashWorking); 3025 kfree(HashWorking); 3026 } 3027 3028 /** 3029 * lpfc_cleanup - Performs vport cleanups before deleting a vport 3030 * @vport: pointer to a virtual N_Port data structure. 3031 * 3032 * This routine performs the necessary cleanups before deleting the @vport. 3033 * It invokes the discovery state machine to perform necessary state 3034 * transitions and to release the ndlps associated with the @vport. Note, 3035 * the physical port is treated as @vport 0. 
3036 **/ 3037 void 3038 lpfc_cleanup(struct lpfc_vport *vport) 3039 { 3040 struct lpfc_hba *phba = vport->phba; 3041 struct lpfc_nodelist *ndlp, *next_ndlp; 3042 int i = 0; 3043 3044 if (phba->link_state > LPFC_LINK_DOWN) 3045 lpfc_port_link_failure(vport); 3046 3047 /* Clean up VMID resources */ 3048 if (lpfc_is_vmid_enabled(phba)) 3049 lpfc_vmid_vport_cleanup(vport); 3050 3051 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 3052 if (vport->port_type != LPFC_PHYSICAL_PORT && 3053 ndlp->nlp_DID == Fabric_DID) { 3054 /* Just free up ndlp with Fabric_DID for vports */ 3055 lpfc_nlp_put(ndlp); 3056 continue; 3057 } 3058 3059 if (ndlp->nlp_DID == Fabric_Cntl_DID && 3060 ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 3061 lpfc_nlp_put(ndlp); 3062 continue; 3063 } 3064 3065 /* Fabric Ports not in UNMAPPED state are cleaned up in the 3066 * DEVICE_RM event. 3067 */ 3068 if (ndlp->nlp_type & NLP_FABRIC && 3069 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) 3070 lpfc_disc_state_machine(vport, ndlp, NULL, 3071 NLP_EVT_DEVICE_RECOVERY); 3072 3073 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD))) 3074 lpfc_disc_state_machine(vport, ndlp, NULL, 3075 NLP_EVT_DEVICE_RM); 3076 } 3077 3078 /* This is a special case flush to return all 3079 * IOs before entering this loop. There are 3080 * two points in the code where a flush is 3081 * avoided if the FC_UNLOADING flag is set. 3082 * one is in the multipool destroy, 3083 * (this prevents a crash) and the other is 3084 * in the nvme abort handler, ( also prevents 3085 * a crash). Both of these exceptions are 3086 * cases where the slot is still accessible. 3087 * The flush here is only when the pci slot 3088 * is offline. 3089 */ 3090 if (test_bit(FC_UNLOADING, &vport->load_flag) && 3091 pci_channel_offline(phba->pcidev)) 3092 lpfc_sli_flush_io_rings(vport->phba); 3093 3094 /* At this point, ALL ndlp's should be gone 3095 * because of the previous NLP_EVT_DEVICE_RM. 3096 * Lets wait for this to happen, if needed. 3097 */ 3098 while (!list_empty(&vport->fc_nodes)) { 3099 if (i++ > 3000) { 3100 lpfc_printf_vlog(vport, KERN_ERR, 3101 LOG_TRACE_EVENT, 3102 "0233 Nodelist not empty\n"); 3103 list_for_each_entry_safe(ndlp, next_ndlp, 3104 &vport->fc_nodes, nlp_listp) { 3105 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 3106 LOG_DISCOVERY, 3107 "0282 did:x%x ndlp:x%px " 3108 "refcnt:%d xflags x%x nflag x%x\n", 3109 ndlp->nlp_DID, (void *)ndlp, 3110 kref_read(&ndlp->kref), 3111 ndlp->fc4_xpt_flags, 3112 ndlp->nlp_flag); 3113 } 3114 break; 3115 } 3116 3117 /* Wait for any activity on ndlps to settle */ 3118 msleep(10); 3119 } 3120 lpfc_cleanup_vports_rrqs(vport, NULL); 3121 } 3122 3123 /** 3124 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 3125 * @vport: pointer to a virtual N_Port data structure. 3126 * 3127 * This routine stops all the timers associated with a @vport. This function 3128 * is invoked before disabling or deleting a @vport. Note that the physical 3129 * port is treated as @vport 0. 3130 **/ 3131 void 3132 lpfc_stop_vport_timers(struct lpfc_vport *vport) 3133 { 3134 del_timer_sync(&vport->els_tmofunc); 3135 del_timer_sync(&vport->delayed_disc_tmo); 3136 lpfc_can_disctmo(vport); 3137 return; 3138 } 3139 3140 /** 3141 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3142 * @phba: pointer to lpfc hba data structure. 3143 * 3144 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 3145 * caller of this routine should already hold the host lock. 
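* 
* It clears the FCF_REDISC_PEND flag and deletes the rediscovery wait
* timer. Callers that do not already hold the host lock should use
* lpfc_sli4_stop_fcf_redisc_wait_timer() instead.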
3146 **/ 3147 void 3148 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3149 { 3150 /* Clear pending FCF rediscovery wait flag */ 3151 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3152 3153 /* Now, try to stop the timer */ 3154 del_timer(&phba->fcf.redisc_wait); 3155 } 3156 3157 /** 3158 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 3159 * @phba: pointer to lpfc hba data structure. 3160 * 3161 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 3162 * checks whether the FCF rediscovery wait timer is pending with the host 3163 * lock held before proceeding with disabling the timer and clearing the 3164 * wait timer pendig flag. 3165 **/ 3166 void 3167 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 3168 { 3169 spin_lock_irq(&phba->hbalock); 3170 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3171 /* FCF rediscovery timer already fired or stopped */ 3172 spin_unlock_irq(&phba->hbalock); 3173 return; 3174 } 3175 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3176 /* Clear failover in progress flags */ 3177 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 3178 spin_unlock_irq(&phba->hbalock); 3179 } 3180 3181 /** 3182 * lpfc_cmf_stop - Stop CMF processing 3183 * @phba: pointer to lpfc hba data structure. 3184 * 3185 * This is called when the link goes down or if CMF mode is turned OFF. 3186 * It is also called when going offline or unloaded just before the 3187 * congestion info buffer is unregistered. 3188 **/ 3189 void 3190 lpfc_cmf_stop(struct lpfc_hba *phba) 3191 { 3192 int cpu; 3193 struct lpfc_cgn_stat *cgs; 3194 3195 /* We only do something if CMF is enabled */ 3196 if (!phba->sli4_hba.pc_sli4_params.cmf) 3197 return; 3198 3199 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3200 "6221 Stop CMF / Cancel Timer\n"); 3201 3202 /* Cancel the CMF timer */ 3203 hrtimer_cancel(&phba->cmf_stats_timer); 3204 hrtimer_cancel(&phba->cmf_timer); 3205 3206 /* Zero CMF counters */ 3207 atomic_set(&phba->cmf_busy, 0); 3208 for_each_present_cpu(cpu) { 3209 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3210 atomic64_set(&cgs->total_bytes, 0); 3211 atomic64_set(&cgs->rcv_bytes, 0); 3212 atomic_set(&cgs->rx_io_cnt, 0); 3213 atomic64_set(&cgs->rx_latency, 0); 3214 } 3215 atomic_set(&phba->cmf_bw_wait, 0); 3216 3217 /* Resume any blocked IO - Queue unblock on workqueue */ 3218 queue_work(phba->wq, &phba->unblock_request_work); 3219 } 3220 3221 static inline uint64_t 3222 lpfc_get_max_line_rate(struct lpfc_hba *phba) 3223 { 3224 uint64_t rate = lpfc_sli_port_speed_get(phba); 3225 3226 return ((((unsigned long)rate) * 1024 * 1024) / 10); 3227 } 3228 3229 void 3230 lpfc_cmf_signal_init(struct lpfc_hba *phba) 3231 { 3232 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3233 "6223 Signal CMF init\n"); 3234 3235 /* Use the new fc_linkspeed to recalculate */ 3236 phba->cmf_interval_rate = LPFC_CMF_INTERVAL; 3237 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba); 3238 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * 3239 phba->cmf_interval_rate, 1000); 3240 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count; 3241 3242 /* This is a signal to firmware to sync up CMF BW with link speed */ 3243 lpfc_issue_cmf_sync_wqe(phba, 0, 0); 3244 } 3245 3246 /** 3247 * lpfc_cmf_start - Start CMF processing 3248 * @phba: pointer to lpfc hba data structure. 3249 * 3250 * This is called when the link comes up or if CMF mode is turned OFF 3251 * to Monitor or Managed. 
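* 
* It reinitializes the congestion buffer, zeroes the per-CPU
* congestion statistics, issues a CMF_SYNC_WQE through
* lpfc_cmf_signal_init() (which recomputes cmf_link_byte_count as
* cmf_max_line_rate * LPFC_CMF_INTERVAL / 1000), and then starts the
* cmf_timer and cmf_stats_timer hrtimers.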
3252 **/ 3253 void 3254 lpfc_cmf_start(struct lpfc_hba *phba) 3255 { 3256 struct lpfc_cgn_stat *cgs; 3257 int cpu; 3258 3259 /* We only do something if CMF is enabled */ 3260 if (!phba->sli4_hba.pc_sli4_params.cmf || 3261 phba->cmf_active_mode == LPFC_CFG_OFF) 3262 return; 3263 3264 /* Reinitialize congestion buffer info */ 3265 lpfc_init_congestion_buf(phba); 3266 3267 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 3268 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 3269 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 3270 atomic_set(&phba->cgn_sync_warn_cnt, 0); 3271 3272 atomic_set(&phba->cmf_busy, 0); 3273 for_each_present_cpu(cpu) { 3274 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3275 atomic64_set(&cgs->total_bytes, 0); 3276 atomic64_set(&cgs->rcv_bytes, 0); 3277 atomic_set(&cgs->rx_io_cnt, 0); 3278 atomic64_set(&cgs->rx_latency, 0); 3279 } 3280 phba->cmf_latency.tv_sec = 0; 3281 phba->cmf_latency.tv_nsec = 0; 3282 3283 lpfc_cmf_signal_init(phba); 3284 3285 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 3286 "6222 Start CMF / Timer\n"); 3287 3288 phba->cmf_timer_cnt = 0; 3289 hrtimer_start(&phba->cmf_timer, 3290 ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC), 3291 HRTIMER_MODE_REL); 3292 hrtimer_start(&phba->cmf_stats_timer, 3293 ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC), 3294 HRTIMER_MODE_REL); 3295 /* Setup for latency check in IO cmpl routines */ 3296 ktime_get_real_ts64(&phba->cmf_latency); 3297 3298 atomic_set(&phba->cmf_bw_wait, 0); 3299 atomic_set(&phba->cmf_stop_io, 0); 3300 } 3301 3302 /** 3303 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 3304 * @phba: pointer to lpfc hba data structure. 3305 * 3306 * This routine stops all the timers associated with a HBA. This function is 3307 * invoked before either putting a HBA offline or unloading the driver. 3308 **/ 3309 void 3310 lpfc_stop_hba_timers(struct lpfc_hba *phba) 3311 { 3312 if (phba->pport) 3313 lpfc_stop_vport_timers(phba->pport); 3314 cancel_delayed_work_sync(&phba->eq_delay_work); 3315 cancel_delayed_work_sync(&phba->idle_stat_delay_work); 3316 del_timer_sync(&phba->sli.mbox_tmo); 3317 del_timer_sync(&phba->fabric_block_timer); 3318 del_timer_sync(&phba->eratt_poll); 3319 del_timer_sync(&phba->hb_tmofunc); 3320 if (phba->sli_rev == LPFC_SLI_REV4) { 3321 del_timer_sync(&phba->rrq_tmr); 3322 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 3323 } 3324 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); 3325 3326 switch (phba->pci_dev_grp) { 3327 case LPFC_PCI_DEV_LP: 3328 /* Stop any LightPulse device specific driver timers */ 3329 del_timer_sync(&phba->fcp_poll_timer); 3330 break; 3331 case LPFC_PCI_DEV_OC: 3332 /* Stop any OneConnect device specific driver timers */ 3333 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 3334 break; 3335 default: 3336 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3337 "0297 Invalid device group (x%x)\n", 3338 phba->pci_dev_grp); 3339 break; 3340 } 3341 return; 3342 } 3343 3344 /** 3345 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 3346 * @phba: pointer to lpfc hba data structure. 3347 * @mbx_action: flag for mailbox no wait action. 3348 * 3349 * This routine marks a HBA's management interface as blocked. Once the HBA's 3350 * management interface is marked as blocked, all the user space access to 3351 * the HBA, whether they are from sysfs interface or libdfc interface will 3352 * all be blocked. The HBA is set to block the management interface when the 3353 * driver prepares the HBA interface for online or offline. 
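* 
* With LPFC_MBX_WAIT the routine also waits (polling every 2 ms) for
* any active mailbox command to complete, up to the command-specific
* timeout returned by lpfc_mbox_tmo_val(); with LPFC_MBX_NO_WAIT it
* returns immediately after setting LPFC_BLOCK_MGMT_IO.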
3354 **/ 3355 static void 3356 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 3357 { 3358 unsigned long iflag; 3359 uint8_t actcmd = MBX_HEARTBEAT; 3360 unsigned long timeout; 3361 3362 spin_lock_irqsave(&phba->hbalock, iflag); 3363 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 3364 spin_unlock_irqrestore(&phba->hbalock, iflag); 3365 if (mbx_action == LPFC_MBX_NO_WAIT) 3366 return; 3367 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 3368 spin_lock_irqsave(&phba->hbalock, iflag); 3369 if (phba->sli.mbox_active) { 3370 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 3371 /* Determine how long we might wait for the active mailbox 3372 * command to be gracefully completed by firmware. 3373 */ 3374 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 3375 phba->sli.mbox_active) * 1000) + jiffies; 3376 } 3377 spin_unlock_irqrestore(&phba->hbalock, iflag); 3378 3379 /* Wait for the outstnading mailbox command to complete */ 3380 while (phba->sli.mbox_active) { 3381 /* Check active mailbox complete status every 2ms */ 3382 msleep(2); 3383 if (time_after(jiffies, timeout)) { 3384 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3385 "2813 Mgmt IO is Blocked %x " 3386 "- mbox cmd %x still active\n", 3387 phba->sli.sli_flag, actcmd); 3388 break; 3389 } 3390 } 3391 } 3392 3393 /** 3394 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 3395 * @phba: pointer to lpfc hba data structure. 3396 * 3397 * Allocate RPIs for all active remote nodes. This is needed whenever 3398 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 3399 * is to fixup the temporary rpi assignments. 3400 **/ 3401 void 3402 lpfc_sli4_node_prep(struct lpfc_hba *phba) 3403 { 3404 struct lpfc_nodelist *ndlp, *next_ndlp; 3405 struct lpfc_vport **vports; 3406 int i, rpi; 3407 3408 if (phba->sli_rev != LPFC_SLI_REV4) 3409 return; 3410 3411 vports = lpfc_create_vport_work_array(phba); 3412 if (vports == NULL) 3413 return; 3414 3415 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3416 if (test_bit(FC_UNLOADING, &vports[i]->load_flag)) 3417 continue; 3418 3419 list_for_each_entry_safe(ndlp, next_ndlp, 3420 &vports[i]->fc_nodes, 3421 nlp_listp) { 3422 rpi = lpfc_sli4_alloc_rpi(phba); 3423 if (rpi == LPFC_RPI_ALLOC_ERROR) { 3424 /* TODO print log? */ 3425 continue; 3426 } 3427 ndlp->nlp_rpi = rpi; 3428 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 3429 LOG_NODE | LOG_DISCOVERY, 3430 "0009 Assign RPI x%x to ndlp x%px " 3431 "DID:x%06x flg:x%x\n", 3432 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, 3433 ndlp->nlp_flag); 3434 } 3435 } 3436 lpfc_destroy_vport_work_array(phba, vports); 3437 } 3438 3439 /** 3440 * lpfc_create_expedite_pool - create expedite pool 3441 * @phba: pointer to lpfc hba data structure. 3442 * 3443 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 3444 * to expedite pool. Mark them as expedite. 
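* 
* At most XRI_BATCH buffers are taken from HWQ 0's put list; each is
* marked with ->expedite set and accounted in epd_pool->count.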
3445 **/
3446 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3447 {
3448 struct lpfc_sli4_hdw_queue *qp;
3449 struct lpfc_io_buf *lpfc_ncmd;
3450 struct lpfc_io_buf *lpfc_ncmd_next;
3451 struct lpfc_epd_pool *epd_pool;
3452 unsigned long iflag;
3453
3454 epd_pool = &phba->epd_pool;
3455 qp = &phba->sli4_hba.hdwq[0];
3456
3457 spin_lock_init(&epd_pool->lock);
3458 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3459 spin_lock(&epd_pool->lock);
3460 INIT_LIST_HEAD(&epd_pool->list);
3461 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3462 &qp->lpfc_io_buf_list_put, list) {
3463 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3464 lpfc_ncmd->expedite = true;
3465 qp->put_io_bufs--;
3466 epd_pool->count++;
3467 if (epd_pool->count >= XRI_BATCH)
3468 break;
3469 }
3470 spin_unlock(&epd_pool->lock);
3471 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3472 }
3473
3474 /**
3475 * lpfc_destroy_expedite_pool - destroy expedite pool
3476 * @phba: pointer to lpfc hba data structure.
3477 *
3478 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3479 * of HWQ 0 and clears the expedite mark.
3480 **/
3481 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3482 {
3483 struct lpfc_sli4_hdw_queue *qp;
3484 struct lpfc_io_buf *lpfc_ncmd;
3485 struct lpfc_io_buf *lpfc_ncmd_next;
3486 struct lpfc_epd_pool *epd_pool;
3487 unsigned long iflag;
3488
3489 epd_pool = &phba->epd_pool;
3490 qp = &phba->sli4_hba.hdwq[0];
3491
3492 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3493 spin_lock(&epd_pool->lock);
3494 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3495 &epd_pool->list, list) {
3496 list_move_tail(&lpfc_ncmd->list,
3497 &qp->lpfc_io_buf_list_put);
3498 lpfc_ncmd->flags = false;
3499 qp->put_io_bufs++;
3500 epd_pool->count--;
3501 }
3502 spin_unlock(&epd_pool->lock);
3503 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3504 }
3505
3506 /**
3507 * lpfc_create_multixri_pools - create multi-XRI pools
3508 * @phba: pointer to lpfc hba data structure.
3509 *
3510 * This routine initializes the public and private XRI pools for each HWQ,
3511 * then moves XRIs from lpfc_io_buf_list_put to the public pool. High and low
3512 * watermarks are also initialized.
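* 
* Each HWQ is given an xri_limit of io_xri_cnt / cfg_hdw_queue. The
* private pool high watermark is set to half of that limit and the
* low watermark to XRI_BATCH. If allocation of a per-HWQ pool fails,
* the pools created so far are freed and cfg_xri_rebalancing is
* cleared.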
3513 **/ 3514 void lpfc_create_multixri_pools(struct lpfc_hba *phba) 3515 { 3516 u32 i, j; 3517 u32 hwq_count; 3518 u32 count_per_hwq; 3519 struct lpfc_io_buf *lpfc_ncmd; 3520 struct lpfc_io_buf *lpfc_ncmd_next; 3521 unsigned long iflag; 3522 struct lpfc_sli4_hdw_queue *qp; 3523 struct lpfc_multixri_pool *multixri_pool; 3524 struct lpfc_pbl_pool *pbl_pool; 3525 struct lpfc_pvt_pool *pvt_pool; 3526 3527 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3528 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", 3529 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, 3530 phba->sli4_hba.io_xri_cnt); 3531 3532 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3533 lpfc_create_expedite_pool(phba); 3534 3535 hwq_count = phba->cfg_hdw_queue; 3536 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; 3537 3538 for (i = 0; i < hwq_count; i++) { 3539 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); 3540 3541 if (!multixri_pool) { 3542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3543 "1238 Failed to allocate memory for " 3544 "multixri_pool\n"); 3545 3546 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3547 lpfc_destroy_expedite_pool(phba); 3548 3549 j = 0; 3550 while (j < i) { 3551 qp = &phba->sli4_hba.hdwq[j]; 3552 kfree(qp->p_multixri_pool); 3553 j++; 3554 } 3555 phba->cfg_xri_rebalancing = 0; 3556 return; 3557 } 3558 3559 qp = &phba->sli4_hba.hdwq[i]; 3560 qp->p_multixri_pool = multixri_pool; 3561 3562 multixri_pool->xri_limit = count_per_hwq; 3563 multixri_pool->rrb_next_hwqid = i; 3564 3565 /* Deal with public free xri pool */ 3566 pbl_pool = &multixri_pool->pbl_pool; 3567 spin_lock_init(&pbl_pool->lock); 3568 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3569 spin_lock(&pbl_pool->lock); 3570 INIT_LIST_HEAD(&pbl_pool->list); 3571 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3572 &qp->lpfc_io_buf_list_put, list) { 3573 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); 3574 qp->put_io_bufs--; 3575 pbl_pool->count++; 3576 } 3577 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3578 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", 3579 pbl_pool->count, i); 3580 spin_unlock(&pbl_pool->lock); 3581 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3582 3583 /* Deal with private free xri pool */ 3584 pvt_pool = &multixri_pool->pvt_pool; 3585 pvt_pool->high_watermark = multixri_pool->xri_limit / 2; 3586 pvt_pool->low_watermark = XRI_BATCH; 3587 spin_lock_init(&pvt_pool->lock); 3588 spin_lock_irqsave(&pvt_pool->lock, iflag); 3589 INIT_LIST_HEAD(&pvt_pool->list); 3590 pvt_pool->count = 0; 3591 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 3592 } 3593 } 3594 3595 /** 3596 * lpfc_destroy_multixri_pools - destroy multi-XRI pools 3597 * @phba: pointer to lpfc hba data structure. 3598 * 3599 * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
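* 
* Unless the driver is unloading, the I/O rings are flushed first.
* For each HWQ the public and private pools are drained back to
* lpfc_io_buf_list_put under the put-list lock, and the per-HWQ
* multixri_pool structure is then freed.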
3600 **/ 3601 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) 3602 { 3603 u32 i; 3604 u32 hwq_count; 3605 struct lpfc_io_buf *lpfc_ncmd; 3606 struct lpfc_io_buf *lpfc_ncmd_next; 3607 unsigned long iflag; 3608 struct lpfc_sli4_hdw_queue *qp; 3609 struct lpfc_multixri_pool *multixri_pool; 3610 struct lpfc_pbl_pool *pbl_pool; 3611 struct lpfc_pvt_pool *pvt_pool; 3612 3613 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3614 lpfc_destroy_expedite_pool(phba); 3615 3616 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 3617 lpfc_sli_flush_io_rings(phba); 3618 3619 hwq_count = phba->cfg_hdw_queue; 3620 3621 for (i = 0; i < hwq_count; i++) { 3622 qp = &phba->sli4_hba.hdwq[i]; 3623 multixri_pool = qp->p_multixri_pool; 3624 if (!multixri_pool) 3625 continue; 3626 3627 qp->p_multixri_pool = NULL; 3628 3629 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); 3630 3631 /* Deal with public free xri pool */ 3632 pbl_pool = &multixri_pool->pbl_pool; 3633 spin_lock(&pbl_pool->lock); 3634 3635 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3636 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", 3637 pbl_pool->count, i); 3638 3639 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3640 &pbl_pool->list, list) { 3641 list_move_tail(&lpfc_ncmd->list, 3642 &qp->lpfc_io_buf_list_put); 3643 qp->put_io_bufs++; 3644 pbl_pool->count--; 3645 } 3646 3647 INIT_LIST_HEAD(&pbl_pool->list); 3648 pbl_pool->count = 0; 3649 3650 spin_unlock(&pbl_pool->lock); 3651 3652 /* Deal with private free xri pool */ 3653 pvt_pool = &multixri_pool->pvt_pool; 3654 spin_lock(&pvt_pool->lock); 3655 3656 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3657 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", 3658 pvt_pool->count, i); 3659 3660 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3661 &pvt_pool->list, list) { 3662 list_move_tail(&lpfc_ncmd->list, 3663 &qp->lpfc_io_buf_list_put); 3664 qp->put_io_bufs++; 3665 pvt_pool->count--; 3666 } 3667 3668 INIT_LIST_HEAD(&pvt_pool->list); 3669 pvt_pool->count = 0; 3670 3671 spin_unlock(&pvt_pool->lock); 3672 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); 3673 3674 kfree(multixri_pool); 3675 } 3676 } 3677 3678 /** 3679 * lpfc_online - Initialize and bring a HBA online 3680 * @phba: pointer to lpfc hba data structure. 3681 * 3682 * This routine initializes the HBA and brings a HBA online. During this 3683 * process, the management interface is blocked to prevent user space access 3684 * to the HBA interfering with the driver initialization. 3685 * 3686 * Return codes 3687 * 0 - successful 3688 * 1 - failed 3689 **/ 3690 int 3691 lpfc_online(struct lpfc_hba *phba) 3692 { 3693 struct lpfc_vport *vport; 3694 struct lpfc_vport **vports; 3695 int i, error = 0; 3696 bool vpis_cleared = false; 3697 3698 if (!phba) 3699 return 0; 3700 vport = phba->pport; 3701 3702 if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) 3703 return 0; 3704 3705 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3706 "0458 Bring Adapter online\n"); 3707 3708 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 3709 3710 if (phba->sli_rev == LPFC_SLI_REV4) { 3711 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 3712 lpfc_unblock_mgmt_io(phba); 3713 return 1; 3714 } 3715 spin_lock_irq(&phba->hbalock); 3716 if (!phba->sli4_hba.max_cfg_param.vpi_used) 3717 vpis_cleared = true; 3718 spin_unlock_irq(&phba->hbalock); 3719 3720 /* Reestablish the local initiator port. 3721 * The offline process destroyed the previous lport. 
3722 */ 3723 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && 3724 !phba->nvmet_support) { 3725 error = lpfc_nvme_create_localport(phba->pport); 3726 if (error) 3727 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3728 "6132 NVME restore reg failed " 3729 "on nvmei error x%x\n", error); 3730 } 3731 } else { 3732 lpfc_sli_queue_init(phba); 3733 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 3734 lpfc_unblock_mgmt_io(phba); 3735 return 1; 3736 } 3737 } 3738 3739 vports = lpfc_create_vport_work_array(phba); 3740 if (vports != NULL) { 3741 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3742 clear_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag); 3743 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3744 set_bit(FC_VPORT_NEEDS_REG_VPI, 3745 &vports[i]->fc_flag); 3746 if (phba->sli_rev == LPFC_SLI_REV4) { 3747 set_bit(FC_VPORT_NEEDS_INIT_VPI, 3748 &vports[i]->fc_flag); 3749 if ((vpis_cleared) && 3750 (vports[i]->port_type != 3751 LPFC_PHYSICAL_PORT)) 3752 vports[i]->vpi = 0; 3753 } 3754 } 3755 } 3756 lpfc_destroy_vport_work_array(phba, vports); 3757 3758 if (phba->cfg_xri_rebalancing) 3759 lpfc_create_multixri_pools(phba); 3760 3761 lpfc_cpuhp_add(phba); 3762 3763 lpfc_unblock_mgmt_io(phba); 3764 return 0; 3765 } 3766 3767 /** 3768 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 3769 * @phba: pointer to lpfc hba data structure. 3770 * 3771 * This routine marks a HBA's management interface as not blocked. Once the 3772 * HBA's management interface is marked as not blocked, all the user space 3773 * access to the HBA, whether they are from sysfs interface or libdfc 3774 * interface will be allowed. The HBA is set to block the management interface 3775 * when the driver prepares the HBA interface for online or offline and then 3776 * set to unblock the management interface afterwards. 3777 **/ 3778 void 3779 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 3780 { 3781 unsigned long iflag; 3782 3783 spin_lock_irqsave(&phba->hbalock, iflag); 3784 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 3785 spin_unlock_irqrestore(&phba->hbalock, iflag); 3786 } 3787 3788 /** 3789 * lpfc_offline_prep - Prepare a HBA to be brought offline 3790 * @phba: pointer to lpfc hba data structure. 3791 * @mbx_action: flag for mailbox shutdown action. 3792 * 3793 * This routine is invoked to prepare a HBA to be brought offline. It performs 3794 * unregistration login to all the nodes on all vports and flushes the mailbox 3795 * queue to make it ready to be brought offline. 
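 *
 * The @mbx_action argument is passed through to lpfc_block_mgmt_io(). As an
 * editor's illustration (assumed, not verified against every call site),
 * orderly shutdowns use the waiting form while error and reset paths avoid
 * blocking on the mailbox:
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);		(graceful offline)
 *	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);	(port already inaccessible)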
3796 **/ 3797 void 3798 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 3799 { 3800 struct lpfc_vport *vport = phba->pport; 3801 struct lpfc_nodelist *ndlp, *next_ndlp; 3802 struct lpfc_vport **vports; 3803 struct Scsi_Host *shost; 3804 int i; 3805 int offline; 3806 bool hba_pci_err; 3807 3808 if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) 3809 return; 3810 3811 lpfc_block_mgmt_io(phba, mbx_action); 3812 3813 lpfc_linkdown(phba); 3814 3815 offline = pci_channel_offline(phba->pcidev); 3816 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); 3817 3818 /* Issue an unreg_login to all nodes on all vports */ 3819 vports = lpfc_create_vport_work_array(phba); 3820 if (vports != NULL) { 3821 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3822 if (test_bit(FC_UNLOADING, &vports[i]->load_flag)) 3823 continue; 3824 shost = lpfc_shost_from_vport(vports[i]); 3825 spin_lock_irq(shost->host_lock); 3826 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 3827 spin_unlock_irq(shost->host_lock); 3828 set_bit(FC_VPORT_NEEDS_REG_VPI, &vports[i]->fc_flag); 3829 clear_bit(FC_VFI_REGISTERED, &vports[i]->fc_flag); 3830 3831 list_for_each_entry_safe(ndlp, next_ndlp, 3832 &vports[i]->fc_nodes, 3833 nlp_listp) { 3834 3835 spin_lock_irq(&ndlp->lock); 3836 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3837 spin_unlock_irq(&ndlp->lock); 3838 3839 if (offline || hba_pci_err) { 3840 spin_lock_irq(&ndlp->lock); 3841 ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3842 NLP_RPI_REGISTERED); 3843 spin_unlock_irq(&ndlp->lock); 3844 if (phba->sli_rev == LPFC_SLI_REV4) 3845 lpfc_sli_rpi_release(vports[i], 3846 ndlp); 3847 } else { 3848 lpfc_unreg_rpi(vports[i], ndlp); 3849 } 3850 /* 3851 * Whenever an SLI4 port goes offline, free the 3852 * RPI. Get a new RPI when the adapter port 3853 * comes back online. 3854 */ 3855 if (phba->sli_rev == LPFC_SLI_REV4) { 3856 lpfc_printf_vlog(vports[i], KERN_INFO, 3857 LOG_NODE | LOG_DISCOVERY, 3858 "0011 Free RPI x%x on " 3859 "ndlp: x%px did x%x\n", 3860 ndlp->nlp_rpi, ndlp, 3861 ndlp->nlp_DID); 3862 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3863 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 3864 } 3865 3866 if (ndlp->nlp_type & NLP_FABRIC) { 3867 lpfc_disc_state_machine(vports[i], ndlp, 3868 NULL, NLP_EVT_DEVICE_RECOVERY); 3869 3870 /* Don't remove the node unless the node 3871 * has been unregistered with the 3872 * transport, and we're not in recovery 3873 * before dev_loss_tmo triggered. 3874 * Otherwise, let dev_loss take care of 3875 * the node. 3876 */ 3877 if (!(ndlp->save_flags & 3878 NLP_IN_RECOV_POST_DEV_LOSS) && 3879 !(ndlp->fc4_xpt_flags & 3880 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3881 lpfc_disc_state_machine 3882 (vports[i], ndlp, 3883 NULL, 3884 NLP_EVT_DEVICE_RM); 3885 } 3886 } 3887 } 3888 } 3889 lpfc_destroy_vport_work_array(phba, vports); 3890 3891 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3892 3893 if (phba->wq) 3894 flush_workqueue(phba->wq); 3895 } 3896 3897 /** 3898 * lpfc_offline - Bring a HBA offline 3899 * @phba: pointer to lpfc hba data structure. 3900 * 3901 * This routine actually brings a HBA offline. It stops all the timers 3902 * associated with the HBA, brings down the SLI layer, and eventually 3903 * marks the HBA as in offline state for the upper layer protocol. 
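 *
 * A controlled bounce of the adapter therefore looks roughly like the
 * following (editor's simplified sketch of the usual reset sequence, with
 * lpfc_sli_brdrestart() being the restart step used by reset paths):
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *	lpfc_offline(phba);
 *	lpfc_sli_brdrestart(phba);
 *	lpfc_online(phba);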
3904 **/ 3905 void 3906 lpfc_offline(struct lpfc_hba *phba) 3907 { 3908 struct Scsi_Host *shost; 3909 struct lpfc_vport **vports; 3910 int i; 3911 3912 if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag)) 3913 return; 3914 3915 /* stop port and all timers associated with this hba */ 3916 lpfc_stop_port(phba); 3917 3918 /* Tear down the local and target port registrations. The 3919 * nvme transports need to cleanup. 3920 */ 3921 lpfc_nvmet_destroy_targetport(phba); 3922 lpfc_nvme_destroy_localport(phba->pport); 3923 3924 vports = lpfc_create_vport_work_array(phba); 3925 if (vports != NULL) 3926 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3927 lpfc_stop_vport_timers(vports[i]); 3928 lpfc_destroy_vport_work_array(phba, vports); 3929 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3930 "0460 Bring Adapter offline\n"); 3931 /* Bring down the SLI Layer and cleanup. The HBA is offline 3932 now. */ 3933 lpfc_sli_hba_down(phba); 3934 spin_lock_irq(&phba->hbalock); 3935 phba->work_ha = 0; 3936 spin_unlock_irq(&phba->hbalock); 3937 vports = lpfc_create_vport_work_array(phba); 3938 if (vports != NULL) 3939 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3940 shost = lpfc_shost_from_vport(vports[i]); 3941 spin_lock_irq(shost->host_lock); 3942 vports[i]->work_port_events = 0; 3943 spin_unlock_irq(shost->host_lock); 3944 set_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag); 3945 } 3946 lpfc_destroy_vport_work_array(phba, vports); 3947 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled 3948 * in hba_unset 3949 */ 3950 if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag)) 3951 __lpfc_cpuhp_remove(phba); 3952 3953 if (phba->cfg_xri_rebalancing) 3954 lpfc_destroy_multixri_pools(phba); 3955 } 3956 3957 /** 3958 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3959 * @phba: pointer to lpfc hba data structure. 3960 * 3961 * This routine is to free all the SCSI buffers and IOCBs from the driver 3962 * list back to kernel. It is called from lpfc_pci_remove_one to free 3963 * the internal resources before the device is removed from the system. 3964 **/ 3965 static void 3966 lpfc_scsi_free(struct lpfc_hba *phba) 3967 { 3968 struct lpfc_io_buf *sb, *sb_next; 3969 3970 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 3971 return; 3972 3973 spin_lock_irq(&phba->hbalock); 3974 3975 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3976 3977 spin_lock(&phba->scsi_buf_list_put_lock); 3978 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3979 list) { 3980 list_del(&sb->list); 3981 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3982 sb->dma_handle); 3983 kfree(sb); 3984 phba->total_scsi_bufs--; 3985 } 3986 spin_unlock(&phba->scsi_buf_list_put_lock); 3987 3988 spin_lock(&phba->scsi_buf_list_get_lock); 3989 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3990 list) { 3991 list_del(&sb->list); 3992 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data, 3993 sb->dma_handle); 3994 kfree(sb); 3995 phba->total_scsi_bufs--; 3996 } 3997 spin_unlock(&phba->scsi_buf_list_get_lock); 3998 spin_unlock_irq(&phba->hbalock); 3999 } 4000 4001 /** 4002 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists 4003 * @phba: pointer to lpfc hba data structure. 4004 * 4005 * This routine is to free all the IO buffers and IOCBs from the driver 4006 * list back to kernel. It is called from lpfc_pci_remove_one to free 4007 * the internal resources before the device is removed from the system. 
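 *
 * Each hardware queue keeps two free lists that are drained below (sketch of
 * the layout, with the intended roles as understood by the editor):
 *
 *	qp = &phba->sli4_hba.hdwq[idx];
 *	qp->lpfc_io_buf_list_get	- allocations are served from here
 *	qp->lpfc_io_buf_list_put	- completed buffers are parked here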
 **/
void
lpfc_io_free(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	struct lpfc_sli4_hdw_queue *qp;
	int idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		/* Release all the lpfc_nvme_bufs maintained by this host. */
		spin_lock(&qp->io_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->put_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_put_lock);

		spin_lock(&qp->io_buf_list_get_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_get,
					 list) {
			list_del(&lpfc_ncmd->list);
			qp->get_io_bufs--;
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
			kfree(lpfc_ncmd);
			qp->total_io_bufs--;
		}
		spin_unlock(&qp->io_buf_list_get_lock);
	}
}

/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
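 *
 * The target ELS count comes from lpfc_sli4_get_els_iocb_cnt(); the sgl list
 * is grown or shrunk in place and every remaining entry is then re-stamped
 * with a fresh XRI, e.g. (sketch of the per-entry update performed below):
 *
 *	lxri = lpfc_sli4_next_xritag(phba);
 *	sglq_entry->sli4_lxritag = lxri;
 *	sglq_entry->sli4_xritag  = phba->sli4_hba.xri_ids[lxri];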
4061 * 4062 * Return codes 4063 * 0 - successful (for now, it always returns 0) 4064 **/ 4065 int 4066 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba) 4067 { 4068 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4069 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4070 LIST_HEAD(els_sgl_list); 4071 int rc; 4072 4073 /* 4074 * update on pci function's els xri-sgl list 4075 */ 4076 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4077 4078 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 4079 /* els xri-sgl expanded */ 4080 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 4081 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4082 "3157 ELS xri-sgl count increased from " 4083 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 4084 els_xri_cnt); 4085 /* allocate the additional els sgls */ 4086 for (i = 0; i < xri_cnt; i++) { 4087 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4088 GFP_KERNEL); 4089 if (sglq_entry == NULL) { 4090 lpfc_printf_log(phba, KERN_ERR, 4091 LOG_TRACE_EVENT, 4092 "2562 Failure to allocate an " 4093 "ELS sgl entry:%d\n", i); 4094 rc = -ENOMEM; 4095 goto out_free_mem; 4096 } 4097 sglq_entry->buff_type = GEN_BUFF_TYPE; 4098 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 4099 &sglq_entry->phys); 4100 if (sglq_entry->virt == NULL) { 4101 kfree(sglq_entry); 4102 lpfc_printf_log(phba, KERN_ERR, 4103 LOG_TRACE_EVENT, 4104 "2563 Failure to allocate an " 4105 "ELS mbuf:%d\n", i); 4106 rc = -ENOMEM; 4107 goto out_free_mem; 4108 } 4109 sglq_entry->sgl = sglq_entry->virt; 4110 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 4111 sglq_entry->state = SGL_FREED; 4112 list_add_tail(&sglq_entry->list, &els_sgl_list); 4113 } 4114 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 4115 list_splice_init(&els_sgl_list, 4116 &phba->sli4_hba.lpfc_els_sgl_list); 4117 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 4118 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 4119 /* els xri-sgl shrinked */ 4120 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 4121 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4122 "3158 ELS xri-sgl count decreased from " 4123 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 4124 els_xri_cnt); 4125 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 4126 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, 4127 &els_sgl_list); 4128 /* release extra els sgls from list */ 4129 for (i = 0; i < xri_cnt; i++) { 4130 list_remove_head(&els_sgl_list, 4131 sglq_entry, struct lpfc_sglq, list); 4132 if (sglq_entry) { 4133 __lpfc_mbuf_free(phba, sglq_entry->virt, 4134 sglq_entry->phys); 4135 kfree(sglq_entry); 4136 } 4137 } 4138 list_splice_init(&els_sgl_list, 4139 &phba->sli4_hba.lpfc_els_sgl_list); 4140 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 4141 } else 4142 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4143 "3163 ELS xri-sgl count unchanged: %d\n", 4144 els_xri_cnt); 4145 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 4146 4147 /* update xris to els sgls on the list */ 4148 sglq_entry = NULL; 4149 sglq_entry_next = NULL; 4150 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4151 &phba->sli4_hba.lpfc_els_sgl_list, list) { 4152 lxri = lpfc_sli4_next_xritag(phba); 4153 if (lxri == NO_XRI) { 4154 lpfc_printf_log(phba, KERN_ERR, 4155 LOG_TRACE_EVENT, 4156 "2400 Failed to allocate xri for " 4157 "ELS sgl\n"); 4158 rc = -ENOMEM; 4159 goto out_free_mem; 4160 } 4161 sglq_entry->sli4_lxritag = lxri; 4162 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4163 } 4164 return 0; 4165 4166 out_free_mem: 4167 lpfc_free_els_sgl_list(phba); 4168 return rc; 4169 } 4170 4171 /** 4172 * lpfc_sli4_nvmet_sgl_update - 
update xri-sgl sizing and mapping 4173 * @phba: pointer to lpfc hba data structure. 4174 * 4175 * This routine first calculates the sizes of the current els and allocated 4176 * scsi sgl lists, and then goes through all sgls to updates the physical 4177 * XRIs assigned due to port function reset. During port initialization, the 4178 * current els and allocated scsi sgl lists are 0s. 4179 * 4180 * Return codes 4181 * 0 - successful (for now, it always returns 0) 4182 **/ 4183 int 4184 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) 4185 { 4186 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 4187 uint16_t i, lxri, xri_cnt, els_xri_cnt; 4188 uint16_t nvmet_xri_cnt; 4189 LIST_HEAD(nvmet_sgl_list); 4190 int rc; 4191 4192 /* 4193 * update on pci function's nvmet xri-sgl list 4194 */ 4195 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4196 4197 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ 4198 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4199 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 4200 /* els xri-sgl expanded */ 4201 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt; 4202 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4203 "6302 NVMET xri-sgl cnt grew from %d to %d\n", 4204 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt); 4205 /* allocate the additional nvmet sgls */ 4206 for (i = 0; i < xri_cnt; i++) { 4207 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 4208 GFP_KERNEL); 4209 if (sglq_entry == NULL) { 4210 lpfc_printf_log(phba, KERN_ERR, 4211 LOG_TRACE_EVENT, 4212 "6303 Failure to allocate an " 4213 "NVMET sgl entry:%d\n", i); 4214 rc = -ENOMEM; 4215 goto out_free_mem; 4216 } 4217 sglq_entry->buff_type = NVMET_BUFF_TYPE; 4218 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0, 4219 &sglq_entry->phys); 4220 if (sglq_entry->virt == NULL) { 4221 kfree(sglq_entry); 4222 lpfc_printf_log(phba, KERN_ERR, 4223 LOG_TRACE_EVENT, 4224 "6304 Failure to allocate an " 4225 "NVMET buf:%d\n", i); 4226 rc = -ENOMEM; 4227 goto out_free_mem; 4228 } 4229 sglq_entry->sgl = sglq_entry->virt; 4230 memset(sglq_entry->sgl, 0, 4231 phba->cfg_sg_dma_buf_size); 4232 sglq_entry->state = SGL_FREED; 4233 list_add_tail(&sglq_entry->list, &nvmet_sgl_list); 4234 } 4235 spin_lock_irq(&phba->hbalock); 4236 spin_lock(&phba->sli4_hba.sgl_list_lock); 4237 list_splice_init(&nvmet_sgl_list, 4238 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4239 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4240 spin_unlock_irq(&phba->hbalock); 4241 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) { 4242 /* nvmet xri-sgl shrunk */ 4243 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt; 4244 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4245 "6305 NVMET xri-sgl count decreased from " 4246 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt, 4247 nvmet_xri_cnt); 4248 spin_lock_irq(&phba->hbalock); 4249 spin_lock(&phba->sli4_hba.sgl_list_lock); 4250 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, 4251 &nvmet_sgl_list); 4252 /* release extra nvmet sgls from list */ 4253 for (i = 0; i < xri_cnt; i++) { 4254 list_remove_head(&nvmet_sgl_list, 4255 sglq_entry, struct lpfc_sglq, list); 4256 if (sglq_entry) { 4257 lpfc_nvmet_buf_free(phba, sglq_entry->virt, 4258 sglq_entry->phys); 4259 kfree(sglq_entry); 4260 } 4261 } 4262 list_splice_init(&nvmet_sgl_list, 4263 &phba->sli4_hba.lpfc_nvmet_sgl_list); 4264 spin_unlock(&phba->sli4_hba.sgl_list_lock); 4265 spin_unlock_irq(&phba->hbalock); 4266 } else 4267 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4268 "6306 NVMET xri-sgl count unchanged: %d\n", 4269 
nvmet_xri_cnt); 4270 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt; 4271 4272 /* update xris to nvmet sgls on the list */ 4273 sglq_entry = NULL; 4274 sglq_entry_next = NULL; 4275 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 4276 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) { 4277 lxri = lpfc_sli4_next_xritag(phba); 4278 if (lxri == NO_XRI) { 4279 lpfc_printf_log(phba, KERN_ERR, 4280 LOG_TRACE_EVENT, 4281 "6307 Failed to allocate xri for " 4282 "NVMET sgl\n"); 4283 rc = -ENOMEM; 4284 goto out_free_mem; 4285 } 4286 sglq_entry->sli4_lxritag = lxri; 4287 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4288 } 4289 return 0; 4290 4291 out_free_mem: 4292 lpfc_free_nvmet_sgl_list(phba); 4293 return rc; 4294 } 4295 4296 int 4297 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) 4298 { 4299 LIST_HEAD(blist); 4300 struct lpfc_sli4_hdw_queue *qp; 4301 struct lpfc_io_buf *lpfc_cmd; 4302 struct lpfc_io_buf *iobufp, *prev_iobufp; 4303 int idx, cnt, xri, inserted; 4304 4305 cnt = 0; 4306 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4307 qp = &phba->sli4_hba.hdwq[idx]; 4308 spin_lock_irq(&qp->io_buf_list_get_lock); 4309 spin_lock(&qp->io_buf_list_put_lock); 4310 4311 /* Take everything off the get and put lists */ 4312 list_splice_init(&qp->lpfc_io_buf_list_get, &blist); 4313 list_splice(&qp->lpfc_io_buf_list_put, &blist); 4314 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 4315 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 4316 cnt += qp->get_io_bufs + qp->put_io_bufs; 4317 qp->get_io_bufs = 0; 4318 qp->put_io_bufs = 0; 4319 qp->total_io_bufs = 0; 4320 spin_unlock(&qp->io_buf_list_put_lock); 4321 spin_unlock_irq(&qp->io_buf_list_get_lock); 4322 } 4323 4324 /* 4325 * Take IO buffers off blist and put on cbuf sorted by XRI. 4326 * This is because POST_SGL takes a sequential range of XRIs 4327 * to post to the firmware. 
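	 *
	 * Worked example of the ordered insert below: adding an entry with
	 * XRI 17 to a cbuf already holding XRIs { 5, 12, 23 } yields
	 * { 5, 12, 17, 23 }, so the later SGL block post sees ascending XRIs.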
4328 */ 4329 for (idx = 0; idx < cnt; idx++) { 4330 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); 4331 if (!lpfc_cmd) 4332 return cnt; 4333 if (idx == 0) { 4334 list_add_tail(&lpfc_cmd->list, cbuf); 4335 continue; 4336 } 4337 xri = lpfc_cmd->cur_iocbq.sli4_xritag; 4338 inserted = 0; 4339 prev_iobufp = NULL; 4340 list_for_each_entry(iobufp, cbuf, list) { 4341 if (xri < iobufp->cur_iocbq.sli4_xritag) { 4342 if (prev_iobufp) 4343 list_add(&lpfc_cmd->list, 4344 &prev_iobufp->list); 4345 else 4346 list_add(&lpfc_cmd->list, cbuf); 4347 inserted = 1; 4348 break; 4349 } 4350 prev_iobufp = iobufp; 4351 } 4352 if (!inserted) 4353 list_add_tail(&lpfc_cmd->list, cbuf); 4354 } 4355 return cnt; 4356 } 4357 4358 int 4359 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) 4360 { 4361 struct lpfc_sli4_hdw_queue *qp; 4362 struct lpfc_io_buf *lpfc_cmd; 4363 int idx, cnt; 4364 unsigned long iflags; 4365 4366 qp = phba->sli4_hba.hdwq; 4367 cnt = 0; 4368 while (!list_empty(cbuf)) { 4369 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 4370 list_remove_head(cbuf, lpfc_cmd, 4371 struct lpfc_io_buf, list); 4372 if (!lpfc_cmd) 4373 return cnt; 4374 cnt++; 4375 qp = &phba->sli4_hba.hdwq[idx]; 4376 lpfc_cmd->hdwq_no = idx; 4377 lpfc_cmd->hdwq = qp; 4378 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL; 4379 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags); 4380 list_add_tail(&lpfc_cmd->list, 4381 &qp->lpfc_io_buf_list_put); 4382 qp->put_io_bufs++; 4383 qp->total_io_bufs++; 4384 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 4385 iflags); 4386 } 4387 } 4388 return cnt; 4389 } 4390 4391 /** 4392 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping 4393 * @phba: pointer to lpfc hba data structure. 4394 * 4395 * This routine first calculates the sizes of the current els and allocated 4396 * scsi sgl lists, and then goes through all sgls to updates the physical 4397 * XRIs assigned due to port function reset. During port initialization, the 4398 * current els and allocated scsi sgl lists are 0s. 
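 *
 * The IO budget is whatever the port granted minus the ELS reservation, as
 * computed below:
 *
 *	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
 *	io_xri_max  = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;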
4399 * 4400 * Return codes 4401 * 0 - successful (for now, it always returns 0) 4402 **/ 4403 int 4404 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) 4405 { 4406 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; 4407 uint16_t i, lxri, els_xri_cnt; 4408 uint16_t io_xri_cnt, io_xri_max; 4409 LIST_HEAD(io_sgl_list); 4410 int rc, cnt; 4411 4412 /* 4413 * update on pci function's allocated nvme xri-sgl list 4414 */ 4415 4416 /* maximum number of xris available for nvme buffers */ 4417 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4418 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4419 phba->sli4_hba.io_xri_max = io_xri_max; 4420 4421 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4422 "6074 Current allocated XRI sgl count:%d, " 4423 "maximum XRI count:%d els_xri_cnt:%d\n\n", 4424 phba->sli4_hba.io_xri_cnt, 4425 phba->sli4_hba.io_xri_max, 4426 els_xri_cnt); 4427 4428 cnt = lpfc_io_buf_flush(phba, &io_sgl_list); 4429 4430 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { 4431 /* max nvme xri shrunk below the allocated nvme buffers */ 4432 io_xri_cnt = phba->sli4_hba.io_xri_cnt - 4433 phba->sli4_hba.io_xri_max; 4434 /* release the extra allocated nvme buffers */ 4435 for (i = 0; i < io_xri_cnt; i++) { 4436 list_remove_head(&io_sgl_list, lpfc_ncmd, 4437 struct lpfc_io_buf, list); 4438 if (lpfc_ncmd) { 4439 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4440 lpfc_ncmd->data, 4441 lpfc_ncmd->dma_handle); 4442 kfree(lpfc_ncmd); 4443 } 4444 } 4445 phba->sli4_hba.io_xri_cnt -= io_xri_cnt; 4446 } 4447 4448 /* update xris associated to remaining allocated nvme buffers */ 4449 lpfc_ncmd = NULL; 4450 lpfc_ncmd_next = NULL; 4451 phba->sli4_hba.io_xri_cnt = cnt; 4452 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 4453 &io_sgl_list, list) { 4454 lxri = lpfc_sli4_next_xritag(phba); 4455 if (lxri == NO_XRI) { 4456 lpfc_printf_log(phba, KERN_ERR, 4457 LOG_TRACE_EVENT, 4458 "6075 Failed to allocate xri for " 4459 "nvme buffer\n"); 4460 rc = -ENOMEM; 4461 goto out_free_mem; 4462 } 4463 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; 4464 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4465 } 4466 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); 4467 return 0; 4468 4469 out_free_mem: 4470 lpfc_io_free(phba); 4471 return rc; 4472 } 4473 4474 /** 4475 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec 4476 * @phba: Pointer to lpfc hba data structure. 4477 * @num_to_alloc: The requested number of buffers to allocate. 4478 * 4479 * This routine allocates nvme buffers for device with SLI-4 interface spec, 4480 * the nvme buffer contains all the necessary information needed to initiate 4481 * an I/O. After allocating up to @num_to_allocate IO buffers and put 4482 * them on a list, it post them to the port by using SGL block post. 4483 * 4484 * Return codes: 4485 * int - number of IO buffers that were allocated and posted. 4486 * 0 = failure, less than num_to_alloc is a partial failure. 4487 **/ 4488 int 4489 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) 4490 { 4491 struct lpfc_io_buf *lpfc_ncmd; 4492 struct lpfc_iocbq *pwqeq; 4493 uint16_t iotag, lxri = 0; 4494 int bcnt, num_posted; 4495 LIST_HEAD(prep_nblist); 4496 LIST_HEAD(post_nblist); 4497 LIST_HEAD(nvme_nblist); 4498 4499 phba->sli4_hba.io_xri_cnt = 0; 4500 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4501 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); 4502 if (!lpfc_ncmd) 4503 break; 4504 /* 4505 * Get memory from the pci pool to map the virt space to 4506 * pci bus space for an I/O. 
The DMA buffer includes the 4507 * number of SGE's necessary to support the sg_tablesize. 4508 */ 4509 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 4510 GFP_KERNEL, 4511 &lpfc_ncmd->dma_handle); 4512 if (!lpfc_ncmd->data) { 4513 kfree(lpfc_ncmd); 4514 break; 4515 } 4516 4517 if (phba->cfg_xpsgl && !phba->nvmet_support) { 4518 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); 4519 } else { 4520 /* 4521 * 4K Page alignment is CRITICAL to BlockGuard, double 4522 * check to be sure. 4523 */ 4524 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4525 (((unsigned long)(lpfc_ncmd->data) & 4526 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4527 lpfc_printf_log(phba, KERN_ERR, 4528 LOG_TRACE_EVENT, 4529 "3369 Memory alignment err: " 4530 "addr=%lx\n", 4531 (unsigned long)lpfc_ncmd->data); 4532 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4533 lpfc_ncmd->data, 4534 lpfc_ncmd->dma_handle); 4535 kfree(lpfc_ncmd); 4536 break; 4537 } 4538 } 4539 4540 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); 4541 4542 lxri = lpfc_sli4_next_xritag(phba); 4543 if (lxri == NO_XRI) { 4544 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4545 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4546 kfree(lpfc_ncmd); 4547 break; 4548 } 4549 pwqeq = &lpfc_ncmd->cur_iocbq; 4550 4551 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ 4552 iotag = lpfc_sli_next_iotag(phba, pwqeq); 4553 if (iotag == 0) { 4554 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4555 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4556 kfree(lpfc_ncmd); 4557 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4558 "6121 Failed to allocate IOTAG for" 4559 " XRI:0x%x\n", lxri); 4560 lpfc_sli4_free_xri(phba, lxri); 4561 break; 4562 } 4563 pwqeq->sli4_lxritag = lxri; 4564 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 4565 4566 /* Initialize local short-hand pointers. 
*/ 4567 lpfc_ncmd->dma_sgl = lpfc_ncmd->data; 4568 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; 4569 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd; 4570 spin_lock_init(&lpfc_ncmd->buf_lock); 4571 4572 /* add the nvme buffer to a post list */ 4573 list_add_tail(&lpfc_ncmd->list, &post_nblist); 4574 phba->sli4_hba.io_xri_cnt++; 4575 } 4576 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 4577 "6114 Allocate %d out of %d requested new NVME " 4578 "buffers of size x%zu bytes\n", bcnt, num_to_alloc, 4579 sizeof(*lpfc_ncmd)); 4580 4581 4582 /* post the list of nvme buffer sgls to port if available */ 4583 if (!list_empty(&post_nblist)) 4584 num_posted = lpfc_sli4_post_io_sgl_list( 4585 phba, &post_nblist, bcnt); 4586 else 4587 num_posted = 0; 4588 4589 return num_posted; 4590 } 4591 4592 static uint64_t 4593 lpfc_get_wwpn(struct lpfc_hba *phba) 4594 { 4595 uint64_t wwn; 4596 int rc; 4597 LPFC_MBOXQ_t *mboxq; 4598 MAILBOX_t *mb; 4599 4600 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4601 GFP_KERNEL); 4602 if (!mboxq) 4603 return (uint64_t)-1; 4604 4605 /* First get WWN of HBA instance */ 4606 lpfc_read_nv(phba, mboxq); 4607 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4608 if (rc != MBX_SUCCESS) { 4609 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4610 "6019 Mailbox failed , mbxCmd x%x " 4611 "READ_NV, mbxStatus x%x\n", 4612 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4613 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 4614 mempool_free(mboxq, phba->mbox_mem_pool); 4615 return (uint64_t) -1; 4616 } 4617 mb = &mboxq->u.mb; 4618 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t)); 4619 /* wwn is WWPN of HBA instance */ 4620 mempool_free(mboxq, phba->mbox_mem_pool); 4621 if (phba->sli_rev == LPFC_SLI_REV4) 4622 return be64_to_cpu(wwn); 4623 else 4624 return rol64(wwn, 32); 4625 } 4626 4627 static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba) 4628 { 4629 if (phba->sli_rev == LPFC_SLI_REV4) 4630 if (phba->cfg_xpsgl && !phba->nvmet_support) 4631 return LPFC_MAX_SG_TABLESIZE; 4632 else 4633 return phba->cfg_scsi_seg_cnt; 4634 else 4635 return phba->cfg_sg_seg_cnt; 4636 } 4637 4638 /** 4639 * lpfc_vmid_res_alloc - Allocates resources for VMID 4640 * @phba: pointer to lpfc hba data structure. 4641 * @vport: pointer to vport data structure 4642 * 4643 * This routine allocated the resources needed for the VMID. 
4644 * 4645 * Return codes 4646 * 0 on Success 4647 * Non-0 on Failure 4648 */ 4649 static int 4650 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) 4651 { 4652 /* VMID feature is supported only on SLI4 */ 4653 if (phba->sli_rev == LPFC_SLI_REV3) { 4654 phba->cfg_vmid_app_header = 0; 4655 phba->cfg_vmid_priority_tagging = 0; 4656 } 4657 4658 if (lpfc_is_vmid_enabled(phba)) { 4659 vport->vmid = 4660 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), 4661 GFP_KERNEL); 4662 if (!vport->vmid) 4663 return -ENOMEM; 4664 4665 rwlock_init(&vport->vmid_lock); 4666 4667 /* Set the VMID parameters for the vport */ 4668 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; 4669 vport->vmid_inactivity_timeout = 4670 phba->cfg_vmid_inactivity_timeout; 4671 vport->max_vmid = phba->cfg_max_vmid; 4672 vport->cur_vmid_cnt = 0; 4673 4674 vport->vmid_priority_range = bitmap_zalloc 4675 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); 4676 4677 if (!vport->vmid_priority_range) { 4678 kfree(vport->vmid); 4679 return -ENOMEM; 4680 } 4681 4682 hash_init(vport->hash_table); 4683 } 4684 return 0; 4685 } 4686 4687 /** 4688 * lpfc_create_port - Create an FC port 4689 * @phba: pointer to lpfc hba data structure. 4690 * @instance: a unique integer ID to this FC port. 4691 * @dev: pointer to the device data structure. 4692 * 4693 * This routine creates a FC port for the upper layer protocol. The FC port 4694 * can be created on top of either a physical port or a virtual port provided 4695 * by the HBA. This routine also allocates a SCSI host data structure (shost) 4696 * and associates the FC port created before adding the shost into the SCSI 4697 * layer. 4698 * 4699 * Return codes 4700 * @vport - pointer to the virtual N_Port data structure. 4701 * NULL - port create failed. 
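 *
 * Usage sketch (editor's illustration, not a verbatim call site): the
 * physical port is created against the PCI device, while NPIV ports pass
 * their own device node:
 *
 *	vport = lpfc_create_port(phba, lpfc_get_instance(), &phba->pcidev->dev);
 *	if (!vport)
 *		goto out_error;		(hypothetical caller's error handling)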
4702 **/ 4703 struct lpfc_vport * 4704 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 4705 { 4706 struct lpfc_vport *vport; 4707 struct Scsi_Host *shost = NULL; 4708 struct scsi_host_template *template; 4709 int error = 0; 4710 int i; 4711 uint64_t wwn; 4712 bool use_no_reset_hba = false; 4713 int rc; 4714 4715 if (lpfc_no_hba_reset_cnt) { 4716 if (phba->sli_rev < LPFC_SLI_REV4 && 4717 dev == &phba->pcidev->dev) { 4718 /* Reset the port first */ 4719 lpfc_sli_brdrestart(phba); 4720 rc = lpfc_sli_chipset_init(phba); 4721 if (rc) 4722 return NULL; 4723 } 4724 wwn = lpfc_get_wwpn(phba); 4725 } 4726 4727 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) { 4728 if (wwn == lpfc_no_hba_reset[i]) { 4729 lpfc_printf_log(phba, KERN_ERR, 4730 LOG_TRACE_EVENT, 4731 "6020 Setting use_no_reset port=%llx\n", 4732 wwn); 4733 use_no_reset_hba = true; 4734 break; 4735 } 4736 } 4737 4738 /* Seed template for SCSI host registration */ 4739 if (dev == &phba->pcidev->dev) { 4740 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 4741 /* Seed physical port template */ 4742 template = &lpfc_template; 4743 4744 if (use_no_reset_hba) 4745 /* template is for a no reset SCSI Host */ 4746 template->eh_host_reset_handler = NULL; 4747 4748 /* Seed updated value of sg_tablesize */ 4749 template->sg_tablesize = lpfc_get_sg_tablesize(phba); 4750 } else { 4751 /* NVMET is for physical port only */ 4752 template = &lpfc_template_nvme; 4753 } 4754 } else { 4755 /* Seed vport template */ 4756 template = &lpfc_vport_template; 4757 4758 /* Seed updated value of sg_tablesize */ 4759 template->sg_tablesize = lpfc_get_sg_tablesize(phba); 4760 } 4761 4762 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport)); 4763 if (!shost) 4764 goto out; 4765 4766 vport = (struct lpfc_vport *) shost->hostdata; 4767 vport->phba = phba; 4768 set_bit(FC_LOADING, &vport->load_flag); 4769 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 4770 vport->fc_rscn_flush = 0; 4771 atomic_set(&vport->fc_plogi_cnt, 0); 4772 atomic_set(&vport->fc_adisc_cnt, 0); 4773 atomic_set(&vport->fc_reglogin_cnt, 0); 4774 atomic_set(&vport->fc_prli_cnt, 0); 4775 atomic_set(&vport->fc_unmap_cnt, 0); 4776 atomic_set(&vport->fc_map_cnt, 0); 4777 atomic_set(&vport->fc_npr_cnt, 0); 4778 atomic_set(&vport->fc_unused_cnt, 0); 4779 lpfc_get_vport_cfgparam(vport); 4780 4781 /* Adjust value in vport */ 4782 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; 4783 4784 shost->unique_id = instance; 4785 shost->max_id = LPFC_MAX_TARGET; 4786 shost->max_lun = vport->cfg_max_luns; 4787 shost->this_id = -1; 4788 shost->max_cmd_len = 16; 4789 4790 if (phba->sli_rev == LPFC_SLI_REV4) { 4791 if (!phba->cfg_fcp_mq_threshold || 4792 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) 4793 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; 4794 4795 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), 4796 phba->cfg_fcp_mq_threshold); 4797 4798 shost->dma_boundary = 4799 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4800 } else 4801 /* SLI-3 has a limited number of hardware queues (3), 4802 * thus there is only one for FCP processing. 4803 */ 4804 shost->nr_hw_queues = 1; 4805 4806 /* 4807 * Set initial can_queue value since 0 is no longer supported and 4808 * scsi_add_host will fail. This will be adjusted later based on the 4809 * max xri value determined in hba setup. 
4810 */ 4811 shost->can_queue = phba->cfg_hba_queue_depth - 10; 4812 if (dev != &phba->pcidev->dev) { 4813 shost->transportt = lpfc_vport_transport_template; 4814 vport->port_type = LPFC_NPIV_PORT; 4815 } else { 4816 shost->transportt = lpfc_transport_template; 4817 vport->port_type = LPFC_PHYSICAL_PORT; 4818 } 4819 4820 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 4821 "9081 CreatePort TMPLATE type %x TBLsize %d " 4822 "SEGcnt %d/%d\n", 4823 vport->port_type, shost->sg_tablesize, 4824 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); 4825 4826 /* Allocate the resources for VMID */ 4827 rc = lpfc_vmid_res_alloc(phba, vport); 4828 4829 if (rc) 4830 goto out_put_shost; 4831 4832 /* Initialize all internally managed lists. */ 4833 INIT_LIST_HEAD(&vport->fc_nodes); 4834 spin_lock_init(&vport->fc_nodes_list_lock); 4835 INIT_LIST_HEAD(&vport->rcv_buffer_list); 4836 spin_lock_init(&vport->work_port_lock); 4837 4838 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 4839 4840 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 4841 4842 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 4843 4844 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 4845 lpfc_setup_bg(phba, shost); 4846 4847 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 4848 if (error) 4849 goto out_free_vmid; 4850 4851 spin_lock_irq(&phba->port_list_lock); 4852 list_add_tail(&vport->listentry, &phba->port_list); 4853 spin_unlock_irq(&phba->port_list_lock); 4854 return vport; 4855 4856 out_free_vmid: 4857 kfree(vport->vmid); 4858 bitmap_free(vport->vmid_priority_range); 4859 out_put_shost: 4860 scsi_host_put(shost); 4861 out: 4862 return NULL; 4863 } 4864 4865 /** 4866 * destroy_port - destroy an FC port 4867 * @vport: pointer to an lpfc virtual N_Port data structure. 4868 * 4869 * This routine destroys a FC port from the upper layer protocol. All the 4870 * resources associated with the port are released. 4871 **/ 4872 void 4873 destroy_port(struct lpfc_vport *vport) 4874 { 4875 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4876 struct lpfc_hba *phba = vport->phba; 4877 4878 lpfc_debugfs_terminate(vport); 4879 fc_remove_host(shost); 4880 scsi_remove_host(shost); 4881 4882 spin_lock_irq(&phba->port_list_lock); 4883 list_del_init(&vport->listentry); 4884 spin_unlock_irq(&phba->port_list_lock); 4885 4886 lpfc_cleanup(vport); 4887 return; 4888 } 4889 4890 /** 4891 * lpfc_get_instance - Get a unique integer ID 4892 * 4893 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 4894 * uses the kernel idr facility to perform the task. 4895 * 4896 * Return codes: 4897 * instance - a unique integer ID allocated as the new instance. 4898 * -1 - lpfc get instance failed. 4899 **/ 4900 int 4901 lpfc_get_instance(void) 4902 { 4903 int ret; 4904 4905 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 4906 return ret < 0 ? -1 : ret; 4907 } 4908 4909 /** 4910 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 4911 * @shost: pointer to SCSI host data structure. 4912 * @time: elapsed time of the scan in jiffies. 4913 * 4914 * This routine is called by the SCSI layer with a SCSI host to determine 4915 * whether the scan host is finished. 4916 * 4917 * Note: there is no scan_start function as adapter initialization will have 4918 * asynchronously kicked off the link initialization. 4919 * 4920 * Return codes 4921 * 0 - SCSI host scan is not over yet. 4922 * 1 - SCSI host scan is over. 
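 *
 * This callback is expected to be wired into the driver's scsi_host_template,
 * e.g. (sketch, assuming the template seeded by lpfc_create_port()):
 *
 *	.scan_finished = lpfc_scan_finished,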
4923 **/ 4924 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 4925 { 4926 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4927 struct lpfc_hba *phba = vport->phba; 4928 int stat = 0; 4929 4930 spin_lock_irq(shost->host_lock); 4931 4932 if (test_bit(FC_UNLOADING, &vport->load_flag)) { 4933 stat = 1; 4934 goto finished; 4935 } 4936 if (time >= msecs_to_jiffies(30 * 1000)) { 4937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4938 "0461 Scanning longer than 30 " 4939 "seconds. Continuing initialization\n"); 4940 stat = 1; 4941 goto finished; 4942 } 4943 if (time >= msecs_to_jiffies(15 * 1000) && 4944 phba->link_state <= LPFC_LINK_DOWN) { 4945 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4946 "0465 Link down longer than 15 " 4947 "seconds. Continuing initialization\n"); 4948 stat = 1; 4949 goto finished; 4950 } 4951 4952 if (vport->port_state != LPFC_VPORT_READY) 4953 goto finished; 4954 if (vport->num_disc_nodes || vport->fc_prli_sent) 4955 goto finished; 4956 if (!atomic_read(&vport->fc_map_cnt) && 4957 time < msecs_to_jiffies(2 * 1000)) 4958 goto finished; 4959 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 4960 goto finished; 4961 4962 stat = 1; 4963 4964 finished: 4965 spin_unlock_irq(shost->host_lock); 4966 return stat; 4967 } 4968 4969 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) 4970 { 4971 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4972 struct lpfc_hba *phba = vport->phba; 4973 4974 fc_host_supported_speeds(shost) = 0; 4975 /* 4976 * Avoid reporting supported link speed for FCoE as it can't be 4977 * controlled via FCoE. 4978 */ 4979 if (phba->hba_flag & HBA_FCOE_MODE) 4980 return; 4981 4982 if (phba->lmt & LMT_256Gb) 4983 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; 4984 if (phba->lmt & LMT_128Gb) 4985 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; 4986 if (phba->lmt & LMT_64Gb) 4987 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT; 4988 if (phba->lmt & LMT_32Gb) 4989 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 4990 if (phba->lmt & LMT_16Gb) 4991 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 4992 if (phba->lmt & LMT_10Gb) 4993 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 4994 if (phba->lmt & LMT_8Gb) 4995 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 4996 if (phba->lmt & LMT_4Gb) 4997 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 4998 if (phba->lmt & LMT_2Gb) 4999 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 5000 if (phba->lmt & LMT_1Gb) 5001 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 5002 } 5003 5004 /** 5005 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 5006 * @shost: pointer to SCSI host data structure. 5007 * 5008 * This routine initializes a given SCSI host attributes on a FC port. The 5009 * SCSI host can be either on top of a physical port or a virtual port. 5010 **/ 5011 void lpfc_host_attrib_init(struct Scsi_Host *shost) 5012 { 5013 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5014 struct lpfc_hba *phba = vport->phba; 5015 /* 5016 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
5017 */ 5018 5019 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 5020 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 5021 fc_host_supported_classes(shost) = FC_COS_CLASS3; 5022 5023 memset(fc_host_supported_fc4s(shost), 0, 5024 sizeof(fc_host_supported_fc4s(shost))); 5025 fc_host_supported_fc4s(shost)[2] = 1; 5026 fc_host_supported_fc4s(shost)[7] = 1; 5027 5028 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 5029 sizeof fc_host_symbolic_name(shost)); 5030 5031 lpfc_host_supported_speeds_set(shost); 5032 5033 fc_host_maxframe_size(shost) = 5034 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 5035 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 5036 5037 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 5038 5039 /* This value is also unchanging */ 5040 memset(fc_host_active_fc4s(shost), 0, 5041 sizeof(fc_host_active_fc4s(shost))); 5042 fc_host_active_fc4s(shost)[2] = 1; 5043 fc_host_active_fc4s(shost)[7] = 1; 5044 5045 fc_host_max_npiv_vports(shost) = phba->max_vpi; 5046 clear_bit(FC_LOADING, &vport->load_flag); 5047 } 5048 5049 /** 5050 * lpfc_stop_port_s3 - Stop SLI3 device port 5051 * @phba: pointer to lpfc hba data structure. 5052 * 5053 * This routine is invoked to stop an SLI3 device port, it stops the device 5054 * from generating interrupts and stops the device driver's timers for the 5055 * device. 5056 **/ 5057 static void 5058 lpfc_stop_port_s3(struct lpfc_hba *phba) 5059 { 5060 /* Clear all interrupt enable conditions */ 5061 writel(0, phba->HCregaddr); 5062 readl(phba->HCregaddr); /* flush */ 5063 /* Clear all pending interrupts */ 5064 writel(0xffffffff, phba->HAregaddr); 5065 readl(phba->HAregaddr); /* flush */ 5066 5067 /* Reset some HBA SLI setup states */ 5068 lpfc_stop_hba_timers(phba); 5069 phba->pport->work_port_events = 0; 5070 } 5071 5072 /** 5073 * lpfc_stop_port_s4 - Stop SLI4 device port 5074 * @phba: pointer to lpfc hba data structure. 5075 * 5076 * This routine is invoked to stop an SLI4 device port, it stops the device 5077 * from generating interrupts and stops the device driver's timers for the 5078 * device. 5079 **/ 5080 static void 5081 lpfc_stop_port_s4(struct lpfc_hba *phba) 5082 { 5083 /* Reset some HBA SLI4 setup states */ 5084 lpfc_stop_hba_timers(phba); 5085 if (phba->pport) 5086 phba->pport->work_port_events = 0; 5087 phba->sli4_hba.intr_enable = 0; 5088 } 5089 5090 /** 5091 * lpfc_stop_port - Wrapper function for stopping hba port 5092 * @phba: Pointer to HBA context object. 5093 * 5094 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 5095 * the API jump table function pointer from the lpfc_hba struct. 5096 **/ 5097 void 5098 lpfc_stop_port(struct lpfc_hba *phba) 5099 { 5100 phba->lpfc_stop_port(phba); 5101 5102 if (phba->wq) 5103 flush_workqueue(phba->wq); 5104 } 5105 5106 /** 5107 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 5108 * @phba: Pointer to hba for which this call is being executed. 5109 * 5110 * This routine starts the timer waiting for the FCF rediscovery to complete. 
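 *
 * The wait period is LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds; expiry is
 * handled by lpfc_sli4_fcf_redisc_wait_tmo() below. Sketch of the arming
 * performed here:
 *
 *	mod_timer(&phba->fcf.redisc_wait,
 *		  jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));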
5111 **/ 5112 void 5113 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 5114 { 5115 unsigned long fcf_redisc_wait_tmo = 5116 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 5117 /* Start fcf rediscovery wait period timer */ 5118 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 5119 spin_lock_irq(&phba->hbalock); 5120 /* Allow action to new fcf asynchronous event */ 5121 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 5122 /* Mark the FCF rediscovery pending state */ 5123 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 5124 spin_unlock_irq(&phba->hbalock); 5125 } 5126 5127 /** 5128 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 5129 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 5130 * 5131 * This routine is invoked when waiting for FCF table rediscover has been 5132 * timed out. If new FCF record(s) has (have) been discovered during the 5133 * wait period, a new FCF event shall be added to the FCOE async event 5134 * list, and then worker thread shall be waked up for processing from the 5135 * worker thread context. 5136 **/ 5137 static void 5138 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 5139 { 5140 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 5141 5142 /* Don't send FCF rediscovery event if timer cancelled */ 5143 spin_lock_irq(&phba->hbalock); 5144 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 5145 spin_unlock_irq(&phba->hbalock); 5146 return; 5147 } 5148 /* Clear FCF rediscovery timer pending flag */ 5149 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 5150 /* FCF rediscovery event to worker thread */ 5151 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 5152 spin_unlock_irq(&phba->hbalock); 5153 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 5154 "2776 FCF rediscover quiescent timer expired\n"); 5155 /* wake up worker thread */ 5156 lpfc_worker_wake_up(phba); 5157 } 5158 5159 /** 5160 * lpfc_vmid_poll - VMID timeout detection 5161 * @t: Timer context used to obtain the pointer to lpfc hba data structure. 5162 * 5163 * This routine is invoked when there is no I/O on by a VM for the specified 5164 * amount of time. When this situation is detected, the VMID has to be 5165 * deregistered from the switch and all the local resources freed. The VMID 5166 * will be reassigned to the VM once the I/O begins. 5167 **/ 5168 static void 5169 lpfc_vmid_poll(struct timer_list *t) 5170 { 5171 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll); 5172 u32 wake_up = 0; 5173 5174 /* check if there is a need to issue QFPA */ 5175 if (phba->pport->vmid_priority_tagging) { 5176 wake_up = 1; 5177 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; 5178 } 5179 5180 /* Is the vmid inactivity timer enabled */ 5181 if (phba->pport->vmid_inactivity_timeout || 5182 test_bit(FC_DEREGISTER_ALL_APP_ID, &phba->pport->load_flag)) { 5183 wake_up = 1; 5184 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; 5185 } 5186 5187 if (wake_up) 5188 lpfc_worker_wake_up(phba); 5189 5190 /* restart the timer for the next iteration */ 5191 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * 5192 LPFC_VMID_TIMER)); 5193 } 5194 5195 /** 5196 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 5197 * @phba: pointer to lpfc hba data structure. 5198 * @acqe_link: pointer to the async link completion queue entry. 5199 * 5200 * This routine is to parse the SLI4 link-attention link fault code. 
5201 **/ 5202 static void 5203 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 5204 struct lpfc_acqe_link *acqe_link) 5205 { 5206 switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) { 5207 case LPFC_FC_LA_TYPE_LINK_DOWN: 5208 case LPFC_FC_LA_TYPE_TRUNKING_EVENT: 5209 case LPFC_FC_LA_TYPE_ACTIVATE_FAIL: 5210 case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT: 5211 break; 5212 default: 5213 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 5214 case LPFC_ASYNC_LINK_FAULT_NONE: 5215 case LPFC_ASYNC_LINK_FAULT_LOCAL: 5216 case LPFC_ASYNC_LINK_FAULT_REMOTE: 5217 case LPFC_ASYNC_LINK_FAULT_LR_LRR: 5218 break; 5219 default: 5220 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5221 "0398 Unknown link fault code: x%x\n", 5222 bf_get(lpfc_acqe_link_fault, acqe_link)); 5223 break; 5224 } 5225 break; 5226 } 5227 } 5228 5229 /** 5230 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 5231 * @phba: pointer to lpfc hba data structure. 5232 * @acqe_link: pointer to the async link completion queue entry. 5233 * 5234 * This routine is to parse the SLI4 link attention type and translate it 5235 * into the base driver's link attention type coding. 5236 * 5237 * Return: Link attention type in terms of base driver's coding. 5238 **/ 5239 static uint8_t 5240 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 5241 struct lpfc_acqe_link *acqe_link) 5242 { 5243 uint8_t att_type; 5244 5245 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 5246 case LPFC_ASYNC_LINK_STATUS_DOWN: 5247 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 5248 att_type = LPFC_ATT_LINK_DOWN; 5249 break; 5250 case LPFC_ASYNC_LINK_STATUS_UP: 5251 /* Ignore physical link up events - wait for logical link up */ 5252 att_type = LPFC_ATT_RESERVED; 5253 break; 5254 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 5255 att_type = LPFC_ATT_LINK_UP; 5256 break; 5257 default: 5258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5259 "0399 Invalid link attention type: x%x\n", 5260 bf_get(lpfc_acqe_link_status, acqe_link)); 5261 att_type = LPFC_ATT_RESERVED; 5262 break; 5263 } 5264 return att_type; 5265 } 5266 5267 /** 5268 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 5269 * @phba: pointer to lpfc hba data structure. 5270 * 5271 * This routine is to get an SLI3 FC port's link speed in Mbps. 5272 * 5273 * Return: link speed in terms of Mbps. 5274 **/ 5275 uint32_t 5276 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 5277 { 5278 uint32_t link_speed; 5279 5280 if (!lpfc_is_link_up(phba)) 5281 return 0; 5282 5283 if (phba->sli_rev <= LPFC_SLI_REV3) { 5284 switch (phba->fc_linkspeed) { 5285 case LPFC_LINK_SPEED_1GHZ: 5286 link_speed = 1000; 5287 break; 5288 case LPFC_LINK_SPEED_2GHZ: 5289 link_speed = 2000; 5290 break; 5291 case LPFC_LINK_SPEED_4GHZ: 5292 link_speed = 4000; 5293 break; 5294 case LPFC_LINK_SPEED_8GHZ: 5295 link_speed = 8000; 5296 break; 5297 case LPFC_LINK_SPEED_10GHZ: 5298 link_speed = 10000; 5299 break; 5300 case LPFC_LINK_SPEED_16GHZ: 5301 link_speed = 16000; 5302 break; 5303 default: 5304 link_speed = 0; 5305 } 5306 } else { 5307 if (phba->sli4_hba.link_state.logical_speed) 5308 link_speed = 5309 phba->sli4_hba.link_state.logical_speed; 5310 else 5311 link_speed = phba->sli4_hba.link_state.speed; 5312 } 5313 return link_speed; 5314 } 5315 5316 /** 5317 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 5318 * @phba: pointer to lpfc hba data structure. 5319 * @evt_code: asynchronous event code. 5320 * @speed_code: asynchronous event link speed code. 
5321 * 5322 * This routine is to parse the giving SLI4 async event link speed code into 5323 * value of Mbps for the link speed. 5324 * 5325 * Return: link speed in terms of Mbps. 5326 **/ 5327 static uint32_t 5328 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 5329 uint8_t speed_code) 5330 { 5331 uint32_t port_speed; 5332 5333 switch (evt_code) { 5334 case LPFC_TRAILER_CODE_LINK: 5335 switch (speed_code) { 5336 case LPFC_ASYNC_LINK_SPEED_ZERO: 5337 port_speed = 0; 5338 break; 5339 case LPFC_ASYNC_LINK_SPEED_10MBPS: 5340 port_speed = 10; 5341 break; 5342 case LPFC_ASYNC_LINK_SPEED_100MBPS: 5343 port_speed = 100; 5344 break; 5345 case LPFC_ASYNC_LINK_SPEED_1GBPS: 5346 port_speed = 1000; 5347 break; 5348 case LPFC_ASYNC_LINK_SPEED_10GBPS: 5349 port_speed = 10000; 5350 break; 5351 case LPFC_ASYNC_LINK_SPEED_20GBPS: 5352 port_speed = 20000; 5353 break; 5354 case LPFC_ASYNC_LINK_SPEED_25GBPS: 5355 port_speed = 25000; 5356 break; 5357 case LPFC_ASYNC_LINK_SPEED_40GBPS: 5358 port_speed = 40000; 5359 break; 5360 case LPFC_ASYNC_LINK_SPEED_100GBPS: 5361 port_speed = 100000; 5362 break; 5363 default: 5364 port_speed = 0; 5365 } 5366 break; 5367 case LPFC_TRAILER_CODE_FC: 5368 switch (speed_code) { 5369 case LPFC_FC_LA_SPEED_UNKNOWN: 5370 port_speed = 0; 5371 break; 5372 case LPFC_FC_LA_SPEED_1G: 5373 port_speed = 1000; 5374 break; 5375 case LPFC_FC_LA_SPEED_2G: 5376 port_speed = 2000; 5377 break; 5378 case LPFC_FC_LA_SPEED_4G: 5379 port_speed = 4000; 5380 break; 5381 case LPFC_FC_LA_SPEED_8G: 5382 port_speed = 8000; 5383 break; 5384 case LPFC_FC_LA_SPEED_10G: 5385 port_speed = 10000; 5386 break; 5387 case LPFC_FC_LA_SPEED_16G: 5388 port_speed = 16000; 5389 break; 5390 case LPFC_FC_LA_SPEED_32G: 5391 port_speed = 32000; 5392 break; 5393 case LPFC_FC_LA_SPEED_64G: 5394 port_speed = 64000; 5395 break; 5396 case LPFC_FC_LA_SPEED_128G: 5397 port_speed = 128000; 5398 break; 5399 case LPFC_FC_LA_SPEED_256G: 5400 port_speed = 256000; 5401 break; 5402 default: 5403 port_speed = 0; 5404 } 5405 break; 5406 default: 5407 port_speed = 0; 5408 } 5409 return port_speed; 5410 } 5411 5412 /** 5413 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 5414 * @phba: pointer to lpfc hba data structure. 5415 * @acqe_link: pointer to the async link completion queue entry. 5416 * 5417 * This routine is to handle the SLI4 asynchronous FCoE link event. 
5418 **/ 5419 static void 5420 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 5421 struct lpfc_acqe_link *acqe_link) 5422 { 5423 LPFC_MBOXQ_t *pmb; 5424 MAILBOX_t *mb; 5425 struct lpfc_mbx_read_top *la; 5426 uint8_t att_type; 5427 int rc; 5428 5429 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 5430 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 5431 return; 5432 phba->fcoe_eventtag = acqe_link->event_tag; 5433 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5434 if (!pmb) { 5435 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5436 "0395 The mboxq allocation failed\n"); 5437 return; 5438 } 5439 5440 rc = lpfc_mbox_rsrc_prep(phba, pmb); 5441 if (rc) { 5442 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5443 "0396 mailbox allocation failed\n"); 5444 goto out_free_pmb; 5445 } 5446 5447 /* Cleanup any outstanding ELS commands */ 5448 lpfc_els_flush_all_cmd(phba); 5449 5450 /* Block ELS IOCBs until we have done process link event */ 5451 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 5452 5453 /* Update link event statistics */ 5454 phba->sli.slistat.link_event++; 5455 5456 /* Create lpfc_handle_latt mailbox command from link ACQE */ 5457 lpfc_read_topology(phba, pmb, pmb->ctx_buf); 5458 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 5459 pmb->vport = phba->pport; 5460 5461 /* Keep the link status for extra SLI4 state machine reference */ 5462 phba->sli4_hba.link_state.speed = 5463 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 5464 bf_get(lpfc_acqe_link_speed, acqe_link)); 5465 phba->sli4_hba.link_state.duplex = 5466 bf_get(lpfc_acqe_link_duplex, acqe_link); 5467 phba->sli4_hba.link_state.status = 5468 bf_get(lpfc_acqe_link_status, acqe_link); 5469 phba->sli4_hba.link_state.type = 5470 bf_get(lpfc_acqe_link_type, acqe_link); 5471 phba->sli4_hba.link_state.number = 5472 bf_get(lpfc_acqe_link_number, acqe_link); 5473 phba->sli4_hba.link_state.fault = 5474 bf_get(lpfc_acqe_link_fault, acqe_link); 5475 phba->sli4_hba.link_state.logical_speed = 5476 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 5477 5478 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5479 "2900 Async FC/FCoE Link event - Speed:%dGBit " 5480 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 5481 "Logical speed:%dMbps Fault:%d\n", 5482 phba->sli4_hba.link_state.speed, 5483 phba->sli4_hba.link_state.topology, 5484 phba->sli4_hba.link_state.status, 5485 phba->sli4_hba.link_state.type, 5486 phba->sli4_hba.link_state.number, 5487 phba->sli4_hba.link_state.logical_speed, 5488 phba->sli4_hba.link_state.fault); 5489 /* 5490 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 5491 * topology info. Note: Optional for non FC-AL ports. 5492 */ 5493 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5494 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 5495 if (rc == MBX_NOT_FINISHED) 5496 goto out_free_pmb; 5497 return; 5498 } 5499 /* 5500 * For FCoE Mode: fill in all the topology information we need and call 5501 * the READ_TOPOLOGY completion routine to continue without actually 5502 * sending the READ_TOPOLOGY mailbox command to the port. 
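 * A successful completion status is faked, and the event tag, attention
 * type, and link speed from the ACQE are plugged into the mailbox's
 * READ_TOPOLOGY fields; the remaining topology fields are irrelevant for
 * FCoE and are defaulted.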
5503 */ 5504 /* Initialize completion status */ 5505 mb = &pmb->u.mb; 5506 mb->mbxStatus = MBX_SUCCESS; 5507 5508 /* Parse port fault information field */ 5509 lpfc_sli4_parse_latt_fault(phba, acqe_link); 5510 5511 /* Parse and translate link attention fields */ 5512 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 5513 la->eventTag = acqe_link->event_tag; 5514 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 5515 bf_set(lpfc_mbx_read_top_link_spd, la, 5516 (bf_get(lpfc_acqe_link_speed, acqe_link))); 5517 5518 /* Fake the following irrelevant fields */ 5519 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 5520 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 5521 bf_set(lpfc_mbx_read_top_il, la, 0); 5522 bf_set(lpfc_mbx_read_top_pb, la, 0); 5523 bf_set(lpfc_mbx_read_top_fa, la, 0); 5524 bf_set(lpfc_mbx_read_top_mm, la, 0); 5525 5526 /* Invoke the lpfc_handle_latt mailbox command callback function */ 5527 lpfc_mbx_cmpl_read_topology(phba, pmb); 5528 5529 return; 5530 5531 out_free_pmb: 5532 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 5533 } 5534 5535 /** 5536 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read 5537 * topology. 5538 * @phba: pointer to lpfc hba data structure. 5539 * @speed_code: asynchronous event link speed code. 5540 * 5541 * This routine is to parse the giving SLI4 async event link speed code into 5542 * value of Read topology link speed. 5543 * 5544 * Return: link speed in terms of Read topology. 5545 **/ 5546 static uint8_t 5547 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code) 5548 { 5549 uint8_t port_speed; 5550 5551 switch (speed_code) { 5552 case LPFC_FC_LA_SPEED_1G: 5553 port_speed = LPFC_LINK_SPEED_1GHZ; 5554 break; 5555 case LPFC_FC_LA_SPEED_2G: 5556 port_speed = LPFC_LINK_SPEED_2GHZ; 5557 break; 5558 case LPFC_FC_LA_SPEED_4G: 5559 port_speed = LPFC_LINK_SPEED_4GHZ; 5560 break; 5561 case LPFC_FC_LA_SPEED_8G: 5562 port_speed = LPFC_LINK_SPEED_8GHZ; 5563 break; 5564 case LPFC_FC_LA_SPEED_16G: 5565 port_speed = LPFC_LINK_SPEED_16GHZ; 5566 break; 5567 case LPFC_FC_LA_SPEED_32G: 5568 port_speed = LPFC_LINK_SPEED_32GHZ; 5569 break; 5570 case LPFC_FC_LA_SPEED_64G: 5571 port_speed = LPFC_LINK_SPEED_64GHZ; 5572 break; 5573 case LPFC_FC_LA_SPEED_128G: 5574 port_speed = LPFC_LINK_SPEED_128GHZ; 5575 break; 5576 case LPFC_FC_LA_SPEED_256G: 5577 port_speed = LPFC_LINK_SPEED_256GHZ; 5578 break; 5579 default: 5580 port_speed = 0; 5581 break; 5582 } 5583 5584 return port_speed; 5585 } 5586 5587 void 5588 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba) 5589 { 5590 if (!phba->rx_monitor) { 5591 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5592 "4411 Rx Monitor Info is empty.\n"); 5593 } else { 5594 lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0, 5595 LPFC_MAX_RXMONITOR_DUMP); 5596 } 5597 } 5598 5599 /** 5600 * lpfc_cgn_update_stat - Save data into congestion stats buffer 5601 * @phba: pointer to lpfc hba data structure. 5602 * @dtag: FPIN descriptor received 5603 * 5604 * Increment the FPIN received counter/time when it happens. 
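 * The matching timestamp in the congestion info buffer is refreshed and
 * the buffer CRC is recalculated after each update.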
5605 */ 5606 void 5607 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) 5608 { 5609 struct lpfc_cgn_info *cp; 5610 u32 value; 5611 5612 /* Make sure we have a congestion info buffer */ 5613 if (!phba->cgn_i) 5614 return; 5615 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5616 5617 /* Update congestion statistics */ 5618 switch (dtag) { 5619 case ELS_DTAG_LNK_INTEGRITY: 5620 le32_add_cpu(&cp->link_integ_notification, 1); 5621 lpfc_cgn_update_tstamp(phba, &cp->stat_lnk); 5622 break; 5623 case ELS_DTAG_DELIVERY: 5624 le32_add_cpu(&cp->delivery_notification, 1); 5625 lpfc_cgn_update_tstamp(phba, &cp->stat_delivery); 5626 break; 5627 case ELS_DTAG_PEER_CONGEST: 5628 le32_add_cpu(&cp->cgn_peer_notification, 1); 5629 lpfc_cgn_update_tstamp(phba, &cp->stat_peer); 5630 break; 5631 case ELS_DTAG_CONGESTION: 5632 le32_add_cpu(&cp->cgn_notification, 1); 5633 lpfc_cgn_update_tstamp(phba, &cp->stat_fpin); 5634 } 5635 if (phba->cgn_fpin_frequency && 5636 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5637 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5638 cp->cgn_stat_npm = value; 5639 } 5640 5641 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5642 LPFC_CGN_CRC32_SEED); 5643 cp->cgn_info_crc = cpu_to_le32(value); 5644 } 5645 5646 /** 5647 * lpfc_cgn_update_tstamp - Update cmf timestamp 5648 * @phba: pointer to lpfc hba data structure. 5649 * @ts: structure to write the timestamp to. 5650 */ 5651 void 5652 lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts) 5653 { 5654 struct timespec64 cur_time; 5655 struct tm tm_val; 5656 5657 ktime_get_real_ts64(&cur_time); 5658 time64_to_tm(cur_time.tv_sec, 0, &tm_val); 5659 5660 ts->month = tm_val.tm_mon + 1; 5661 ts->day = tm_val.tm_mday; 5662 ts->year = tm_val.tm_year - 100; 5663 ts->hour = tm_val.tm_hour; 5664 ts->minute = tm_val.tm_min; 5665 ts->second = tm_val.tm_sec; 5666 5667 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5668 "2646 Updated CMF timestamp : " 5669 "%u/%u/%u %u:%u:%u\n", 5670 ts->day, ts->month, 5671 ts->year, ts->hour, 5672 ts->minute, ts->second); 5673 } 5674 5675 /** 5676 * lpfc_cmf_stats_timer - Save data into registered congestion buffer 5677 * @timer: Timer cookie to access lpfc private data 5678 * 5679 * Save the congestion event data every minute. 5680 * On the hour collapse all the minute data into hour data. Every day 5681 * collapse all the hour data into daily data. Separate driver 5682 * and fabrc congestion event counters that will be saved out 5683 * to the registered congestion buffer every minute. 
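 * Runs from an hrtimer and re-arms itself for another LPFC_SEC_MIN
 * seconds unless no congestion info buffer is registered.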
5684 */ 5685 static enum hrtimer_restart 5686 lpfc_cmf_stats_timer(struct hrtimer *timer) 5687 { 5688 struct lpfc_hba *phba; 5689 struct lpfc_cgn_info *cp; 5690 uint32_t i, index; 5691 uint16_t value, mvalue; 5692 uint64_t bps; 5693 uint32_t mbps; 5694 uint32_t dvalue, wvalue, lvalue, avalue; 5695 uint64_t latsum; 5696 __le16 *ptr; 5697 __le32 *lptr; 5698 __le16 *mptr; 5699 5700 phba = container_of(timer, struct lpfc_hba, cmf_stats_timer); 5701 /* Make sure we have a congestion info buffer */ 5702 if (!phba->cgn_i) 5703 return HRTIMER_NORESTART; 5704 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 5705 5706 phba->cgn_evt_timestamp = jiffies + 5707 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 5708 phba->cgn_evt_minute++; 5709 5710 /* We should get to this point in the routine on 1 minute intervals */ 5711 lpfc_cgn_update_tstamp(phba, &cp->base_time); 5712 5713 if (phba->cgn_fpin_frequency && 5714 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5715 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5716 cp->cgn_stat_npm = value; 5717 } 5718 5719 /* Read and clear the latency counters for this minute */ 5720 lvalue = atomic_read(&phba->cgn_latency_evt_cnt); 5721 latsum = atomic64_read(&phba->cgn_latency_evt); 5722 atomic_set(&phba->cgn_latency_evt_cnt, 0); 5723 atomic64_set(&phba->cgn_latency_evt, 0); 5724 5725 /* We need to store MB/sec bandwidth in the congestion information. 5726 * block_cnt is count of 512 byte blocks for the entire minute, 5727 * bps will get bytes per sec before finally converting to MB/sec. 5728 */ 5729 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512; 5730 phba->rx_block_cnt = 0; 5731 mvalue = bps / (1024 * 1024); /* convert to MB/sec */ 5732 5733 /* Every minute */ 5734 /* cgn parameters */ 5735 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 5736 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 5737 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 5738 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 5739 5740 /* Fill in default LUN qdepth */ 5741 value = (uint16_t)(phba->pport->cfg_lun_queue_depth); 5742 cp->cgn_lunq = cpu_to_le16(value); 5743 5744 /* Record congestion buffer info - every minute 5745 * cgn_driver_evt_cnt (Driver events) 5746 * cgn_fabric_warn_cnt (Congestion Warnings) 5747 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency) 5748 * cgn_fabric_alarm_cnt (Congestion Alarms) 5749 */ 5750 index = ++cp->cgn_index_minute; 5751 if (cp->cgn_index_minute == LPFC_MIN_HOUR) { 5752 cp->cgn_index_minute = 0; 5753 index = 0; 5754 } 5755 5756 /* Get the number of driver events in this sample and reset counter */ 5757 dvalue = atomic_read(&phba->cgn_driver_evt_cnt); 5758 atomic_set(&phba->cgn_driver_evt_cnt, 0); 5759 5760 /* Get the number of warning events - FPIN and Signal for this minute */ 5761 wvalue = 0; 5762 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) || 5763 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 5764 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5765 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt); 5766 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 5767 5768 /* Get the number of alarm events - FPIN and Signal for this minute */ 5769 avalue = 0; 5770 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) || 5771 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) 5772 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt); 5773 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 5774 5775 /* Collect the driver, warning, alarm and latency counts for this 5776 * minute into the driver congestion buffer. 
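 * Latency is stored as the average per-IO latency for the minute
 * (latsum / lvalue); bandwidth is stored in whole MB/sec.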
5777 */
5778 ptr = &cp->cgn_drvr_min[index];
5779 value = (uint16_t)dvalue;
5780 *ptr = cpu_to_le16(value);
5781
5782 ptr = &cp->cgn_warn_min[index];
5783 value = (uint16_t)wvalue;
5784 *ptr = cpu_to_le16(value);
5785
5786 ptr = &cp->cgn_alarm_min[index];
5787 value = (uint16_t)avalue;
5788 *ptr = cpu_to_le16(value);
5789
5790 lptr = &cp->cgn_latency_min[index];
5791 if (lvalue) {
5792 lvalue = (uint32_t)div_u64(latsum, lvalue);
5793 *lptr = cpu_to_le32(lvalue);
5794 } else {
5795 *lptr = 0;
5796 }
5797
5798 /* Collect the bandwidth value into the driver's congestion buffer. */
5799 mptr = &cp->cgn_bw_min[index];
5800 *mptr = cpu_to_le16(mvalue);
5801
5802 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5803 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5804 index, dvalue, wvalue, *lptr, mvalue, avalue);
5805
5806 /* Every hour */
5807 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5808 /* Record congestion buffer info - every hour
5809 * Collapse all minutes into an hour
5810 */
5811 index = ++cp->cgn_index_hour;
5812 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5813 cp->cgn_index_hour = 0;
5814 index = 0;
5815 }
5816
5817 dvalue = 0;
5818 wvalue = 0;
5819 lvalue = 0;
5820 avalue = 0;
5821 mvalue = 0;
5822 mbps = 0;
5823 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5824 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5825 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5826 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5827 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5828 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5829 }
5830 if (lvalue) /* Avg of latency averages */
5831 lvalue /= LPFC_MIN_HOUR;
5832 if (mbps) /* Avg of Bandwidth averages */
5833 mvalue = mbps / LPFC_MIN_HOUR;
5834
5835 lptr = &cp->cgn_drvr_hr[index];
5836 *lptr = cpu_to_le32(dvalue);
5837 lptr = &cp->cgn_warn_hr[index];
5838 *lptr = cpu_to_le32(wvalue);
5839 lptr = &cp->cgn_latency_hr[index];
5840 *lptr = cpu_to_le32(lvalue);
5841 mptr = &cp->cgn_bw_hr[index];
5842 *mptr = cpu_to_le16(mvalue);
5843 lptr = &cp->cgn_alarm_hr[index];
5844 *lptr = cpu_to_le32(avalue);
5845
5846 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5847 "2419 Congestion Info - hour "
5848 "(%d): %d %d %d %d %d\n",
5849 index, dvalue, wvalue, lvalue, mvalue, avalue);
5850 }
5851
5852 /* Every day */
5853 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5854 /* Record congestion buffer info - every day
5855 * Collapse all hours into a day. Rotate days
5856 * after LPFC_MAX_CGN_DAYS.
5857 */ 5858 index = ++cp->cgn_index_day; 5859 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) { 5860 cp->cgn_index_day = 0; 5861 index = 0; 5862 } 5863 5864 dvalue = 0; 5865 wvalue = 0; 5866 lvalue = 0; 5867 mvalue = 0; 5868 mbps = 0; 5869 avalue = 0; 5870 for (i = 0; i < LPFC_HOUR_DAY; i++) { 5871 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5872 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5873 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5874 mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5875 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5876 } 5877 if (lvalue) /* Avg of latency averages */ 5878 lvalue /= LPFC_HOUR_DAY; 5879 if (mbps) /* Avg of Bandwidth averages */ 5880 mvalue = mbps / LPFC_HOUR_DAY; 5881 5882 lptr = &cp->cgn_drvr_day[index]; 5883 *lptr = cpu_to_le32(dvalue); 5884 lptr = &cp->cgn_warn_day[index]; 5885 *lptr = cpu_to_le32(wvalue); 5886 lptr = &cp->cgn_latency_day[index]; 5887 *lptr = cpu_to_le32(lvalue); 5888 mptr = &cp->cgn_bw_day[index]; 5889 *mptr = cpu_to_le16(mvalue); 5890 lptr = &cp->cgn_alarm_day[index]; 5891 *lptr = cpu_to_le32(avalue); 5892 5893 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5894 "2420 Congestion Info - daily (%d): " 5895 "%d %d %d %d %d\n", 5896 index, dvalue, wvalue, lvalue, mvalue, avalue); 5897 } 5898 5899 /* Use the frequency found in the last rcv'ed FPIN */ 5900 value = phba->cgn_fpin_frequency; 5901 cp->cgn_warn_freq = cpu_to_le16(value); 5902 cp->cgn_alarm_freq = cpu_to_le16(value); 5903 5904 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5905 LPFC_CGN_CRC32_SEED); 5906 cp->cgn_info_crc = cpu_to_le32(lvalue); 5907 5908 hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC)); 5909 5910 return HRTIMER_RESTART; 5911 } 5912 5913 /** 5914 * lpfc_calc_cmf_latency - latency from start of rxate timer interval 5915 * @phba: The Hba for which this call is being executed. 5916 * 5917 * The routine calculates the latency from the beginning of the CMF timer 5918 * interval to the current point in time. It is called from IO completion 5919 * when we exceed our Bandwidth limitation for the time interval. 5920 */ 5921 uint32_t 5922 lpfc_calc_cmf_latency(struct lpfc_hba *phba) 5923 { 5924 struct timespec64 cmpl_time; 5925 uint32_t msec = 0; 5926 5927 ktime_get_real_ts64(&cmpl_time); 5928 5929 /* This routine works on a ms granularity so sec and usec are 5930 * converted accordingly. 5931 */ 5932 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) { 5933 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / 5934 NSEC_PER_MSEC; 5935 } else { 5936 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) { 5937 msec = (cmpl_time.tv_sec - 5938 phba->cmf_latency.tv_sec) * MSEC_PER_SEC; 5939 msec += ((cmpl_time.tv_nsec - 5940 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC); 5941 } else { 5942 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec - 5943 1) * MSEC_PER_SEC; 5944 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) + 5945 cmpl_time.tv_nsec) / NSEC_PER_MSEC); 5946 } 5947 } 5948 return msec; 5949 } 5950 5951 /** 5952 * lpfc_cmf_timer - This is the timer function for one congestion 5953 * rate interval. 
5954 * @timer: Pointer to the high resolution timer that expired 5955 */ 5956 static enum hrtimer_restart 5957 lpfc_cmf_timer(struct hrtimer *timer) 5958 { 5959 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba, 5960 cmf_timer); 5961 struct rx_info_entry entry; 5962 uint32_t io_cnt; 5963 uint32_t busy, max_read; 5964 uint64_t total, rcv, lat, mbpi, extra, cnt; 5965 int timer_interval = LPFC_CMF_INTERVAL; 5966 uint32_t ms; 5967 struct lpfc_cgn_stat *cgs; 5968 int cpu; 5969 5970 /* Only restart the timer if congestion mgmt is on */ 5971 if (phba->cmf_active_mode == LPFC_CFG_OFF || 5972 !phba->cmf_latency.tv_sec) { 5973 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 5974 "6224 CMF timer exit: %d %lld\n", 5975 phba->cmf_active_mode, 5976 (uint64_t)phba->cmf_latency.tv_sec); 5977 return HRTIMER_NORESTART; 5978 } 5979 5980 /* If pport is not ready yet, just exit and wait for 5981 * the next timer cycle to hit. 5982 */ 5983 if (!phba->pport) 5984 goto skip; 5985 5986 /* Do not block SCSI IO while in the timer routine since 5987 * total_bytes will be cleared 5988 */ 5989 atomic_set(&phba->cmf_stop_io, 1); 5990 5991 /* First we need to calculate the actual ms between 5992 * the last timer interrupt and this one. We ask for 5993 * LPFC_CMF_INTERVAL, however the actual time may 5994 * vary depending on system overhead. 5995 */ 5996 ms = lpfc_calc_cmf_latency(phba); 5997 5998 5999 /* Immediately after we calculate the time since the last 6000 * timer interrupt, set the start time for the next 6001 * interrupt 6002 */ 6003 ktime_get_real_ts64(&phba->cmf_latency); 6004 6005 phba->cmf_link_byte_count = 6006 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000); 6007 6008 /* Collect all the stats from the prior timer interval */ 6009 total = 0; 6010 io_cnt = 0; 6011 lat = 0; 6012 rcv = 0; 6013 for_each_present_cpu(cpu) { 6014 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 6015 total += atomic64_xchg(&cgs->total_bytes, 0); 6016 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0); 6017 lat += atomic64_xchg(&cgs->rx_latency, 0); 6018 rcv += atomic64_xchg(&cgs->rcv_bytes, 0); 6019 } 6020 6021 /* Before we issue another CMF_SYNC_WQE, retrieve the BW 6022 * returned from the last CMF_SYNC_WQE issued, from 6023 * cmf_last_sync_bw. This will be the target BW for 6024 * this next timer interval. 6025 */ 6026 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 6027 phba->link_state != LPFC_LINK_DOWN && 6028 phba->hba_flag & HBA_SETUP) { 6029 mbpi = phba->cmf_last_sync_bw; 6030 phba->cmf_last_sync_bw = 0; 6031 extra = 0; 6032 6033 /* Calculate any extra bytes needed to account for the 6034 * timer accuracy. If we are less than LPFC_CMF_INTERVAL 6035 * calculate the adjustment needed for total to reflect 6036 * a full LPFC_CMF_INTERVAL. 
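 * In effect this extrapolates the byte count: extra is roughly
 * total * (LPFC_CMF_INTERVAL - ms) / ms, computed with the integer
 * math below.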
6037 */ 6038 if (ms && ms < LPFC_CMF_INTERVAL) { 6039 cnt = div_u64(total, ms); /* bytes per ms */ 6040 cnt *= LPFC_CMF_INTERVAL; /* what total should be */ 6041 extra = cnt - total; 6042 } 6043 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); 6044 } else { 6045 /* For Monitor mode or link down we want mbpi 6046 * to be the full link speed 6047 */ 6048 mbpi = phba->cmf_link_byte_count; 6049 extra = 0; 6050 } 6051 phba->cmf_timer_cnt++; 6052 6053 if (io_cnt) { 6054 /* Update congestion info buffer latency in us */ 6055 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt); 6056 atomic64_add(lat, &phba->cgn_latency_evt); 6057 } 6058 busy = atomic_xchg(&phba->cmf_busy, 0); 6059 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0); 6060 6061 /* Calculate MBPI for the next timer interval */ 6062 if (mbpi) { 6063 if (mbpi > phba->cmf_link_byte_count || 6064 phba->cmf_active_mode == LPFC_CFG_MONITOR) 6065 mbpi = phba->cmf_link_byte_count; 6066 6067 /* Change max_bytes_per_interval to what the prior 6068 * CMF_SYNC_WQE cmpl indicated. 6069 */ 6070 if (mbpi != phba->cmf_max_bytes_per_interval) 6071 phba->cmf_max_bytes_per_interval = mbpi; 6072 } 6073 6074 /* Save rxmonitor information for debug */ 6075 if (phba->rx_monitor) { 6076 entry.total_bytes = total; 6077 entry.cmf_bytes = total + extra; 6078 entry.rcv_bytes = rcv; 6079 entry.cmf_busy = busy; 6080 entry.cmf_info = phba->cmf_active_info; 6081 if (io_cnt) { 6082 entry.avg_io_latency = div_u64(lat, io_cnt); 6083 entry.avg_io_size = div_u64(rcv, io_cnt); 6084 } else { 6085 entry.avg_io_latency = 0; 6086 entry.avg_io_size = 0; 6087 } 6088 entry.max_read_cnt = max_read; 6089 entry.io_cnt = io_cnt; 6090 entry.max_bytes_per_interval = mbpi; 6091 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 6092 entry.timer_utilization = phba->cmf_last_ts; 6093 else 6094 entry.timer_utilization = ms; 6095 entry.timer_interval = ms; 6096 phba->cmf_last_ts = 0; 6097 6098 lpfc_rx_monitor_record(phba->rx_monitor, &entry); 6099 } 6100 6101 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) { 6102 /* If Monitor mode, check if we are oversubscribed 6103 * against the full line rate. 6104 */ 6105 if (mbpi && total > mbpi) 6106 atomic_inc(&phba->cgn_driver_evt_cnt); 6107 } 6108 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ 6109 6110 /* Since total_bytes has already been zero'ed, its okay to unblock 6111 * after max_bytes_per_interval is setup. 6112 */ 6113 if (atomic_xchg(&phba->cmf_bw_wait, 0)) 6114 queue_work(phba->wq, &phba->unblock_request_work); 6115 6116 /* SCSI IO is now unblocked */ 6117 atomic_set(&phba->cmf_stop_io, 0); 6118 6119 skip: 6120 hrtimer_forward_now(timer, 6121 ktime_set(0, timer_interval * NSEC_PER_MSEC)); 6122 return HRTIMER_RESTART; 6123 } 6124 6125 #define trunk_link_status(__idx)\ 6126 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6127 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\ 6128 "Link up" : "Link down") : "NA" 6129 /* Did port __idx reported an error */ 6130 #define trunk_port_fault(__idx)\ 6131 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\ 6132 (port_fault & (1 << __idx) ? 
"YES" : "NO") : "NA" 6133 6134 static void 6135 lpfc_update_trunk_link_status(struct lpfc_hba *phba, 6136 struct lpfc_acqe_fc_la *acqe_fc) 6137 { 6138 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc); 6139 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc); 6140 u8 cnt = 0; 6141 6142 phba->sli4_hba.link_state.speed = 6143 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6144 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6145 6146 phba->sli4_hba.link_state.logical_speed = 6147 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6148 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */ 6149 phba->fc_linkspeed = 6150 lpfc_async_link_speed_to_read_top( 6151 phba, 6152 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6153 6154 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) { 6155 phba->trunk_link.link0.state = 6156 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc) 6157 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6158 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0; 6159 cnt++; 6160 } 6161 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) { 6162 phba->trunk_link.link1.state = 6163 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc) 6164 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6165 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0; 6166 cnt++; 6167 } 6168 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) { 6169 phba->trunk_link.link2.state = 6170 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc) 6171 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6172 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0; 6173 cnt++; 6174 } 6175 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) { 6176 phba->trunk_link.link3.state = 6177 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc) 6178 ? LPFC_LINK_UP : LPFC_LINK_DOWN; 6179 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0; 6180 cnt++; 6181 } 6182 6183 if (cnt) 6184 phba->trunk_link.phy_lnk_speed = 6185 phba->sli4_hba.link_state.logical_speed / (cnt * 1000); 6186 else 6187 phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN; 6188 6189 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6190 "2910 Async FC Trunking Event - Speed:%d\n" 6191 "\tLogical speed:%d " 6192 "port0: %s port1: %s port2: %s port3: %s\n", 6193 phba->sli4_hba.link_state.speed, 6194 phba->sli4_hba.link_state.logical_speed, 6195 trunk_link_status(0), trunk_link_status(1), 6196 trunk_link_status(2), trunk_link_status(3)); 6197 6198 if (phba->cmf_active_mode != LPFC_CFG_OFF) 6199 lpfc_cmf_signal_init(phba); 6200 6201 if (port_fault) 6202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6203 "3202 trunk error:0x%x (%s) seen on port0:%s " 6204 /* 6205 * SLI-4: We have only 0xA error codes 6206 * defined as of now. print an appropriate 6207 * message in case driver needs to be updated. 6208 */ 6209 "port1:%s port2:%s port3:%s\n", err, err > 0xA ? 6210 "UNDEFINED. update driver." : trunk_errmsg[err], 6211 trunk_port_fault(0), trunk_port_fault(1), 6212 trunk_port_fault(2), trunk_port_fault(3)); 6213 } 6214 6215 6216 /** 6217 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 6218 * @phba: pointer to lpfc hba data structure. 6219 * @acqe_fc: pointer to the async fc completion queue entry. 6220 * 6221 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 6222 * that the event was received and then issue a read_topology mailbox command so 6223 * that the rest of the driver will treat it the same as SLI3. 
6224 **/ 6225 static void 6226 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 6227 { 6228 LPFC_MBOXQ_t *pmb; 6229 MAILBOX_t *mb; 6230 struct lpfc_mbx_read_top *la; 6231 char *log_level; 6232 int rc; 6233 6234 if (bf_get(lpfc_trailer_type, acqe_fc) != 6235 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 6236 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6237 "2895 Non FC link Event detected.(%d)\n", 6238 bf_get(lpfc_trailer_type, acqe_fc)); 6239 return; 6240 } 6241 6242 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6243 LPFC_FC_LA_TYPE_TRUNKING_EVENT) { 6244 lpfc_update_trunk_link_status(phba, acqe_fc); 6245 return; 6246 } 6247 6248 /* Keep the link status for extra SLI4 state machine reference */ 6249 phba->sli4_hba.link_state.speed = 6250 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 6251 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 6252 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 6253 phba->sli4_hba.link_state.topology = 6254 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 6255 phba->sli4_hba.link_state.status = 6256 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 6257 phba->sli4_hba.link_state.type = 6258 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 6259 phba->sli4_hba.link_state.number = 6260 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 6261 phba->sli4_hba.link_state.fault = 6262 bf_get(lpfc_acqe_link_fault, acqe_fc); 6263 phba->sli4_hba.link_state.link_status = 6264 bf_get(lpfc_acqe_fc_la_link_status, acqe_fc); 6265 6266 /* 6267 * Only select attention types need logical speed modification to what 6268 * was previously set. 6269 */ 6270 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP && 6271 phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) { 6272 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == 6273 LPFC_FC_LA_TYPE_LINK_DOWN) 6274 phba->sli4_hba.link_state.logical_speed = 0; 6275 else if (!phba->sli4_hba.conf_trunk) 6276 phba->sli4_hba.link_state.logical_speed = 6277 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 6278 } 6279 6280 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6281 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 6282 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 6283 "%dMbps Fault:x%x Link Status:x%x\n", 6284 phba->sli4_hba.link_state.speed, 6285 phba->sli4_hba.link_state.topology, 6286 phba->sli4_hba.link_state.status, 6287 phba->sli4_hba.link_state.type, 6288 phba->sli4_hba.link_state.number, 6289 phba->sli4_hba.link_state.logical_speed, 6290 phba->sli4_hba.link_state.fault, 6291 phba->sli4_hba.link_state.link_status); 6292 6293 /* 6294 * The following attention types are informational only, providing 6295 * further details about link status. Overwrite the value of 6296 * link_state.status appropriately. No further action is required. 6297 */ 6298 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) { 6299 switch (phba->sli4_hba.link_state.status) { 6300 case LPFC_FC_LA_TYPE_ACTIVATE_FAIL: 6301 log_level = KERN_WARNING; 6302 phba->sli4_hba.link_state.status = 6303 LPFC_FC_LA_TYPE_LINK_DOWN; 6304 break; 6305 case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT: 6306 /* 6307 * During bb credit recovery establishment, receiving 6308 * this attention type is normal. Link Up attention 6309 * type is expected to occur before this informational 6310 * attention type so keep the Link Up status. 
6311 */ 6312 log_level = KERN_INFO; 6313 phba->sli4_hba.link_state.status = 6314 LPFC_FC_LA_TYPE_LINK_UP; 6315 break; 6316 default: 6317 log_level = KERN_INFO; 6318 break; 6319 } 6320 lpfc_log_msg(phba, log_level, LOG_SLI, 6321 "2992 Async FC event - Informational Link " 6322 "Attention Type x%x\n", 6323 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc)); 6324 return; 6325 } 6326 6327 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6328 if (!pmb) { 6329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6330 "2897 The mboxq allocation failed\n"); 6331 return; 6332 } 6333 rc = lpfc_mbox_rsrc_prep(phba, pmb); 6334 if (rc) { 6335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6336 "2898 The mboxq prep failed\n"); 6337 goto out_free_pmb; 6338 } 6339 6340 /* Cleanup any outstanding ELS commands */ 6341 lpfc_els_flush_all_cmd(phba); 6342 6343 /* Block ELS IOCBs until we have done process link event */ 6344 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT; 6345 6346 /* Update link event statistics */ 6347 phba->sli.slistat.link_event++; 6348 6349 /* Create lpfc_handle_latt mailbox command from link ACQE */ 6350 lpfc_read_topology(phba, pmb, pmb->ctx_buf); 6351 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 6352 pmb->vport = phba->pport; 6353 6354 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 6355 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); 6356 6357 switch (phba->sli4_hba.link_state.status) { 6358 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: 6359 phba->link_flag |= LS_MDS_LINK_DOWN; 6360 break; 6361 case LPFC_FC_LA_TYPE_MDS_LOOPBACK: 6362 phba->link_flag |= LS_MDS_LOOPBACK; 6363 break; 6364 default: 6365 break; 6366 } 6367 6368 /* Initialize completion status */ 6369 mb = &pmb->u.mb; 6370 mb->mbxStatus = MBX_SUCCESS; 6371 6372 /* Parse port fault information field */ 6373 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc); 6374 6375 /* Parse and translate link attention fields */ 6376 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 6377 la->eventTag = acqe_fc->event_tag; 6378 6379 if (phba->sli4_hba.link_state.status == 6380 LPFC_FC_LA_TYPE_UNEXP_WWPN) { 6381 bf_set(lpfc_mbx_read_top_att_type, la, 6382 LPFC_FC_LA_TYPE_UNEXP_WWPN); 6383 } else { 6384 bf_set(lpfc_mbx_read_top_att_type, la, 6385 LPFC_FC_LA_TYPE_LINK_DOWN); 6386 } 6387 /* Invoke the mailbox command callback function */ 6388 lpfc_mbx_cmpl_read_topology(phba, pmb); 6389 6390 return; 6391 } 6392 6393 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 6394 if (rc == MBX_NOT_FINISHED) 6395 goto out_free_pmb; 6396 return; 6397 6398 out_free_pmb: 6399 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 6400 } 6401 6402 /** 6403 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 6404 * @phba: pointer to lpfc hba data structure. 6405 * @acqe_sli: pointer to the async SLI completion queue entry. 6406 * 6407 * This routine is to handle the SLI4 asynchronous SLI events. 
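 * Event types handled include over/normal temperature, misconfigured
 * optics, remote D_Port tests, port parameter changes, FA-PWWN
 * misconfiguration, EEPROM failure, congestion signals, remote degrade
 * signaling, and CM statistics reset; unrecognized types are logged.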
6408 **/ 6409 static void 6410 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 6411 { 6412 char port_name; 6413 char message[128]; 6414 uint8_t status; 6415 uint8_t evt_type; 6416 uint8_t operational = 0; 6417 struct temp_event temp_event_data; 6418 struct lpfc_acqe_misconfigured_event *misconfigured; 6419 struct lpfc_acqe_cgn_signal *cgn_signal; 6420 struct Scsi_Host *shost; 6421 struct lpfc_vport **vports; 6422 int rc, i, cnt; 6423 6424 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 6425 6426 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6427 "2901 Async SLI event - Type:%d, Event Data: x%08x " 6428 "x%08x x%08x x%08x\n", evt_type, 6429 acqe_sli->event_data1, acqe_sli->event_data2, 6430 acqe_sli->event_data3, acqe_sli->trailer); 6431 6432 port_name = phba->Port[0]; 6433 if (port_name == 0x00) 6434 port_name = '?'; /* get port name is empty */ 6435 6436 switch (evt_type) { 6437 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 6438 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6439 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6440 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6441 6442 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6443 "3190 Over Temperature:%d Celsius- Port Name %c\n", 6444 acqe_sli->event_data1, port_name); 6445 6446 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 6447 shost = lpfc_shost_from_vport(phba->pport); 6448 fc_host_post_vendor_event(shost, fc_get_event_number(), 6449 sizeof(temp_event_data), 6450 (char *)&temp_event_data, 6451 SCSI_NL_VID_TYPE_PCI 6452 | PCI_VENDOR_ID_EMULEX); 6453 break; 6454 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 6455 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6456 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6457 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 6458 6459 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT, 6460 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 6461 acqe_sli->event_data1, port_name); 6462 6463 shost = lpfc_shost_from_vport(phba->pport); 6464 fc_host_post_vendor_event(shost, fc_get_event_number(), 6465 sizeof(temp_event_data), 6466 (char *)&temp_event_data, 6467 SCSI_NL_VID_TYPE_PCI 6468 | PCI_VENDOR_ID_EMULEX); 6469 break; 6470 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 6471 misconfigured = (struct lpfc_acqe_misconfigured_event *) 6472 &acqe_sli->event_data1; 6473 6474 /* fetch the status for this port */ 6475 switch (phba->sli4_hba.lnk_info.lnk_no) { 6476 case LPFC_LINK_NUMBER_0: 6477 status = bf_get(lpfc_sli_misconfigured_port0_state, 6478 &misconfigured->theEvent); 6479 operational = bf_get(lpfc_sli_misconfigured_port0_op, 6480 &misconfigured->theEvent); 6481 break; 6482 case LPFC_LINK_NUMBER_1: 6483 status = bf_get(lpfc_sli_misconfigured_port1_state, 6484 &misconfigured->theEvent); 6485 operational = bf_get(lpfc_sli_misconfigured_port1_op, 6486 &misconfigured->theEvent); 6487 break; 6488 case LPFC_LINK_NUMBER_2: 6489 status = bf_get(lpfc_sli_misconfigured_port2_state, 6490 &misconfigured->theEvent); 6491 operational = bf_get(lpfc_sli_misconfigured_port2_op, 6492 &misconfigured->theEvent); 6493 break; 6494 case LPFC_LINK_NUMBER_3: 6495 status = bf_get(lpfc_sli_misconfigured_port3_state, 6496 &misconfigured->theEvent); 6497 operational = bf_get(lpfc_sli_misconfigured_port3_op, 6498 &misconfigured->theEvent); 6499 break; 6500 default: 6501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6502 "3296 " 6503 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 6504 "event: Invalid link %d", 6505 phba->sli4_hba.lnk_info.lnk_no); 6506 return; 6507 } 6508 
6509 /* Skip if optic state unchanged */ 6510 if (phba->sli4_hba.lnk_info.optic_state == status) 6511 return; 6512 6513 switch (status) { 6514 case LPFC_SLI_EVENT_STATUS_VALID: 6515 sprintf(message, "Physical Link is functional"); 6516 break; 6517 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 6518 sprintf(message, "Optics faulted/incorrectly " 6519 "installed/not installed - Reseat optics, " 6520 "if issue not resolved, replace."); 6521 break; 6522 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 6523 sprintf(message, 6524 "Optics of two types installed - Remove one " 6525 "optic or install matching pair of optics."); 6526 break; 6527 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 6528 sprintf(message, "Incompatible optics - Replace with " 6529 "compatible optics for card to function."); 6530 break; 6531 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 6532 sprintf(message, "Unqualified optics - Replace with " 6533 "Avago optics for Warranty and Technical " 6534 "Support - Link is%s operational", 6535 (operational) ? " not" : ""); 6536 break; 6537 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 6538 sprintf(message, "Uncertified optics - Replace with " 6539 "Avago-certified optics to enable link " 6540 "operation - Link is%s operational", 6541 (operational) ? " not" : ""); 6542 break; 6543 default: 6544 /* firmware is reporting a status we don't know about */ 6545 sprintf(message, "Unknown event status x%02x", status); 6546 break; 6547 } 6548 6549 /* Issue READ_CONFIG mbox command to refresh supported speeds */ 6550 rc = lpfc_sli4_read_config(phba); 6551 if (rc) { 6552 phba->lmt = 0; 6553 lpfc_printf_log(phba, KERN_ERR, 6554 LOG_TRACE_EVENT, 6555 "3194 Unable to retrieve supported " 6556 "speeds, rc = 0x%x\n", rc); 6557 } 6558 rc = lpfc_sli4_refresh_params(phba); 6559 if (rc) { 6560 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6561 "3174 Unable to update pls support, " 6562 "rc x%x\n", rc); 6563 } 6564 vports = lpfc_create_vport_work_array(phba); 6565 if (vports != NULL) { 6566 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6567 i++) { 6568 shost = lpfc_shost_from_vport(vports[i]); 6569 lpfc_host_supported_speeds_set(shost); 6570 } 6571 } 6572 lpfc_destroy_vport_work_array(phba, vports); 6573 6574 phba->sli4_hba.lnk_info.optic_state = status; 6575 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6576 "3176 Port Name %c %s\n", port_name, message); 6577 break; 6578 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 6579 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6580 "3192 Remote DPort Test Initiated - " 6581 "Event Data1:x%08x Event Data2: x%08x\n", 6582 acqe_sli->event_data1, acqe_sli->event_data2); 6583 break; 6584 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG: 6585 /* Call FW to obtain active parms */ 6586 lpfc_sli4_cgn_parm_chg_evt(phba); 6587 break; 6588 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: 6589 /* Misconfigured WWN. Reports that the SLI Port is configured 6590 * to use FA-WWN, but the attached device doesn’t support it. 6591 * Event Data1 - N.A, Event Data2 - N.A 6592 * This event only happens on the physical port. 6593 */ 6594 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY, 6595 "2699 Misconfigured FA-PWWN - Attached device " 6596 "does not support FA-PWWN\n"); 6597 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC; 6598 memset(phba->pport->fc_portname.u.wwn, 0, 6599 sizeof(struct lpfc_name)); 6600 break; 6601 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: 6602 /* EEPROM failure. 
No driver action is required */ 6603 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6604 "2518 EEPROM failure - " 6605 "Event Data1: x%08x Event Data2: x%08x\n", 6606 acqe_sli->event_data1, acqe_sli->event_data2); 6607 break; 6608 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL: 6609 if (phba->cmf_active_mode == LPFC_CFG_OFF) 6610 break; 6611 cgn_signal = (struct lpfc_acqe_cgn_signal *) 6612 &acqe_sli->event_data1; 6613 phba->cgn_acqe_cnt++; 6614 6615 cnt = bf_get(lpfc_warn_acqe, cgn_signal); 6616 atomic64_add(cnt, &phba->cgn_acqe_stat.warn); 6617 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm); 6618 6619 /* no threshold for CMF, even 1 signal will trigger an event */ 6620 6621 /* Alarm overrides warning, so check that first */ 6622 if (cgn_signal->alarm_cnt) { 6623 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6624 /* Keep track of alarm cnt for CMF_SYNC_WQE */ 6625 atomic_add(cgn_signal->alarm_cnt, 6626 &phba->cgn_sync_alarm_cnt); 6627 } 6628 } else if (cnt) { 6629 /* signal action needs to be taken */ 6630 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY || 6631 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 6632 /* Keep track of warning cnt for CMF_SYNC_WQE */ 6633 atomic_add(cnt, &phba->cgn_sync_warn_cnt); 6634 } 6635 } 6636 break; 6637 case LPFC_SLI_EVENT_TYPE_RD_SIGNAL: 6638 /* May be accompanied by a temperature event */ 6639 lpfc_printf_log(phba, KERN_INFO, 6640 LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT, 6641 "2902 Remote Degrade Signaling: x%08x x%08x " 6642 "x%08x\n", 6643 acqe_sli->event_data1, acqe_sli->event_data2, 6644 acqe_sli->event_data3); 6645 break; 6646 case LPFC_SLI_EVENT_TYPE_RESET_CM_STATS: 6647 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 6648 "2905 Reset CM statistics\n"); 6649 lpfc_sli4_async_cmstat_evt(phba); 6650 break; 6651 default: 6652 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6653 "3193 Unrecognized SLI event, type: 0x%x", 6654 evt_type); 6655 break; 6656 } 6657 } 6658 6659 /** 6660 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 6661 * @vport: pointer to vport data structure. 6662 * 6663 * This routine is to perform Clear Virtual Link (CVL) on a vport in 6664 * response to a CVL event. 6665 * 6666 * Return the pointer to the ndlp with the vport if successful, otherwise 6667 * return NULL. 
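 * NULL is also returned if the link has not yet been instantiated
 * (port state still below FLOGI/FDISC and not already failed), in which
 * case the CVL event is ignored.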
6668 **/ 6669 static struct lpfc_nodelist * 6670 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 6671 { 6672 struct lpfc_nodelist *ndlp; 6673 struct Scsi_Host *shost; 6674 struct lpfc_hba *phba; 6675 6676 if (!vport) 6677 return NULL; 6678 phba = vport->phba; 6679 if (!phba) 6680 return NULL; 6681 ndlp = lpfc_findnode_did(vport, Fabric_DID); 6682 if (!ndlp) { 6683 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6684 ndlp = lpfc_nlp_init(vport, Fabric_DID); 6685 if (!ndlp) 6686 return NULL; 6687 /* Set the node type */ 6688 ndlp->nlp_type |= NLP_FABRIC; 6689 /* Put ndlp onto node list */ 6690 lpfc_enqueue_node(vport, ndlp); 6691 } 6692 if ((phba->pport->port_state < LPFC_FLOGI) && 6693 (phba->pport->port_state != LPFC_VPORT_FAILED)) 6694 return NULL; 6695 /* If virtual link is not yet instantiated ignore CVL */ 6696 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 6697 && (vport->port_state != LPFC_VPORT_FAILED)) 6698 return NULL; 6699 shost = lpfc_shost_from_vport(vport); 6700 if (!shost) 6701 return NULL; 6702 lpfc_linkdown_port(vport); 6703 lpfc_cleanup_pending_mbox(vport); 6704 set_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag); 6705 6706 return ndlp; 6707 } 6708 6709 /** 6710 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 6711 * @phba: pointer to lpfc hba data structure. 6712 * 6713 * This routine is to perform Clear Virtual Link (CVL) on all vports in 6714 * response to a FCF dead event. 6715 **/ 6716 static void 6717 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 6718 { 6719 struct lpfc_vport **vports; 6720 int i; 6721 6722 vports = lpfc_create_vport_work_array(phba); 6723 if (vports) 6724 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 6725 lpfc_sli4_perform_vport_cvl(vports[i]); 6726 lpfc_destroy_vport_work_array(phba, vports); 6727 } 6728 6729 /** 6730 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 6731 * @phba: pointer to lpfc hba data structure. 6732 * @acqe_fip: pointer to the async fcoe completion queue entry. 6733 * 6734 * This routine is to handle the SLI4 asynchronous fcoe event. 6735 **/ 6736 static void 6737 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 6738 struct lpfc_acqe_fip *acqe_fip) 6739 { 6740 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 6741 int rc; 6742 struct lpfc_vport *vport; 6743 struct lpfc_nodelist *ndlp; 6744 int active_vlink_present; 6745 struct lpfc_vport **vports; 6746 int i; 6747 6748 phba->fc_eventTag = acqe_fip->event_tag; 6749 phba->fcoe_eventtag = acqe_fip->event_tag; 6750 switch (event_type) { 6751 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 6752 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 6753 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 6754 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6755 "2546 New FCF event, evt_tag:x%x, " 6756 "index:x%x\n", 6757 acqe_fip->event_tag, 6758 acqe_fip->index); 6759 else 6760 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 6761 LOG_DISCOVERY, 6762 "2788 FCF param modified event, " 6763 "evt_tag:x%x, index:x%x\n", 6764 acqe_fip->event_tag, 6765 acqe_fip->index); 6766 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6767 /* 6768 * During period of FCF discovery, read the FCF 6769 * table record indexed by the event to update 6770 * FCF roundrobin failover eligible FCF bmask. 
6771 */ 6772 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6773 LOG_DISCOVERY, 6774 "2779 Read FCF (x%x) for updating " 6775 "roundrobin FCF failover bmask\n", 6776 acqe_fip->index); 6777 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 6778 } 6779 6780 /* If the FCF discovery is in progress, do nothing. */ 6781 spin_lock_irq(&phba->hbalock); 6782 if (phba->hba_flag & FCF_TS_INPROG) { 6783 spin_unlock_irq(&phba->hbalock); 6784 break; 6785 } 6786 /* If fast FCF failover rescan event is pending, do nothing */ 6787 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { 6788 spin_unlock_irq(&phba->hbalock); 6789 break; 6790 } 6791 6792 /* If the FCF has been in discovered state, do nothing. */ 6793 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 6794 spin_unlock_irq(&phba->hbalock); 6795 break; 6796 } 6797 spin_unlock_irq(&phba->hbalock); 6798 6799 /* Otherwise, scan the entire FCF table and re-discover SAN */ 6800 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6801 "2770 Start FCF table scan per async FCF " 6802 "event, evt_tag:x%x, index:x%x\n", 6803 acqe_fip->event_tag, acqe_fip->index); 6804 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 6805 LPFC_FCOE_FCF_GET_FIRST); 6806 if (rc) 6807 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6808 "2547 Issue FCF scan read FCF mailbox " 6809 "command failed (x%x)\n", rc); 6810 break; 6811 6812 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 6813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6814 "2548 FCF Table full count 0x%x tag 0x%x\n", 6815 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 6816 acqe_fip->event_tag); 6817 break; 6818 6819 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 6820 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6822 "2549 FCF (x%x) disconnected from network, " 6823 "tag:x%x\n", acqe_fip->index, 6824 acqe_fip->event_tag); 6825 /* 6826 * If we are in the middle of FCF failover process, clear 6827 * the corresponding FCF bit in the roundrobin bitmap. 6828 */ 6829 spin_lock_irq(&phba->hbalock); 6830 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 6831 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 6832 spin_unlock_irq(&phba->hbalock); 6833 /* Update FLOGI FCF failover eligible FCF bmask */ 6834 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 6835 break; 6836 } 6837 spin_unlock_irq(&phba->hbalock); 6838 6839 /* If the event is not for currently used fcf do nothing */ 6840 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 6841 break; 6842 6843 /* 6844 * Otherwise, request the port to rediscover the entire FCF 6845 * table for a fast recovery from case that the current FCF 6846 * is no longer valid as we are not in the middle of FCF 6847 * failover process already. 
6848 */ 6849 spin_lock_irq(&phba->hbalock); 6850 /* Mark the fast failover process in progress */ 6851 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 6852 spin_unlock_irq(&phba->hbalock); 6853 6854 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 6855 "2771 Start FCF fast failover process due to " 6856 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 6857 "\n", acqe_fip->event_tag, acqe_fip->index); 6858 rc = lpfc_sli4_redisc_fcf_table(phba); 6859 if (rc) { 6860 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6861 LOG_TRACE_EVENT, 6862 "2772 Issue FCF rediscover mailbox " 6863 "command failed, fail through to FCF " 6864 "dead event\n"); 6865 spin_lock_irq(&phba->hbalock); 6866 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 6867 spin_unlock_irq(&phba->hbalock); 6868 /* 6869 * Last resort will fail over by treating this 6870 * as a link down to FCF registration. 6871 */ 6872 lpfc_sli4_fcf_dead_failthrough(phba); 6873 } else { 6874 /* Reset FCF roundrobin bmask for new discovery */ 6875 lpfc_sli4_clear_fcf_rr_bmask(phba); 6876 /* 6877 * Handling fast FCF failover to a DEAD FCF event is 6878 * considered equalivant to receiving CVL to all vports. 6879 */ 6880 lpfc_sli4_perform_all_vport_cvl(phba); 6881 } 6882 break; 6883 case LPFC_FIP_EVENT_TYPE_CVL: 6884 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 6885 lpfc_printf_log(phba, KERN_ERR, 6886 LOG_TRACE_EVENT, 6887 "2718 Clear Virtual Link Received for VPI 0x%x" 6888 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 6889 6890 vport = lpfc_find_vport_by_vpid(phba, 6891 acqe_fip->index); 6892 ndlp = lpfc_sli4_perform_vport_cvl(vport); 6893 if (!ndlp) 6894 break; 6895 active_vlink_present = 0; 6896 6897 vports = lpfc_create_vport_work_array(phba); 6898 if (vports) { 6899 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 6900 i++) { 6901 if (!test_bit(FC_VPORT_CVL_RCVD, 6902 &vports[i]->fc_flag) && 6903 vports[i]->port_state > LPFC_FDISC) { 6904 active_vlink_present = 1; 6905 break; 6906 } 6907 } 6908 lpfc_destroy_vport_work_array(phba, vports); 6909 } 6910 6911 /* 6912 * Don't re-instantiate if vport is marked for deletion. 6913 * If we are here first then vport_delete is going to wait 6914 * for discovery to complete. 6915 */ 6916 if (!test_bit(FC_UNLOADING, &vport->load_flag) && 6917 active_vlink_present) { 6918 /* 6919 * If there are other active VLinks present, 6920 * re-instantiate the Vlink using FDISC. 6921 */ 6922 mod_timer(&ndlp->nlp_delayfunc, 6923 jiffies + msecs_to_jiffies(1000)); 6924 spin_lock_irq(&ndlp->lock); 6925 ndlp->nlp_flag |= NLP_DELAY_TMO; 6926 spin_unlock_irq(&ndlp->lock); 6927 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 6928 vport->port_state = LPFC_FDISC; 6929 } else { 6930 /* 6931 * Otherwise, we request port to rediscover 6932 * the entire FCF table for a fast recovery 6933 * from possible case that the current FCF 6934 * is no longer valid if we are not already 6935 * in the FCF failover process. 
6936 */ 6937 spin_lock_irq(&phba->hbalock); 6938 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 6939 spin_unlock_irq(&phba->hbalock); 6940 break; 6941 } 6942 /* Mark the fast failover process in progress */ 6943 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 6944 spin_unlock_irq(&phba->hbalock); 6945 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 6946 LOG_DISCOVERY, 6947 "2773 Start FCF failover per CVL, " 6948 "evt_tag:x%x\n", acqe_fip->event_tag); 6949 rc = lpfc_sli4_redisc_fcf_table(phba); 6950 if (rc) { 6951 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 6952 LOG_TRACE_EVENT, 6953 "2774 Issue FCF rediscover " 6954 "mailbox command failed, " 6955 "through to CVL event\n"); 6956 spin_lock_irq(&phba->hbalock); 6957 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 6958 spin_unlock_irq(&phba->hbalock); 6959 /* 6960 * Last resort will be re-try on the 6961 * the current registered FCF entry. 6962 */ 6963 lpfc_retry_pport_discovery(phba); 6964 } else 6965 /* 6966 * Reset FCF roundrobin bmask for new 6967 * discovery. 6968 */ 6969 lpfc_sli4_clear_fcf_rr_bmask(phba); 6970 } 6971 break; 6972 default: 6973 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6974 "0288 Unknown FCoE event type 0x%x event tag " 6975 "0x%x\n", event_type, acqe_fip->event_tag); 6976 break; 6977 } 6978 } 6979 6980 /** 6981 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 6982 * @phba: pointer to lpfc hba data structure. 6983 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 6984 * 6985 * This routine is to handle the SLI4 asynchronous dcbx event. 6986 **/ 6987 static void 6988 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 6989 struct lpfc_acqe_dcbx *acqe_dcbx) 6990 { 6991 phba->fc_eventTag = acqe_dcbx->event_tag; 6992 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6993 "0290 The SLI4 DCBX asynchronous event is not " 6994 "handled yet\n"); 6995 } 6996 6997 /** 6998 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 6999 * @phba: pointer to lpfc hba data structure. 7000 * @acqe_grp5: pointer to the async grp5 completion queue entry. 7001 * 7002 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 7003 * is an asynchronous notified of a logical link speed change. The Port 7004 * reports the logical link speed in units of 10Mbps. 7005 **/ 7006 static void 7007 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 7008 struct lpfc_acqe_grp5 *acqe_grp5) 7009 { 7010 uint16_t prev_ll_spd; 7011 7012 phba->fc_eventTag = acqe_grp5->event_tag; 7013 phba->fcoe_eventtag = acqe_grp5->event_tag; 7014 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 7015 phba->sli4_hba.link_state.logical_speed = 7016 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 7017 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7018 "2789 GRP5 Async Event: Updating logical link speed " 7019 "from %dMbps to %dMbps\n", prev_ll_spd, 7020 phba->sli4_hba.link_state.logical_speed); 7021 } 7022 7023 /** 7024 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event 7025 * @phba: pointer to lpfc hba data structure. 7026 * 7027 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event 7028 * is an asynchronous notification of a request to reset CM stats. 7029 **/ 7030 static void 7031 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba) 7032 { 7033 if (!phba->cgn_i) 7034 return; 7035 lpfc_init_congestion_stat(phba); 7036 } 7037 7038 /** 7039 * lpfc_cgn_params_val - Validate FW congestion parameters. 7040 * @phba: pointer to lpfc hba data structure. 
7041 * @p_cfg_param: pointer to FW provided congestion parameters. 7042 * 7043 * This routine validates the congestion parameters passed 7044 * by the FW to the driver via an ACQE event. 7045 **/ 7046 static void 7047 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param) 7048 { 7049 spin_lock_irq(&phba->hbalock); 7050 7051 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF, 7052 LPFC_CFG_MONITOR)) { 7053 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, 7054 "6225 CMF mode param out of range: %d\n", 7055 p_cfg_param->cgn_param_mode); 7056 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF; 7057 } 7058 7059 spin_unlock_irq(&phba->hbalock); 7060 } 7061 7062 static const char * const lpfc_cmf_mode_to_str[] = { 7063 "OFF", 7064 "MANAGED", 7065 "MONITOR", 7066 }; 7067 7068 /** 7069 * lpfc_cgn_params_parse - Process a FW cong parm change event 7070 * @phba: pointer to lpfc hba data structure. 7071 * @p_cgn_param: pointer to a data buffer with the FW cong params. 7072 * @len: the size of pdata in bytes. 7073 * 7074 * This routine validates the congestion management buffer signature 7075 * from the FW, validates the contents and makes corrections for 7076 * valid, in-range values. If the signature magic is correct and 7077 * after parameter validation, the contents are copied to the driver's 7078 * @phba structure. If the magic is incorrect, an error message is 7079 * logged. 7080 **/ 7081 static void 7082 lpfc_cgn_params_parse(struct lpfc_hba *phba, 7083 struct lpfc_cgn_param *p_cgn_param, uint32_t len) 7084 { 7085 struct lpfc_cgn_info *cp; 7086 uint32_t crc, oldmode; 7087 char acr_string[4] = {0}; 7088 7089 /* Make sure the FW has encoded the correct magic number to 7090 * validate the congestion parameter in FW memory. 7091 */ 7092 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) { 7093 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7094 "4668 FW cgn parm buffer data: " 7095 "magic 0x%x version %d mode %d " 7096 "level0 %d level1 %d " 7097 "level2 %d byte13 %d " 7098 "byte14 %d byte15 %d " 7099 "byte11 %d byte12 %d activeMode %d\n", 7100 p_cgn_param->cgn_param_magic, 7101 p_cgn_param->cgn_param_version, 7102 p_cgn_param->cgn_param_mode, 7103 p_cgn_param->cgn_param_level0, 7104 p_cgn_param->cgn_param_level1, 7105 p_cgn_param->cgn_param_level2, 7106 p_cgn_param->byte13, 7107 p_cgn_param->byte14, 7108 p_cgn_param->byte15, 7109 p_cgn_param->byte11, 7110 p_cgn_param->byte12, 7111 phba->cmf_active_mode); 7112 7113 oldmode = phba->cmf_active_mode; 7114 7115 /* Any parameters out of range are corrected to defaults 7116 * by this routine. No need to fail. 
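 * The validated parameters are then copied into phba->cgn_p under the
 * hbalock, mirrored into the congestion info buffer (when registered)
 * with a recalculated CRC, and the active CMF mode is updated.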
7117 */ 7118 lpfc_cgn_params_val(phba, p_cgn_param); 7119 7120 /* Parameters are verified, move them into driver storage */ 7121 spin_lock_irq(&phba->hbalock); 7122 memcpy(&phba->cgn_p, p_cgn_param, 7123 sizeof(struct lpfc_cgn_param)); 7124 7125 /* Update parameters in congestion info buffer now */ 7126 if (phba->cgn_i) { 7127 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 7128 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 7129 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 7130 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 7131 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 7132 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 7133 LPFC_CGN_CRC32_SEED); 7134 cp->cgn_info_crc = cpu_to_le32(crc); 7135 } 7136 spin_unlock_irq(&phba->hbalock); 7137 7138 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode; 7139 7140 switch (oldmode) { 7141 case LPFC_CFG_OFF: 7142 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) { 7143 /* Turning CMF on */ 7144 lpfc_cmf_start(phba); 7145 7146 if (phba->link_state >= LPFC_LINK_UP) { 7147 phba->cgn_reg_fpin = 7148 phba->cgn_init_reg_fpin; 7149 phba->cgn_reg_signal = 7150 phba->cgn_init_reg_signal; 7151 lpfc_issue_els_edc(phba->pport, 0); 7152 } 7153 } 7154 break; 7155 case LPFC_CFG_MANAGED: 7156 switch (phba->cgn_p.cgn_param_mode) { 7157 case LPFC_CFG_OFF: 7158 /* Turning CMF off */ 7159 lpfc_cmf_stop(phba); 7160 if (phba->link_state >= LPFC_LINK_UP) 7161 lpfc_issue_els_edc(phba->pport, 0); 7162 break; 7163 case LPFC_CFG_MONITOR: 7164 phba->cmf_max_bytes_per_interval = 7165 phba->cmf_link_byte_count; 7166 7167 /* Resume blocked IO - unblock on workqueue */ 7168 queue_work(phba->wq, 7169 &phba->unblock_request_work); 7170 break; 7171 } 7172 break; 7173 case LPFC_CFG_MONITOR: 7174 switch (phba->cgn_p.cgn_param_mode) { 7175 case LPFC_CFG_OFF: 7176 /* Turning CMF off */ 7177 lpfc_cmf_stop(phba); 7178 if (phba->link_state >= LPFC_LINK_UP) 7179 lpfc_issue_els_edc(phba->pport, 0); 7180 break; 7181 case LPFC_CFG_MANAGED: 7182 lpfc_cmf_signal_init(phba); 7183 break; 7184 } 7185 break; 7186 } 7187 if (oldmode != LPFC_CFG_OFF || 7188 oldmode != phba->cgn_p.cgn_param_mode) { 7189 if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED) 7190 scnprintf(acr_string, sizeof(acr_string), "%u", 7191 phba->cgn_p.cgn_param_level0); 7192 else 7193 scnprintf(acr_string, sizeof(acr_string), "NA"); 7194 7195 dev_info(&phba->pcidev->dev, "%d: " 7196 "4663 CMF: Mode %s acr %s\n", 7197 phba->brd_no, 7198 lpfc_cmf_mode_to_str 7199 [phba->cgn_p.cgn_param_mode], 7200 acr_string); 7201 } 7202 } else { 7203 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7204 "4669 FW cgn parm buf wrong magic 0x%x " 7205 "version %d\n", p_cgn_param->cgn_param_magic, 7206 p_cgn_param->cgn_param_version); 7207 } 7208 } 7209 7210 /** 7211 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters. 7212 * @phba: pointer to lpfc hba data structure. 7213 * 7214 * This routine issues a read_object mailbox command to 7215 * get the congestion management parameters from the FW 7216 * parses it and updates the driver maintained values. 7217 * 7218 * Returns 7219 * 0 if the object was empty 7220 * -Eval if an error was encountered 7221 * Count if bytes were read from object 7222 **/ 7223 int 7224 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba) 7225 { 7226 int ret = 0; 7227 struct lpfc_cgn_param *p_cgn_param = NULL; 7228 u32 *pdata = NULL; 7229 u32 len = 0; 7230 7231 /* Find out if the FW has a new set of congestion parameters. 
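 * The object named LPFC_PORT_CFG_NAME is read with lpfc_read_object() into a buffer of sizeof(struct lpfc_cgn_param) bytes; a return of 0 means the object is empty, a negative value is an error, and a positive value is the number of bytes copied (handled below).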
*/ 7232 len = sizeof(struct lpfc_cgn_param); 7233 pdata = kzalloc(len, GFP_KERNEL); 7234 if (!pdata) 7235 return -ENOMEM; 7236 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME, 7237 pdata, len); 7238 7239 /* 0 means no data. A negative means error. A positive means 7240 * bytes were copied. 7241 */ 7242 if (!ret) { 7243 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7244 "4670 CGN RD OBJ returns no data\n"); 7245 goto rd_obj_err; 7246 } else if (ret < 0) { 7247 /* Some error. Just exit and return it to the caller.*/ 7248 goto rd_obj_err; 7249 } 7250 7251 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, 7252 "6234 READ CGN PARAMS Successful %d\n", len); 7253 7254 /* Parse data pointer over len and update the phba congestion 7255 * parameters with values passed back. The receive rate values 7256 * may have been altered in FW, but take no action here. 7257 */ 7258 p_cgn_param = (struct lpfc_cgn_param *)pdata; 7259 lpfc_cgn_params_parse(phba, p_cgn_param, len); 7260 7261 rd_obj_err: 7262 kfree(pdata); 7263 return ret; 7264 } 7265 7266 /** 7267 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event 7268 * @phba: pointer to lpfc hba data structure. 7269 * 7270 * The FW generated Async ACQE SLI event calls this routine when 7271 * the event type is an SLI Internal Port Event and the Event Code 7272 * indicates a change to the FW maintained congestion parameters. 7273 * 7274 * This routine executes a Read_Object mailbox call to obtain the 7275 * current congestion parameters maintained in FW and corrects 7276 * the driver's active congestion parameters. 7277 * 7278 * The acqe event is not passed because there is no further data 7279 * required. 7280 * 7281 * Returns nonzero error if event processing encountered an error. 7282 * Zero otherwise for success. 7283 **/ 7284 static int 7285 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba) 7286 { 7287 int ret = 0; 7288 7289 if (!phba->sli4_hba.pc_sli4_params.cmf) { 7290 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7291 "4664 Cgn Evt when E2E off. Drop event\n"); 7292 return -EACCES; 7293 } 7294 7295 /* If the event is claiming an empty object, it's ok. A write 7296 * could have cleared it. Only error is a negative return 7297 * status. 7298 */ 7299 ret = lpfc_sli4_cgn_params_read(phba); 7300 if (ret < 0) { 7301 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7302 "4667 Error reading Cgn Params (%d)\n", 7303 ret); 7304 } else if (!ret) { 7305 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT, 7306 "4673 CGN Event empty object.\n"); 7307 } 7308 return ret; 7309 } 7310 7311 /** 7312 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 7313 * @phba: pointer to lpfc hba data structure. 7314 * 7315 * This routine is invoked by the worker thread to process all the pending 7316 * SLI4 asynchronous events. 
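 * Pending events are removed from sp_asynce_work_queue one at a time under asynce_list_lock; the lock is dropped while each event is dispatched on its completion trailer code (LINK, FCOE, DCBX, GRP5, FC or SLI), and the CQ event is then returned to the free pool with lpfc_sli4_cq_event_release().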
7317 **/ 7318 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 7319 { 7320 struct lpfc_cq_event *cq_event; 7321 unsigned long iflags; 7322 7323 /* First, declare the async event has been handled */ 7324 spin_lock_irqsave(&phba->hbalock, iflags); 7325 phba->hba_flag &= ~ASYNC_EVENT; 7326 spin_unlock_irqrestore(&phba->hbalock, iflags); 7327 7328 /* Now, handle all the async events */ 7329 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7330 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 7331 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 7332 cq_event, struct lpfc_cq_event, list); 7333 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, 7334 iflags); 7335 7336 /* Process the asynchronous event */ 7337 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 7338 case LPFC_TRAILER_CODE_LINK: 7339 lpfc_sli4_async_link_evt(phba, 7340 &cq_event->cqe.acqe_link); 7341 break; 7342 case LPFC_TRAILER_CODE_FCOE: 7343 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 7344 break; 7345 case LPFC_TRAILER_CODE_DCBX: 7346 lpfc_sli4_async_dcbx_evt(phba, 7347 &cq_event->cqe.acqe_dcbx); 7348 break; 7349 case LPFC_TRAILER_CODE_GRP5: 7350 lpfc_sli4_async_grp5_evt(phba, 7351 &cq_event->cqe.acqe_grp5); 7352 break; 7353 case LPFC_TRAILER_CODE_FC: 7354 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 7355 break; 7356 case LPFC_TRAILER_CODE_SLI: 7357 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 7358 break; 7359 default: 7360 lpfc_printf_log(phba, KERN_ERR, 7361 LOG_TRACE_EVENT, 7362 "1804 Invalid asynchronous event code: " 7363 "x%x\n", bf_get(lpfc_trailer_code, 7364 &cq_event->cqe.mcqe_cmpl)); 7365 break; 7366 } 7367 7368 /* Free the completion event processed to the free pool */ 7369 lpfc_sli4_cq_event_release(phba, cq_event); 7370 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); 7371 } 7372 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); 7373 } 7374 7375 /** 7376 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 7377 * @phba: pointer to lpfc hba data structure. 7378 * 7379 * This routine is invoked by the worker thread to process FCF table 7380 * rediscovery pending completion event. 7381 **/ 7382 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 7383 { 7384 int rc; 7385 7386 spin_lock_irq(&phba->hbalock); 7387 /* Clear FCF rediscovery timeout event */ 7388 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 7389 /* Clear driver fast failover FCF record flag */ 7390 phba->fcf.failover_rec.flag = 0; 7391 /* Set state for FCF fast failover */ 7392 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 7393 spin_unlock_irq(&phba->hbalock); 7394 7395 /* Scan FCF table from the first entry to re-discover SAN */ 7396 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 7397 "2777 Start post-quiescent FCF table scan\n"); 7398 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 7399 if (rc) 7400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7401 "2747 Issue FCF scan read FCF mailbox " 7402 "command failed 0x%x\n", rc); 7403 } 7404 7405 /** 7406 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 7407 * @phba: pointer to lpfc hba data structure. 7408 * @dev_grp: The HBA PCI-Device group number. 7409 * 7410 * This routine is invoked to set up the per HBA PCI-Device group function 7411 * API jump table entries. 
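 * Once populated, common code calls through these per-SLI-rev pointers; as an illustration, phba->lpfc_hba_down_post(phba) resolves to lpfc_hba_down_post_s3() on SLI-3 ports and to lpfc_hba_down_post_s4() on SLI-4 ports (see lpfc_init_api_table_setup()).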
7412 * 7413 * Return: 0 if success, otherwise -ENODEV 7414 **/ 7415 int 7416 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7417 { 7418 int rc; 7419 7420 /* Set up lpfc PCI-device group */ 7421 phba->pci_dev_grp = dev_grp; 7422 7423 /* The LPFC_PCI_DEV_OC uses SLI4 */ 7424 if (dev_grp == LPFC_PCI_DEV_OC) 7425 phba->sli_rev = LPFC_SLI_REV4; 7426 7427 /* Set up device INIT API function jump table */ 7428 rc = lpfc_init_api_table_setup(phba, dev_grp); 7429 if (rc) 7430 return -ENODEV; 7431 /* Set up SCSI API function jump table */ 7432 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 7433 if (rc) 7434 return -ENODEV; 7435 /* Set up SLI API function jump table */ 7436 rc = lpfc_sli_api_table_setup(phba, dev_grp); 7437 if (rc) 7438 return -ENODEV; 7439 /* Set up MBOX API function jump table */ 7440 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 7441 if (rc) 7442 return -ENODEV; 7443 7444 return 0; 7445 } 7446 7447 /** 7448 * lpfc_log_intr_mode - Log the active interrupt mode 7449 * @phba: pointer to lpfc hba data structure. 7450 * @intr_mode: active interrupt mode adopted. 7451 * 7452 * This routine it invoked to log the currently used active interrupt mode 7453 * to the device. 7454 **/ 7455 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 7456 { 7457 switch (intr_mode) { 7458 case 0: 7459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7460 "0470 Enable INTx interrupt mode.\n"); 7461 break; 7462 case 1: 7463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7464 "0481 Enabled MSI interrupt mode.\n"); 7465 break; 7466 case 2: 7467 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7468 "0480 Enabled MSI-X interrupt mode.\n"); 7469 break; 7470 default: 7471 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7472 "0482 Illegal interrupt mode.\n"); 7473 break; 7474 } 7475 return; 7476 } 7477 7478 /** 7479 * lpfc_enable_pci_dev - Enable a generic PCI device. 7480 * @phba: pointer to lpfc hba data structure. 7481 * 7482 * This routine is invoked to enable the PCI device that is common to all 7483 * PCI devices. 7484 * 7485 * Return codes 7486 * 0 - successful 7487 * other values - error 7488 **/ 7489 static int 7490 lpfc_enable_pci_dev(struct lpfc_hba *phba) 7491 { 7492 struct pci_dev *pdev; 7493 7494 /* Obtain PCI device reference */ 7495 if (!phba->pcidev) 7496 goto out_error; 7497 else 7498 pdev = phba->pcidev; 7499 /* Enable PCI device */ 7500 if (pci_enable_device_mem(pdev)) 7501 goto out_error; 7502 /* Request PCI resource for the device */ 7503 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 7504 goto out_disable_device; 7505 /* Set up device as PCI master and save state for EEH */ 7506 pci_set_master(pdev); 7507 pci_try_set_mwi(pdev); 7508 pci_save_state(pdev); 7509 7510 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 7511 if (pci_is_pcie(pdev)) 7512 pdev->needs_freset = 1; 7513 7514 return 0; 7515 7516 out_disable_device: 7517 pci_disable_device(pdev); 7518 out_error: 7519 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7520 "1401 Failed to enable pci device\n"); 7521 return -ENODEV; 7522 } 7523 7524 /** 7525 * lpfc_disable_pci_dev - Disable a generic PCI device. 7526 * @phba: pointer to lpfc hba data structure. 7527 * 7528 * This routine is invoked to disable the PCI device that is common to all 7529 * PCI devices. 
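 * It releases the memory regions requested in lpfc_enable_pci_dev() and then disables the PCI device.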
7530 **/ 7531 static void 7532 lpfc_disable_pci_dev(struct lpfc_hba *phba) 7533 { 7534 struct pci_dev *pdev; 7535 7536 /* Obtain PCI device reference */ 7537 if (!phba->pcidev) 7538 return; 7539 else 7540 pdev = phba->pcidev; 7541 /* Release PCI resource and disable PCI device */ 7542 pci_release_mem_regions(pdev); 7543 pci_disable_device(pdev); 7544 7545 return; 7546 } 7547 7548 /** 7549 * lpfc_reset_hba - Reset a hba 7550 * @phba: pointer to lpfc hba data structure. 7551 * 7552 * This routine is invoked to reset a hba device. It brings the HBA 7553 * offline, performs a board restart, and then brings the board back 7554 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 7555 * outstanding mailbox commands. 7556 **/ 7557 void 7558 lpfc_reset_hba(struct lpfc_hba *phba) 7559 { 7560 int rc = 0; 7561 7562 /* If resets are disabled then set error state and return. */ 7563 if (!phba->cfg_enable_hba_reset) { 7564 phba->link_state = LPFC_HBA_ERROR; 7565 return; 7566 } 7567 7568 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */ 7569 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) { 7570 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 7571 } else { 7572 if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) { 7573 /* Perform a PCI function reset to start from clean */ 7574 rc = lpfc_pci_function_reset(phba); 7575 lpfc_els_flush_all_cmd(phba); 7576 } 7577 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 7578 lpfc_sli_flush_io_rings(phba); 7579 } 7580 lpfc_offline(phba); 7581 clear_bit(MBX_TMO_ERR, &phba->bit_flags); 7582 if (unlikely(rc)) { 7583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7584 "8888 PCI function reset failed rc %x\n", 7585 rc); 7586 } else { 7587 lpfc_sli_brdrestart(phba); 7588 lpfc_online(phba); 7589 lpfc_unblock_mgmt_io(phba); 7590 } 7591 } 7592 7593 /** 7594 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 7595 * @phba: pointer to lpfc hba data structure. 7596 * 7597 * This function reads the PCI SR-IOV extended capability of the physical 7598 * function to determine how many virtual functions (TotalVFs) the device 7599 * can support. 7600 * 7601 * Return: the TotalVFs count, or 0 if the SR-IOV capability is not present. 7602 **/ 7603 uint16_t 7604 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 7605 { 7606 struct pci_dev *pdev = phba->pcidev; 7607 uint16_t nr_virtfn; 7608 int pos; 7609 7610 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 7611 if (pos == 0) 7612 return 0; 7613 7614 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 7615 return nr_virtfn; 7616 } 7617 7618 /** 7619 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 7620 * @phba: pointer to lpfc hba data structure. 7621 * @nr_vfn: number of virtual functions to be enabled. 7622 * 7623 * This function enables PCI SR-IOV virtual functions on a physical 7624 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 7625 * enable that number of virtual functions on the physical function. As 7626 * not all devices support SR-IOV, a failure return from the pci_enable_sriov() 7627 * API call is not treated as a fatal error for most devices.
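 * As an illustration (hypothetical counts): on a function whose SR-IOV capability reports TotalVFs of 16, a request for 32 VFs is rejected with -EINVAL, while a request for 8 is passed straight to pci_enable_sriov(); the requested count normally comes from phba->cfg_sriov_nr_virtfn, set by module parameter.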
7628 **/ 7629 int 7630 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 7631 { 7632 struct pci_dev *pdev = phba->pcidev; 7633 uint16_t max_nr_vfn; 7634 int rc; 7635 7636 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 7637 if (nr_vfn > max_nr_vfn) { 7638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7639 "3057 Requested vfs (%d) greater than " 7640 "supported vfs (%d)", nr_vfn, max_nr_vfn); 7641 return -EINVAL; 7642 } 7643 7644 rc = pci_enable_sriov(pdev, nr_vfn); 7645 if (rc) { 7646 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7647 "2806 Failed to enable sriov on this device " 7648 "with vfn number nr_vf:%d, rc:%d\n", 7649 nr_vfn, rc); 7650 } else 7651 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7652 "2807 Successful enable sriov on this device " 7653 "with vfn number nr_vf:%d\n", nr_vfn); 7654 return rc; 7655 } 7656 7657 static void 7658 lpfc_unblock_requests_work(struct work_struct *work) 7659 { 7660 struct lpfc_hba *phba = container_of(work, struct lpfc_hba, 7661 unblock_request_work); 7662 7663 lpfc_unblock_requests(phba); 7664 } 7665 7666 /** 7667 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. 7668 * @phba: pointer to lpfc hba data structure. 7669 * 7670 * This routine is invoked to set up the driver internal resources before the 7671 * device specific resource setup to support the HBA device it attached to. 7672 * 7673 * Return codes 7674 * 0 - successful 7675 * other values - error 7676 **/ 7677 static int 7678 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 7679 { 7680 struct lpfc_sli *psli = &phba->sli; 7681 7682 /* 7683 * Driver resources common to all SLI revisions 7684 */ 7685 atomic_set(&phba->fast_event_count, 0); 7686 atomic_set(&phba->dbg_log_idx, 0); 7687 atomic_set(&phba->dbg_log_cnt, 0); 7688 atomic_set(&phba->dbg_log_dmping, 0); 7689 spin_lock_init(&phba->hbalock); 7690 7691 /* Initialize port_list spinlock */ 7692 spin_lock_init(&phba->port_list_lock); 7693 INIT_LIST_HEAD(&phba->port_list); 7694 7695 INIT_LIST_HEAD(&phba->work_list); 7696 7697 /* Initialize the wait queue head for the kernel thread */ 7698 init_waitqueue_head(&phba->work_waitq); 7699 7700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7701 "1403 Protocols supported %s %s %s\n", 7702 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ? 7703 "SCSI" : " "), 7704 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ? 7705 "NVME" : " "), 7706 (phba->nvmet_support ?
"NVMET" : " ")); 7707 7708 /* ras_fwlog state */ 7709 spin_lock_init(&phba->ras_fwlog_lock); 7710 7711 /* Initialize the IO buffer list used by driver for SLI3 SCSI */ 7712 spin_lock_init(&phba->scsi_buf_list_get_lock); 7713 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 7714 spin_lock_init(&phba->scsi_buf_list_put_lock); 7715 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 7716 7717 /* Initialize the fabric iocb list */ 7718 INIT_LIST_HEAD(&phba->fabric_iocb_list); 7719 7720 /* Initialize list to save ELS buffers */ 7721 INIT_LIST_HEAD(&phba->elsbuf); 7722 7723 /* Initialize FCF connection rec list */ 7724 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 7725 7726 /* Initialize OAS configuration list */ 7727 spin_lock_init(&phba->devicelock); 7728 INIT_LIST_HEAD(&phba->luns); 7729 7730 /* MBOX heartbeat timer */ 7731 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 7732 /* Fabric block timer */ 7733 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 7734 /* EA polling mode timer */ 7735 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 7736 /* Heartbeat timer */ 7737 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 7738 7739 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); 7740 7741 INIT_DELAYED_WORK(&phba->idle_stat_delay_work, 7742 lpfc_idle_stat_delay_work); 7743 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work); 7744 return 0; 7745 } 7746 7747 /** 7748 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev 7749 * @phba: pointer to lpfc hba data structure. 7750 * 7751 * This routine is invoked to set up the driver internal resources specific to 7752 * support the SLI-3 HBA device it attached to. 7753 * 7754 * Return codes 7755 * 0 - successful 7756 * other values - error 7757 **/ 7758 static int 7759 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 7760 { 7761 int rc, entry_sz; 7762 7763 /* 7764 * Initialize timers used by driver 7765 */ 7766 7767 /* FCP polling mode timer */ 7768 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 7769 7770 /* Host attention work mask setup */ 7771 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 7772 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 7773 7774 /* Get all the module params for configuring this host */ 7775 lpfc_get_cfgparam(phba); 7776 /* Set up phase-1 common device driver resources */ 7777 7778 rc = lpfc_setup_driver_resource_phase1(phba); 7779 if (rc) 7780 return -ENODEV; 7781 7782 if (!phba->sli.sli3_ring) 7783 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING, 7784 sizeof(struct lpfc_sli_ring), 7785 GFP_KERNEL); 7786 if (!phba->sli.sli3_ring) 7787 return -ENOMEM; 7788 7789 /* 7790 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 7791 * used to create the sg_dma_buf_pool must be dynamically calculated. 7792 */ 7793 7794 if (phba->sli_rev == LPFC_SLI_REV4) 7795 entry_sz = sizeof(struct sli4_sge); 7796 else 7797 entry_sz = sizeof(struct ulp_bde64); 7798 7799 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 7800 if (phba->cfg_enable_bg) { 7801 /* 7802 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 7803 * the FCP rsp, and a BDE for each. Sice we have no control 7804 * over how many protection data segments the SCSI Layer 7805 * will hand us (ie: there could be one for every block 7806 * in the IO), we just allocate enough BDEs to accomidate 7807 * our max amount and we need to limit lpfc_sg_seg_cnt to 7808 * minimize the risk of running out. 
7809 */ 7810 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7811 sizeof(struct fcp_rsp) + 7812 (LPFC_MAX_SG_SEG_CNT * entry_sz); 7813 7814 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 7815 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 7816 7817 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 7818 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 7819 } else { 7820 /* 7821 * The scsi_buf for a regular I/O will hold the FCP cmnd, 7822 * the FCP rsp, a BDE for each, and a BDE for up to 7823 * cfg_sg_seg_cnt data segments. 7824 */ 7825 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 7826 sizeof(struct fcp_rsp) + 7827 ((phba->cfg_sg_seg_cnt + 2) * entry_sz); 7828 7829 /* Total BDEs in BPL for scsi_sg_list */ 7830 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 7831 } 7832 7833 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 7834 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 7835 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 7836 phba->cfg_total_seg_cnt); 7837 7838 phba->max_vpi = LPFC_MAX_VPI; 7839 /* This will be set to correct value after config_port mbox */ 7840 phba->max_vports = 0; 7841 7842 /* 7843 * Initialize the SLI Layer to run with lpfc HBAs. 7844 */ 7845 lpfc_sli_setup(phba); 7846 lpfc_sli_queue_init(phba); 7847 7848 /* Allocate device driver memory */ 7849 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 7850 return -ENOMEM; 7851 7852 phba->lpfc_sg_dma_buf_pool = 7853 dma_pool_create("lpfc_sg_dma_buf_pool", 7854 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, 7855 BPL_ALIGN_SZ, 0); 7856 7857 if (!phba->lpfc_sg_dma_buf_pool) 7858 goto fail_free_mem; 7859 7860 phba->lpfc_cmd_rsp_buf_pool = 7861 dma_pool_create("lpfc_cmd_rsp_buf_pool", 7862 &phba->pcidev->dev, 7863 sizeof(struct fcp_cmnd) + 7864 sizeof(struct fcp_rsp), 7865 BPL_ALIGN_SZ, 0); 7866 7867 if (!phba->lpfc_cmd_rsp_buf_pool) 7868 goto fail_free_dma_buf_pool; 7869 7870 /* 7871 * Enable sr-iov virtual functions if supported and configured 7872 * through the module parameter. 7873 */ 7874 if (phba->cfg_sriov_nr_virtfn > 0) { 7875 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 7876 phba->cfg_sriov_nr_virtfn); 7877 if (rc) { 7878 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7879 "2808 Requested number of SR-IOV " 7880 "virtual functions (%d) is not " 7881 "supported\n", 7882 phba->cfg_sriov_nr_virtfn); 7883 phba->cfg_sriov_nr_virtfn = 0; 7884 } 7885 } 7886 7887 return 0; 7888 7889 fail_free_dma_buf_pool: 7890 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 7891 phba->lpfc_sg_dma_buf_pool = NULL; 7892 fail_free_mem: 7893 lpfc_mem_free(phba); 7894 return -ENOMEM; 7895 } 7896 7897 /** 7898 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 7899 * @phba: pointer to lpfc hba data structure. 7900 * 7901 * This routine is invoked to unset the driver internal resources set up 7902 * specific for supporting the SLI-3 HBA device it attached to. 7903 **/ 7904 static void 7905 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 7906 { 7907 /* Free device driver memory allocated */ 7908 lpfc_mem_free_all(phba); 7909 7910 return; 7911 } 7912 7913 /** 7914 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 7915 * @phba: pointer to lpfc hba data structure. 7916 * 7917 * This routine is invoked to set up the driver internal resources specific to 7918 * support the SLI-4 HBA device it attached to. 
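 * Setup proceeds roughly as follows: allocate the lpfc_wq workqueue, initialize timers and list heads, create the bootstrap mailbox, set the endian order, read the port configuration, size and create the SGL and cmd/rsp DMA pools, and allocate the per-CPU and per-EQ bookkeeping; any failure unwinds whatever has already been set up.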
7919 * 7920 * Return codes 7921 * 0 - successful 7922 * other values - error 7923 **/ 7924 static int 7925 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 7926 { 7927 LPFC_MBOXQ_t *mboxq; 7928 MAILBOX_t *mb; 7929 int rc, i, max_buf_size; 7930 int longs; 7931 int extra; 7932 uint64_t wwn; 7933 u32 if_type; 7934 u32 if_fam; 7935 7936 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7937 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; 7938 phba->sli4_hba.curr_disp_cpu = 0; 7939 7940 /* Get all the module params for configuring this host */ 7941 lpfc_get_cfgparam(phba); 7942 7943 /* Set up phase-1 common device driver resources */ 7944 rc = lpfc_setup_driver_resource_phase1(phba); 7945 if (rc) 7946 return -ENODEV; 7947 7948 /* Before proceed, wait for POST done and device ready */ 7949 rc = lpfc_sli4_post_status_check(phba); 7950 if (rc) 7951 return -ENODEV; 7952 7953 /* Allocate all driver workqueues here */ 7954 7955 /* The lpfc_wq workqueue for deferred irq use */ 7956 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 7957 if (!phba->wq) 7958 return -ENOMEM; 7959 7960 /* 7961 * Initialize timers used by driver 7962 */ 7963 7964 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 7965 7966 /* FCF rediscover timer */ 7967 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); 7968 7969 /* CMF congestion timer */ 7970 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7971 phba->cmf_timer.function = lpfc_cmf_timer; 7972 /* CMF 1 minute stats collection timer */ 7973 hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 7974 phba->cmf_stats_timer.function = lpfc_cmf_stats_timer; 7975 7976 /* 7977 * Control structure for handling external multi-buffer mailbox 7978 * command pass-through. 7979 */ 7980 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 7981 sizeof(struct lpfc_mbox_ext_buf_ctx)); 7982 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 7983 7984 phba->max_vpi = LPFC_MAX_VPI; 7985 7986 /* This will be set to correct value after the read_config mbox */ 7987 phba->max_vports = 0; 7988 7989 /* Program the default value of vlan_id and fc_map */ 7990 phba->valid_vlan = 0; 7991 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 7992 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 7993 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 7994 7995 /* 7996 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 7997 * we will associate a new ring, for each EQ/CQ/WQ tuple. 7998 * The WQ create will allocate the ring. 7999 */ 8000 8001 /* Initialize buffer queue management fields */ 8002 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 8003 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 8004 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 8005 8006 /* for VMID idle timeout if VMID is enabled */ 8007 if (lpfc_is_vmid_enabled(phba)) 8008 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); 8009 8010 /* 8011 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
8012 */ 8013 /* Initialize the Abort buffer list used by driver */ 8014 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); 8015 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); 8016 8017 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8018 /* Initialize the Abort nvme buffer list used by driver */ 8019 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); 8020 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8021 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 8022 spin_lock_init(&phba->sli4_hba.t_active_list_lock); 8023 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list); 8024 } 8025 8026 /* This abort list used by worker thread */ 8027 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 8028 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); 8029 spin_lock_init(&phba->sli4_hba.asynce_list_lock); 8030 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock); 8031 8032 /* 8033 * Initialize driver internal slow-path work queues 8034 */ 8035 8036 /* Driver internel slow-path CQ Event pool */ 8037 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 8038 /* Response IOCB work queue list */ 8039 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 8040 /* Asynchronous event CQ Event work queue list */ 8041 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 8042 /* Slow-path XRI aborted CQ Event work queue list */ 8043 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 8044 /* Receive queue CQ Event work queue list */ 8045 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 8046 8047 /* Initialize extent block lists. */ 8048 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 8049 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 8050 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 8051 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 8052 8053 /* Initialize mboxq lists. If the early init routines fail 8054 * these lists need to be correctly initialized. 8055 */ 8056 INIT_LIST_HEAD(&phba->sli.mboxq); 8057 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl); 8058 8059 /* initialize optic_state to 0xFF */ 8060 phba->sli4_hba.lnk_info.optic_state = 0xff; 8061 8062 /* Allocate device driver memory */ 8063 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 8064 if (rc) 8065 goto out_destroy_workqueue; 8066 8067 /* IF Type 2 ports get initialized now. */ 8068 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 8069 LPFC_SLI_INTF_IF_TYPE_2) { 8070 rc = lpfc_pci_function_reset(phba); 8071 if (unlikely(rc)) { 8072 rc = -ENODEV; 8073 goto out_free_mem; 8074 } 8075 phba->temp_sensor_support = 1; 8076 } 8077 8078 /* Create the bootstrap mailbox command */ 8079 rc = lpfc_create_bootstrap_mbox(phba); 8080 if (unlikely(rc)) 8081 goto out_free_mem; 8082 8083 /* Set up the host's endian order with the device. */ 8084 rc = lpfc_setup_endian_order(phba); 8085 if (unlikely(rc)) 8086 goto out_free_bsmbx; 8087 8088 /* Set up the hba's configuration parameters. */ 8089 rc = lpfc_sli4_read_config(phba); 8090 if (unlikely(rc)) 8091 goto out_free_bsmbx; 8092 8093 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) { 8094 /* Right now the link is down, if FA-PWWN is configured the 8095 * firmware will try FLOGI before the driver gets a link up. 8096 * If it fails, the driver should get a MISCONFIGURED async 8097 * event which will clear this flag. The only notification 8098 * the driver gets is if it fails, if it succeeds there is no 8099 * notification given. Assume success. 
8100 */ 8101 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC; 8102 } 8103 8104 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 8105 if (unlikely(rc)) 8106 goto out_free_bsmbx; 8107 8108 /* IF Type 0 ports get initialized now. */ 8109 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 8110 LPFC_SLI_INTF_IF_TYPE_0) { 8111 rc = lpfc_pci_function_reset(phba); 8112 if (unlikely(rc)) 8113 goto out_free_bsmbx; 8114 } 8115 8116 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8117 GFP_KERNEL); 8118 if (!mboxq) { 8119 rc = -ENOMEM; 8120 goto out_free_bsmbx; 8121 } 8122 8123 /* Check for NVMET being configured */ 8124 phba->nvmet_support = 0; 8125 if (lpfc_enable_nvmet_cnt) { 8126 8127 /* First get WWN of HBA instance */ 8128 lpfc_read_nv(phba, mboxq); 8129 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8130 if (rc != MBX_SUCCESS) { 8131 lpfc_printf_log(phba, KERN_ERR, 8132 LOG_TRACE_EVENT, 8133 "6016 Mailbox failed , mbxCmd x%x " 8134 "READ_NV, mbxStatus x%x\n", 8135 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8136 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 8137 mempool_free(mboxq, phba->mbox_mem_pool); 8138 rc = -EIO; 8139 goto out_free_bsmbx; 8140 } 8141 mb = &mboxq->u.mb; 8142 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename, 8143 sizeof(uint64_t)); 8144 wwn = cpu_to_be64(wwn); 8145 phba->sli4_hba.wwnn.u.name = wwn; 8146 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, 8147 sizeof(uint64_t)); 8148 /* wwn is WWPN of HBA instance */ 8149 wwn = cpu_to_be64(wwn); 8150 phba->sli4_hba.wwpn.u.name = wwn; 8151 8152 /* Check to see if it matches any module parameter */ 8153 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 8154 if (wwn == lpfc_enable_nvmet[i]) { 8155 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 8156 if (lpfc_nvmet_mem_alloc(phba)) 8157 break; 8158 8159 phba->nvmet_support = 1; /* a match */ 8160 8161 lpfc_printf_log(phba, KERN_ERR, 8162 LOG_TRACE_EVENT, 8163 "6017 NVME Target %016llx\n", 8164 wwn); 8165 #else 8166 lpfc_printf_log(phba, KERN_ERR, 8167 LOG_TRACE_EVENT, 8168 "6021 Can't enable NVME Target." 8169 " NVME_TARGET_FC infrastructure" 8170 " is not in kernel\n"); 8171 #endif 8172 /* Not supported for NVMET */ 8173 phba->cfg_xri_rebalancing = 0; 8174 if (phba->irq_chann_mode == NHT_MODE) { 8175 phba->cfg_irq_chann = 8176 phba->sli4_hba.num_present_cpu; 8177 phba->cfg_hdw_queue = 8178 phba->sli4_hba.num_present_cpu; 8179 phba->irq_chann_mode = NORMAL_MODE; 8180 } 8181 break; 8182 } 8183 } 8184 } 8185 8186 lpfc_nvme_mod_param_dep(phba); 8187 8188 /* 8189 * Get sli4 parameters that override parameters from Port capabilities. 8190 * If this call fails, it isn't critical unless the SLI4 parameters come 8191 * back in conflict. 
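 * A failure here is tolerated only on SLI_INTF_IF_TYPE_0 / FAMILY_BE2 ports that do not have both extents and RPI headers enabled; every other case frees the mailbox and aborts the setup with -EIO.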
8192 */ 8193 rc = lpfc_get_sli4_parameters(phba, mboxq); 8194 if (rc) { 8195 if_type = bf_get(lpfc_sli_intf_if_type, 8196 &phba->sli4_hba.sli_intf); 8197 if_fam = bf_get(lpfc_sli_intf_sli_family, 8198 &phba->sli4_hba.sli_intf); 8199 if (phba->sli4_hba.extents_in_use && 8200 phba->sli4_hba.rpi_hdrs_in_use) { 8201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8202 "2999 Unsupported SLI4 Parameters " 8203 "Extents and RPI headers enabled.\n"); 8204 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8205 if_fam == LPFC_SLI_INTF_FAMILY_BE2) { 8206 mempool_free(mboxq, phba->mbox_mem_pool); 8207 rc = -EIO; 8208 goto out_free_bsmbx; 8209 } 8210 } 8211 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 && 8212 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) { 8213 mempool_free(mboxq, phba->mbox_mem_pool); 8214 rc = -EIO; 8215 goto out_free_bsmbx; 8216 } 8217 } 8218 8219 /* 8220 * 1 for cmd, 1 for rsp, NVME adds an extra one 8221 * for boundary conditions in its max_sgl_segment template. 8222 */ 8223 extra = 2; 8224 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 8225 extra++; 8226 8227 /* 8228 * It doesn't matter what family our adapter is in, we are 8229 * limited to 2 Pages, 512 SGEs, for our SGL. 8230 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 8231 */ 8232 max_buf_size = (2 * SLI4_PAGE_SIZE); 8233 8234 /* 8235 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 8236 * used to create the sg_dma_buf_pool must be calculated. 8237 */ 8238 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 8239 /* Both cfg_enable_bg and cfg_external_dif code paths */ 8240 8241 /* 8242 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, 8243 * the FCP rsp, and a SGE. Sice we have no control 8244 * over how many protection segments the SCSI Layer 8245 * will hand us (ie: there could be one for every block 8246 * in the IO), just allocate enough SGEs to accomidate 8247 * our max amount and we need to limit lpfc_sg_seg_cnt 8248 * to minimize the risk of running out. 8249 */ 8250 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 8251 sizeof(struct fcp_rsp) + max_buf_size; 8252 8253 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 8254 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 8255 8256 /* 8257 * If supporting DIF, reduce the seg count for scsi to 8258 * allow room for the DIF sges. 8259 */ 8260 if (phba->cfg_enable_bg && 8261 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) 8262 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; 8263 else 8264 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 8265 8266 } else { 8267 /* 8268 * The scsi_buf for a regular I/O holds the FCP cmnd, 8269 * the FCP rsp, a SGE for each, and a SGE for up to 8270 * cfg_sg_seg_cnt data segments. 8271 */ 8272 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 8273 sizeof(struct fcp_rsp) + 8274 ((phba->cfg_sg_seg_cnt + extra) * 8275 sizeof(struct sli4_sge)); 8276 8277 /* Total SGEs for scsi_sg_list */ 8278 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 8279 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; 8280 8281 /* 8282 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 8283 * need to post 1 page for the SGL. 
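 * Worked example (assuming the default cfg_sg_seg_cnt of 64 with NVME enabled, so extra = 3): 67 SGEs of sizeof(struct sli4_sge) = 16 bytes plus the FCP cmnd/rsp come to well under one 4KB SGL page, whereas more than 256 such SGEs would spill past a single page.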
8284 */ 8285 } 8286 8287 if (phba->cfg_xpsgl && !phba->nvmet_support) 8288 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; 8289 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 8290 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 8291 else 8292 phba->cfg_sg_dma_buf_size = 8293 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 8294 8295 phba->border_sge_num = phba->cfg_sg_dma_buf_size / 8296 sizeof(struct sli4_sge); 8297 8298 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ 8299 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8300 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 8301 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 8302 "6300 Reducing NVME sg segment " 8303 "cnt to %d\n", 8304 LPFC_MAX_NVME_SEG_CNT); 8305 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 8306 } else 8307 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 8308 } 8309 8310 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 8311 "9087 sg_seg_cnt:%d dmabuf_size:%d " 8312 "total:%d scsi:%d nvme:%d\n", 8313 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 8314 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, 8315 phba->cfg_nvme_seg_cnt); 8316 8317 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) 8318 i = phba->cfg_sg_dma_buf_size; 8319 else 8320 i = SLI4_PAGE_SIZE; 8321 8322 phba->lpfc_sg_dma_buf_pool = 8323 dma_pool_create("lpfc_sg_dma_buf_pool", 8324 &phba->pcidev->dev, 8325 phba->cfg_sg_dma_buf_size, 8326 i, 0); 8327 if (!phba->lpfc_sg_dma_buf_pool) { 8328 rc = -ENOMEM; 8329 goto out_free_bsmbx; 8330 } 8331 8332 phba->lpfc_cmd_rsp_buf_pool = 8333 dma_pool_create("lpfc_cmd_rsp_buf_pool", 8334 &phba->pcidev->dev, 8335 sizeof(struct fcp_cmnd) + 8336 sizeof(struct fcp_rsp), 8337 i, 0); 8338 if (!phba->lpfc_cmd_rsp_buf_pool) { 8339 rc = -ENOMEM; 8340 goto out_free_sg_dma_buf; 8341 } 8342 8343 mempool_free(mboxq, phba->mbox_mem_pool); 8344 8345 /* Verify OAS is supported */ 8346 lpfc_sli4_oas_verify(phba); 8347 8348 /* Verify RAS support on adapter */ 8349 lpfc_sli4_ras_init(phba); 8350 8351 /* Verify all the SLI4 queues */ 8352 rc = lpfc_sli4_queue_verify(phba); 8353 if (rc) 8354 goto out_free_cmd_rsp_buf; 8355 8356 /* Create driver internal CQE event pool */ 8357 rc = lpfc_sli4_cq_event_pool_create(phba); 8358 if (rc) 8359 goto out_free_cmd_rsp_buf; 8360 8361 /* Initialize sgl lists per host */ 8362 lpfc_init_sgl_list(phba); 8363 8364 /* Allocate and initialize active sgl array */ 8365 rc = lpfc_init_active_sgl_array(phba); 8366 if (rc) { 8367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8368 "1430 Failed to initialize sgl list.\n"); 8369 goto out_destroy_cq_event_pool; 8370 } 8371 rc = lpfc_sli4_init_rpi_hdrs(phba); 8372 if (rc) { 8373 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8374 "1432 Failed to initialize rpi headers.\n"); 8375 goto out_free_active_sgl; 8376 } 8377 8378 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 8379 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 8380 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long), 8381 GFP_KERNEL); 8382 if (!phba->fcf.fcf_rr_bmask) { 8383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8384 "2759 Failed allocate memory for FCF round " 8385 "robin failover bmask\n"); 8386 rc = -ENOMEM; 8387 goto out_remove_rpi_hdrs; 8388 } 8389 8390 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, 8391 sizeof(struct lpfc_hba_eq_hdl), 8392 GFP_KERNEL); 8393 if (!phba->sli4_hba.hba_eq_hdl) { 8394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8395 "2572 Failed allocate memory for " 8396 
"fast-path per-EQ handle array\n"); 8397 rc = -ENOMEM; 8398 goto out_free_fcf_rr_bmask; 8399 } 8400 8401 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, 8402 sizeof(struct lpfc_vector_map_info), 8403 GFP_KERNEL); 8404 if (!phba->sli4_hba.cpu_map) { 8405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8406 "3327 Failed allocate memory for msi-x " 8407 "interrupt vector mapping\n"); 8408 rc = -ENOMEM; 8409 goto out_free_hba_eq_hdl; 8410 } 8411 8412 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); 8413 if (!phba->sli4_hba.eq_info) { 8414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8415 "3321 Failed allocation for per_cpu stats\n"); 8416 rc = -ENOMEM; 8417 goto out_free_hba_cpu_map; 8418 } 8419 8420 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu, 8421 sizeof(*phba->sli4_hba.idle_stat), 8422 GFP_KERNEL); 8423 if (!phba->sli4_hba.idle_stat) { 8424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8425 "3390 Failed allocation for idle_stat\n"); 8426 rc = -ENOMEM; 8427 goto out_free_hba_eq_info; 8428 } 8429 8430 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8431 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat); 8432 if (!phba->sli4_hba.c_stat) { 8433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8434 "3332 Failed allocating per cpu hdwq stats\n"); 8435 rc = -ENOMEM; 8436 goto out_free_hba_idle_stat; 8437 } 8438 #endif 8439 8440 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat); 8441 if (!phba->cmf_stat) { 8442 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8443 "3331 Failed allocating per cpu cgn stats\n"); 8444 rc = -ENOMEM; 8445 goto out_free_hba_hdwq_info; 8446 } 8447 8448 /* 8449 * Enable sr-iov virtual functions if supported and configured 8450 * through the module parameter. 8451 */ 8452 if (phba->cfg_sriov_nr_virtfn > 0) { 8453 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 8454 phba->cfg_sriov_nr_virtfn); 8455 if (rc) { 8456 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8457 "3020 Requested number of SR-IOV " 8458 "virtual functions (%d) is not " 8459 "supported\n", 8460 phba->cfg_sriov_nr_virtfn); 8461 phba->cfg_sriov_nr_virtfn = 0; 8462 } 8463 } 8464 8465 return 0; 8466 8467 out_free_hba_hdwq_info: 8468 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8469 free_percpu(phba->sli4_hba.c_stat); 8470 out_free_hba_idle_stat: 8471 #endif 8472 kfree(phba->sli4_hba.idle_stat); 8473 out_free_hba_eq_info: 8474 free_percpu(phba->sli4_hba.eq_info); 8475 out_free_hba_cpu_map: 8476 kfree(phba->sli4_hba.cpu_map); 8477 out_free_hba_eq_hdl: 8478 kfree(phba->sli4_hba.hba_eq_hdl); 8479 out_free_fcf_rr_bmask: 8480 kfree(phba->fcf.fcf_rr_bmask); 8481 out_remove_rpi_hdrs: 8482 lpfc_sli4_remove_rpi_hdrs(phba); 8483 out_free_active_sgl: 8484 lpfc_free_active_sgl(phba); 8485 out_destroy_cq_event_pool: 8486 lpfc_sli4_cq_event_pool_destroy(phba); 8487 out_free_cmd_rsp_buf: 8488 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); 8489 phba->lpfc_cmd_rsp_buf_pool = NULL; 8490 out_free_sg_dma_buf: 8491 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); 8492 phba->lpfc_sg_dma_buf_pool = NULL; 8493 out_free_bsmbx: 8494 lpfc_destroy_bootstrap_mbox(phba); 8495 out_free_mem: 8496 lpfc_mem_free(phba); 8497 out_destroy_workqueue: 8498 destroy_workqueue(phba->wq); 8499 phba->wq = NULL; 8500 return rc; 8501 } 8502 8503 /** 8504 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 8505 * @phba: pointer to lpfc hba data structure. 
8506 * 8507 * This routine is invoked to unset the driver internal resources set up 8508 * specific for supporting the SLI-4 HBA device it attached to. 8509 **/ 8510 static void 8511 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 8512 { 8513 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 8514 8515 free_percpu(phba->sli4_hba.eq_info); 8516 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8517 free_percpu(phba->sli4_hba.c_stat); 8518 #endif 8519 free_percpu(phba->cmf_stat); 8520 kfree(phba->sli4_hba.idle_stat); 8521 8522 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 8523 kfree(phba->sli4_hba.cpu_map); 8524 phba->sli4_hba.num_possible_cpu = 0; 8525 phba->sli4_hba.num_present_cpu = 0; 8526 phba->sli4_hba.curr_disp_cpu = 0; 8527 cpumask_clear(&phba->sli4_hba.irq_aff_mask); 8528 8529 /* Free memory allocated for fast-path work queue handles */ 8530 kfree(phba->sli4_hba.hba_eq_hdl); 8531 8532 /* Free the allocated rpi headers. */ 8533 lpfc_sli4_remove_rpi_hdrs(phba); 8534 lpfc_sli4_remove_rpis(phba); 8535 8536 /* Free eligible FCF index bmask */ 8537 kfree(phba->fcf.fcf_rr_bmask); 8538 8539 /* Free the ELS sgl list */ 8540 lpfc_free_active_sgl(phba); 8541 lpfc_free_els_sgl_list(phba); 8542 lpfc_free_nvmet_sgl_list(phba); 8543 8544 /* Free the completion queue EQ event pool */ 8545 lpfc_sli4_cq_event_release_all(phba); 8546 lpfc_sli4_cq_event_pool_destroy(phba); 8547 8548 /* Release resource identifiers. */ 8549 lpfc_sli4_dealloc_resource_identifiers(phba); 8550 8551 /* Free the bsmbx region. */ 8552 lpfc_destroy_bootstrap_mbox(phba); 8553 8554 /* Free the SLI Layer memory with SLI4 HBAs */ 8555 lpfc_mem_free_all(phba); 8556 8557 /* Free the current connect table */ 8558 list_for_each_entry_safe(conn_entry, next_conn_entry, 8559 &phba->fcf_conn_rec_list, list) { 8560 list_del_init(&conn_entry->list); 8561 kfree(conn_entry); 8562 } 8563 8564 return; 8565 } 8566 8567 /** 8568 * lpfc_init_api_table_setup - Set up init api function jump table 8569 * @phba: The hba struct for which this call is being executed. 8570 * @dev_grp: The HBA PCI-Device group number. 8571 * 8572 * This routine sets up the device INIT interface API function jump table 8573 * in @phba struct. 8574 * 8575 * Returns: 0 - success, -ENODEV - failure. 8576 **/ 8577 int 8578 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8579 { 8580 phba->lpfc_hba_init_link = lpfc_hba_init_link; 8581 phba->lpfc_hba_down_link = lpfc_hba_down_link; 8582 phba->lpfc_selective_reset = lpfc_selective_reset; 8583 switch (dev_grp) { 8584 case LPFC_PCI_DEV_LP: 8585 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 8586 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 8587 phba->lpfc_stop_port = lpfc_stop_port_s3; 8588 break; 8589 case LPFC_PCI_DEV_OC: 8590 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 8591 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 8592 phba->lpfc_stop_port = lpfc_stop_port_s4; 8593 break; 8594 default: 8595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8596 "1431 Invalid HBA PCI-device group: 0x%x\n", 8597 dev_grp); 8598 return -ENODEV; 8599 } 8600 return 0; 8601 } 8602 8603 /** 8604 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 8605 * @phba: pointer to lpfc hba data structure. 8606 * 8607 * This routine is invoked to set up the driver internal resources after the 8608 * device specific resource setup to support the HBA device it attached to. 
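 * At present this phase only starts the per-HBA worker thread ("lpfc_worker_%d") that runs lpfc_do_work().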
8609 * 8610 * Return codes 8611 * 0 - successful 8612 * other values - error 8613 **/ 8614 static int 8615 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 8616 { 8617 int error; 8618 8619 /* Startup the kernel thread for this host adapter. */ 8620 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8621 "lpfc_worker_%d", phba->brd_no); 8622 if (IS_ERR(phba->worker_thread)) { 8623 error = PTR_ERR(phba->worker_thread); 8624 return error; 8625 } 8626 8627 return 0; 8628 } 8629 8630 /** 8631 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 8632 * @phba: pointer to lpfc hba data structure. 8633 * 8634 * This routine is invoked to unset the driver internal resources set up after 8635 * the device specific resource setup for supporting the HBA device it 8636 * attached to. 8637 **/ 8638 static void 8639 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 8640 { 8641 if (phba->wq) { 8642 destroy_workqueue(phba->wq); 8643 phba->wq = NULL; 8644 } 8645 8646 /* Stop kernel worker thread */ 8647 if (phba->worker_thread) 8648 kthread_stop(phba->worker_thread); 8649 } 8650 8651 /** 8652 * lpfc_free_iocb_list - Free iocb list. 8653 * @phba: pointer to lpfc hba data structure. 8654 * 8655 * This routine is invoked to free the driver's IOCB list and memory. 8656 **/ 8657 void 8658 lpfc_free_iocb_list(struct lpfc_hba *phba) 8659 { 8660 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 8661 8662 spin_lock_irq(&phba->hbalock); 8663 list_for_each_entry_safe(iocbq_entry, iocbq_next, 8664 &phba->lpfc_iocb_list, list) { 8665 list_del(&iocbq_entry->list); 8666 kfree(iocbq_entry); 8667 phba->total_iocbq_bufs--; 8668 } 8669 spin_unlock_irq(&phba->hbalock); 8670 8671 return; 8672 } 8673 8674 /** 8675 * lpfc_init_iocb_list - Allocate and initialize iocb list. 8676 * @phba: pointer to lpfc hba data structure. 8677 * @iocb_count: number of requested iocbs 8678 * 8679 * This routine is invoked to allocate and initizlize the driver's IOCB 8680 * list and set up the IOCB tag array accordingly. 8681 * 8682 * Return codes 8683 * 0 - successful 8684 * other values - error 8685 **/ 8686 int 8687 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 8688 { 8689 struct lpfc_iocbq *iocbq_entry = NULL; 8690 uint16_t iotag; 8691 int i; 8692 8693 /* Initialize and populate the iocb list per host. */ 8694 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 8695 for (i = 0; i < iocb_count; i++) { 8696 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 8697 if (iocbq_entry == NULL) { 8698 printk(KERN_ERR "%s: only allocated %d iocbs of " 8699 "expected %d count. Unloading driver.\n", 8700 __func__, i, iocb_count); 8701 goto out_free_iocbq; 8702 } 8703 8704 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 8705 if (iotag == 0) { 8706 kfree(iocbq_entry); 8707 printk(KERN_ERR "%s: failed to allocate IOTAG. " 8708 "Unloading driver.\n", __func__); 8709 goto out_free_iocbq; 8710 } 8711 iocbq_entry->sli4_lxritag = NO_XRI; 8712 iocbq_entry->sli4_xritag = NO_XRI; 8713 8714 spin_lock_irq(&phba->hbalock); 8715 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 8716 phba->total_iocbq_bufs++; 8717 spin_unlock_irq(&phba->hbalock); 8718 } 8719 8720 return 0; 8721 8722 out_free_iocbq: 8723 lpfc_free_iocb_list(phba); 8724 8725 return -ENOMEM; 8726 } 8727 8728 /** 8729 * lpfc_free_sgl_list - Free a given sgl list. 8730 * @phba: pointer to lpfc hba data structure. 8731 * @sglq_list: pointer to the head of sgl list. 8732 * 8733 * This routine is invoked to free a give sgl list and memory. 
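 * The caller is expected to have already spliced the entries off any live driver list (see lpfc_free_els_sgl_list()); each entry's DMA buffer is returned with lpfc_mbuf_free() before the sglq structure itself is freed.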
8734 **/ 8735 void 8736 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 8737 { 8738 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8739 8740 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 8741 list_del(&sglq_entry->list); 8742 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 8743 kfree(sglq_entry); 8744 } 8745 } 8746 8747 /** 8748 * lpfc_free_els_sgl_list - Free els sgl list. 8749 * @phba: pointer to lpfc hba data structure. 8750 * 8751 * This routine is invoked to free the driver's els sgl list and memory. 8752 **/ 8753 static void 8754 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 8755 { 8756 LIST_HEAD(sglq_list); 8757 8758 /* Retrieve all els sgls from driver list */ 8759 spin_lock_irq(&phba->sli4_hba.sgl_list_lock); 8760 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list); 8761 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock); 8762 8763 /* Now free the sgl list */ 8764 lpfc_free_sgl_list(phba, &sglq_list); 8765 } 8766 8767 /** 8768 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list. 8769 * @phba: pointer to lpfc hba data structure. 8770 * 8771 * This routine is invoked to free the driver's nvmet sgl list and memory. 8772 **/ 8773 static void 8774 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba) 8775 { 8776 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 8777 LIST_HEAD(sglq_list); 8778 8779 /* Retrieve all nvmet sgls from driver list */ 8780 spin_lock_irq(&phba->hbalock); 8781 spin_lock(&phba->sli4_hba.sgl_list_lock); 8782 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list); 8783 spin_unlock(&phba->sli4_hba.sgl_list_lock); 8784 spin_unlock_irq(&phba->hbalock); 8785 8786 /* Now free the sgl list */ 8787 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) { 8788 list_del(&sglq_entry->list); 8789 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys); 8790 kfree(sglq_entry); 8791 } 8792 8793 /* Update the nvmet_xri_cnt to reflect no current sgls. 8794 * The next initialization cycle sets the count and allocates 8795 * the sgls over again. 8796 */ 8797 phba->sli4_hba.nvmet_xri_cnt = 0; 8798 } 8799 8800 /** 8801 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 8802 * @phba: pointer to lpfc hba data structure. 8803 * 8804 * This routine is invoked to allocate the driver's active sgl memory. 8805 * This array will hold the sglq_entry's for active IOs. 8806 **/ 8807 static int 8808 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 8809 { 8810 int size; 8811 size = sizeof(struct lpfc_sglq *); 8812 size *= phba->sli4_hba.max_cfg_param.max_xri; 8813 8814 phba->sli4_hba.lpfc_sglq_active_list = 8815 kzalloc(size, GFP_KERNEL); 8816 if (!phba->sli4_hba.lpfc_sglq_active_list) 8817 return -ENOMEM; 8818 return 0; 8819 } 8820 8821 /** 8822 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 8823 * @phba: pointer to lpfc hba data structure. 8824 * 8825 * This routine is invoked to walk through the array of active sglq entries 8826 * and free all of the resources. 8827 * This is just a place holder for now. 8828 **/ 8829 static void 8830 lpfc_free_active_sgl(struct lpfc_hba *phba) 8831 { 8832 kfree(phba->sli4_hba.lpfc_sglq_active_list); 8833 } 8834 8835 /** 8836 * lpfc_init_sgl_list - Allocate and initialize sgl list. 8837 * @phba: pointer to lpfc hba data structure. 8838 * 8839 * This routine is invoked to allocate and initizlize the driver's sgl 8840 * list and set up the sgl xritag tag array accordingly. 
8841 * 8842 **/ 8843 static void 8844 lpfc_init_sgl_list(struct lpfc_hba *phba) 8845 { 8846 /* Initialize and populate the sglq list per host/VF. */ 8847 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list); 8848 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8849 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list); 8850 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 8851 8852 /* els xri-sgl book keeping */ 8853 phba->sli4_hba.els_xri_cnt = 0; 8854 8855 /* nvme xri-buffer book keeping */ 8856 phba->sli4_hba.io_xri_cnt = 0; 8857 } 8858 8859 /** 8860 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 8861 * @phba: pointer to lpfc hba data structure. 8862 * 8863 * This routine is invoked to post rpi header templates to the 8864 * port for those SLI4 ports that do not support extents. This routine 8865 * posts a PAGE_SIZE memory region to the port to hold up to 8866 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 8867 * and should be called only when interrupts are disabled. 8868 * 8869 * Return codes 8870 * 0 - successful 8871 * -ERROR - otherwise. 8872 **/ 8873 int 8874 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 8875 { 8876 int rc = 0; 8877 struct lpfc_rpi_hdr *rpi_hdr; 8878 8879 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 8880 if (!phba->sli4_hba.rpi_hdrs_in_use) 8881 return rc; 8882 if (phba->sli4_hba.extents_in_use) 8883 return -EIO; 8884 8885 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 8886 if (!rpi_hdr) { 8887 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8888 "0391 Error during rpi post operation\n"); 8889 lpfc_sli4_remove_rpis(phba); 8890 rc = -ENODEV; 8891 } 8892 8893 return rc; 8894 } 8895 8896 /** 8897 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 8898 * @phba: pointer to lpfc hba data structure. 8899 * 8900 * This routine is invoked to allocate a single 4KB memory region to 8901 * support rpis and stores them in the phba. This single region 8902 * provides support for up to 64 rpis. The region is used globally 8903 * by the device. 8904 * 8905 * Returns: 8906 * A valid rpi hdr on success. 8907 * A NULL pointer on any failure. 8908 **/ 8909 struct lpfc_rpi_hdr * 8910 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 8911 { 8912 uint16_t rpi_limit, curr_rpi_range; 8913 struct lpfc_dmabuf *dmabuf; 8914 struct lpfc_rpi_hdr *rpi_hdr; 8915 8916 /* 8917 * If the SLI4 port supports extents, posting the rpi header isn't 8918 * required. Set the expected maximum count and let the actual value 8919 * get set when extents are fully allocated. 8920 */ 8921 if (!phba->sli4_hba.rpi_hdrs_in_use) 8922 return NULL; 8923 if (phba->sli4_hba.extents_in_use) 8924 return NULL; 8925 8926 /* The limit on the logical index is just the max_rpi count. */ 8927 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; 8928 8929 spin_lock_irq(&phba->hbalock); 8930 /* 8931 * Establish the starting RPI in this header block. The starting 8932 * rpi is normalized to a zero base because the physical rpi is 8933 * port based. 8934 */ 8935 curr_rpi_range = phba->sli4_hba.next_rpi; 8936 spin_unlock_irq(&phba->hbalock); 8937 8938 /* Reached full RPI range */ 8939 if (curr_rpi_range == rpi_limit) 8940 return NULL; 8941 8942 /* 8943 * First allocate the protocol header region for the port. The 8944 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 
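 * dma_alloc_coherent() is asked for LPFC_HDR_TEMPLATE_SIZE bytes and the returned bus address is then checked with IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE); an unaligned region is freed again and the allocation treated as a failure rather than being handed to the port.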
8945 */ 8946 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 8947 if (!dmabuf) 8948 return NULL; 8949 8950 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8951 LPFC_HDR_TEMPLATE_SIZE, 8952 &dmabuf->phys, GFP_KERNEL); 8953 if (!dmabuf->virt) { 8954 rpi_hdr = NULL; 8955 goto err_free_dmabuf; 8956 } 8957 8958 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 8959 rpi_hdr = NULL; 8960 goto err_free_coherent; 8961 } 8962 8963 /* Save the rpi header data for cleanup later. */ 8964 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 8965 if (!rpi_hdr) 8966 goto err_free_coherent; 8967 8968 rpi_hdr->dmabuf = dmabuf; 8969 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 8970 rpi_hdr->page_count = 1; 8971 spin_lock_irq(&phba->hbalock); 8972 8973 /* The rpi_hdr stores the logical index only. */ 8974 rpi_hdr->start_rpi = curr_rpi_range; 8975 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; 8976 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 8977 8978 spin_unlock_irq(&phba->hbalock); 8979 return rpi_hdr; 8980 8981 err_free_coherent: 8982 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 8983 dmabuf->virt, dmabuf->phys); 8984 err_free_dmabuf: 8985 kfree(dmabuf); 8986 return NULL; 8987 } 8988 8989 /** 8990 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 8991 * @phba: pointer to lpfc hba data structure. 8992 * 8993 * This routine is invoked to remove all memory resources allocated 8994 * to support rpis for SLI4 ports not supporting extents. This routine 8995 * presumes the caller has released all rpis consumed by fabric or port 8996 * logins and is prepared to have the header pages removed. 8997 **/ 8998 void 8999 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 9000 { 9001 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 9002 9003 if (!phba->sli4_hba.rpi_hdrs_in_use) 9004 goto exit; 9005 9006 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 9007 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 9008 list_del(&rpi_hdr->list); 9009 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 9010 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 9011 kfree(rpi_hdr->dmabuf); 9012 kfree(rpi_hdr); 9013 } 9014 exit: 9015 /* There are no rpis available to the port now. */ 9016 phba->sli4_hba.next_rpi = 0; 9017 } 9018 9019 /** 9020 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 9021 * @pdev: pointer to pci device data structure. 9022 * 9023 * This routine is invoked to allocate the driver hba data structure for an 9024 * HBA device. If the allocation is successful, the phba reference to the 9025 * PCI device data structure is set. 
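 * A unique board number is also reserved for the HBA; it is returned to
 * the lpfc_hba_index IDR by lpfc_hba_free().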
 *
 * Return codes
 *      pointer to @phba - successful
 *      NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		kfree(phba->sli4_hba.hdwq);

	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
	return;
}

/**
 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine sets up the initial FDMI attribute masks for
 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
 * to get these attributes first before falling back; the attribute
 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
 **/
void
lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	set_bit(FC_ALLOW_FDMI, &vport->load_flag);
	if (phba->cfg_enable_SmartSAN ||
	    phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"6077 Setup FDMI mask: hba x%x port x%x\n",
			vport->fdmi_hba_mask, vport->fdmi_port_mask);
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
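 * When NVMe target mode is configured, only the physical port is used and
 * the enabled FC4 type is forced to NVME.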
9120 * 9121 * Return codes 9122 * 0 - successful 9123 * other values - error 9124 **/ 9125 static int 9126 lpfc_create_shost(struct lpfc_hba *phba) 9127 { 9128 struct lpfc_vport *vport; 9129 struct Scsi_Host *shost; 9130 9131 /* Initialize HBA FC structure */ 9132 phba->fc_edtov = FF_DEF_EDTOV; 9133 phba->fc_ratov = FF_DEF_RATOV; 9134 phba->fc_altov = FF_DEF_ALTOV; 9135 phba->fc_arbtov = FF_DEF_ARBTOV; 9136 9137 atomic_set(&phba->sdev_cnt, 0); 9138 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 9139 if (!vport) 9140 return -ENODEV; 9141 9142 shost = lpfc_shost_from_vport(vport); 9143 phba->pport = vport; 9144 9145 if (phba->nvmet_support) { 9146 /* Only 1 vport (pport) will support NVME target */ 9147 phba->targetport = NULL; 9148 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; 9149 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, 9150 "6076 NVME Target Found\n"); 9151 } 9152 9153 lpfc_debugfs_initialize(vport); 9154 /* Put reference to SCSI host to driver's device private data */ 9155 pci_set_drvdata(phba->pcidev, shost); 9156 9157 lpfc_setup_fdmi_mask(vport); 9158 9159 /* 9160 * At this point we are fully registered with PSA. In addition, 9161 * any initial discovery should be completed. 9162 */ 9163 return 0; 9164 } 9165 9166 /** 9167 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 9168 * @phba: pointer to lpfc hba data structure. 9169 * 9170 * This routine is invoked to destroy HBA physical port and the associated 9171 * SCSI host. 9172 **/ 9173 static void 9174 lpfc_destroy_shost(struct lpfc_hba *phba) 9175 { 9176 struct lpfc_vport *vport = phba->pport; 9177 9178 /* Destroy physical port that associated with the SCSI host */ 9179 destroy_port(vport); 9180 9181 return; 9182 } 9183 9184 /** 9185 * lpfc_setup_bg - Setup Block guard structures and debug areas. 9186 * @phba: pointer to lpfc hba data structure. 9187 * @shost: the shost to be used to detect Block guard settings. 9188 * 9189 * This routine sets up the local Block guard protocol settings for @shost. 9190 * This routine also allocates memory for debugging bg buffers. 
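 * Only DIF Type 1 and DIX Type 0/1 protection, with CRC or IP-checksum
 * guard, can be advertised to the SCSI midlayer; any other bits in the
 * configured cfg_prot_mask and cfg_prot_guard values are masked off.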
9191 **/ 9192 static void 9193 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 9194 { 9195 uint32_t old_mask; 9196 uint32_t old_guard; 9197 9198 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9199 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9200 "1478 Registering BlockGuard with the " 9201 "SCSI layer\n"); 9202 9203 old_mask = phba->cfg_prot_mask; 9204 old_guard = phba->cfg_prot_guard; 9205 9206 /* Only allow supported values */ 9207 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 9208 SHOST_DIX_TYPE0_PROTECTION | 9209 SHOST_DIX_TYPE1_PROTECTION); 9210 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 9211 SHOST_DIX_GUARD_CRC); 9212 9213 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 9214 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 9215 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 9216 9217 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 9218 if ((old_mask != phba->cfg_prot_mask) || 9219 (old_guard != phba->cfg_prot_guard)) 9220 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9221 "1475 Registering BlockGuard with the " 9222 "SCSI layer: mask %d guard %d\n", 9223 phba->cfg_prot_mask, 9224 phba->cfg_prot_guard); 9225 9226 scsi_host_set_prot(shost, phba->cfg_prot_mask); 9227 scsi_host_set_guard(shost, phba->cfg_prot_guard); 9228 } else 9229 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9230 "1479 Not Registering BlockGuard with the SCSI " 9231 "layer, Bad protection parameters: %d %d\n", 9232 old_mask, old_guard); 9233 } 9234 } 9235 9236 /** 9237 * lpfc_post_init_setup - Perform necessary device post initialization setup. 9238 * @phba: pointer to lpfc hba data structure. 9239 * 9240 * This routine is invoked to perform all the necessary post initialization 9241 * setup for the device. 9242 **/ 9243 static void 9244 lpfc_post_init_setup(struct lpfc_hba *phba) 9245 { 9246 struct Scsi_Host *shost; 9247 struct lpfc_adapter_event_header adapter_event; 9248 9249 /* Get the default values for Model Name and Description */ 9250 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9251 9252 /* 9253 * hba setup may have changed the hba_queue_depth so we need to 9254 * adjust the value of can_queue. 9255 */ 9256 shost = pci_get_drvdata(phba->pcidev); 9257 shost->can_queue = phba->cfg_hba_queue_depth - 10; 9258 9259 lpfc_host_attrib_init(shost); 9260 9261 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9262 spin_lock_irq(shost->host_lock); 9263 lpfc_poll_start_timer(phba); 9264 spin_unlock_irq(shost->host_lock); 9265 } 9266 9267 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9268 "0428 Perform SCSI scan\n"); 9269 /* Send board arrival event to upper layer */ 9270 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 9271 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 9272 fc_host_post_vendor_event(shost, fc_get_event_number(), 9273 sizeof(adapter_event), 9274 (char *) &adapter_event, 9275 LPFC_NL_VENDOR_ID); 9276 return; 9277 } 9278 9279 /** 9280 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 9281 * @phba: pointer to lpfc hba data structure. 9282 * 9283 * This routine is invoked to set up the PCI device memory space for device 9284 * with SLI-3 interface spec. 
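 * BAR0 is mapped for the HBA SLIM and BAR2 for the HBA control registers;
 * coherent DMA regions are also allocated here for the SLI-2 SLIM and the
 * host buffer queues (HBQs).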
9285 * 9286 * Return codes 9287 * 0 - successful 9288 * other values - error 9289 **/ 9290 static int 9291 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 9292 { 9293 struct pci_dev *pdev = phba->pcidev; 9294 unsigned long bar0map_len, bar2map_len; 9295 int i, hbq_count; 9296 void *ptr; 9297 int error; 9298 9299 if (!pdev) 9300 return -ENODEV; 9301 9302 /* Set the device DMA mask size */ 9303 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9304 if (error) 9305 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 9306 if (error) 9307 return error; 9308 error = -ENODEV; 9309 9310 /* Get the bus address of Bar0 and Bar2 and the number of bytes 9311 * required by each mapping. 9312 */ 9313 phba->pci_bar0_map = pci_resource_start(pdev, 0); 9314 bar0map_len = pci_resource_len(pdev, 0); 9315 9316 phba->pci_bar2_map = pci_resource_start(pdev, 2); 9317 bar2map_len = pci_resource_len(pdev, 2); 9318 9319 /* Map HBA SLIM to a kernel virtual address. */ 9320 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 9321 if (!phba->slim_memmap_p) { 9322 dev_printk(KERN_ERR, &pdev->dev, 9323 "ioremap failed for SLIM memory.\n"); 9324 goto out; 9325 } 9326 9327 /* Map HBA Control Registers to a kernel virtual address. */ 9328 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 9329 if (!phba->ctrl_regs_memmap_p) { 9330 dev_printk(KERN_ERR, &pdev->dev, 9331 "ioremap failed for HBA control registers.\n"); 9332 goto out_iounmap_slim; 9333 } 9334 9335 /* Allocate memory for SLI-2 structures */ 9336 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9337 &phba->slim2p.phys, GFP_KERNEL); 9338 if (!phba->slim2p.virt) 9339 goto out_iounmap; 9340 9341 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 9342 phba->mbox_ext = (phba->slim2p.virt + 9343 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 9344 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 9345 phba->IOCBs = (phba->slim2p.virt + 9346 offsetof(struct lpfc_sli2_slim, IOCBs)); 9347 9348 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 9349 lpfc_sli_hbq_size(), 9350 &phba->hbqslimp.phys, 9351 GFP_KERNEL); 9352 if (!phba->hbqslimp.virt) 9353 goto out_free_slim; 9354 9355 hbq_count = lpfc_sli_hbq_count(); 9356 ptr = phba->hbqslimp.virt; 9357 for (i = 0; i < hbq_count; ++i) { 9358 phba->hbqs[i].hbq_virt = ptr; 9359 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 9360 ptr += (lpfc_hbq_defs[i]->entry_count * 9361 sizeof(struct lpfc_hbq_entry)); 9362 } 9363 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 9364 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 9365 9366 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 9367 9368 phba->MBslimaddr = phba->slim_memmap_p; 9369 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 9370 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 9371 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 9372 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 9373 9374 return 0; 9375 9376 out_free_slim: 9377 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9378 phba->slim2p.virt, phba->slim2p.phys); 9379 out_iounmap: 9380 iounmap(phba->ctrl_regs_memmap_p); 9381 out_iounmap_slim: 9382 iounmap(phba->slim_memmap_p); 9383 out: 9384 return error; 9385 } 9386 9387 /** 9388 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 9389 * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
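		 * For if_type 0 ports the unrecoverable-error registers
		 * (UERR/UE_MASK) are consulted; if_type 2 and if_type 6
		 * ports report errors through the port STATUS, ERR1 and
		 * ERR2 registers.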
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				lpfc_sli4_unrecoverable_port(&reg_data)) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
				break;
			}

			if (lpfc_pldv_detect &&
			    bf_get(lpfc_sli_intf_sli_family,
				   &phba->sli4_hba.sli_intf) ==
					LPFC_SLI_INTF_FAMILY_G6)
				pci_write_config_byte(phba->pcidev,
						      LPFC_SLI_INTF, CFG_PLD);
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
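 * For if_type 0 this covers the unrecoverable-error and SLI_INTF
 * registers; for if_type 2 the port control/status registers and the
 * queue doorbells also live in BAR0, while if_type 6 places its
 * doorbells in BAR1 (see lpfc_sli4_bar1_register_memmap()).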
9571 **/ 9572 static void 9573 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 9574 { 9575 switch (if_type) { 9576 case LPFC_SLI_INTF_IF_TYPE_0: 9577 phba->sli4_hba.u.if_type0.UERRLOregaddr = 9578 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 9579 phba->sli4_hba.u.if_type0.UERRHIregaddr = 9580 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 9581 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 9582 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 9583 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 9584 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 9585 phba->sli4_hba.SLIINTFregaddr = 9586 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9587 break; 9588 case LPFC_SLI_INTF_IF_TYPE_2: 9589 phba->sli4_hba.u.if_type2.EQDregaddr = 9590 phba->sli4_hba.conf_regs_memmap_p + 9591 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9592 phba->sli4_hba.u.if_type2.ERR1regaddr = 9593 phba->sli4_hba.conf_regs_memmap_p + 9594 LPFC_CTL_PORT_ER1_OFFSET; 9595 phba->sli4_hba.u.if_type2.ERR2regaddr = 9596 phba->sli4_hba.conf_regs_memmap_p + 9597 LPFC_CTL_PORT_ER2_OFFSET; 9598 phba->sli4_hba.u.if_type2.CTRLregaddr = 9599 phba->sli4_hba.conf_regs_memmap_p + 9600 LPFC_CTL_PORT_CTL_OFFSET; 9601 phba->sli4_hba.u.if_type2.STATUSregaddr = 9602 phba->sli4_hba.conf_regs_memmap_p + 9603 LPFC_CTL_PORT_STA_OFFSET; 9604 phba->sli4_hba.SLIINTFregaddr = 9605 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 9606 phba->sli4_hba.PSMPHRregaddr = 9607 phba->sli4_hba.conf_regs_memmap_p + 9608 LPFC_CTL_PORT_SEM_OFFSET; 9609 phba->sli4_hba.RQDBregaddr = 9610 phba->sli4_hba.conf_regs_memmap_p + 9611 LPFC_ULP0_RQ_DOORBELL; 9612 phba->sli4_hba.WQDBregaddr = 9613 phba->sli4_hba.conf_regs_memmap_p + 9614 LPFC_ULP0_WQ_DOORBELL; 9615 phba->sli4_hba.CQDBregaddr = 9616 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 9617 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr; 9618 phba->sli4_hba.MQDBregaddr = 9619 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 9620 phba->sli4_hba.BMBXregaddr = 9621 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9622 break; 9623 case LPFC_SLI_INTF_IF_TYPE_6: 9624 phba->sli4_hba.u.if_type2.EQDregaddr = 9625 phba->sli4_hba.conf_regs_memmap_p + 9626 LPFC_CTL_PORT_EQ_DELAY_OFFSET; 9627 phba->sli4_hba.u.if_type2.ERR1regaddr = 9628 phba->sli4_hba.conf_regs_memmap_p + 9629 LPFC_CTL_PORT_ER1_OFFSET; 9630 phba->sli4_hba.u.if_type2.ERR2regaddr = 9631 phba->sli4_hba.conf_regs_memmap_p + 9632 LPFC_CTL_PORT_ER2_OFFSET; 9633 phba->sli4_hba.u.if_type2.CTRLregaddr = 9634 phba->sli4_hba.conf_regs_memmap_p + 9635 LPFC_CTL_PORT_CTL_OFFSET; 9636 phba->sli4_hba.u.if_type2.STATUSregaddr = 9637 phba->sli4_hba.conf_regs_memmap_p + 9638 LPFC_CTL_PORT_STA_OFFSET; 9639 phba->sli4_hba.PSMPHRregaddr = 9640 phba->sli4_hba.conf_regs_memmap_p + 9641 LPFC_CTL_PORT_SEM_OFFSET; 9642 phba->sli4_hba.BMBXregaddr = 9643 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 9644 break; 9645 case LPFC_SLI_INTF_IF_TYPE_1: 9646 default: 9647 dev_printk(KERN_ERR, &phba->pcidev->dev, 9648 "FATAL - unsupported SLI4 interface type - %d\n", 9649 if_type); 9650 break; 9651 } 9652 } 9653 9654 /** 9655 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 9656 * @phba: pointer to lpfc hba data structure. 9657 * @if_type: sli if type to operate on. 9658 * 9659 * This routine is invoked to set up SLI4 BAR1 register memory map. 
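 * For if_type 0 this BAR holds the port semaphore and host interrupt
 * registers; for if_type 6 it holds the queue doorbell registers.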
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
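 *
 * Note: the region is over-allocated by (LPFC_ALIGN_16_BYTE - 1) bytes so
 * the mailbox can be placed on a 16-byte boundary within it; the aligned
 * physical address is then split into two 30-bit words (bits 63:34 and
 * bits 33:4) that are written as the bootstrap high and low DMA address
 * words, each tagged with its hi/lo marker bit.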
9745 **/ 9746 static int 9747 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 9748 { 9749 uint32_t bmbx_size; 9750 struct lpfc_dmabuf *dmabuf; 9751 struct dma_address *dma_address; 9752 uint32_t pa_addr; 9753 uint64_t phys_addr; 9754 9755 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 9756 if (!dmabuf) 9757 return -ENOMEM; 9758 9759 /* 9760 * The bootstrap mailbox region is comprised of 2 parts 9761 * plus an alignment restriction of 16 bytes. 9762 */ 9763 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 9764 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size, 9765 &dmabuf->phys, GFP_KERNEL); 9766 if (!dmabuf->virt) { 9767 kfree(dmabuf); 9768 return -ENOMEM; 9769 } 9770 9771 /* 9772 * Initialize the bootstrap mailbox pointers now so that the register 9773 * operations are simple later. The mailbox dma address is required 9774 * to be 16-byte aligned. Also align the virtual memory as each 9775 * maibox is copied into the bmbx mailbox region before issuing the 9776 * command to the port. 9777 */ 9778 phba->sli4_hba.bmbx.dmabuf = dmabuf; 9779 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 9780 9781 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 9782 LPFC_ALIGN_16_BYTE); 9783 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 9784 LPFC_ALIGN_16_BYTE); 9785 9786 /* 9787 * Set the high and low physical addresses now. The SLI4 alignment 9788 * requirement is 16 bytes and the mailbox is posted to the port 9789 * as two 30-bit addresses. The other data is a bit marking whether 9790 * the 30-bit address is the high or low address. 9791 * Upcast bmbx aphys to 64bits so shift instruction compiles 9792 * clean on 32 bit machines. 9793 */ 9794 dma_address = &phba->sli4_hba.bmbx.dma_address; 9795 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 9796 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 9797 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 9798 LPFC_BMBX_BIT1_ADDR_HI); 9799 9800 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 9801 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 9802 LPFC_BMBX_BIT1_ADDR_LO); 9803 return 0; 9804 } 9805 9806 /** 9807 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 9808 * @phba: pointer to lpfc hba data structure. 9809 * 9810 * This routine is invoked to teardown the bootstrap mailbox 9811 * region and release all host resources. This routine requires 9812 * the caller to ensure all mailbox commands recovered, no 9813 * additional mailbox comands are sent, and interrupts are disabled 9814 * before calling this routine. 9815 * 9816 **/ 9817 static void 9818 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 9819 { 9820 dma_free_coherent(&phba->pcidev->dev, 9821 phba->sli4_hba.bmbx.bmbx_size, 9822 phba->sli4_hba.bmbx.dmabuf->virt, 9823 phba->sli4_hba.bmbx.dmabuf->phys); 9824 9825 kfree(phba->sli4_hba.bmbx.dmabuf); 9826 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 9827 } 9828 9829 static const char * const lpfc_topo_to_str[] = { 9830 "Loop then P2P", 9831 "Loopback", 9832 "P2P Only", 9833 "Unsupported", 9834 "Loop Only", 9835 "Unsupported", 9836 "P2P then Loop", 9837 }; 9838 9839 #define LINK_FLAGS_DEF 0x0 9840 #define LINK_FLAGS_P2P 0x1 9841 #define LINK_FLAGS_LOOP 0x2 9842 /** 9843 * lpfc_map_topology - Map the topology read from READ_CONFIG 9844 * @phba: pointer to lpfc hba data structure. 
9845 * @rd_config: pointer to read config data 9846 * 9847 * This routine is invoked to map the topology values as read 9848 * from the read config mailbox command. If the persistent 9849 * topology feature is supported, the firmware will provide the 9850 * saved topology information to be used in INIT_LINK 9851 **/ 9852 static void 9853 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) 9854 { 9855 u8 ptv, tf, pt; 9856 9857 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); 9858 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); 9859 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); 9860 9861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9862 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", 9863 ptv, tf, pt); 9864 if (!ptv) { 9865 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9866 "2019 FW does not support persistent topology " 9867 "Using driver parameter defined value [%s]", 9868 lpfc_topo_to_str[phba->cfg_topology]); 9869 return; 9870 } 9871 /* FW supports persistent topology - override module parameter value */ 9872 phba->hba_flag |= HBA_PERSISTENT_TOPO; 9873 9874 /* if ASIC_GEN_NUM >= 0xC) */ 9875 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9876 LPFC_SLI_INTF_IF_TYPE_6) || 9877 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 9878 LPFC_SLI_INTF_FAMILY_G6)) { 9879 if (!tf) { 9880 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) 9881 ? FLAGS_TOPOLOGY_MODE_LOOP 9882 : FLAGS_TOPOLOGY_MODE_PT_PT); 9883 } else { 9884 phba->hba_flag &= ~HBA_PERSISTENT_TOPO; 9885 } 9886 } else { /* G5 */ 9887 if (tf) { 9888 /* If topology failover set - pt is '0' or '1' */ 9889 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : 9890 FLAGS_TOPOLOGY_MODE_LOOP_PT); 9891 } else { 9892 phba->cfg_topology = ((pt == LINK_FLAGS_P2P) 9893 ? FLAGS_TOPOLOGY_MODE_PT_PT 9894 : FLAGS_TOPOLOGY_MODE_LOOP); 9895 } 9896 } 9897 if (phba->hba_flag & HBA_PERSISTENT_TOPO) { 9898 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9899 "2020 Using persistent topology value [%s]", 9900 lpfc_topo_to_str[phba->cfg_topology]); 9901 } else { 9902 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9903 "2021 Invalid topology values from FW " 9904 "Using driver parameter defined value [%s]", 9905 lpfc_topo_to_str[phba->cfg_topology]); 9906 } 9907 } 9908 9909 /** 9910 * lpfc_sli4_read_config - Get the config parameters. 9911 * @phba: pointer to lpfc hba data structure. 9912 * 9913 * This routine is invoked to read the configuration parameters from the HBA. 9914 * The configuration parameters are used to set the base and maximum values 9915 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 9916 * allocation for the port. 9917 * 9918 * Return codes 9919 * 0 - successful 9920 * -ENOMEM - No available memory 9921 * -EIO - The mailbox failed to complete successfully. 
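 *
 * READ_CONFIG also reports the link/trunking configuration, FA-PWWN
 * support, persistent topology and congestion-signalling capabilities,
 * which are captured here before the queue counts are validated.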
9922 **/ 9923 int 9924 lpfc_sli4_read_config(struct lpfc_hba *phba) 9925 { 9926 LPFC_MBOXQ_t *pmb; 9927 struct lpfc_mbx_read_config *rd_config; 9928 union lpfc_sli4_cfg_shdr *shdr; 9929 uint32_t shdr_status, shdr_add_status; 9930 struct lpfc_mbx_get_func_cfg *get_func_cfg; 9931 struct lpfc_rsrc_desc_fcfcoe *desc; 9932 char *pdesc_0; 9933 uint16_t forced_link_speed; 9934 uint32_t if_type, qmin, fawwpn; 9935 int length, i, rc = 0, rc2; 9936 9937 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9938 if (!pmb) { 9939 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9940 "2011 Unable to allocate memory for issuing " 9941 "SLI_CONFIG_SPECIAL mailbox command\n"); 9942 return -ENOMEM; 9943 } 9944 9945 lpfc_read_config(phba, pmb); 9946 9947 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 9948 if (rc != MBX_SUCCESS) { 9949 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 9950 "2012 Mailbox failed , mbxCmd x%x " 9951 "READ_CONFIG, mbxStatus x%x\n", 9952 bf_get(lpfc_mqe_command, &pmb->u.mqe), 9953 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 9954 rc = -EIO; 9955 } else { 9956 rd_config = &pmb->u.mqe.un.rd_config; 9957 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 9958 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 9959 phba->sli4_hba.lnk_info.lnk_tp = 9960 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 9961 phba->sli4_hba.lnk_info.lnk_no = 9962 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 9963 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9964 "3081 lnk_type:%d, lnk_numb:%d\n", 9965 phba->sli4_hba.lnk_info.lnk_tp, 9966 phba->sli4_hba.lnk_info.lnk_no); 9967 } else 9968 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9969 "3082 Mailbox (x%x) returned ldv:x0\n", 9970 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 9971 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) { 9972 phba->bbcredit_support = 1; 9973 phba->sli4_hba.bbscn_params.word0 = rd_config->word8; 9974 } 9975 9976 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config); 9977 9978 if (fawwpn) { 9979 lpfc_printf_log(phba, KERN_INFO, 9980 LOG_INIT | LOG_DISCOVERY, 9981 "2702 READ_CONFIG: FA-PWWN is " 9982 "configured on\n"); 9983 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG; 9984 } else { 9985 /* Clear FW configured flag, preserve driver flag */ 9986 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG; 9987 } 9988 9989 phba->sli4_hba.conf_trunk = 9990 bf_get(lpfc_mbx_rd_conf_trunk, rd_config); 9991 phba->sli4_hba.extents_in_use = 9992 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 9993 9994 phba->sli4_hba.max_cfg_param.max_xri = 9995 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 9996 /* Reduce resource usage in kdump environment */ 9997 if (is_kdump_kernel() && 9998 phba->sli4_hba.max_cfg_param.max_xri > 512) 9999 phba->sli4_hba.max_cfg_param.max_xri = 512; 10000 phba->sli4_hba.max_cfg_param.xri_base = 10001 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 10002 phba->sli4_hba.max_cfg_param.max_vpi = 10003 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 10004 /* Limit the max we support */ 10005 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS) 10006 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS; 10007 phba->sli4_hba.max_cfg_param.vpi_base = 10008 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 10009 phba->sli4_hba.max_cfg_param.max_rpi = 10010 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 10011 phba->sli4_hba.max_cfg_param.rpi_base = 10012 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 10013 phba->sli4_hba.max_cfg_param.max_vfi = 10014 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 10015 
phba->sli4_hba.max_cfg_param.vfi_base = 10016 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 10017 phba->sli4_hba.max_cfg_param.max_fcfi = 10018 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 10019 phba->sli4_hba.max_cfg_param.max_eq = 10020 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 10021 phba->sli4_hba.max_cfg_param.max_rq = 10022 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 10023 phba->sli4_hba.max_cfg_param.max_wq = 10024 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 10025 phba->sli4_hba.max_cfg_param.max_cq = 10026 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 10027 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 10028 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 10029 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 10030 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 10031 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 10032 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 10033 phba->max_vports = phba->max_vpi; 10034 10035 /* Next decide on FPIN or Signal E2E CGN support 10036 * For congestion alarms and warnings valid combination are: 10037 * 1. FPIN alarms / FPIN warnings 10038 * 2. Signal alarms / Signal warnings 10039 * 3. FPIN alarms / Signal warnings 10040 * 4. Signal alarms / FPIN warnings 10041 * 10042 * Initialize the adapter frequency to 100 mSecs 10043 */ 10044 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 10045 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 10046 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency; 10047 10048 if (lpfc_use_cgn_signal) { 10049 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) { 10050 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 10051 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 10052 } 10053 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) { 10054 /* MUST support both alarm and warning 10055 * because EDC does not support alarm alone. 10056 */ 10057 if (phba->cgn_reg_signal != 10058 EDC_CG_SIG_WARN_ONLY) { 10059 /* Must support both or none */ 10060 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH; 10061 phba->cgn_reg_signal = 10062 EDC_CG_SIG_NOTSUPPORTED; 10063 } else { 10064 phba->cgn_reg_signal = 10065 EDC_CG_SIG_WARN_ALARM; 10066 phba->cgn_reg_fpin = 10067 LPFC_CGN_FPIN_NONE; 10068 } 10069 } 10070 } 10071 10072 /* Set the congestion initial signal and fpin values. */ 10073 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin; 10074 phba->cgn_init_reg_signal = phba->cgn_reg_signal; 10075 10076 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 10077 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n", 10078 phba->cgn_reg_signal, phba->cgn_reg_fpin); 10079 10080 lpfc_map_topology(phba, rd_config); 10081 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10082 "2003 cfg params Extents? 
%d " 10083 "XRI(B:%d M:%d), " 10084 "VPI(B:%d M:%d) " 10085 "VFI(B:%d M:%d) " 10086 "RPI(B:%d M:%d) " 10087 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", 10088 phba->sli4_hba.extents_in_use, 10089 phba->sli4_hba.max_cfg_param.xri_base, 10090 phba->sli4_hba.max_cfg_param.max_xri, 10091 phba->sli4_hba.max_cfg_param.vpi_base, 10092 phba->sli4_hba.max_cfg_param.max_vpi, 10093 phba->sli4_hba.max_cfg_param.vfi_base, 10094 phba->sli4_hba.max_cfg_param.max_vfi, 10095 phba->sli4_hba.max_cfg_param.rpi_base, 10096 phba->sli4_hba.max_cfg_param.max_rpi, 10097 phba->sli4_hba.max_cfg_param.max_fcfi, 10098 phba->sli4_hba.max_cfg_param.max_eq, 10099 phba->sli4_hba.max_cfg_param.max_cq, 10100 phba->sli4_hba.max_cfg_param.max_wq, 10101 phba->sli4_hba.max_cfg_param.max_rq, 10102 phba->lmt); 10103 10104 /* 10105 * Calculate queue resources based on how 10106 * many WQ/CQ/EQs are available. 10107 */ 10108 qmin = phba->sli4_hba.max_cfg_param.max_wq; 10109 if (phba->sli4_hba.max_cfg_param.max_cq < qmin) 10110 qmin = phba->sli4_hba.max_cfg_param.max_cq; 10111 /* 10112 * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and 10113 * the remainder can be used for NVME / FCP. 10114 */ 10115 qmin -= 4; 10116 if (phba->sli4_hba.max_cfg_param.max_eq < qmin) 10117 qmin = phba->sli4_hba.max_cfg_param.max_eq; 10118 10119 /* Check to see if there is enough for default cfg */ 10120 if ((phba->cfg_irq_chann > qmin) || 10121 (phba->cfg_hdw_queue > qmin)) { 10122 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10123 "2005 Reducing Queues - " 10124 "FW resource limitation: " 10125 "WQ %d CQ %d EQ %d: min %d: " 10126 "IRQ %d HDWQ %d\n", 10127 phba->sli4_hba.max_cfg_param.max_wq, 10128 phba->sli4_hba.max_cfg_param.max_cq, 10129 phba->sli4_hba.max_cfg_param.max_eq, 10130 qmin, phba->cfg_irq_chann, 10131 phba->cfg_hdw_queue); 10132 10133 if (phba->cfg_irq_chann > qmin) 10134 phba->cfg_irq_chann = qmin; 10135 if (phba->cfg_hdw_queue > qmin) 10136 phba->cfg_hdw_queue = qmin; 10137 } 10138 } 10139 10140 if (rc) 10141 goto read_cfg_out; 10142 10143 /* Update link speed if forced link speed is supported */ 10144 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10145 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10146 forced_link_speed = 10147 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 10148 if (forced_link_speed) { 10149 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 10150 10151 switch (forced_link_speed) { 10152 case LINK_SPEED_1G: 10153 phba->cfg_link_speed = 10154 LPFC_USER_LINK_SPEED_1G; 10155 break; 10156 case LINK_SPEED_2G: 10157 phba->cfg_link_speed = 10158 LPFC_USER_LINK_SPEED_2G; 10159 break; 10160 case LINK_SPEED_4G: 10161 phba->cfg_link_speed = 10162 LPFC_USER_LINK_SPEED_4G; 10163 break; 10164 case LINK_SPEED_8G: 10165 phba->cfg_link_speed = 10166 LPFC_USER_LINK_SPEED_8G; 10167 break; 10168 case LINK_SPEED_10G: 10169 phba->cfg_link_speed = 10170 LPFC_USER_LINK_SPEED_10G; 10171 break; 10172 case LINK_SPEED_16G: 10173 phba->cfg_link_speed = 10174 LPFC_USER_LINK_SPEED_16G; 10175 break; 10176 case LINK_SPEED_32G: 10177 phba->cfg_link_speed = 10178 LPFC_USER_LINK_SPEED_32G; 10179 break; 10180 case LINK_SPEED_64G: 10181 phba->cfg_link_speed = 10182 LPFC_USER_LINK_SPEED_64G; 10183 break; 10184 case 0xffff: 10185 phba->cfg_link_speed = 10186 LPFC_USER_LINK_SPEED_AUTO; 10187 break; 10188 default: 10189 lpfc_printf_log(phba, KERN_ERR, 10190 LOG_TRACE_EVENT, 10191 "0047 Unrecognized link " 10192 "speed : %d\n", 10193 forced_link_speed); 10194 phba->cfg_link_speed = 10195 LPFC_USER_LINK_SPEED_AUTO; 10196 } 10197 } 10198 } 10199 
10200 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 10201 length = phba->sli4_hba.max_cfg_param.max_xri - 10202 lpfc_sli4_get_els_iocb_cnt(phba); 10203 if (phba->cfg_hba_queue_depth > length) { 10204 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10205 "3361 HBA queue depth changed from %d to %d\n", 10206 phba->cfg_hba_queue_depth, length); 10207 phba->cfg_hba_queue_depth = length; 10208 } 10209 10210 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 10211 LPFC_SLI_INTF_IF_TYPE_2) 10212 goto read_cfg_out; 10213 10214 /* get the pf# and vf# for SLI4 if_type 2 port */ 10215 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 10216 sizeof(struct lpfc_sli4_cfg_mhdr)); 10217 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 10218 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 10219 length, LPFC_SLI4_MBX_EMBED); 10220 10221 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 10222 shdr = (union lpfc_sli4_cfg_shdr *) 10223 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 10224 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10225 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10226 if (rc2 || shdr_status || shdr_add_status) { 10227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10228 "3026 Mailbox failed , mbxCmd x%x " 10229 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 10230 bf_get(lpfc_mqe_command, &pmb->u.mqe), 10231 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 10232 goto read_cfg_out; 10233 } 10234 10235 /* search for fc_fcoe resrouce descriptor */ 10236 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 10237 10238 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 10239 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 10240 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 10241 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 10242 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 10243 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 10244 goto read_cfg_out; 10245 10246 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 10247 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 10248 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 10249 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 10250 phba->sli4_hba.iov.pf_number = 10251 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 10252 phba->sli4_hba.iov.vf_number = 10253 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 10254 break; 10255 } 10256 } 10257 10258 if (i < LPFC_RSRC_DESC_MAX_NUM) 10259 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10260 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 10261 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 10262 phba->sli4_hba.iov.vf_number); 10263 else 10264 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10265 "3028 GET_FUNCTION_CONFIG: failed to find " 10266 "Resource Descriptor:x%x\n", 10267 LPFC_RSRC_DESC_TYPE_FCFCOE); 10268 10269 read_cfg_out: 10270 mempool_free(pmb, phba->mbox_mem_pool); 10271 return rc; 10272 } 10273 10274 /** 10275 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 10276 * @phba: pointer to lpfc hba data structure. 10277 * 10278 * This routine is invoked to setup the port-side endian order when 10279 * the port if_type is 0. This routine has no function for other 10280 * if_types. 10281 * 10282 * Return codes 10283 * 0 - successful 10284 * -ENOMEM - No available memory 10285 * -EIO - The mailbox failed to complete successfully. 
10286 **/ 10287 static int 10288 lpfc_setup_endian_order(struct lpfc_hba *phba) 10289 { 10290 LPFC_MBOXQ_t *mboxq; 10291 uint32_t if_type, rc = 0; 10292 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 10293 HOST_ENDIAN_HIGH_WORD1}; 10294 10295 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10296 switch (if_type) { 10297 case LPFC_SLI_INTF_IF_TYPE_0: 10298 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 10299 GFP_KERNEL); 10300 if (!mboxq) { 10301 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10302 "0492 Unable to allocate memory for " 10303 "issuing SLI_CONFIG_SPECIAL mailbox " 10304 "command\n"); 10305 return -ENOMEM; 10306 } 10307 10308 /* 10309 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 10310 * two words to contain special data values and no other data. 10311 */ 10312 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 10313 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 10314 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 10315 if (rc != MBX_SUCCESS) { 10316 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10317 "0493 SLI_CONFIG_SPECIAL mailbox " 10318 "failed with status x%x\n", 10319 rc); 10320 rc = -EIO; 10321 } 10322 mempool_free(mboxq, phba->mbox_mem_pool); 10323 break; 10324 case LPFC_SLI_INTF_IF_TYPE_6: 10325 case LPFC_SLI_INTF_IF_TYPE_2: 10326 case LPFC_SLI_INTF_IF_TYPE_1: 10327 default: 10328 break; 10329 } 10330 return rc; 10331 } 10332 10333 /** 10334 * lpfc_sli4_queue_verify - Verify and update EQ counts 10335 * @phba: pointer to lpfc hba data structure. 10336 * 10337 * This routine is invoked to check the user settable queue counts for EQs. 10338 * After this routine is called the counts will be set to valid values that 10339 * adhere to the constraints of the system's interrupt vectors and the port's 10340 * queue resources. 
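 * In particular, when NVMe target mode is enabled the MRQ count is
 * clamped to the number of hardware queues and to LPFC_NVMET_MRQ_MAX.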
10341 * 10342 * Return codes 10343 * 0 - successful 10344 * -ENOMEM - No available memory 10345 **/ 10346 static int 10347 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 10348 { 10349 /* 10350 * Sanity check for configured queue parameters against the run-time 10351 * device parameters 10352 */ 10353 10354 if (phba->nvmet_support) { 10355 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) 10356 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; 10357 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) 10358 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; 10359 } 10360 10361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10362 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", 10363 phba->cfg_hdw_queue, phba->cfg_irq_chann, 10364 phba->cfg_nvmet_mrq); 10365 10366 /* Get EQ depth from module parameter, fake the default for now */ 10367 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10368 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10369 10370 /* Get CQ depth from module parameter, fake the default for now */ 10371 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10372 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10373 return 0; 10374 } 10375 10376 static int 10377 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) 10378 { 10379 struct lpfc_queue *qdesc; 10380 u32 wqesize; 10381 int cpu; 10382 10383 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); 10384 /* Create Fast Path IO CQs */ 10385 if (phba->enab_exp_wqcq_pages) 10386 /* Increase the CQ size when WQEs contain an embedded cdb */ 10387 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10388 phba->sli4_hba.cq_esize, 10389 LPFC_CQE_EXP_COUNT, cpu); 10390 10391 else 10392 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10393 phba->sli4_hba.cq_esize, 10394 phba->sli4_hba.cq_ecount, cpu); 10395 if (!qdesc) { 10396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10397 "0499 Failed allocate fast-path IO CQ (%d)\n", 10398 idx); 10399 return 1; 10400 } 10401 qdesc->qe_valid = 1; 10402 qdesc->hdwq = idx; 10403 qdesc->chann = cpu; 10404 phba->sli4_hba.hdwq[idx].io_cq = qdesc; 10405 10406 /* Create Fast Path IO WQs */ 10407 if (phba->enab_exp_wqcq_pages) { 10408 /* Increase the WQ size when WQEs contain an embedded cdb */ 10409 wqesize = (phba->fcp_embed_io) ? 10410 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 10411 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 10412 wqesize, 10413 LPFC_WQE_EXP_COUNT, cpu); 10414 } else 10415 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10416 phba->sli4_hba.wq_esize, 10417 phba->sli4_hba.wq_ecount, cpu); 10418 10419 if (!qdesc) { 10420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10421 "0503 Failed allocate fast-path IO WQ (%d)\n", 10422 idx); 10423 return 1; 10424 } 10425 qdesc->hdwq = idx; 10426 qdesc->chann = cpu; 10427 phba->sli4_hba.hdwq[idx].io_wq = qdesc; 10428 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10429 return 0; 10430 } 10431 10432 /** 10433 * lpfc_sli4_queue_create - Create all the SLI4 queues 10434 * @phba: pointer to lpfc hba data structure. 10435 * 10436 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 10437 * operation. For each SLI4 queue type, the parameters such as queue entry 10438 * count (queue depth) shall be taken from the module parameter. For now, 10439 * we just use some constant number as place holder. 10440 * 10441 * Return codes 10442 * 0 - successful 10443 * -ENOMEM - No availble memory 10444 * -EIO - The mailbox failed to complete successfully. 
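 *
 * One EQ is allocated per IRQ vector (on the first CPU mapped to that
 * vector) and each hardware queue gets its own IO CQ/WQ pair; hardware
 * queues that share a vector share that vector's EQ.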
10445 **/ 10446 int 10447 lpfc_sli4_queue_create(struct lpfc_hba *phba) 10448 { 10449 struct lpfc_queue *qdesc; 10450 int idx, cpu, eqcpu; 10451 struct lpfc_sli4_hdw_queue *qp; 10452 struct lpfc_vector_map_info *cpup; 10453 struct lpfc_vector_map_info *eqcpup; 10454 struct lpfc_eq_intr_info *eqi; 10455 10456 /* 10457 * Create HBA Record arrays. 10458 * Both NVME and FCP will share that same vectors / EQs 10459 */ 10460 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 10461 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 10462 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 10463 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 10464 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 10465 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 10466 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 10467 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 10468 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 10469 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 10470 10471 if (!phba->sli4_hba.hdwq) { 10472 phba->sli4_hba.hdwq = kcalloc( 10473 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), 10474 GFP_KERNEL); 10475 if (!phba->sli4_hba.hdwq) { 10476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10477 "6427 Failed allocate memory for " 10478 "fast-path Hardware Queue array\n"); 10479 goto out_error; 10480 } 10481 /* Prepare hardware queues to take IO buffers */ 10482 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10483 qp = &phba->sli4_hba.hdwq[idx]; 10484 spin_lock_init(&qp->io_buf_list_get_lock); 10485 spin_lock_init(&qp->io_buf_list_put_lock); 10486 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); 10487 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); 10488 qp->get_io_bufs = 0; 10489 qp->put_io_bufs = 0; 10490 qp->total_io_bufs = 0; 10491 spin_lock_init(&qp->abts_io_buf_list_lock); 10492 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); 10493 qp->abts_scsi_io_bufs = 0; 10494 qp->abts_nvme_io_bufs = 0; 10495 INIT_LIST_HEAD(&qp->sgl_list); 10496 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); 10497 spin_lock_init(&qp->hdwq_lock); 10498 } 10499 } 10500 10501 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10502 if (phba->nvmet_support) { 10503 phba->sli4_hba.nvmet_cqset = kcalloc( 10504 phba->cfg_nvmet_mrq, 10505 sizeof(struct lpfc_queue *), 10506 GFP_KERNEL); 10507 if (!phba->sli4_hba.nvmet_cqset) { 10508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10509 "3121 Fail allocate memory for " 10510 "fast-path CQ set array\n"); 10511 goto out_error; 10512 } 10513 phba->sli4_hba.nvmet_mrq_hdr = kcalloc( 10514 phba->cfg_nvmet_mrq, 10515 sizeof(struct lpfc_queue *), 10516 GFP_KERNEL); 10517 if (!phba->sli4_hba.nvmet_mrq_hdr) { 10518 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10519 "3122 Fail allocate memory for " 10520 "fast-path RQ set hdr array\n"); 10521 goto out_error; 10522 } 10523 phba->sli4_hba.nvmet_mrq_data = kcalloc( 10524 phba->cfg_nvmet_mrq, 10525 sizeof(struct lpfc_queue *), 10526 GFP_KERNEL); 10527 if (!phba->sli4_hba.nvmet_mrq_data) { 10528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10529 "3124 Fail allocate memory for " 10530 "fast-path RQ set data array\n"); 10531 goto out_error; 10532 } 10533 } 10534 } 10535 10536 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10537 10538 /* Create HBA Event Queues (EQs) */ 10539 for_each_present_cpu(cpu) { 10540 /* We only want to create 1 EQ per vector, even though 10541 * multiple CPUs might be using that vector. so only 10542 * selects the CPUs that are LPFC_CPU_FIRST_IRQ. 
10543 */ 10544 cpup = &phba->sli4_hba.cpu_map[cpu]; 10545 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 10546 continue; 10547 10548 /* Get a ptr to the Hardware Queue associated with this CPU */ 10549 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10550 10551 /* Allocate an EQ */ 10552 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10553 phba->sli4_hba.eq_esize, 10554 phba->sli4_hba.eq_ecount, cpu); 10555 if (!qdesc) { 10556 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10557 "0497 Failed allocate EQ (%d)\n", 10558 cpup->hdwq); 10559 goto out_error; 10560 } 10561 qdesc->qe_valid = 1; 10562 qdesc->hdwq = cpup->hdwq; 10563 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ 10564 qdesc->last_cpu = qdesc->chann; 10565 10566 /* Save the allocated EQ in the Hardware Queue */ 10567 qp->hba_eq = qdesc; 10568 10569 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); 10570 list_add(&qdesc->cpu_list, &eqi->list); 10571 } 10572 10573 /* Now we need to populate the other Hardware Queues, that share 10574 * an IRQ vector, with the associated EQ ptr. 10575 */ 10576 for_each_present_cpu(cpu) { 10577 cpup = &phba->sli4_hba.cpu_map[cpu]; 10578 10579 /* Check for EQ already allocated in previous loop */ 10580 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 10581 continue; 10582 10583 /* Check for multiple CPUs per hdwq */ 10584 qp = &phba->sli4_hba.hdwq[cpup->hdwq]; 10585 if (qp->hba_eq) 10586 continue; 10587 10588 /* We need to share an EQ for this hdwq */ 10589 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ); 10590 eqcpup = &phba->sli4_hba.cpu_map[eqcpu]; 10591 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 10592 } 10593 10594 /* Allocate IO Path SLI4 CQ/WQs */ 10595 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10596 if (lpfc_alloc_io_wq_cq(phba, idx)) 10597 goto out_error; 10598 } 10599 10600 if (phba->nvmet_support) { 10601 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10602 cpu = lpfc_find_cpu_handle(phba, idx, 10603 LPFC_FIND_BY_HDWQ); 10604 qdesc = lpfc_sli4_queue_alloc(phba, 10605 LPFC_DEFAULT_PAGE_SIZE, 10606 phba->sli4_hba.cq_esize, 10607 phba->sli4_hba.cq_ecount, 10608 cpu); 10609 if (!qdesc) { 10610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10611 "3142 Failed allocate NVME " 10612 "CQ Set (%d)\n", idx); 10613 goto out_error; 10614 } 10615 qdesc->qe_valid = 1; 10616 qdesc->hdwq = idx; 10617 qdesc->chann = cpu; 10618 phba->sli4_hba.nvmet_cqset[idx] = qdesc; 10619 } 10620 } 10621 10622 /* 10623 * Create Slow Path Completion Queues (CQs) 10624 */ 10625 10626 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ); 10627 /* Create slow-path Mailbox Command Complete Queue */ 10628 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10629 phba->sli4_hba.cq_esize, 10630 phba->sli4_hba.cq_ecount, cpu); 10631 if (!qdesc) { 10632 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10633 "0500 Failed allocate slow-path mailbox CQ\n"); 10634 goto out_error; 10635 } 10636 qdesc->qe_valid = 1; 10637 phba->sli4_hba.mbx_cq = qdesc; 10638 10639 /* Create slow-path ELS Complete Queue */ 10640 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10641 phba->sli4_hba.cq_esize, 10642 phba->sli4_hba.cq_ecount, cpu); 10643 if (!qdesc) { 10644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10645 "0501 Failed allocate slow-path ELS CQ\n"); 10646 goto out_error; 10647 } 10648 qdesc->qe_valid = 1; 10649 qdesc->chann = cpu; 10650 phba->sli4_hba.els_cq = qdesc; 10651 10652 10653 /* 10654 * Create Slow Path Work Queues (WQs) 10655 */ 10656 10657 /* Create Mailbox 
Command Queue */ 10658 10659 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10660 phba->sli4_hba.mq_esize, 10661 phba->sli4_hba.mq_ecount, cpu); 10662 if (!qdesc) { 10663 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10664 "0505 Failed allocate slow-path MQ\n"); 10665 goto out_error; 10666 } 10667 qdesc->chann = cpu; 10668 phba->sli4_hba.mbx_wq = qdesc; 10669 10670 /* 10671 * Create ELS Work Queues 10672 */ 10673 10674 /* Create slow-path ELS Work Queue */ 10675 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10676 phba->sli4_hba.wq_esize, 10677 phba->sli4_hba.wq_ecount, cpu); 10678 if (!qdesc) { 10679 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10680 "0504 Failed allocate slow-path ELS WQ\n"); 10681 goto out_error; 10682 } 10683 qdesc->chann = cpu; 10684 phba->sli4_hba.els_wq = qdesc; 10685 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10686 10687 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10688 /* Create NVME LS Complete Queue */ 10689 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10690 phba->sli4_hba.cq_esize, 10691 phba->sli4_hba.cq_ecount, cpu); 10692 if (!qdesc) { 10693 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10694 "6079 Failed allocate NVME LS CQ\n"); 10695 goto out_error; 10696 } 10697 qdesc->chann = cpu; 10698 qdesc->qe_valid = 1; 10699 phba->sli4_hba.nvmels_cq = qdesc; 10700 10701 /* Create NVME LS Work Queue */ 10702 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10703 phba->sli4_hba.wq_esize, 10704 phba->sli4_hba.wq_ecount, cpu); 10705 if (!qdesc) { 10706 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10707 "6080 Failed allocate NVME LS WQ\n"); 10708 goto out_error; 10709 } 10710 qdesc->chann = cpu; 10711 phba->sli4_hba.nvmels_wq = qdesc; 10712 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 10713 } 10714 10715 /* 10716 * Create Receive Queue (RQ) 10717 */ 10718 10719 /* Create Receive Queue for header */ 10720 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10721 phba->sli4_hba.rq_esize, 10722 phba->sli4_hba.rq_ecount, cpu); 10723 if (!qdesc) { 10724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10725 "0506 Failed allocate receive HRQ\n"); 10726 goto out_error; 10727 } 10728 phba->sli4_hba.hdr_rq = qdesc; 10729 10730 /* Create Receive Queue for data */ 10731 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, 10732 phba->sli4_hba.rq_esize, 10733 phba->sli4_hba.rq_ecount, cpu); 10734 if (!qdesc) { 10735 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10736 "0507 Failed allocate receive DRQ\n"); 10737 goto out_error; 10738 } 10739 phba->sli4_hba.dat_rq = qdesc; 10740 10741 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 10742 phba->nvmet_support) { 10743 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 10744 cpu = lpfc_find_cpu_handle(phba, idx, 10745 LPFC_FIND_BY_HDWQ); 10746 /* Create NVMET Receive Queue for header */ 10747 qdesc = lpfc_sli4_queue_alloc(phba, 10748 LPFC_DEFAULT_PAGE_SIZE, 10749 phba->sli4_hba.rq_esize, 10750 LPFC_NVMET_RQE_DEF_COUNT, 10751 cpu); 10752 if (!qdesc) { 10753 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10754 "3146 Failed allocate " 10755 "receive HRQ\n"); 10756 goto out_error; 10757 } 10758 qdesc->hdwq = idx; 10759 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; 10760 10761 /* Only needed for header of RQ pair */ 10762 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp), 10763 GFP_KERNEL, 10764 cpu_to_node(cpu)); 10765 if (qdesc->rqbp == NULL) { 10766 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10767 "6131 
Failed allocate " 10768 "Header RQBP\n"); 10769 goto out_error; 10770 } 10771 10772 /* Put list in known state in case driver load fails. */ 10773 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list); 10774 10775 /* Create NVMET Receive Queue for data */ 10776 qdesc = lpfc_sli4_queue_alloc(phba, 10777 LPFC_DEFAULT_PAGE_SIZE, 10778 phba->sli4_hba.rq_esize, 10779 LPFC_NVMET_RQE_DEF_COUNT, 10780 cpu); 10781 if (!qdesc) { 10782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10783 "3156 Failed allocate " 10784 "receive DRQ\n"); 10785 goto out_error; 10786 } 10787 qdesc->hdwq = idx; 10788 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; 10789 } 10790 } 10791 10792 /* Clear NVME stats */ 10793 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10794 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10795 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, 10796 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); 10797 } 10798 } 10799 10800 /* Clear SCSI stats */ 10801 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 10802 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10803 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, 10804 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); 10805 } 10806 } 10807 10808 return 0; 10809 10810 out_error: 10811 lpfc_sli4_queue_destroy(phba); 10812 return -ENOMEM; 10813 } 10814 10815 static inline void 10816 __lpfc_sli4_release_queue(struct lpfc_queue **qp) 10817 { 10818 if (*qp != NULL) { 10819 lpfc_sli4_queue_free(*qp); 10820 *qp = NULL; 10821 } 10822 } 10823 10824 static inline void 10825 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) 10826 { 10827 int idx; 10828 10829 if (*qs == NULL) 10830 return; 10831 10832 for (idx = 0; idx < max; idx++) 10833 __lpfc_sli4_release_queue(&(*qs)[idx]); 10834 10835 kfree(*qs); 10836 *qs = NULL; 10837 } 10838 10839 static inline void 10840 lpfc_sli4_release_hdwq(struct lpfc_hba *phba) 10841 { 10842 struct lpfc_sli4_hdw_queue *hdwq; 10843 struct lpfc_queue *eq; 10844 uint32_t idx; 10845 10846 hdwq = phba->sli4_hba.hdwq; 10847 10848 /* Loop thru all Hardware Queues */ 10849 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 10850 /* Free the CQ/WQ corresponding to the Hardware Queue */ 10851 lpfc_sli4_queue_free(hdwq[idx].io_cq); 10852 lpfc_sli4_queue_free(hdwq[idx].io_wq); 10853 hdwq[idx].hba_eq = NULL; 10854 hdwq[idx].io_cq = NULL; 10855 hdwq[idx].io_wq = NULL; 10856 if (phba->cfg_xpsgl && !phba->nvmet_support) 10857 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); 10858 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); 10859 } 10860 /* Loop thru all IRQ vectors */ 10861 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10862 /* Free the EQ corresponding to the IRQ vector */ 10863 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 10864 lpfc_sli4_queue_free(eq); 10865 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL; 10866 } 10867 } 10868 10869 /** 10870 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 10871 * @phba: pointer to lpfc hba data structure. 10872 * 10873 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 10874 * operation. 10875 * 10876 * Return codes 10877 * 0 - successful 10878 * -ENOMEM - No available memory 10879 * -EIO - The mailbox failed to complete successfully. 10880 **/ 10881 void 10882 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 10883 { 10884 /* 10885 * Set FREE_INIT before beginning to free the queues. 10886 * Wait until the users of queues to acknowledge to 10887 * release queues by clearing FREE_WAIT. 
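	 *
	 * Sketch of the handshake implemented below: queue users that are
	 * still walking a queue hold off this teardown via
	 * LPFC_QUEUE_FREE_WAIT; this routine advertises LPFC_QUEUE_FREE_INIT
	 * under hbalock and then re-checks every 20 ms until FREE_WAIT
	 * clears before any queue memory is released.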
10888 */ 10889 spin_lock_irq(&phba->hbalock); 10890 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT; 10891 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) { 10892 spin_unlock_irq(&phba->hbalock); 10893 msleep(20); 10894 spin_lock_irq(&phba->hbalock); 10895 } 10896 spin_unlock_irq(&phba->hbalock); 10897 10898 lpfc_sli4_cleanup_poll_list(phba); 10899 10900 /* Release HBA eqs */ 10901 if (phba->sli4_hba.hdwq) 10902 lpfc_sli4_release_hdwq(phba); 10903 10904 if (phba->nvmet_support) { 10905 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 10906 phba->cfg_nvmet_mrq); 10907 10908 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 10909 phba->cfg_nvmet_mrq); 10910 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 10911 phba->cfg_nvmet_mrq); 10912 } 10913 10914 /* Release mailbox command work queue */ 10915 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 10916 10917 /* Release ELS work queue */ 10918 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq); 10919 10920 /* Release ELS work queue */ 10921 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq); 10922 10923 /* Release unsolicited receive queue */ 10924 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq); 10925 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq); 10926 10927 /* Release ELS complete queue */ 10928 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq); 10929 10930 /* Release NVME LS complete queue */ 10931 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq); 10932 10933 /* Release mailbox command complete queue */ 10934 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq); 10935 10936 /* Everything on this list has been freed */ 10937 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); 10938 10939 /* Done with freeing the queues */ 10940 spin_lock_irq(&phba->hbalock); 10941 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT; 10942 spin_unlock_irq(&phba->hbalock); 10943 } 10944 10945 int 10946 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 10947 { 10948 struct lpfc_rqb *rqbp; 10949 struct lpfc_dmabuf *h_buf; 10950 struct rqb_dmabuf *rqb_buffer; 10951 10952 rqbp = rq->rqbp; 10953 while (!list_empty(&rqbp->rqb_buffer_list)) { 10954 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 10955 struct lpfc_dmabuf, list); 10956 10957 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf); 10958 (rqbp->rqb_free_buffer)(phba, rqb_buffer); 10959 rqbp->buffer_count--; 10960 } 10961 return 1; 10962 } 10963 10964 static int 10965 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, 10966 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, 10967 int qidx, uint32_t qtype) 10968 { 10969 struct lpfc_sli_ring *pring; 10970 int rc; 10971 10972 if (!eq || !cq || !wq) { 10973 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10974 "6085 Fast-path %s (%d) not allocated\n", 10975 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx); 10976 return -ENOMEM; 10977 } 10978 10979 /* create the Cq first */ 10980 rc = lpfc_cq_create(phba, cq, eq, 10981 (qtype == LPFC_MBOX) ? 
LPFC_MCQ : LPFC_WCQ, qtype); 10982 if (rc) { 10983 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10984 "6086 Failed setup of CQ (%d), rc = 0x%x\n", 10985 qidx, (uint32_t)rc); 10986 return rc; 10987 } 10988 10989 if (qtype != LPFC_MBOX) { 10990 /* Setup cq_map for fast lookup */ 10991 if (cq_map) 10992 *cq_map = cq->queue_id; 10993 10994 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10995 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n", 10996 qidx, cq->queue_id, qidx, eq->queue_id); 10997 10998 /* create the wq */ 10999 rc = lpfc_wq_create(phba, wq, cq, qtype); 11000 if (rc) { 11001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11002 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n", 11003 qidx, (uint32_t)rc); 11004 /* no need to tear down cq - caller will do so */ 11005 return rc; 11006 } 11007 11008 /* Bind this CQ/WQ to the NVME ring */ 11009 pring = wq->pring; 11010 pring->sli.sli4.wqp = (void *)wq; 11011 cq->pring = pring; 11012 11013 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11014 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n", 11015 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); 11016 } else { 11017 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); 11018 if (rc) { 11019 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11020 "0539 Failed setup of slow-path MQ: " 11021 "rc = 0x%x\n", rc); 11022 /* no need to tear down cq - caller will do so */ 11023 return rc; 11024 } 11025 11026 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11027 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 11028 phba->sli4_hba.mbx_wq->queue_id, 11029 phba->sli4_hba.mbx_cq->queue_id); 11030 } 11031 11032 return 0; 11033 } 11034 11035 /** 11036 * lpfc_setup_cq_lookup - Setup the CQ lookup table 11037 * @phba: pointer to lpfc hba data structure. 11038 * 11039 * This routine will populate the cq_lookup table by all 11040 * available CQ queue_id's. 11041 **/ 11042 static void 11043 lpfc_setup_cq_lookup(struct lpfc_hba *phba) 11044 { 11045 struct lpfc_queue *eq, *childq; 11046 int qidx; 11047 11048 memset(phba->sli4_hba.cq_lookup, 0, 11049 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); 11050 /* Loop thru all IRQ vectors */ 11051 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11052 /* Get the EQ corresponding to the IRQ vector */ 11053 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11054 if (!eq) 11055 continue; 11056 /* Loop through all CQs associated with that EQ */ 11057 list_for_each_entry(childq, &eq->child_list, list) { 11058 if (childq->queue_id > phba->sli4_hba.cq_max) 11059 continue; 11060 if (childq->subtype == LPFC_IO) 11061 phba->sli4_hba.cq_lookup[childq->queue_id] = 11062 childq; 11063 } 11064 } 11065 } 11066 11067 /** 11068 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 11069 * @phba: pointer to lpfc hba data structure. 11070 * 11071 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 11072 * operation. 11073 * 11074 * Return codes 11075 * 0 - successful 11076 * -ENOMEM - No available memory 11077 * -EIO - The mailbox failed to complete successfully. 
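 *
 * Illustrative pairing only (the actual call sites live elsewhere in the
 * driver): queue memory is first allocated with lpfc_sli4_queue_create(),
 * then posted to the port by this routine; teardown mirrors that order:
 *
 *	rc = lpfc_sli4_queue_create(phba);
 *	if (!rc)
 *		rc = lpfc_sli4_queue_setup(phba);
 *	...
 *	lpfc_sli4_queue_unset(phba);
 *	lpfc_sli4_queue_destroy(phba);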
11078 **/ 11079 int 11080 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 11081 { 11082 uint32_t shdr_status, shdr_add_status; 11083 union lpfc_sli4_cfg_shdr *shdr; 11084 struct lpfc_vector_map_info *cpup; 11085 struct lpfc_sli4_hdw_queue *qp; 11086 LPFC_MBOXQ_t *mboxq; 11087 int qidx, cpu; 11088 uint32_t length, usdelay; 11089 int rc = -ENOMEM; 11090 11091 /* Check for dual-ULP support */ 11092 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11093 if (!mboxq) { 11094 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11095 "3249 Unable to allocate memory for " 11096 "QUERY_FW_CFG mailbox command\n"); 11097 return -ENOMEM; 11098 } 11099 length = (sizeof(struct lpfc_mbx_query_fw_config) - 11100 sizeof(struct lpfc_sli4_cfg_mhdr)); 11101 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11102 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 11103 length, LPFC_SLI4_MBX_EMBED); 11104 11105 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11106 11107 shdr = (union lpfc_sli4_cfg_shdr *) 11108 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11109 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11110 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11111 if (shdr_status || shdr_add_status || rc) { 11112 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11113 "3250 QUERY_FW_CFG mailbox failed with status " 11114 "x%x add_status x%x, mbx status x%x\n", 11115 shdr_status, shdr_add_status, rc); 11116 mempool_free(mboxq, phba->mbox_mem_pool); 11117 rc = -ENXIO; 11118 goto out_error; 11119 } 11120 11121 phba->sli4_hba.fw_func_mode = 11122 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 11123 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 11124 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 11125 phba->sli4_hba.physical_port = 11126 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 11127 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11128 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 11129 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 11130 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 11131 11132 mempool_free(mboxq, phba->mbox_mem_pool); 11133 11134 /* 11135 * Set up HBA Event Queues (EQs) 11136 */ 11137 qp = phba->sli4_hba.hdwq; 11138 11139 /* Set up HBA event queue */ 11140 if (!qp) { 11141 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11142 "3147 Fast-path EQs not allocated\n"); 11143 rc = -ENOMEM; 11144 goto out_error; 11145 } 11146 11147 /* Loop thru all IRQ vectors */ 11148 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11149 /* Create HBA Event Queues (EQs) in order */ 11150 for_each_present_cpu(cpu) { 11151 cpup = &phba->sli4_hba.cpu_map[cpu]; 11152 11153 /* Look for the CPU thats using that vector with 11154 * LPFC_CPU_FIRST_IRQ set. 
11155 */ 11156 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 11157 continue; 11158 if (qidx != cpup->eq) 11159 continue; 11160 11161 /* Create an EQ for that vector */ 11162 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq, 11163 phba->cfg_fcp_imax); 11164 if (rc) { 11165 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11166 "0523 Failed setup of fast-path" 11167 " EQ (%d), rc = 0x%x\n", 11168 cpup->eq, (uint32_t)rc); 11169 goto out_destroy; 11170 } 11171 11172 /* Save the EQ for that vector in the hba_eq_hdl */ 11173 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq = 11174 qp[cpup->hdwq].hba_eq; 11175 11176 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11177 "2584 HBA EQ setup: queue[%d]-id=%d\n", 11178 cpup->eq, 11179 qp[cpup->hdwq].hba_eq->queue_id); 11180 } 11181 } 11182 11183 /* Loop thru all Hardware Queues */ 11184 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11185 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); 11186 cpup = &phba->sli4_hba.cpu_map[cpu]; 11187 11188 /* Create the CQ/WQ corresponding to the Hardware Queue */ 11189 rc = lpfc_create_wq_cq(phba, 11190 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, 11191 qp[qidx].io_cq, 11192 qp[qidx].io_wq, 11193 &phba->sli4_hba.hdwq[qidx].io_cq_map, 11194 qidx, 11195 LPFC_IO); 11196 if (rc) { 11197 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11198 "0535 Failed to setup fastpath " 11199 "IO WQ/CQ (%d), rc = 0x%x\n", 11200 qidx, (uint32_t)rc); 11201 goto out_destroy; 11202 } 11203 } 11204 11205 /* 11206 * Set up Slow Path Complete Queues (CQs) 11207 */ 11208 11209 /* Set up slow-path MBOX CQ/MQ */ 11210 11211 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) { 11212 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11213 "0528 %s not allocated\n", 11214 phba->sli4_hba.mbx_cq ? 11215 "Mailbox WQ" : "Mailbox CQ"); 11216 rc = -ENOMEM; 11217 goto out_destroy; 11218 } 11219 11220 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11221 phba->sli4_hba.mbx_cq, 11222 phba->sli4_hba.mbx_wq, 11223 NULL, 0, LPFC_MBOX); 11224 if (rc) { 11225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11226 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n", 11227 (uint32_t)rc); 11228 goto out_destroy; 11229 } 11230 if (phba->nvmet_support) { 11231 if (!phba->sli4_hba.nvmet_cqset) { 11232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11233 "3165 Fast-path NVME CQ Set " 11234 "array not allocated\n"); 11235 rc = -ENOMEM; 11236 goto out_destroy; 11237 } 11238 if (phba->cfg_nvmet_mrq > 1) { 11239 rc = lpfc_cq_create_set(phba, 11240 phba->sli4_hba.nvmet_cqset, 11241 qp, 11242 LPFC_WCQ, LPFC_NVMET); 11243 if (rc) { 11244 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11245 "3164 Failed setup of NVME CQ " 11246 "Set, rc = 0x%x\n", 11247 (uint32_t)rc); 11248 goto out_destroy; 11249 } 11250 } else { 11251 /* Set up NVMET Receive Complete Queue */ 11252 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], 11253 qp[0].hba_eq, 11254 LPFC_WCQ, LPFC_NVMET); 11255 if (rc) { 11256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11257 "6089 Failed setup NVMET CQ: " 11258 "rc = 0x%x\n", (uint32_t)rc); 11259 goto out_destroy; 11260 } 11261 phba->sli4_hba.nvmet_cqset[0]->chann = 0; 11262 11263 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11264 "6090 NVMET CQ setup: cq-id=%d, " 11265 "parent eq-id=%d\n", 11266 phba->sli4_hba.nvmet_cqset[0]->queue_id, 11267 qp[0].hba_eq->queue_id); 11268 } 11269 } 11270 11271 /* Set up slow-path ELS WQ/CQ */ 11272 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) { 11273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11274 "0530 ELS %s not 
allocated\n", 11275 phba->sli4_hba.els_cq ? "WQ" : "CQ"); 11276 rc = -ENOMEM; 11277 goto out_destroy; 11278 } 11279 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11280 phba->sli4_hba.els_cq, 11281 phba->sli4_hba.els_wq, 11282 NULL, 0, LPFC_ELS); 11283 if (rc) { 11284 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11285 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", 11286 (uint32_t)rc); 11287 goto out_destroy; 11288 } 11289 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11290 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 11291 phba->sli4_hba.els_wq->queue_id, 11292 phba->sli4_hba.els_cq->queue_id); 11293 11294 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 11295 /* Set up NVME LS Complete Queue */ 11296 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) { 11297 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11298 "6091 LS %s not allocated\n", 11299 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ"); 11300 rc = -ENOMEM; 11301 goto out_destroy; 11302 } 11303 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, 11304 phba->sli4_hba.nvmels_cq, 11305 phba->sli4_hba.nvmels_wq, 11306 NULL, 0, LPFC_NVME_LS); 11307 if (rc) { 11308 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11309 "0526 Failed setup of NVVME LS WQ/CQ: " 11310 "rc = 0x%x\n", (uint32_t)rc); 11311 goto out_destroy; 11312 } 11313 11314 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11315 "6096 ELS WQ setup: wq-id=%d, " 11316 "parent cq-id=%d\n", 11317 phba->sli4_hba.nvmels_wq->queue_id, 11318 phba->sli4_hba.nvmels_cq->queue_id); 11319 } 11320 11321 /* 11322 * Create NVMET Receive Queue (RQ) 11323 */ 11324 if (phba->nvmet_support) { 11325 if ((!phba->sli4_hba.nvmet_cqset) || 11326 (!phba->sli4_hba.nvmet_mrq_hdr) || 11327 (!phba->sli4_hba.nvmet_mrq_data)) { 11328 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11329 "6130 MRQ CQ Queues not " 11330 "allocated\n"); 11331 rc = -ENOMEM; 11332 goto out_destroy; 11333 } 11334 if (phba->cfg_nvmet_mrq > 1) { 11335 rc = lpfc_mrq_create(phba, 11336 phba->sli4_hba.nvmet_mrq_hdr, 11337 phba->sli4_hba.nvmet_mrq_data, 11338 phba->sli4_hba.nvmet_cqset, 11339 LPFC_NVMET); 11340 if (rc) { 11341 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11342 "6098 Failed setup of NVMET " 11343 "MRQ: rc = 0x%x\n", 11344 (uint32_t)rc); 11345 goto out_destroy; 11346 } 11347 11348 } else { 11349 rc = lpfc_rq_create(phba, 11350 phba->sli4_hba.nvmet_mrq_hdr[0], 11351 phba->sli4_hba.nvmet_mrq_data[0], 11352 phba->sli4_hba.nvmet_cqset[0], 11353 LPFC_NVMET); 11354 if (rc) { 11355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11356 "6057 Failed setup of NVMET " 11357 "Receive Queue: rc = 0x%x\n", 11358 (uint32_t)rc); 11359 goto out_destroy; 11360 } 11361 11362 lpfc_printf_log( 11363 phba, KERN_INFO, LOG_INIT, 11364 "6099 NVMET RQ setup: hdr-rq-id=%d, " 11365 "dat-rq-id=%d parent cq-id=%d\n", 11366 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id, 11367 phba->sli4_hba.nvmet_mrq_data[0]->queue_id, 11368 phba->sli4_hba.nvmet_cqset[0]->queue_id); 11369 11370 } 11371 } 11372 11373 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 11374 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11375 "0540 Receive Queue not allocated\n"); 11376 rc = -ENOMEM; 11377 goto out_destroy; 11378 } 11379 11380 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 11381 phba->sli4_hba.els_cq, LPFC_USOL); 11382 if (rc) { 11383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11384 "0541 Failed setup of Receive Queue: " 11385 "rc = 0x%x\n", (uint32_t)rc); 11386 goto out_destroy; 11387 } 11388 11389 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 11390 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 11391 "parent cq-id=%d\n", 11392 phba->sli4_hba.hdr_rq->queue_id, 11393 phba->sli4_hba.dat_rq->queue_id, 11394 phba->sli4_hba.els_cq->queue_id); 11395 11396 if (phba->cfg_fcp_imax) 11397 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; 11398 else 11399 usdelay = 0; 11400 11401 for (qidx = 0; qidx < phba->cfg_irq_chann; 11402 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) 11403 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, 11404 usdelay); 11405 11406 if (phba->sli4_hba.cq_max) { 11407 kfree(phba->sli4_hba.cq_lookup); 11408 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), 11409 sizeof(struct lpfc_queue *), GFP_KERNEL); 11410 if (!phba->sli4_hba.cq_lookup) { 11411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11412 "0549 Failed setup of CQ Lookup table: " 11413 "size 0x%x\n", phba->sli4_hba.cq_max); 11414 rc = -ENOMEM; 11415 goto out_destroy; 11416 } 11417 lpfc_setup_cq_lookup(phba); 11418 } 11419 return 0; 11420 11421 out_destroy: 11422 lpfc_sli4_queue_unset(phba); 11423 out_error: 11424 return rc; 11425 } 11426 11427 /** 11428 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 11429 * @phba: pointer to lpfc hba data structure. 11430 * 11431 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 11432 * operation. 11433 * 11434 * Return codes 11435 * 0 - successful 11436 * -ENOMEM - No available memory 11437 * -EIO - The mailbox failed to complete successfully. 11438 **/ 11439 void 11440 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 11441 { 11442 struct lpfc_sli4_hdw_queue *qp; 11443 struct lpfc_queue *eq; 11444 int qidx; 11445 11446 /* Unset mailbox command work queue */ 11447 if (phba->sli4_hba.mbx_wq) 11448 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 11449 11450 /* Unset NVME LS work queue */ 11451 if (phba->sli4_hba.nvmels_wq) 11452 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 11453 11454 /* Unset ELS work queue */ 11455 if (phba->sli4_hba.els_wq) 11456 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 11457 11458 /* Unset unsolicited receive queue */ 11459 if (phba->sli4_hba.hdr_rq) 11460 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, 11461 phba->sli4_hba.dat_rq); 11462 11463 /* Unset mailbox command complete queue */ 11464 if (phba->sli4_hba.mbx_cq) 11465 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 11466 11467 /* Unset ELS complete queue */ 11468 if (phba->sli4_hba.els_cq) 11469 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 11470 11471 /* Unset NVME LS complete queue */ 11472 if (phba->sli4_hba.nvmels_cq) 11473 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); 11474 11475 if (phba->nvmet_support) { 11476 /* Unset NVMET MRQ queue */ 11477 if (phba->sli4_hba.nvmet_mrq_hdr) { 11478 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11479 lpfc_rq_destroy( 11480 phba, 11481 phba->sli4_hba.nvmet_mrq_hdr[qidx], 11482 phba->sli4_hba.nvmet_mrq_data[qidx]); 11483 } 11484 11485 /* Unset NVMET CQ Set complete queue */ 11486 if (phba->sli4_hba.nvmet_cqset) { 11487 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 11488 lpfc_cq_destroy( 11489 phba, phba->sli4_hba.nvmet_cqset[qidx]); 11490 } 11491 } 11492 11493 /* Unset fast-path SLI4 queues */ 11494 if (phba->sli4_hba.hdwq) { 11495 /* Loop thru all Hardware Queues */ 11496 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 11497 /* Destroy the CQ/WQ corresponding to Hardware Queue */ 11498 qp = &phba->sli4_hba.hdwq[qidx]; 11499 lpfc_wq_destroy(phba, qp->io_wq); 11500 lpfc_cq_destroy(phba, qp->io_cq); 11501 } 11502 /* Loop thru all IRQ 
vectors */ 11503 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 11504 /* Destroy the EQ corresponding to the IRQ vector */ 11505 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 11506 lpfc_eq_destroy(phba, eq); 11507 } 11508 } 11509 11510 kfree(phba->sli4_hba.cq_lookup); 11511 phba->sli4_hba.cq_lookup = NULL; 11512 phba->sli4_hba.cq_max = 0; 11513 } 11514 11515 /** 11516 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 11517 * @phba: pointer to lpfc hba data structure. 11518 * 11519 * This routine is invoked to allocate and set up a pool of completion queue 11520 * events. The body of the completion queue event is a completion queue entry 11521 * CQE. For now, this pool is used for the interrupt service routine to queue 11522 * the following HBA completion queue events for the worker thread to process: 11523 * - Mailbox asynchronous events 11524 * - Receive queue completion unsolicited events 11525 * Later, this can be used for all the slow-path events. 11526 * 11527 * Return codes 11528 * 0 - successful 11529 * -ENOMEM - No available memory 11530 **/ 11531 static int 11532 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 11533 { 11534 struct lpfc_cq_event *cq_event; 11535 int i; 11536 11537 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 11538 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 11539 if (!cq_event) 11540 goto out_pool_create_fail; 11541 list_add_tail(&cq_event->list, 11542 &phba->sli4_hba.sp_cqe_event_pool); 11543 } 11544 return 0; 11545 11546 out_pool_create_fail: 11547 lpfc_sli4_cq_event_pool_destroy(phba); 11548 return -ENOMEM; 11549 } 11550 11551 /** 11552 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 11553 * @phba: pointer to lpfc hba data structure. 11554 * 11555 * This routine is invoked to free the pool of completion queue events at 11556 * driver unload time. Note that, it is the responsibility of the driver 11557 * cleanup routine to free all the outstanding completion-queue events 11558 * allocated from this pool back into the pool before invoking this routine 11559 * to destroy the pool. 11560 **/ 11561 static void 11562 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 11563 { 11564 struct lpfc_cq_event *cq_event, *next_cq_event; 11565 11566 list_for_each_entry_safe(cq_event, next_cq_event, 11567 &phba->sli4_hba.sp_cqe_event_pool, list) { 11568 list_del(&cq_event->list); 11569 kfree(cq_event); 11570 } 11571 } 11572 11573 /** 11574 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11575 * @phba: pointer to lpfc hba data structure. 11576 * 11577 * This routine is the lock free version of the API invoked to allocate a 11578 * completion-queue event from the free pool. 11579 * 11580 * Return: Pointer to the newly allocated completion-queue event if successful 11581 * NULL otherwise. 11582 **/ 11583 struct lpfc_cq_event * 11584 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 11585 { 11586 struct lpfc_cq_event *cq_event = NULL; 11587 11588 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 11589 struct lpfc_cq_event, list); 11590 return cq_event; 11591 } 11592 11593 /** 11594 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 11595 * @phba: pointer to lpfc hba data structure. 11596 * 11597 * This routine is the lock version of the API invoked to allocate a 11598 * completion-queue event from the free pool. 
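 *
 * Note: __lpfc_sli4_cq_event_alloc() above is the lock-free variant and
 * assumes the caller already holds hbalock; this wrapper simply takes and
 * releases hbalock around it. The release side mirrors this with
 * __lpfc_sli4_cq_event_release()/lpfc_sli4_cq_event_release().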
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the pending completion-queue events back into the
 * free pool for a device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cq_event_list);
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */

	/* Pending ELS XRI abort events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);

	/* Pending async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	while (!list_empty(&cq_event_list)) {
		list_remove_head(&cq_event_list, cq_event,
				 struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys all
 * resources assigned to the PCI function that originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
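 *
 * Note (derived from the code below): for if_type 2/6 ports the reset path
 * polls the port status register for RDY roughly every 20 ms, for up to
 * 1500 attempts (about 30 seconds), before treating the port as failed.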
11694 **/ 11695 int 11696 lpfc_pci_function_reset(struct lpfc_hba *phba) 11697 { 11698 LPFC_MBOXQ_t *mboxq; 11699 uint32_t rc = 0, if_type; 11700 uint32_t shdr_status, shdr_add_status; 11701 uint32_t rdy_chk; 11702 uint32_t port_reset = 0; 11703 union lpfc_sli4_cfg_shdr *shdr; 11704 struct lpfc_register reg_data; 11705 uint16_t devid; 11706 11707 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11708 switch (if_type) { 11709 case LPFC_SLI_INTF_IF_TYPE_0: 11710 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 11711 GFP_KERNEL); 11712 if (!mboxq) { 11713 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11714 "0494 Unable to allocate memory for " 11715 "issuing SLI_FUNCTION_RESET mailbox " 11716 "command\n"); 11717 return -ENOMEM; 11718 } 11719 11720 /* Setup PCI function reset mailbox-ioctl command */ 11721 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 11722 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 11723 LPFC_SLI4_MBX_EMBED); 11724 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 11725 shdr = (union lpfc_sli4_cfg_shdr *) 11726 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 11727 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11728 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 11729 &shdr->response); 11730 mempool_free(mboxq, phba->mbox_mem_pool); 11731 if (shdr_status || shdr_add_status || rc) { 11732 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11733 "0495 SLI_FUNCTION_RESET mailbox " 11734 "failed with status x%x add_status x%x," 11735 " mbx status x%x\n", 11736 shdr_status, shdr_add_status, rc); 11737 rc = -ENXIO; 11738 } 11739 break; 11740 case LPFC_SLI_INTF_IF_TYPE_2: 11741 case LPFC_SLI_INTF_IF_TYPE_6: 11742 wait: 11743 /* 11744 * Poll the Port Status Register and wait for RDY for 11745 * up to 30 seconds. If the port doesn't respond, treat 11746 * it as an error. 11747 */ 11748 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 11749 if (lpfc_readl(phba->sli4_hba.u.if_type2. 11750 STATUSregaddr, ®_data.word0)) { 11751 rc = -ENODEV; 11752 goto out; 11753 } 11754 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 11755 break; 11756 msleep(20); 11757 } 11758 11759 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 11760 phba->work_status[0] = readl( 11761 phba->sli4_hba.u.if_type2.ERR1regaddr); 11762 phba->work_status[1] = readl( 11763 phba->sli4_hba.u.if_type2.ERR2regaddr); 11764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11765 "2890 Port not ready, port status reg " 11766 "0x%x error 1=0x%x, error 2=0x%x\n", 11767 reg_data.word0, 11768 phba->work_status[0], 11769 phba->work_status[1]); 11770 rc = -ENODEV; 11771 goto out; 11772 } 11773 11774 if (bf_get(lpfc_sliport_status_pldv, ®_data)) 11775 lpfc_pldv_detect = true; 11776 11777 if (!port_reset) { 11778 /* 11779 * Reset the port now 11780 */ 11781 reg_data.word0 = 0; 11782 bf_set(lpfc_sliport_ctrl_end, ®_data, 11783 LPFC_SLIPORT_LITTLE_ENDIAN); 11784 bf_set(lpfc_sliport_ctrl_ip, ®_data, 11785 LPFC_SLIPORT_INIT_PORT); 11786 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 11787 CTRLregaddr); 11788 /* flush */ 11789 pci_read_config_word(phba->pcidev, 11790 PCI_DEVICE_ID, &devid); 11791 11792 port_reset = 1; 11793 msleep(20); 11794 goto wait; 11795 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 11796 rc = -ENODEV; 11797 goto out; 11798 } 11799 break; 11800 11801 case LPFC_SLI_INTF_IF_TYPE_1: 11802 default: 11803 break; 11804 } 11805 11806 out: 11807 /* Catch the not-ready port failure after a port reset. 
*/ 11808 if (rc) { 11809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11810 "3317 HBA not functional: IP Reset Failed " 11811 "try: echo fw_reset > board_mode\n"); 11812 rc = -ENODEV; 11813 } 11814 11815 return rc; 11816 } 11817 11818 /** 11819 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 11820 * @phba: pointer to lpfc hba data structure. 11821 * 11822 * This routine is invoked to set up the PCI device memory space for device 11823 * with SLI-4 interface spec. 11824 * 11825 * Return codes 11826 * 0 - successful 11827 * other values - error 11828 **/ 11829 static int 11830 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 11831 { 11832 struct pci_dev *pdev = phba->pcidev; 11833 unsigned long bar0map_len, bar1map_len, bar2map_len; 11834 int error; 11835 uint32_t if_type; 11836 11837 if (!pdev) 11838 return -ENODEV; 11839 11840 /* Set the device DMA mask size */ 11841 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11842 if (error) 11843 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11844 if (error) 11845 return error; 11846 11847 /* 11848 * The BARs and register set definitions and offset locations are 11849 * dependent on the if_type. 11850 */ 11851 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 11852 &phba->sli4_hba.sli_intf.word0)) { 11853 return -ENODEV; 11854 } 11855 11856 /* There is no SLI3 failback for SLI4 devices. */ 11857 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 11858 LPFC_SLI_INTF_VALID) { 11859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11860 "2894 SLI_INTF reg contents invalid " 11861 "sli_intf reg 0x%x\n", 11862 phba->sli4_hba.sli_intf.word0); 11863 return -ENODEV; 11864 } 11865 11866 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11867 /* 11868 * Get the bus address of SLI4 device Bar regions and the 11869 * number of bytes required by each mapping. The mapping of the 11870 * particular PCI BARs regions is dependent on the type of 11871 * SLI4 device. 
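	 *
	 * Illustrative summary of the mappings set up below:
	 *   PCI_64BIT_BAR0 - SLI4 config registers (all if_types)
	 *   PCI_64BIT_BAR2 - control registers (if_type 0) or doorbell
	 *                    registers (if_type 6)
	 *   PCI_64BIT_BAR4 - doorbell registers (if_type 0) or DPP
	 *                    registers (if_type 6)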
11872 */ 11873 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 11874 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 11875 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 11876 11877 /* 11878 * Map SLI4 PCI Config Space Register base to a kernel virtual 11879 * addr 11880 */ 11881 phba->sli4_hba.conf_regs_memmap_p = 11882 ioremap(phba->pci_bar0_map, bar0map_len); 11883 if (!phba->sli4_hba.conf_regs_memmap_p) { 11884 dev_printk(KERN_ERR, &pdev->dev, 11885 "ioremap failed for SLI4 PCI config " 11886 "registers.\n"); 11887 return -ENODEV; 11888 } 11889 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 11890 /* Set up BAR0 PCI config space register memory map */ 11891 lpfc_sli4_bar0_register_memmap(phba, if_type); 11892 } else { 11893 phba->pci_bar0_map = pci_resource_start(pdev, 1); 11894 bar0map_len = pci_resource_len(pdev, 1); 11895 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 11896 dev_printk(KERN_ERR, &pdev->dev, 11897 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 11898 return -ENODEV; 11899 } 11900 phba->sli4_hba.conf_regs_memmap_p = 11901 ioremap(phba->pci_bar0_map, bar0map_len); 11902 if (!phba->sli4_hba.conf_regs_memmap_p) { 11903 dev_printk(KERN_ERR, &pdev->dev, 11904 "ioremap failed for SLI4 PCI config " 11905 "registers.\n"); 11906 return -ENODEV; 11907 } 11908 lpfc_sli4_bar0_register_memmap(phba, if_type); 11909 } 11910 11911 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11912 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { 11913 /* 11914 * Map SLI4 if type 0 HBA Control Register base to a 11915 * kernel virtual address and setup the registers. 11916 */ 11917 phba->pci_bar1_map = pci_resource_start(pdev, 11918 PCI_64BIT_BAR2); 11919 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11920 phba->sli4_hba.ctrl_regs_memmap_p = 11921 ioremap(phba->pci_bar1_map, 11922 bar1map_len); 11923 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 11924 dev_err(&pdev->dev, 11925 "ioremap failed for SLI4 HBA " 11926 "control registers.\n"); 11927 error = -ENOMEM; 11928 goto out_iounmap_conf; 11929 } 11930 phba->pci_bar2_memmap_p = 11931 phba->sli4_hba.ctrl_regs_memmap_p; 11932 lpfc_sli4_bar1_register_memmap(phba, if_type); 11933 } else { 11934 error = -ENOMEM; 11935 goto out_iounmap_conf; 11936 } 11937 } 11938 11939 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) && 11940 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 11941 /* 11942 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel 11943 * virtual address and setup the registers. 11944 */ 11945 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 11946 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 11947 phba->sli4_hba.drbl_regs_memmap_p = 11948 ioremap(phba->pci_bar1_map, bar1map_len); 11949 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11950 dev_err(&pdev->dev, 11951 "ioremap failed for SLI4 HBA doorbell registers.\n"); 11952 error = -ENOMEM; 11953 goto out_iounmap_conf; 11954 } 11955 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 11956 lpfc_sli4_bar1_register_memmap(phba, if_type); 11957 } 11958 11959 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { 11960 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11961 /* 11962 * Map SLI4 if type 0 HBA Doorbell Register base to 11963 * a kernel virtual address and setup the registers. 
11964 */ 11965 phba->pci_bar2_map = pci_resource_start(pdev, 11966 PCI_64BIT_BAR4); 11967 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11968 phba->sli4_hba.drbl_regs_memmap_p = 11969 ioremap(phba->pci_bar2_map, 11970 bar2map_len); 11971 if (!phba->sli4_hba.drbl_regs_memmap_p) { 11972 dev_err(&pdev->dev, 11973 "ioremap failed for SLI4 HBA" 11974 " doorbell registers.\n"); 11975 error = -ENOMEM; 11976 goto out_iounmap_ctrl; 11977 } 11978 phba->pci_bar4_memmap_p = 11979 phba->sli4_hba.drbl_regs_memmap_p; 11980 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 11981 if (error) 11982 goto out_iounmap_all; 11983 } else { 11984 error = -ENOMEM; 11985 goto out_iounmap_ctrl; 11986 } 11987 } 11988 11989 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 && 11990 pci_resource_start(pdev, PCI_64BIT_BAR4)) { 11991 /* 11992 * Map SLI4 if type 6 HBA DPP Register base to a kernel 11993 * virtual address and setup the registers. 11994 */ 11995 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 11996 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 11997 phba->sli4_hba.dpp_regs_memmap_p = 11998 ioremap(phba->pci_bar2_map, bar2map_len); 11999 if (!phba->sli4_hba.dpp_regs_memmap_p) { 12000 dev_err(&pdev->dev, 12001 "ioremap failed for SLI4 HBA dpp registers.\n"); 12002 error = -ENOMEM; 12003 goto out_iounmap_all; 12004 } 12005 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 12006 } 12007 12008 /* Set up the EQ/CQ register handeling functions now */ 12009 switch (if_type) { 12010 case LPFC_SLI_INTF_IF_TYPE_0: 12011 case LPFC_SLI_INTF_IF_TYPE_2: 12012 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; 12013 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; 12014 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; 12015 break; 12016 case LPFC_SLI_INTF_IF_TYPE_6: 12017 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; 12018 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; 12019 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; 12020 break; 12021 default: 12022 break; 12023 } 12024 12025 return 0; 12026 12027 out_iounmap_all: 12028 if (phba->sli4_hba.drbl_regs_memmap_p) 12029 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 12030 out_iounmap_ctrl: 12031 if (phba->sli4_hba.ctrl_regs_memmap_p) 12032 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 12033 out_iounmap_conf: 12034 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12035 12036 return error; 12037 } 12038 12039 /** 12040 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 12041 * @phba: pointer to lpfc hba data structure. 12042 * 12043 * This routine is invoked to unset the PCI device memory space for device 12044 * with SLI-4 interface spec. 
12045 **/ 12046 static void 12047 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 12048 { 12049 uint32_t if_type; 12050 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12051 12052 switch (if_type) { 12053 case LPFC_SLI_INTF_IF_TYPE_0: 12054 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 12055 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 12056 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12057 break; 12058 case LPFC_SLI_INTF_IF_TYPE_2: 12059 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12060 break; 12061 case LPFC_SLI_INTF_IF_TYPE_6: 12062 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 12063 iounmap(phba->sli4_hba.conf_regs_memmap_p); 12064 if (phba->sli4_hba.dpp_regs_memmap_p) 12065 iounmap(phba->sli4_hba.dpp_regs_memmap_p); 12066 break; 12067 case LPFC_SLI_INTF_IF_TYPE_1: 12068 break; 12069 default: 12070 dev_printk(KERN_ERR, &phba->pcidev->dev, 12071 "FATAL - unsupported SLI4 interface type - %d\n", 12072 if_type); 12073 break; 12074 } 12075 } 12076 12077 /** 12078 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 12079 * @phba: pointer to lpfc hba data structure. 12080 * 12081 * This routine is invoked to enable the MSI-X interrupt vectors to device 12082 * with SLI-3 interface specs. 12083 * 12084 * Return codes 12085 * 0 - successful 12086 * other values - error 12087 **/ 12088 static int 12089 lpfc_sli_enable_msix(struct lpfc_hba *phba) 12090 { 12091 int rc; 12092 LPFC_MBOXQ_t *pmb; 12093 12094 /* Set up MSI-X multi-message vectors */ 12095 rc = pci_alloc_irq_vectors(phba->pcidev, 12096 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX); 12097 if (rc < 0) { 12098 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12099 "0420 PCI enable MSI-X failed (%d)\n", rc); 12100 goto vec_fail_out; 12101 } 12102 12103 /* 12104 * Assign MSI-X vectors to interrupt handlers 12105 */ 12106 12107 /* vector-0 is associated to slow-path handler */ 12108 rc = request_irq(pci_irq_vector(phba->pcidev, 0), 12109 &lpfc_sli_sp_intr_handler, 0, 12110 LPFC_SP_DRIVER_HANDLER_NAME, phba); 12111 if (rc) { 12112 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12113 "0421 MSI-X slow-path request_irq failed " 12114 "(%d)\n", rc); 12115 goto msi_fail_out; 12116 } 12117 12118 /* vector-1 is associated to fast-path handler */ 12119 rc = request_irq(pci_irq_vector(phba->pcidev, 1), 12120 &lpfc_sli_fp_intr_handler, 0, 12121 LPFC_FP_DRIVER_HANDLER_NAME, phba); 12122 12123 if (rc) { 12124 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12125 "0429 MSI-X fast-path request_irq failed " 12126 "(%d)\n", rc); 12127 goto irq_fail_out; 12128 } 12129 12130 /* 12131 * Configure HBA MSI-X attention conditions to messages 12132 */ 12133 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12134 12135 if (!pmb) { 12136 rc = -ENOMEM; 12137 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 12138 "0474 Unable to allocate memory for issuing " 12139 "MBOX_CONFIG_MSI command\n"); 12140 goto mem_fail_out; 12141 } 12142 rc = lpfc_config_msi(phba, pmb); 12143 if (rc) 12144 goto mbx_fail_out; 12145 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 12146 if (rc != MBX_SUCCESS) { 12147 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 12148 "0351 Config MSI mailbox command failed, " 12149 "mbxCmd x%x, mbxStatus x%x\n", 12150 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 12151 goto mbx_fail_out; 12152 } 12153 12154 /* Free memory allocated for mailbox command */ 12155 mempool_free(pmb, phba->mbox_mem_pool); 12156 return rc; 12157 12158 mbx_fail_out: 12159 /* Free memory allocated for mailbox command */ 12160 
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0012 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the driver
 * will try to fall back from the configured interrupt mode to an interrupt
 * mode that is supported by the platform, kernel, and device, in the order
 * of:
 * MSI-X -> MSI -> IRQ.
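 *
 * For example, with cfg_mode == 2 the driver first attempts MSI-X; if that
 * fails it falls back to MSI, and finally to a shared INTx line. The mode
 * actually enabled is reflected in phba->intr_type and the returned value.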
12229 * 12230 * Return codes 12231 * 0 - successful 12232 * other values - error 12233 **/ 12234 static uint32_t 12235 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 12236 { 12237 uint32_t intr_mode = LPFC_INTR_ERROR; 12238 int retval; 12239 12240 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 12241 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 12242 if (retval) 12243 return intr_mode; 12244 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; 12245 12246 if (cfg_mode == 2) { 12247 /* Now, try to enable MSI-X interrupt mode */ 12248 retval = lpfc_sli_enable_msix(phba); 12249 if (!retval) { 12250 /* Indicate initialization to MSI-X mode */ 12251 phba->intr_type = MSIX; 12252 intr_mode = 2; 12253 } 12254 } 12255 12256 /* Fallback to MSI if MSI-X initialization failed */ 12257 if (cfg_mode >= 1 && phba->intr_type == NONE) { 12258 retval = lpfc_sli_enable_msi(phba); 12259 if (!retval) { 12260 /* Indicate initialization to MSI mode */ 12261 phba->intr_type = MSI; 12262 intr_mode = 1; 12263 } 12264 } 12265 12266 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 12267 if (phba->intr_type == NONE) { 12268 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 12269 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 12270 if (!retval) { 12271 /* Indicate initialization to INTx mode */ 12272 phba->intr_type = INTx; 12273 intr_mode = 0; 12274 } 12275 } 12276 return intr_mode; 12277 } 12278 12279 /** 12280 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 12281 * @phba: pointer to lpfc hba data structure. 12282 * 12283 * This routine is invoked to disable device interrupt and disassociate the 12284 * driver's interrupt handler(s) from interrupt vector(s) to device with 12285 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 12286 * release the interrupt vector(s) for the message signaled interrupt. 12287 **/ 12288 static void 12289 lpfc_sli_disable_intr(struct lpfc_hba *phba) 12290 { 12291 int nr_irqs, i; 12292 12293 if (phba->intr_type == MSIX) 12294 nr_irqs = LPFC_MSIX_VECTORS; 12295 else 12296 nr_irqs = 1; 12297 12298 for (i = 0; i < nr_irqs; i++) 12299 free_irq(pci_irq_vector(phba->pcidev, i), phba); 12300 pci_free_irq_vectors(phba->pcidev); 12301 12302 /* Reset interrupt management states */ 12303 phba->intr_type = NONE; 12304 phba->sli.slistat.sli_intr = 0; 12305 } 12306 12307 /** 12308 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue 12309 * @phba: pointer to lpfc hba data structure. 12310 * @id: EQ vector index or Hardware Queue index 12311 * @match: LPFC_FIND_BY_EQ = match by EQ 12312 * LPFC_FIND_BY_HDWQ = match by Hardware Queue 12313 * Return the CPU that matches the selection criteria 12314 */ 12315 static uint16_t 12316 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) 12317 { 12318 struct lpfc_vector_map_info *cpup; 12319 int cpu; 12320 12321 /* Loop through all CPUs */ 12322 for_each_present_cpu(cpu) { 12323 cpup = &phba->sli4_hba.cpu_map[cpu]; 12324 12325 /* If we are matching by EQ, there may be multiple CPUs using 12326 * using the same vector, so select the one with 12327 * LPFC_CPU_FIRST_IRQ set. 
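		 *
		 * Illustrative example: lpfc_find_cpu_handle(phba, 3,
		 * LPFC_FIND_BY_EQ) returns the first present CPU whose
		 * cpu_map entry has eq == 3 and LPFC_CPU_FIRST_IRQ set,
		 * while LPFC_FIND_BY_HDWQ matches on the hdwq field instead.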
12328 */ 12329 if ((match == LPFC_FIND_BY_EQ) && 12330 (cpup->flag & LPFC_CPU_FIRST_IRQ) && 12331 (cpup->eq == id)) 12332 return cpu; 12333 12334 /* If matching by HDWQ, select the first CPU that matches */ 12335 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) 12336 return cpu; 12337 } 12338 return 0; 12339 } 12340 12341 #ifdef CONFIG_X86 12342 /** 12343 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded 12344 * @phba: pointer to lpfc hba data structure. 12345 * @cpu: CPU map index 12346 * @phys_id: CPU package physical id 12347 * @core_id: CPU core id 12348 */ 12349 static int 12350 lpfc_find_hyper(struct lpfc_hba *phba, int cpu, 12351 uint16_t phys_id, uint16_t core_id) 12352 { 12353 struct lpfc_vector_map_info *cpup; 12354 int idx; 12355 12356 for_each_present_cpu(idx) { 12357 cpup = &phba->sli4_hba.cpu_map[idx]; 12358 /* Does the cpup match the one we are looking for */ 12359 if ((cpup->phys_id == phys_id) && 12360 (cpup->core_id == core_id) && 12361 (cpu != idx)) 12362 return 1; 12363 } 12364 return 0; 12365 } 12366 #endif 12367 12368 /* 12369 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure 12370 * @phba: pointer to lpfc hba data structure. 12371 * @eqidx: index for eq and irq vector 12372 * @flag: flags to set for vector_map structure 12373 * @cpu: cpu used to index vector_map structure 12374 * 12375 * The routine assigns eq info into vector_map structure 12376 */ 12377 static inline void 12378 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, 12379 unsigned int cpu) 12380 { 12381 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; 12382 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); 12383 12384 cpup->eq = eqidx; 12385 cpup->flag |= flag; 12386 12387 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12388 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", 12389 cpu, eqhdl->irq, cpup->eq, cpup->flag); 12390 } 12391 12392 /** 12393 * lpfc_cpu_map_array_init - Initialize cpu_map structure 12394 * @phba: pointer to lpfc hba data structure. 12395 * 12396 * The routine initializes the cpu_map array structure 12397 */ 12398 static void 12399 lpfc_cpu_map_array_init(struct lpfc_hba *phba) 12400 { 12401 struct lpfc_vector_map_info *cpup; 12402 struct lpfc_eq_intr_info *eqi; 12403 int cpu; 12404 12405 for_each_possible_cpu(cpu) { 12406 cpup = &phba->sli4_hba.cpu_map[cpu]; 12407 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; 12408 cpup->core_id = LPFC_VECTOR_MAP_EMPTY; 12409 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; 12410 cpup->eq = LPFC_VECTOR_MAP_EMPTY; 12411 cpup->flag = 0; 12412 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); 12413 INIT_LIST_HEAD(&eqi->list); 12414 eqi->icnt = 0; 12415 } 12416 } 12417 12418 /** 12419 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure 12420 * @phba: pointer to lpfc hba data structure. 12421 * 12422 * The routine initializes the hba_eq_hdl array structure 12423 */ 12424 static void 12425 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) 12426 { 12427 struct lpfc_hba_eq_hdl *eqhdl; 12428 int i; 12429 12430 for (i = 0; i < phba->cfg_irq_chann; i++) { 12431 eqhdl = lpfc_get_eq_hdl(i); 12432 eqhdl->irq = LPFC_IRQ_EMPTY; 12433 eqhdl->phba = phba; 12434 } 12435 } 12436 12437 /** 12438 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings 12439 * @phba: pointer to lpfc hba data structure. 12440 * @vectors: number of msix vectors allocated. 12441 * 12442 * The routine will figure out the CPU affinity assignment for every 12443 * MSI-X vector allocated for the HBA. 
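 *
 * An outline of the passes below (illustrative): first each present CPU's
 * phys_id/core_id is recorded (and hyper-thread siblings flagged on x86);
 * CPUs the kernel left without a vector then inherit the EQ of an assigned
 * CPU on the same phys_id and, failing that, of any assigned CPU.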
12444 * In addition, the CPU to IO channel mapping will be calculated 12445 * and the phba->sli4_hba.cpu_map array will reflect this. 12446 */ 12447 static void 12448 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) 12449 { 12450 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; 12451 int max_phys_id, min_phys_id; 12452 int max_core_id, min_core_id; 12453 struct lpfc_vector_map_info *cpup; 12454 struct lpfc_vector_map_info *new_cpup; 12455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12456 struct lpfc_hdwq_stat *c_stat; 12457 #endif 12458 12459 max_phys_id = 0; 12460 min_phys_id = LPFC_VECTOR_MAP_EMPTY; 12461 max_core_id = 0; 12462 min_core_id = LPFC_VECTOR_MAP_EMPTY; 12463 12464 /* Update CPU map with physical id and core id of each CPU */ 12465 for_each_present_cpu(cpu) { 12466 cpup = &phba->sli4_hba.cpu_map[cpu]; 12467 #ifdef CONFIG_X86 12468 cpup->phys_id = topology_physical_package_id(cpu); 12469 cpup->core_id = topology_core_id(cpu); 12470 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id)) 12471 cpup->flag |= LPFC_CPU_MAP_HYPER; 12472 #else 12473 /* No distinction between CPUs for other platforms */ 12474 cpup->phys_id = 0; 12475 cpup->core_id = cpu; 12476 #endif 12477 12478 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12479 "3328 CPU %d physid %d coreid %d flag x%x\n", 12480 cpu, cpup->phys_id, cpup->core_id, cpup->flag); 12481 12482 if (cpup->phys_id > max_phys_id) 12483 max_phys_id = cpup->phys_id; 12484 if (cpup->phys_id < min_phys_id) 12485 min_phys_id = cpup->phys_id; 12486 12487 if (cpup->core_id > max_core_id) 12488 max_core_id = cpup->core_id; 12489 if (cpup->core_id < min_core_id) 12490 min_core_id = cpup->core_id; 12491 } 12492 12493 /* After looking at each irq vector assigned to this pcidev, its 12494 * possible to see that not ALL CPUs have been accounted for. 12495 * Next we will set any unassigned (unaffinitized) cpu map 12496 * entries to a IRQ on the same phys_id. 12497 */ 12498 first_cpu = cpumask_first(cpu_present_mask); 12499 start_cpu = first_cpu; 12500 12501 for_each_present_cpu(cpu) { 12502 cpup = &phba->sli4_hba.cpu_map[cpu]; 12503 12504 /* Is this CPU entry unassigned */ 12505 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12506 /* Mark CPU as IRQ not assigned by the kernel */ 12507 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12508 12509 /* If so, find a new_cpup that is on the SAME 12510 * phys_id as cpup. start_cpu will start where we 12511 * left off so all unassigned entries don't get assgined 12512 * the IRQ of the first entry. 12513 */ 12514 new_cpu = start_cpu; 12515 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12516 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12517 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12518 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && 12519 (new_cpup->phys_id == cpup->phys_id)) 12520 goto found_same; 12521 new_cpu = lpfc_next_present_cpu(new_cpu); 12522 } 12523 /* At this point, we leave the CPU as unassigned */ 12524 continue; 12525 found_same: 12526 /* We found a matching phys_id, so copy the IRQ info */ 12527 cpup->eq = new_cpup->eq; 12528 12529 /* Bump start_cpu to the next slot to minmize the 12530 * chance of having multiple unassigned CPU entries 12531 * selecting the same IRQ. 
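 * This is the first, socket-local pass: only donor entries on the
 * same phys_id qualify. Entries still unassigned after this pass are
 * handled by the second pass below, which accepts a donor on any
 * phys_id.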
12532 */ 12533 start_cpu = lpfc_next_present_cpu(new_cpu); 12534 12535 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12536 "3337 Set Affinity: CPU %d " 12537 "eq %d from peer cpu %d same " 12538 "phys_id (%d)\n", 12539 cpu, cpup->eq, new_cpu, 12540 cpup->phys_id); 12541 } 12542 } 12543 12544 /* Set any unassigned cpu map entries to a IRQ on any phys_id */ 12545 start_cpu = first_cpu; 12546 12547 for_each_present_cpu(cpu) { 12548 cpup = &phba->sli4_hba.cpu_map[cpu]; 12549 12550 /* Is this entry unassigned */ 12551 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { 12552 /* Mark it as IRQ not assigned by the kernel */ 12553 cpup->flag |= LPFC_CPU_MAP_UNASSIGN; 12554 12555 /* If so, find a new_cpup thats on ANY phys_id 12556 * as the cpup. start_cpu will start where we 12557 * left off so all unassigned entries don't get 12558 * assigned the IRQ of the first entry. 12559 */ 12560 new_cpu = start_cpu; 12561 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12562 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12563 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && 12564 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) 12565 goto found_any; 12566 new_cpu = lpfc_next_present_cpu(new_cpu); 12567 } 12568 /* We should never leave an entry unassigned */ 12569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12570 "3339 Set Affinity: CPU %d " 12571 "eq %d UNASSIGNED\n", 12572 cpup->hdwq, cpup->eq); 12573 continue; 12574 found_any: 12575 /* We found an available entry, copy the IRQ info */ 12576 cpup->eq = new_cpup->eq; 12577 12578 /* Bump start_cpu to the next slot to minmize the 12579 * chance of having multiple unassigned CPU entries 12580 * selecting the same IRQ. 12581 */ 12582 start_cpu = lpfc_next_present_cpu(new_cpu); 12583 12584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12585 "3338 Set Affinity: CPU %d " 12586 "eq %d from peer cpu %d (%d/%d)\n", 12587 cpu, cpup->eq, new_cpu, 12588 new_cpup->phys_id, new_cpup->core_id); 12589 } 12590 } 12591 12592 /* Assign hdwq indices that are unique across all cpus in the map 12593 * that are also FIRST_CPUs. 12594 */ 12595 idx = 0; 12596 for_each_present_cpu(cpu) { 12597 cpup = &phba->sli4_hba.cpu_map[cpu]; 12598 12599 /* Only FIRST IRQs get a hdwq index assignment. */ 12600 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12601 continue; 12602 12603 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ 12604 cpup->hdwq = idx; 12605 idx++; 12606 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12607 "3333 Set Affinity: CPU %d (phys %d core %d): " 12608 "hdwq %d eq %d flg x%x\n", 12609 cpu, cpup->phys_id, cpup->core_id, 12610 cpup->hdwq, cpup->eq, cpup->flag); 12611 } 12612 /* Associate a hdwq with each cpu_map entry 12613 * This will be 1 to 1 - hdwq to cpu, unless there are less 12614 * hardware queues then CPUs. For that case we will just round-robin 12615 * the available hardware queues as they get assigned to CPUs. 12616 * The next_idx is the idx from the FIRST_CPU loop above to account 12617 * for irq_chann < hdwq. The idx is used for round-robin assignments 12618 * and needs to start at 0. 12619 */ 12620 next_idx = idx; 12621 start_cpu = 0; 12622 idx = 0; 12623 for_each_present_cpu(cpu) { 12624 cpup = &phba->sli4_hba.cpu_map[cpu]; 12625 12626 /* FIRST cpus are already mapped. */ 12627 if (cpup->flag & LPFC_CPU_FIRST_IRQ) 12628 continue; 12629 12630 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq 12631 * of the unassigned cpus to the next idx so that all 12632 * hdw queues are fully utilized. 
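 * For example (illustrative values only): with cfg_irq_chann = 2,
 * cfg_hdw_queue = 4 and six present CPUs, the two FIRST_IRQ CPUs take
 * hdwq 0 and 1, the next two unassigned CPUs take hdwq 2 and 3 via
 * next_idx, and the remaining CPUs reuse the hdwq of a peer with the
 * same phys_id/core_id (or fall back to plain round-robin).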
12633 */ 12634 if (next_idx < phba->cfg_hdw_queue) { 12635 cpup->hdwq = next_idx; 12636 next_idx++; 12637 continue; 12638 } 12639 12640 /* Not a First CPU and all hdw_queues are used. Reuse a 12641 * Hardware Queue for another CPU, so be smart about it 12642 * and pick one that has its IRQ/EQ mapped to the same phys_id 12643 * (CPU package) and core_id. 12644 */ 12645 new_cpu = start_cpu; 12646 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12647 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12648 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12649 new_cpup->phys_id == cpup->phys_id && 12650 new_cpup->core_id == cpup->core_id) { 12651 goto found_hdwq; 12652 } 12653 new_cpu = lpfc_next_present_cpu(new_cpu); 12654 } 12655 12656 /* If we can't match both phys_id and core_id, 12657 * settle for just a phys_id match. 12658 */ 12659 new_cpu = start_cpu; 12660 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 12661 new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; 12662 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && 12663 new_cpup->phys_id == cpup->phys_id) 12664 goto found_hdwq; 12665 new_cpu = lpfc_next_present_cpu(new_cpu); 12666 } 12667 12668 /* Otherwise just round robin on cfg_hdw_queue */ 12669 cpup->hdwq = idx % phba->cfg_hdw_queue; 12670 idx++; 12671 goto logit; 12672 found_hdwq: 12673 /* We found an available entry, copy the IRQ info */ 12674 start_cpu = lpfc_next_present_cpu(new_cpu); 12675 cpup->hdwq = new_cpup->hdwq; 12676 logit: 12677 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12678 "3335 Set Affinity: CPU %d (phys %d core %d): " 12679 "hdwq %d eq %d flg x%x\n", 12680 cpu, cpup->phys_id, cpup->core_id, 12681 cpup->hdwq, cpup->eq, cpup->flag); 12682 } 12683 12684 /* 12685 * Initialize the cpu_map slots for not-present cpus in case 12686 * a cpu is hot-added. Perform a simple hdwq round robin assignment. 12687 */ 12688 idx = 0; 12689 for_each_possible_cpu(cpu) { 12690 cpup = &phba->sli4_hba.cpu_map[cpu]; 12691 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12692 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu); 12693 c_stat->hdwq_no = cpup->hdwq; 12694 #endif 12695 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) 12696 continue; 12697 12698 cpup->hdwq = idx++ % phba->cfg_hdw_queue; 12699 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 12700 c_stat->hdwq_no = cpup->hdwq; 12701 #endif 12702 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12703 "3340 Set Affinity: not present " 12704 "CPU %d hdwq %d\n", 12705 cpu, cpup->hdwq); 12706 } 12707 12708 /* The cpu_map array will be used later during initialization 12709 * when EQ / CQ / WQs are allocated and configured. 12710 */ 12711 return; 12712 } 12713 12714 /** 12715 * lpfc_cpuhp_get_eq 12716 * 12717 * @phba: pointer to lpfc hba data structure. 12718 * @cpu: cpu going offline 12719 * @eqlist: eq list to append to 12720 */ 12721 static int 12722 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, 12723 struct list_head *eqlist) 12724 { 12725 const struct cpumask *maskp; 12726 struct lpfc_queue *eq; 12727 struct cpumask *tmp; 12728 u16 idx; 12729 12730 tmp = kzalloc(cpumask_size(), GFP_KERNEL); 12731 if (!tmp) 12732 return -ENOMEM; 12733 12734 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12735 maskp = pci_irq_get_affinity(phba->pcidev, idx); 12736 if (!maskp) 12737 continue; 12738 /* 12739 * if irq is not affinitized to the cpu going 12740 * then we don't need to poll the eq attached 12741 * to it. 12742 */ 12743 if (!cpumask_and(tmp, maskp, cpumask_of(cpu))) 12744 continue; 12745 /* get the cpus that are online and are affini- 12746 * tized to this irq vector. 
If the count is 12747 * more than 1 then cpuhp is not going to shut- 12748 * down this vector. Since this cpu has not 12749 * gone offline yet, we need >1. 12750 */ 12751 cpumask_and(tmp, maskp, cpu_online_mask); 12752 if (cpumask_weight(tmp) > 1) 12753 continue; 12754 12755 /* Now that we have an irq to shutdown, get the eq 12756 * mapped to this irq. Note: multiple hdwq's in 12757 * the software can share an eq, but eventually 12758 * only eq will be mapped to this vector 12759 */ 12760 eq = phba->sli4_hba.hba_eq_hdl[idx].eq; 12761 list_add(&eq->_poll_list, eqlist); 12762 } 12763 kfree(tmp); 12764 return 0; 12765 } 12766 12767 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) 12768 { 12769 if (phba->sli_rev != LPFC_SLI_REV4) 12770 return; 12771 12772 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, 12773 &phba->cpuhp); 12774 /* 12775 * unregistering the instance doesn't stop the polling 12776 * timer. Wait for the poll timer to retire. 12777 */ 12778 synchronize_rcu(); 12779 del_timer_sync(&phba->cpuhp_poll_timer); 12780 } 12781 12782 static void lpfc_cpuhp_remove(struct lpfc_hba *phba) 12783 { 12784 if (phba->pport && 12785 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag)) 12786 return; 12787 12788 __lpfc_cpuhp_remove(phba); 12789 } 12790 12791 static void lpfc_cpuhp_add(struct lpfc_hba *phba) 12792 { 12793 if (phba->sli_rev != LPFC_SLI_REV4) 12794 return; 12795 12796 rcu_read_lock(); 12797 12798 if (!list_empty(&phba->poll_list)) 12799 mod_timer(&phba->cpuhp_poll_timer, 12800 jiffies + msecs_to_jiffies(LPFC_POLL_HB)); 12801 12802 rcu_read_unlock(); 12803 12804 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, 12805 &phba->cpuhp); 12806 } 12807 12808 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) 12809 { 12810 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { 12811 *retval = -EAGAIN; 12812 return true; 12813 } 12814 12815 if (phba->sli_rev != LPFC_SLI_REV4) { 12816 *retval = 0; 12817 return true; 12818 } 12819 12820 /* proceed with the hotplug */ 12821 return false; 12822 } 12823 12824 /** 12825 * lpfc_irq_set_aff - set IRQ affinity 12826 * @eqhdl: EQ handle 12827 * @cpu: cpu to set affinity 12828 * 12829 **/ 12830 static inline void 12831 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) 12832 { 12833 cpumask_clear(&eqhdl->aff_mask); 12834 cpumask_set_cpu(cpu, &eqhdl->aff_mask); 12835 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12836 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask); 12837 } 12838 12839 /** 12840 * lpfc_irq_clear_aff - clear IRQ affinity 12841 * @eqhdl: EQ handle 12842 * 12843 **/ 12844 static inline void 12845 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) 12846 { 12847 cpumask_clear(&eqhdl->aff_mask); 12848 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); 12849 } 12850 12851 /** 12852 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event 12853 * @phba: pointer to HBA context object. 12854 * @cpu: cpu going offline/online 12855 * @offline: true, cpu is going offline. false, cpu is coming online. 12856 * 12857 * If cpu is going offline, we'll try our best effort to find the next 12858 * online cpu on the phba's original_mask and migrate all offlining IRQ 12859 * affinities. 12860 * 12861 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. 12862 * 12863 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on 12864 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity. 
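 * CPUs that are not in the adapter's irq_aff_mask, or whose cpu_map
 * entry is not flagged LPFC_CPU_FIRST_IRQ, cause an early return and
 * no affinity rework.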
12865 * 12866 **/ 12867 static void 12868 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) 12869 { 12870 struct lpfc_vector_map_info *cpup; 12871 struct cpumask *aff_mask; 12872 unsigned int cpu_select, cpu_next, idx; 12873 const struct cpumask *orig_mask; 12874 12875 if (phba->irq_chann_mode == NORMAL_MODE) 12876 return; 12877 12878 orig_mask = &phba->sli4_hba.irq_aff_mask; 12879 12880 if (!cpumask_test_cpu(cpu, orig_mask)) 12881 return; 12882 12883 cpup = &phba->sli4_hba.cpu_map[cpu]; 12884 12885 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) 12886 return; 12887 12888 if (offline) { 12889 /* Find next online CPU on original mask */ 12890 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true); 12891 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next); 12892 12893 /* Found a valid CPU */ 12894 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { 12895 /* Go through each eqhdl and ensure offlining 12896 * cpu aff_mask is migrated 12897 */ 12898 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 12899 aff_mask = lpfc_get_aff_mask(idx); 12900 12901 /* Migrate affinity */ 12902 if (cpumask_test_cpu(cpu, aff_mask)) 12903 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), 12904 cpu_select); 12905 } 12906 } else { 12907 /* Rely on irqbalance if no online CPUs left on NUMA */ 12908 for (idx = 0; idx < phba->cfg_irq_chann; idx++) 12909 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); 12910 } 12911 } else { 12912 /* Migrate affinity back to this CPU */ 12913 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); 12914 } 12915 } 12916 12917 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) 12918 { 12919 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12920 struct lpfc_queue *eq, *next; 12921 LIST_HEAD(eqlist); 12922 int retval; 12923 12924 if (!phba) { 12925 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12926 return 0; 12927 } 12928 12929 if (__lpfc_cpuhp_checks(phba, &retval)) 12930 return retval; 12931 12932 lpfc_irq_rebalance(phba, cpu, true); 12933 12934 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist); 12935 if (retval) 12936 return retval; 12937 12938 /* start polling on these eq's */ 12939 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { 12940 list_del_init(&eq->_poll_list); 12941 lpfc_sli4_start_polling(eq); 12942 } 12943 12944 return 0; 12945 } 12946 12947 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) 12948 { 12949 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); 12950 struct lpfc_queue *eq, *next; 12951 unsigned int n; 12952 int retval; 12953 12954 if (!phba) { 12955 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); 12956 return 0; 12957 } 12958 12959 if (__lpfc_cpuhp_checks(phba, &retval)) 12960 return retval; 12961 12962 lpfc_irq_rebalance(phba, cpu, false); 12963 12964 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { 12965 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); 12966 if (n == cpu) 12967 lpfc_sli4_stop_polling(eq); 12968 } 12969 12970 return 0; 12971 } 12972 12973 /** 12974 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 12975 * @phba: pointer to lpfc hba data structure. 12976 * 12977 * This routine is invoked to enable the MSI-X interrupt vectors to device 12978 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them 12979 * to cpus on the system. 
12980 * 12981 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for 12982 * the number of cpus on the same numa node as this adapter. The vectors are 12983 * allocated without requesting OS affinity mapping. A vector will be 12984 * allocated and assigned to each online and offline cpu. If the cpu is 12985 * online, then affinity will be set to that cpu. If the cpu is offline, then 12986 * affinity will be set to the nearest peer cpu within the numa node that is 12987 * online. If there are no online cpus within the numa node, affinity is not 12988 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping 12989 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is 12990 * configured. 12991 * 12992 * If numa mode is not enabled and there is more than 1 vector allocated, then 12993 * the driver relies on the managed irq interface where the OS assigns vector to 12994 * cpu affinity. The driver will then use that affinity mapping to setup its 12995 * cpu mapping table. 12996 * 12997 * Return codes 12998 * 0 - successful 12999 * other values - error 13000 **/ 13001 static int 13002 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 13003 { 13004 int vectors, rc, index; 13005 char *name; 13006 const struct cpumask *aff_mask = NULL; 13007 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; 13008 struct lpfc_vector_map_info *cpup; 13009 struct lpfc_hba_eq_hdl *eqhdl; 13010 const struct cpumask *maskp; 13011 unsigned int flags = PCI_IRQ_MSIX; 13012 13013 /* Set up MSI-X multi-message vectors */ 13014 vectors = phba->cfg_irq_chann; 13015 13016 if (phba->irq_chann_mode != NORMAL_MODE) 13017 aff_mask = &phba->sli4_hba.irq_aff_mask; 13018 13019 if (aff_mask) { 13020 cpu_cnt = cpumask_weight(aff_mask); 13021 vectors = min(phba->cfg_irq_chann, cpu_cnt); 13022 13023 /* cpu: iterates over aff_mask including offline or online 13024 * cpu_select: iterates over online aff_mask to set affinity 13025 */ 13026 cpu = cpumask_first(aff_mask); 13027 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 13028 } else { 13029 flags |= PCI_IRQ_AFFINITY; 13030 } 13031 13032 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); 13033 if (rc < 0) { 13034 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13035 "0484 PCI enable MSI-X failed (%d)\n", rc); 13036 goto vec_fail_out; 13037 } 13038 vectors = rc; 13039 13040 /* Assign MSI-X vectors to interrupt handlers */ 13041 for (index = 0; index < vectors; index++) { 13042 eqhdl = lpfc_get_eq_hdl(index); 13043 name = eqhdl->handler_name; 13044 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); 13045 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, 13046 LPFC_DRIVER_HANDLER_NAME"%d", index); 13047 13048 eqhdl->idx = index; 13049 rc = pci_irq_vector(phba->pcidev, index); 13050 if (rc < 0) { 13051 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13052 "0489 MSI-X fast-path (%d) " 13053 "pci_irq_vec failed (%d)\n", index, rc); 13054 goto cfg_fail_out; 13055 } 13056 eqhdl->irq = rc; 13057 13058 rc = request_threaded_irq(eqhdl->irq, 13059 &lpfc_sli4_hba_intr_handler, 13060 &lpfc_sli4_hba_intr_handler_th, 13061 0, name, eqhdl); 13062 if (rc) { 13063 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13064 "0486 MSI-X fast-path (%d) " 13065 "request_irq failed (%d)\n", index, rc); 13066 goto cfg_fail_out; 13067 } 13068 13069 if (aff_mask) { 13070 /* If found a neighboring online cpu, set affinity */ 13071 if (cpu_select < nr_cpu_ids) 13072 lpfc_irq_set_aff(eqhdl, cpu_select); 13073 13074 /* Assign EQ to cpu_map */ 13075 
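			/* Note: in this aff_mask branch, cpu walks the
			 * mask including offline CPUs, so every vector
			 * still gets a cpu_map entry even when its
			 * preferred CPU is currently offline.
			 */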
lpfc_assign_eq_map_info(phba, index, 13076 LPFC_CPU_FIRST_IRQ, 13077 cpu); 13078 13079 /* Iterate to next offline or online cpu in aff_mask */ 13080 cpu = cpumask_next(cpu, aff_mask); 13081 13082 /* Find next online cpu in aff_mask to set affinity */ 13083 cpu_select = lpfc_next_online_cpu(aff_mask, cpu); 13084 } else if (vectors == 1) { 13085 cpu = cpumask_first(cpu_present_mask); 13086 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, 13087 cpu); 13088 } else { 13089 maskp = pci_irq_get_affinity(phba->pcidev, index); 13090 13091 /* Loop through all CPUs associated with vector index */ 13092 for_each_cpu_and(cpu, maskp, cpu_present_mask) { 13093 cpup = &phba->sli4_hba.cpu_map[cpu]; 13094 13095 /* If this is the first CPU thats assigned to 13096 * this vector, set LPFC_CPU_FIRST_IRQ. 13097 * 13098 * With certain platforms its possible that irq 13099 * vectors are affinitized to all the cpu's. 13100 * This can result in each cpu_map.eq to be set 13101 * to the last vector, resulting in overwrite 13102 * of all the previous cpu_map.eq. Ensure that 13103 * each vector receives a place in cpu_map. 13104 * Later call to lpfc_cpu_affinity_check will 13105 * ensure we are nicely balanced out. 13106 */ 13107 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY) 13108 continue; 13109 lpfc_assign_eq_map_info(phba, index, 13110 LPFC_CPU_FIRST_IRQ, 13111 cpu); 13112 break; 13113 } 13114 } 13115 } 13116 13117 if (vectors != phba->cfg_irq_chann) { 13118 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13119 "3238 Reducing IO channels to match number of " 13120 "MSI-X vectors, requested %d got %d\n", 13121 phba->cfg_irq_chann, vectors); 13122 if (phba->cfg_irq_chann > vectors) 13123 phba->cfg_irq_chann = vectors; 13124 } 13125 13126 return rc; 13127 13128 cfg_fail_out: 13129 /* free the irq already requested */ 13130 for (--index; index >= 0; index--) { 13131 eqhdl = lpfc_get_eq_hdl(index); 13132 lpfc_irq_clear_aff(eqhdl); 13133 free_irq(eqhdl->irq, eqhdl); 13134 } 13135 13136 /* Unconfigure MSI-X capability structure */ 13137 pci_free_irq_vectors(phba->pcidev); 13138 13139 vec_fail_out: 13140 return rc; 13141 } 13142 13143 /** 13144 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 13145 * @phba: pointer to lpfc hba data structure. 13146 * 13147 * This routine is invoked to enable the MSI interrupt mode to device with 13148 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is 13149 * called to enable the MSI vector. The device driver is responsible for 13150 * calling the request_irq() to register MSI vector with a interrupt the 13151 * handler, which is done in this function. 13152 * 13153 * Return codes 13154 * 0 - successful 13155 * other values - error 13156 **/ 13157 static int 13158 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 13159 { 13160 int rc, index; 13161 unsigned int cpu; 13162 struct lpfc_hba_eq_hdl *eqhdl; 13163 13164 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, 13165 PCI_IRQ_MSI | PCI_IRQ_AFFINITY); 13166 if (rc > 0) 13167 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13168 "0487 PCI enable MSI mode success.\n"); 13169 else { 13170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13171 "0488 PCI enable MSI mode failed (%d)\n", rc); 13172 return rc ? 
rc : -1; 13173 } 13174 13175 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13176 0, LPFC_DRIVER_NAME, phba); 13177 if (rc) { 13178 pci_free_irq_vectors(phba->pcidev); 13179 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13180 "0490 MSI request_irq failed (%d)\n", rc); 13181 return rc; 13182 } 13183 13184 eqhdl = lpfc_get_eq_hdl(0); 13185 rc = pci_irq_vector(phba->pcidev, 0); 13186 if (rc < 0) { 13187 pci_free_irq_vectors(phba->pcidev); 13188 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13189 "0496 MSI pci_irq_vec failed (%d)\n", rc); 13190 return rc; 13191 } 13192 eqhdl->irq = rc; 13193 13194 cpu = cpumask_first(cpu_present_mask); 13195 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); 13196 13197 for (index = 0; index < phba->cfg_irq_chann; index++) { 13198 eqhdl = lpfc_get_eq_hdl(index); 13199 eqhdl->idx = index; 13200 } 13201 13202 return 0; 13203 } 13204 13205 /** 13206 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 13207 * @phba: pointer to lpfc hba data structure. 13208 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X). 13209 * 13210 * This routine is invoked to enable device interrupt and associate driver's 13211 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 13212 * interface spec. Depends on the interrupt mode configured to the driver, 13213 * the driver will try to fallback from the configured interrupt mode to an 13214 * interrupt mode which is supported by the platform, kernel, and device in 13215 * the order of: 13216 * MSI-X -> MSI -> IRQ. 13217 * 13218 * Return codes 13219 * Interrupt mode (2, 1, 0) - successful 13220 * LPFC_INTR_ERROR - error 13221 **/ 13222 static uint32_t 13223 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 13224 { 13225 uint32_t intr_mode = LPFC_INTR_ERROR; 13226 int retval, idx; 13227 13228 if (cfg_mode == 2) { 13229 /* Preparation before conf_msi mbox cmd */ 13230 retval = 0; 13231 if (!retval) { 13232 /* Now, try to enable MSI-X interrupt mode */ 13233 retval = lpfc_sli4_enable_msix(phba); 13234 if (!retval) { 13235 /* Indicate initialization to MSI-X mode */ 13236 phba->intr_type = MSIX; 13237 intr_mode = 2; 13238 } 13239 } 13240 } 13241 13242 /* Fallback to MSI if MSI-X initialization failed */ 13243 if (cfg_mode >= 1 && phba->intr_type == NONE) { 13244 retval = lpfc_sli4_enable_msi(phba); 13245 if (!retval) { 13246 /* Indicate initialization to MSI mode */ 13247 phba->intr_type = MSI; 13248 intr_mode = 1; 13249 } 13250 } 13251 13252 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 13253 if (phba->intr_type == NONE) { 13254 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 13255 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 13256 if (!retval) { 13257 struct lpfc_hba_eq_hdl *eqhdl; 13258 unsigned int cpu; 13259 13260 /* Indicate initialization to INTx mode */ 13261 phba->intr_type = INTx; 13262 intr_mode = 0; 13263 13264 eqhdl = lpfc_get_eq_hdl(0); 13265 retval = pci_irq_vector(phba->pcidev, 0); 13266 if (retval < 0) { 13267 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13268 "0502 INTR pci_irq_vec failed (%d)\n", 13269 retval); 13270 return LPFC_INTR_ERROR; 13271 } 13272 eqhdl->irq = retval; 13273 13274 cpu = cpumask_first(cpu_present_mask); 13275 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, 13276 cpu); 13277 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 13278 eqhdl = lpfc_get_eq_hdl(idx); 13279 eqhdl->idx = idx; 13280 } 13281 } 13282 } 13283 return intr_mode; 13284 } 13285 13286 /** 13287 * lpfc_sli4_disable_intr - Disable 
device interrupt to SLI-4 device 13288 * @phba: pointer to lpfc hba data structure. 13289 * 13290 * This routine is invoked to disable device interrupt and disassociate 13291 * the driver's interrupt handler(s) from interrupt vector(s) to device 13292 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 13293 * will release the interrupt vector(s) for the message signaled interrupt. 13294 **/ 13295 static void 13296 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 13297 { 13298 /* Disable the currently initialized interrupt mode */ 13299 if (phba->intr_type == MSIX) { 13300 int index; 13301 struct lpfc_hba_eq_hdl *eqhdl; 13302 13303 /* Free up MSI-X multi-message vectors */ 13304 for (index = 0; index < phba->cfg_irq_chann; index++) { 13305 eqhdl = lpfc_get_eq_hdl(index); 13306 lpfc_irq_clear_aff(eqhdl); 13307 free_irq(eqhdl->irq, eqhdl); 13308 } 13309 } else { 13310 free_irq(phba->pcidev->irq, phba); 13311 } 13312 13313 pci_free_irq_vectors(phba->pcidev); 13314 13315 /* Reset interrupt management states */ 13316 phba->intr_type = NONE; 13317 phba->sli.slistat.sli_intr = 0; 13318 } 13319 13320 /** 13321 * lpfc_unset_hba - Unset SLI3 hba device initialization 13322 * @phba: pointer to lpfc hba data structure. 13323 * 13324 * This routine is invoked to unset the HBA device initialization steps to 13325 * a device with SLI-3 interface spec. 13326 **/ 13327 static void 13328 lpfc_unset_hba(struct lpfc_hba *phba) 13329 { 13330 set_bit(FC_UNLOADING, &phba->pport->load_flag); 13331 13332 kfree(phba->vpi_bmask); 13333 kfree(phba->vpi_ids); 13334 13335 lpfc_stop_hba_timers(phba); 13336 13337 phba->pport->work_port_events = 0; 13338 13339 lpfc_sli_hba_down(phba); 13340 13341 lpfc_sli_brdrestart(phba); 13342 13343 lpfc_sli_disable_intr(phba); 13344 13345 return; 13346 } 13347 13348 /** 13349 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 13350 * @phba: Pointer to HBA context object. 13351 * 13352 * This function is called in the SLI4 code path to wait for completion 13353 * of device's XRIs exchange busy. It will check the XRI exchange busy 13354 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 13355 * that, it will check the XRI exchange busy on outstanding FCP and ELS 13356 * I/Os every 30 seconds, log error message, and wait forever. Only when 13357 * all XRI exchange busy complete, the driver unload shall proceed with 13358 * invoking the function reset ioctl mailbox command to the CNA and the 13359 * the rest of the driver unload resource release. 13360 **/ 13361 static void 13362 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 13363 { 13364 struct lpfc_sli4_hdw_queue *qp; 13365 int idx, ccnt; 13366 int wait_time = 0; 13367 int io_xri_cmpl = 1; 13368 int nvmet_xri_cmpl = 1; 13369 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13370 13371 /* Driver just aborted IOs during the hba_unset process. Pause 13372 * here to give the HBA time to complete the IO and get entries 13373 * into the abts lists. 13374 */ 13375 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5); 13376 13377 /* Wait for NVME pending IO to flush back to transport. 
*/ 13378 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13379 lpfc_nvme_wait_for_io_drain(phba); 13380 13381 ccnt = 0; 13382 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13383 qp = &phba->sli4_hba.hdwq[idx]; 13384 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); 13385 if (!io_xri_cmpl) /* if list is NOT empty */ 13386 ccnt++; 13387 } 13388 if (ccnt) 13389 io_xri_cmpl = 0; 13390 13391 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13392 nvmet_xri_cmpl = 13393 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13394 } 13395 13396 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { 13397 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 13398 if (!nvmet_xri_cmpl) 13399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13400 "6424 NVMET XRI exchange busy " 13401 "wait time: %d seconds.\n", 13402 wait_time/1000); 13403 if (!io_xri_cmpl) 13404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13405 "6100 IO XRI exchange busy " 13406 "wait time: %d seconds.\n", 13407 wait_time/1000); 13408 if (!els_xri_cmpl) 13409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 13410 "2878 ELS XRI exchange busy " 13411 "wait time: %d seconds.\n", 13412 wait_time/1000); 13413 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 13414 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 13415 } else { 13416 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 13417 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 13418 } 13419 13420 ccnt = 0; 13421 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 13422 qp = &phba->sli4_hba.hdwq[idx]; 13423 io_xri_cmpl = list_empty( 13424 &qp->lpfc_abts_io_buf_list); 13425 if (!io_xri_cmpl) /* if list is NOT empty */ 13426 ccnt++; 13427 } 13428 if (ccnt) 13429 io_xri_cmpl = 0; 13430 13431 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13432 nvmet_xri_cmpl = list_empty( 13433 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 13434 } 13435 els_xri_cmpl = 13436 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 13437 13438 } 13439 } 13440 13441 /** 13442 * lpfc_sli4_hba_unset - Unset the fcoe hba 13443 * @phba: Pointer to HBA context object. 13444 * 13445 * This function is called in the SLI4 code path to reset the HBA's FCoE 13446 * function. The caller is not required to hold any lock. This routine 13447 * issues PCI function reset mailbox command to reset the FCoE function. 13448 * At the end of the function, it calls lpfc_hba_down_post function to 13449 * free any pending commands. 13450 **/ 13451 static void 13452 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 13453 { 13454 int wait_cnt = 0; 13455 LPFC_MBOXQ_t *mboxq; 13456 struct pci_dev *pdev = phba->pcidev; 13457 13458 lpfc_stop_hba_timers(phba); 13459 hrtimer_cancel(&phba->cmf_stats_timer); 13460 hrtimer_cancel(&phba->cmf_timer); 13461 13462 if (phba->pport) 13463 phba->sli4_hba.intr_enable = 0; 13464 13465 /* 13466 * Gracefully wait out the potential current outstanding asynchronous 13467 * mailbox command. 
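 * The wait below is bounded: it polls every 10ms for up to
 * LPFC_ACTIVE_MBOX_WAIT_CNT iterations, after which any command still
 * active is forcibly completed with MBX_NOT_FINISHED and released.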
13468 */ 13469 13470 /* First, block any pending async mailbox command from posted */ 13471 spin_lock_irq(&phba->hbalock); 13472 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 13473 spin_unlock_irq(&phba->hbalock); 13474 /* Now, trying to wait it out if we can */ 13475 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13476 msleep(10); 13477 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 13478 break; 13479 } 13480 /* Forcefully release the outstanding mailbox command if timed out */ 13481 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 13482 spin_lock_irq(&phba->hbalock); 13483 mboxq = phba->sli.mbox_active; 13484 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 13485 __lpfc_mbox_cmpl_put(phba, mboxq); 13486 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13487 phba->sli.mbox_active = NULL; 13488 spin_unlock_irq(&phba->hbalock); 13489 } 13490 13491 /* Abort all iocbs associated with the hba */ 13492 lpfc_sli_hba_iocb_abort(phba); 13493 13494 if (!pci_channel_offline(phba->pcidev)) 13495 /* Wait for completion of device XRI exchange busy */ 13496 lpfc_sli4_xri_exchange_busy_wait(phba); 13497 13498 /* per-phba callback de-registration for hotplug event */ 13499 if (phba->pport) 13500 lpfc_cpuhp_remove(phba); 13501 13502 /* Disable PCI subsystem interrupt */ 13503 lpfc_sli4_disable_intr(phba); 13504 13505 /* Disable SR-IOV if enabled */ 13506 if (phba->cfg_sriov_nr_virtfn) 13507 pci_disable_sriov(pdev); 13508 13509 /* Stop kthread signal shall trigger work_done one more time */ 13510 kthread_stop(phba->worker_thread); 13511 13512 /* Disable FW logging to host memory */ 13513 lpfc_ras_stop_fwlog(phba); 13514 13515 /* Reset SLI4 HBA FCoE function */ 13516 lpfc_pci_function_reset(phba); 13517 13518 /* release all queue allocated resources. */ 13519 lpfc_sli4_queue_destroy(phba); 13520 13521 /* Free RAS DMA memory */ 13522 if (phba->ras_fwlog.ras_enabled) 13523 lpfc_sli4_ras_dma_free(phba); 13524 13525 /* Stop the SLI4 device port */ 13526 if (phba->pport) 13527 phba->pport->work_port_events = 0; 13528 } 13529 13530 static uint32_t 13531 lpfc_cgn_crc32(uint32_t crc, u8 byte) 13532 { 13533 uint32_t msb = 0; 13534 uint32_t bit; 13535 13536 for (bit = 0; bit < 8; bit++) { 13537 msb = (crc >> 31) & 1; 13538 crc <<= 1; 13539 13540 if (msb ^ (byte & 1)) { 13541 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER; 13542 crc |= 1; 13543 } 13544 byte >>= 1; 13545 } 13546 return crc; 13547 } 13548 13549 static uint32_t 13550 lpfc_cgn_reverse_bits(uint32_t wd) 13551 { 13552 uint32_t result = 0; 13553 uint32_t i; 13554 13555 for (i = 0; i < 32; i++) { 13556 result <<= 1; 13557 result |= (1 & (wd >> i)); 13558 } 13559 return result; 13560 } 13561 13562 /* 13563 * The routine corresponds with the algorithm the HBA firmware 13564 * uses to validate the data integrity. 
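 * lpfc_cgn_crc32() folds each byte in LSB-first bit order, XOR-ing in
 * LPFC_CGN_CRC32_MAGIC_NUMBER whenever the shifted-out MSB differs
 * from the incoming bit; the final value is bit-reversed and inverted.
 * Callers seed with LPFC_CGN_CRC32_SEED, e.g.
 *   crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
 *                             LPFC_CGN_CRC32_SEED);
 * as done when the congestion buffer and stats are (re)initialized.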
13565 */ 13566 uint32_t 13567 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc) 13568 { 13569 uint32_t i; 13570 uint32_t result; 13571 uint8_t *data = (uint8_t *)ptr; 13572 13573 for (i = 0; i < byteLen; ++i) 13574 crc = lpfc_cgn_crc32(crc, data[i]); 13575 13576 result = ~lpfc_cgn_reverse_bits(crc); 13577 return result; 13578 } 13579 13580 void 13581 lpfc_init_congestion_buf(struct lpfc_hba *phba) 13582 { 13583 struct lpfc_cgn_info *cp; 13584 uint16_t size; 13585 uint32_t crc; 13586 13587 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13588 "6235 INIT Congestion Buffer %p\n", phba->cgn_i); 13589 13590 if (!phba->cgn_i) 13591 return; 13592 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13593 13594 atomic_set(&phba->cgn_fabric_warn_cnt, 0); 13595 atomic_set(&phba->cgn_fabric_alarm_cnt, 0); 13596 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 13597 atomic_set(&phba->cgn_sync_warn_cnt, 0); 13598 13599 atomic_set(&phba->cgn_driver_evt_cnt, 0); 13600 atomic_set(&phba->cgn_latency_evt_cnt, 0); 13601 atomic64_set(&phba->cgn_latency_evt, 0); 13602 phba->cgn_evt_minute = 0; 13603 13604 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); 13605 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); 13606 cp->cgn_info_version = LPFC_CGN_INFO_V4; 13607 13608 /* cgn parameters */ 13609 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; 13610 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0; 13611 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; 13612 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; 13613 13614 lpfc_cgn_update_tstamp(phba, &cp->base_time); 13615 13616 /* Fill in default LUN qdepth */ 13617 if (phba->pport) { 13618 size = (uint16_t)(phba->pport->cfg_lun_queue_depth); 13619 cp->cgn_lunq = cpu_to_le16(size); 13620 } 13621 13622 /* last used Index initialized to 0xff already */ 13623 13624 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13625 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13626 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13627 cp->cgn_info_crc = cpu_to_le32(crc); 13628 13629 phba->cgn_evt_timestamp = jiffies + 13630 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); 13631 } 13632 13633 void 13634 lpfc_init_congestion_stat(struct lpfc_hba *phba) 13635 { 13636 struct lpfc_cgn_info *cp; 13637 uint32_t crc; 13638 13639 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, 13640 "6236 INIT Congestion Stat %p\n", phba->cgn_i); 13641 13642 if (!phba->cgn_i) 13643 return; 13644 13645 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 13646 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); 13647 13648 lpfc_cgn_update_tstamp(phba, &cp->stat_start); 13649 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13650 cp->cgn_info_crc = cpu_to_le32(crc); 13651 } 13652 13653 /** 13654 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA 13655 * @phba: Pointer to hba context object. 13656 * @reg: flag to determine register or unregister. 
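 *
 * reg > 0 registers the congestion info buffer with the port (buffer
 * count 1); reg == 0 unregisters it (buffer count 0). Returns 0 on
 * success, -ENOMEM if the mailbox allocation fails, and -ENXIO if no
 * congestion buffer exists or the mailbox command fails.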
13657 */ 13658 static int 13659 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg) 13660 { 13661 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf; 13662 union lpfc_sli4_cfg_shdr *shdr; 13663 uint32_t shdr_status, shdr_add_status; 13664 LPFC_MBOXQ_t *mboxq; 13665 int length, rc; 13666 13667 if (!phba->cgn_i) 13668 return -ENXIO; 13669 13670 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13671 if (!mboxq) { 13672 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13673 "2641 REG_CONGESTION_BUF mbox allocation fail: " 13674 "HBA state x%x reg %d\n", 13675 phba->pport->port_state, reg); 13676 return -ENOMEM; 13677 } 13678 13679 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) - 13680 sizeof(struct lpfc_sli4_cfg_mhdr)); 13681 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13682 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length, 13683 LPFC_SLI4_MBX_EMBED); 13684 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf; 13685 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1); 13686 if (reg > 0) 13687 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1); 13688 else 13689 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0); 13690 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info); 13691 reg_congestion_buf->addr_lo = 13692 putPaddrLow(phba->cgn_i->phys); 13693 reg_congestion_buf->addr_hi = 13694 putPaddrHigh(phba->cgn_i->phys); 13695 13696 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13697 shdr = (union lpfc_sli4_cfg_shdr *) 13698 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 13699 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13700 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13701 &shdr->response); 13702 mempool_free(mboxq, phba->mbox_mem_pool); 13703 if (shdr_status || shdr_add_status || rc) { 13704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13705 "2642 REG_CONGESTION_BUF mailbox " 13706 "failed with status x%x add_status x%x," 13707 " mbx status x%x reg %d\n", 13708 shdr_status, shdr_add_status, rc, reg); 13709 return -ENXIO; 13710 } 13711 return 0; 13712 } 13713 13714 int 13715 lpfc_unreg_congestion_buf(struct lpfc_hba *phba) 13716 { 13717 lpfc_cmf_stop(phba); 13718 return __lpfc_reg_congestion_buf(phba, 0); 13719 } 13720 13721 int 13722 lpfc_reg_congestion_buf(struct lpfc_hba *phba) 13723 { 13724 return __lpfc_reg_congestion_buf(phba, 1); 13725 } 13726 13727 /** 13728 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 13729 * @phba: Pointer to HBA context object. 13730 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 13731 * 13732 * This function is called in the SLI4 code path to read the port's 13733 * sli4 capabilities. 13734 * 13735 * This function may be be called from any context that can block-wait 13736 * for the completion. The expectation is that this routine is called 13737 * typically from probe_one or from the online routine. 13738 **/ 13739 int 13740 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 13741 { 13742 int rc; 13743 struct lpfc_mqe *mqe = &mboxq->u.mqe; 13744 struct lpfc_pc_sli4_params *sli4_params; 13745 uint32_t mbox_tmo; 13746 int length; 13747 bool exp_wqcq_pages = true; 13748 struct lpfc_sli4_parameters *mbx_sli4_parameters; 13749 13750 /* 13751 * By default, the driver assumes the SLI4 port requires RPI 13752 * header postings. The SLI4_PARAM response will correct this 13753 * assumption. 
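 * If issuing the mailbox fails, the error is returned to the caller
 * and the conservative default set here (RPI headers in use) is left
 * in place.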
13754 */ 13755 phba->sli4_hba.rpi_hdrs_in_use = 1; 13756 13757 /* Read the port's SLI4 Config Parameters */ 13758 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 13759 sizeof(struct lpfc_sli4_cfg_mhdr)); 13760 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 13761 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 13762 length, LPFC_SLI4_MBX_EMBED); 13763 if (!phba->sli4_hba.intr_enable) 13764 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13765 else { 13766 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 13767 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 13768 } 13769 if (unlikely(rc)) 13770 return rc; 13771 sli4_params = &phba->sli4_hba.pc_sli4_params; 13772 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 13773 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 13774 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 13775 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 13776 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 13777 mbx_sli4_parameters); 13778 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 13779 mbx_sli4_parameters); 13780 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 13781 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 13782 else 13783 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 13784 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 13785 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, 13786 mbx_sli4_parameters); 13787 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 13788 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 13789 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 13790 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 13791 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 13792 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); 13793 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); 13794 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 13795 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); 13796 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); 13797 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 13798 mbx_sli4_parameters); 13799 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); 13800 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 13801 mbx_sli4_parameters); 13802 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 13803 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 13804 sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters); 13805 13806 /* Check for Extended Pre-Registered SGL support */ 13807 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); 13808 13809 /* Check for firmware nvme support */ 13810 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && 13811 bf_get(cfg_xib, mbx_sli4_parameters)); 13812 13813 if (rc) { 13814 /* Save this to indicate the Firmware supports NVME */ 13815 sli4_params->nvme = 1; 13816 13817 /* Firmware NVME support, check driver FC4 NVME support */ 13818 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) { 13819 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13820 "6133 Disabling NVME support: " 13821 "FC4 type not supported: x%x\n", 13822 phba->cfg_enable_fc4_type); 13823 goto fcponly; 13824 } 13825 } else { 13826 /* No firmware NVME support, check driver FC4 NVME support */ 13827 sli4_params->nvme = 0; 13828 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 13830 "6101 Disabling NVME 
support: Not " 13831 "supported by firmware (%d %d) x%x\n", 13832 bf_get(cfg_nvme, mbx_sli4_parameters), 13833 bf_get(cfg_xib, mbx_sli4_parameters), 13834 phba->cfg_enable_fc4_type); 13835 fcponly: 13836 phba->nvmet_support = 0; 13837 phba->cfg_nvmet_mrq = 0; 13838 phba->cfg_nvme_seg_cnt = 0; 13839 13840 /* If no FC4 type support, move to just SCSI support */ 13841 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 13842 return -ENODEV; 13843 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; 13844 } 13845 } 13846 13847 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to 13848 * accommodate 512K and 1M IOs in a single nvme buf. 13849 */ 13850 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 13851 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 13852 13853 /* Enable embedded Payload BDE if support is indicated */ 13854 if (bf_get(cfg_pbde, mbx_sli4_parameters)) 13855 phba->cfg_enable_pbde = 1; 13856 else 13857 phba->cfg_enable_pbde = 0; 13858 13859 /* 13860 * To support Suppress Response feature we must satisfy 3 conditions. 13861 * lpfc_suppress_rsp module parameter must be set (default). 13862 * In SLI4-Parameters Descriptor: 13863 * Extended Inline Buffers (XIB) must be supported. 13864 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported 13865 * (double negative). 13866 */ 13867 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) && 13868 !(bf_get(cfg_nosr, mbx_sli4_parameters))) 13869 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP; 13870 else 13871 phba->cfg_suppress_rsp = 0; 13872 13873 if (bf_get(cfg_eqdr, mbx_sli4_parameters)) 13874 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR; 13875 13876 /* Make sure that sge_supp_len can be handled by the driver */ 13877 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 13878 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 13879 13880 rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len); 13881 if (unlikely(rc)) { 13882 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13883 "6400 Can't set dma maximum segment size\n"); 13884 return rc; 13885 } 13886 13887 /* 13888 * Check whether the adapter supports an embedded copy of the 13889 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order 13890 * to use this option, 128-byte WQEs must be used. 
13891 */ 13892 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 13893 phba->fcp_embed_io = 1; 13894 else 13895 phba->fcp_embed_io = 0; 13896 13897 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, 13898 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", 13899 bf_get(cfg_xib, mbx_sli4_parameters), 13900 phba->cfg_enable_pbde, 13901 phba->fcp_embed_io, sli4_params->nvme, 13902 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 13903 13904 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 13905 LPFC_SLI_INTF_IF_TYPE_2) && 13906 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 13907 LPFC_SLI_INTF_FAMILY_LNCR_A0)) 13908 exp_wqcq_pages = false; 13909 13910 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 13911 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 13912 exp_wqcq_pages && 13913 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 13914 phba->enab_exp_wqcq_pages = 1; 13915 else 13916 phba->enab_exp_wqcq_pages = 0; 13917 /* 13918 * Check if the SLI port supports MDS Diagnostics 13919 */ 13920 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 13921 phba->mds_diags_support = 1; 13922 else 13923 phba->mds_diags_support = 0; 13924 13925 /* 13926 * Check if the SLI port supports NSLER 13927 */ 13928 if (bf_get(cfg_nsler, mbx_sli4_parameters)) 13929 phba->nsler = 1; 13930 else 13931 phba->nsler = 0; 13932 13933 return 0; 13934 } 13935 13936 /** 13937 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 13938 * @pdev: pointer to PCI device 13939 * @pid: pointer to PCI device identifier 13940 * 13941 * This routine is to be called to attach a device with SLI-3 interface spec 13942 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 13943 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 13944 * information of the device and driver to see if the driver state that it can 13945 * support this kind of device. If the match is successful, the driver core 13946 * invokes this routine. If this routine determines it can claim the HBA, it 13947 * does all the initialization that it needs to do to handle the HBA properly. 
13948 * 13949 * Return code 13950 * 0 - driver can claim the device 13951 * negative value - driver can not claim the device 13952 **/ 13953 static int 13954 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 13955 { 13956 struct lpfc_hba *phba; 13957 struct lpfc_vport *vport = NULL; 13958 struct Scsi_Host *shost = NULL; 13959 int error; 13960 uint32_t cfg_mode, intr_mode; 13961 13962 /* Allocate memory for HBA structure */ 13963 phba = lpfc_hba_alloc(pdev); 13964 if (!phba) 13965 return -ENOMEM; 13966 13967 /* Perform generic PCI device enabling operation */ 13968 error = lpfc_enable_pci_dev(phba); 13969 if (error) 13970 goto out_free_phba; 13971 13972 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 13973 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 13974 if (error) 13975 goto out_disable_pci_dev; 13976 13977 /* Set up SLI-3 specific device PCI memory space */ 13978 error = lpfc_sli_pci_mem_setup(phba); 13979 if (error) { 13980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13981 "1402 Failed to set up pci memory space.\n"); 13982 goto out_disable_pci_dev; 13983 } 13984 13985 /* Set up SLI-3 specific device driver resources */ 13986 error = lpfc_sli_driver_resource_setup(phba); 13987 if (error) { 13988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13989 "1404 Failed to set up driver resource.\n"); 13990 goto out_unset_pci_mem_s3; 13991 } 13992 13993 /* Initialize and populate the iocb list per host */ 13994 13995 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 13996 if (error) { 13997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13998 "1405 Failed to initialize iocb list.\n"); 13999 goto out_unset_driver_resource_s3; 14000 } 14001 14002 /* Set up common device driver resources */ 14003 error = lpfc_setup_driver_resource_phase2(phba); 14004 if (error) { 14005 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14006 "1406 Failed to set up driver resource.\n"); 14007 goto out_free_iocb_list; 14008 } 14009 14010 /* Get the default values for Model Name and Description */ 14011 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14012 14013 /* Create SCSI host to the physical port */ 14014 error = lpfc_create_shost(phba); 14015 if (error) { 14016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14017 "1407 Failed to create scsi host.\n"); 14018 goto out_unset_driver_resource; 14019 } 14020 14021 /* Configure sysfs attributes */ 14022 vport = phba->pport; 14023 error = lpfc_alloc_sysfs_attr(vport); 14024 if (error) { 14025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14026 "1476 Failed to allocate sysfs attr\n"); 14027 goto out_destroy_shost; 14028 } 14029 14030 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14031 /* Now, trying to enable interrupt and bring up the device */ 14032 cfg_mode = phba->cfg_use_msi; 14033 while (true) { 14034 /* Put device to a known state before enabling interrupt */ 14035 lpfc_stop_port(phba); 14036 /* Configure and enable interrupt */ 14037 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 14038 if (intr_mode == LPFC_INTR_ERROR) { 14039 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14040 "0431 Failed to enable interrupt.\n"); 14041 error = -ENODEV; 14042 goto out_free_sysfs_attr; 14043 } 14044 /* SLI-3 HBA setup */ 14045 if (lpfc_sli_hba_setup(phba)) { 14046 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14047 "1477 Failed to set up hba\n"); 14048 error = -ENODEV; 14049 goto out_remove_device; 14050 } 14051 14052 /* Wait 50ms for the interrupts of previous mailbox commands */ 14053 
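		/* Note: the check after this delay verifies that interrupts
		 * were actually delivered (sli.slistat.sli_intr); INTx mode
		 * is accepted unconditionally, otherwise the current mode is
		 * disabled and the loop retries with the next lower mode
		 * (MSI-X -> MSI -> INTx).
		 */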
msleep(50); 14054 /* Check active interrupts on message signaled interrupts */ 14055 if (intr_mode == 0 || 14056 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 14057 /* Log the current active interrupt mode */ 14058 phba->intr_mode = intr_mode; 14059 lpfc_log_intr_mode(phba, intr_mode); 14060 break; 14061 } else { 14062 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14063 "0447 Configure interrupt mode (%d) " 14064 "failed active interrupt test.\n", 14065 intr_mode); 14066 /* Disable the current interrupt mode */ 14067 lpfc_sli_disable_intr(phba); 14068 /* Try next level of interrupt mode */ 14069 cfg_mode = --intr_mode; 14070 } 14071 } 14072 14073 /* Perform post initialization setup */ 14074 lpfc_post_init_setup(phba); 14075 14076 /* Check if there are static vports to be created. */ 14077 lpfc_create_static_vport(phba); 14078 14079 return 0; 14080 14081 out_remove_device: 14082 lpfc_unset_hba(phba); 14083 out_free_sysfs_attr: 14084 lpfc_free_sysfs_attr(vport); 14085 out_destroy_shost: 14086 lpfc_destroy_shost(phba); 14087 out_unset_driver_resource: 14088 lpfc_unset_driver_resource_phase2(phba); 14089 out_free_iocb_list: 14090 lpfc_free_iocb_list(phba); 14091 out_unset_driver_resource_s3: 14092 lpfc_sli_driver_resource_unset(phba); 14093 out_unset_pci_mem_s3: 14094 lpfc_sli_pci_mem_unset(phba); 14095 out_disable_pci_dev: 14096 lpfc_disable_pci_dev(phba); 14097 if (shost) 14098 scsi_host_put(shost); 14099 out_free_phba: 14100 lpfc_hba_free(phba); 14101 return error; 14102 } 14103 14104 /** 14105 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 14106 * @pdev: pointer to PCI device 14107 * 14108 * This routine is to be called to disattach a device with SLI-3 interface 14109 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 14110 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14111 * device to be removed from the PCI subsystem properly. 14112 **/ 14113 static void 14114 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 14115 { 14116 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14117 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14118 struct lpfc_vport **vports; 14119 struct lpfc_hba *phba = vport->phba; 14120 int i; 14121 14122 set_bit(FC_UNLOADING, &vport->load_flag); 14123 14124 lpfc_free_sysfs_attr(vport); 14125 14126 /* Release all the vports against this physical port */ 14127 vports = lpfc_create_vport_work_array(phba); 14128 if (vports != NULL) 14129 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14130 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14131 continue; 14132 fc_vport_terminate(vports[i]->fc_vport); 14133 } 14134 lpfc_destroy_vport_work_array(phba, vports); 14135 14136 /* Remove FC host with the physical port */ 14137 fc_remove_host(shost); 14138 scsi_remove_host(shost); 14139 14140 /* Clean up all nodes, mailboxes and IOs. */ 14141 lpfc_cleanup(vport); 14142 14143 /* 14144 * Bring down the SLI Layer. This step disable all interrupts, 14145 * clears the rings, discards all mailbox commands, and resets 14146 * the HBA. 
14147 */ 14148 14149 /* HBA interrupt will be disabled after this call */ 14150 lpfc_sli_hba_down(phba); 14151 /* Stop kthread signal shall trigger work_done one more time */ 14152 kthread_stop(phba->worker_thread); 14153 /* Final cleanup of txcmplq and reset the HBA */ 14154 lpfc_sli_brdrestart(phba); 14155 14156 kfree(phba->vpi_bmask); 14157 kfree(phba->vpi_ids); 14158 14159 lpfc_stop_hba_timers(phba); 14160 spin_lock_irq(&phba->port_list_lock); 14161 list_del_init(&vport->listentry); 14162 spin_unlock_irq(&phba->port_list_lock); 14163 14164 lpfc_debugfs_terminate(vport); 14165 14166 /* Disable SR-IOV if enabled */ 14167 if (phba->cfg_sriov_nr_virtfn) 14168 pci_disable_sriov(pdev); 14169 14170 /* Disable interrupt */ 14171 lpfc_sli_disable_intr(phba); 14172 14173 scsi_host_put(shost); 14174 14175 /* 14176 * Call scsi_free before mem_free since scsi bufs are released to their 14177 * corresponding pools here. 14178 */ 14179 lpfc_scsi_free(phba); 14180 lpfc_free_iocb_list(phba); 14181 14182 lpfc_mem_free_all(phba); 14183 14184 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 14185 phba->hbqslimp.virt, phba->hbqslimp.phys); 14186 14187 /* Free resources associated with SLI2 interface */ 14188 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 14189 phba->slim2p.virt, phba->slim2p.phys); 14190 14191 /* unmap adapter SLIM and Control Registers */ 14192 iounmap(phba->ctrl_regs_memmap_p); 14193 iounmap(phba->slim_memmap_p); 14194 14195 lpfc_hba_free(phba); 14196 14197 pci_release_mem_regions(pdev); 14198 pci_disable_device(pdev); 14199 } 14200 14201 /** 14202 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 14203 * @dev_d: pointer to device 14204 * 14205 * This routine is to be called from the kernel's PCI subsystem to support 14206 * system Power Management (PM) to device with SLI-3 interface spec. When 14207 * PM invokes this method, it quiesces the device by stopping the driver's 14208 * worker thread for the device, turning off device's interrupt and DMA, 14209 * and bring the device offline. Note that as the driver implements the 14210 * minimum PM requirements to a power-aware driver's PM support for the 14211 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 14212 * to the suspend() method call will be treated as SUSPEND and the driver will 14213 * fully reinitialize its device during resume() method call, the driver will 14214 * set device to PCI_D3hot state in PCI config space instead of setting it 14215 * according to the @msg provided by the PM. 14216 * 14217 * Return code 14218 * 0 - driver suspended the device 14219 * Error otherwise 14220 **/ 14221 static int __maybe_unused 14222 lpfc_pci_suspend_one_s3(struct device *dev_d) 14223 { 14224 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14225 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14226 14227 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14228 "0473 PCI device Power Management suspend.\n"); 14229 14230 /* Bring down the device */ 14231 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14232 lpfc_offline(phba); 14233 kthread_stop(phba->worker_thread); 14234 14235 /* Disable interrupt from device */ 14236 lpfc_sli_disable_intr(phba); 14237 14238 return 0; 14239 } 14240 14241 /** 14242 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 14243 * @dev_d: pointer to device 14244 * 14245 * This routine is to be called from the kernel's PCI subsystem to support 14246 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 14247 * invokes this method, it restores the device's PCI config space state and 14248 * fully reinitializes the device and brings it online. Note that as the 14249 * driver implements the minimum PM requirements to a power-aware driver's 14250 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 14251 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 14252 * driver will fully reinitialize its device during resume() method call, 14253 * the device will be set to PCI_D0 directly in PCI config space before 14254 * restoring the state. 14255 * 14256 * Return code 14257 * 0 - driver suspended the device 14258 * Error otherwise 14259 **/ 14260 static int __maybe_unused 14261 lpfc_pci_resume_one_s3(struct device *dev_d) 14262 { 14263 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 14264 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14265 uint32_t intr_mode; 14266 int error; 14267 14268 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14269 "0452 PCI device Power Management resume.\n"); 14270 14271 /* Startup the kernel thread for this host adapter. */ 14272 phba->worker_thread = kthread_run(lpfc_do_work, phba, 14273 "lpfc_worker_%d", phba->brd_no); 14274 if (IS_ERR(phba->worker_thread)) { 14275 error = PTR_ERR(phba->worker_thread); 14276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14277 "0434 PM resume failed to start worker " 14278 "thread: error=x%x.\n", error); 14279 return error; 14280 } 14281 14282 /* Init cpu_map array */ 14283 lpfc_cpu_map_array_init(phba); 14284 /* Init hba_eq_hdl array */ 14285 lpfc_hba_eq_hdl_array_init(phba); 14286 /* Configure and enable interrupt */ 14287 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14288 if (intr_mode == LPFC_INTR_ERROR) { 14289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14290 "0430 PM resume Failed to enable interrupt\n"); 14291 return -EIO; 14292 } else 14293 phba->intr_mode = intr_mode; 14294 14295 /* Restart HBA and bring it online */ 14296 lpfc_sli_brdrestart(phba); 14297 lpfc_online(phba); 14298 14299 /* Log the current active interrupt mode */ 14300 lpfc_log_intr_mode(phba, phba->intr_mode); 14301 14302 return 0; 14303 } 14304 14305 /** 14306 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 14307 * @phba: pointer to lpfc hba data structure. 14308 * 14309 * This routine is called to prepare the SLI3 device for PCI slot recover. It 14310 * aborts all the outstanding SCSI I/Os to the pci device. 14311 **/ 14312 static void 14313 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 14314 { 14315 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14316 "2723 PCI channel I/O abort preparing for recovery\n"); 14317 14318 /* 14319 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 14320 * and let the SCSI mid-layer to retry them to recover. 14321 */ 14322 lpfc_sli_abort_fcp_rings(phba); 14323 } 14324 14325 /** 14326 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 14327 * @phba: pointer to lpfc hba data structure. 14328 * 14329 * This routine is called to prepare the SLI3 device for PCI slot reset. It 14330 * disables the device interrupt and pci device, and aborts the internal FCP 14331 * pending I/Os. 
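 *
 * Roughly, the sequence below is: block management I/O, block SCSI device
 * I/O, flush the outstanding FCP rings, stop the HBA timers, and finally
 * disable the interrupt and the PCI device itself.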
14332 **/ 14333 static void 14334 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 14335 { 14336 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14337 "2710 PCI channel disable preparing for reset\n"); 14338 14339 /* Block any management I/Os to the device */ 14340 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 14341 14342 /* Block all SCSI devices' I/Os on the host */ 14343 lpfc_scsi_dev_block(phba); 14344 14345 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 14346 lpfc_sli_flush_io_rings(phba); 14347 14348 /* stop all timers */ 14349 lpfc_stop_hba_timers(phba); 14350 14351 /* Disable interrupt and pci device */ 14352 lpfc_sli_disable_intr(phba); 14353 pci_disable_device(phba->pcidev); 14354 } 14355 14356 /** 14357 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 14358 * @phba: pointer to lpfc hba data structure. 14359 * 14360 * This routine is called to prepare the SLI3 device for PCI slot permanently 14361 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 14362 * pending I/Os. 14363 **/ 14364 static void 14365 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 14366 { 14367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14368 "2711 PCI channel permanent disable for failure\n"); 14369 /* Block all SCSI devices' I/Os on the host */ 14370 lpfc_scsi_dev_block(phba); 14371 lpfc_sli4_prep_dev_for_reset(phba); 14372 14373 /* stop all timers */ 14374 lpfc_stop_hba_timers(phba); 14375 14376 /* Clean up all driver's outstanding SCSI I/Os */ 14377 lpfc_sli_flush_io_rings(phba); 14378 } 14379 14380 /** 14381 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 14382 * @pdev: pointer to PCI device. 14383 * @state: the current PCI connection state. 14384 * 14385 * This routine is called from the PCI subsystem for I/O error handling to 14386 * device with SLI-3 interface spec. This function is called by the PCI 14387 * subsystem after a PCI bus error affecting this device has been detected. 14388 * When this function is invoked, it will need to stop all the I/Os and 14389 * interrupt(s) to the device. Once that is done, it will return 14390 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 14391 * as desired. 
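 *
 * State handling below: pci_channel_io_normal prepares the port for
 * recovery, pci_channel_io_frozen prepares it for a slot reset,
 * pci_channel_io_perm_failure prepares it for permanent disablement, and
 * any unknown state is treated like a frozen channel.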
14392 * 14393 * Return codes 14394 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 14395 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 14396 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14397 **/ 14398 static pci_ers_result_t 14399 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 14400 { 14401 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14402 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14403 14404 switch (state) { 14405 case pci_channel_io_normal: 14406 /* Non-fatal error, prepare for recovery */ 14407 lpfc_sli_prep_dev_for_recover(phba); 14408 return PCI_ERS_RESULT_CAN_RECOVER; 14409 case pci_channel_io_frozen: 14410 /* Fatal error, prepare for slot reset */ 14411 lpfc_sli_prep_dev_for_reset(phba); 14412 return PCI_ERS_RESULT_NEED_RESET; 14413 case pci_channel_io_perm_failure: 14414 /* Permanent failure, prepare for device down */ 14415 lpfc_sli_prep_dev_for_perm_failure(phba); 14416 return PCI_ERS_RESULT_DISCONNECT; 14417 default: 14418 /* Unknown state, prepare and request slot reset */ 14419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14420 "0472 Unknown PCI error state: x%x\n", state); 14421 lpfc_sli_prep_dev_for_reset(phba); 14422 return PCI_ERS_RESULT_NEED_RESET; 14423 } 14424 } 14425 14426 /** 14427 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 14428 * @pdev: pointer to PCI device. 14429 * 14430 * This routine is called from the PCI subsystem for error handling to 14431 * device with SLI-3 interface spec. This is called after PCI bus has been 14432 * reset to restart the PCI card from scratch, as if from a cold-boot. 14433 * During the PCI subsystem error recovery, after driver returns 14434 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 14435 * recovery and then call this routine before calling the .resume method 14436 * to recover the device. This function will initialize the HBA device, 14437 * enable the interrupt, but it will just put the HBA to offline state 14438 * without passing any I/O traffic. 14439 * 14440 * Return codes 14441 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 14442 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 14443 */ 14444 static pci_ers_result_t 14445 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 14446 { 14447 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14448 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14449 struct lpfc_sli *psli = &phba->sli; 14450 uint32_t intr_mode; 14451 14452 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 14453 if (pci_enable_device_mem(pdev)) { 14454 printk(KERN_ERR "lpfc: Cannot re-enable " 14455 "PCI device after reset.\n"); 14456 return PCI_ERS_RESULT_DISCONNECT; 14457 } 14458 14459 pci_restore_state(pdev); 14460 14461 /* 14462 * As the new kernel behavior of pci_restore_state() API call clears 14463 * device saved_state flag, need to save the restored state again. 
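 * Without it, a later pci_restore_state() would find no valid saved state
 * to apply.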
14464 */ 14465 pci_save_state(pdev); 14466 14467 if (pdev->is_busmaster) 14468 pci_set_master(pdev); 14469 14470 spin_lock_irq(&phba->hbalock); 14471 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 14472 spin_unlock_irq(&phba->hbalock); 14473 14474 /* Configure and enable interrupt */ 14475 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14476 if (intr_mode == LPFC_INTR_ERROR) { 14477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14478 "0427 Cannot re-enable interrupt after " 14479 "slot reset.\n"); 14480 return PCI_ERS_RESULT_DISCONNECT; 14481 } else 14482 phba->intr_mode = intr_mode; 14483 14484 /* Take device offline, it will perform cleanup */ 14485 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 14486 lpfc_offline(phba); 14487 lpfc_sli_brdrestart(phba); 14488 14489 /* Log the current active interrupt mode */ 14490 lpfc_log_intr_mode(phba, phba->intr_mode); 14491 14492 return PCI_ERS_RESULT_RECOVERED; 14493 } 14494 14495 /** 14496 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 14497 * @pdev: pointer to PCI device 14498 * 14499 * This routine is called from the PCI subsystem for error handling to device 14500 * with SLI-3 interface spec. It is called when kernel error recovery tells 14501 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 14502 * error recovery. After this call, traffic can start to flow from this device 14503 * again. 14504 */ 14505 static void 14506 lpfc_io_resume_s3(struct pci_dev *pdev) 14507 { 14508 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14509 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 14510 14511 /* Bring device online, it will be no-op for non-fatal error resume */ 14512 lpfc_online(phba); 14513 } 14514 14515 /** 14516 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 14517 * @phba: pointer to lpfc hba data structure. 14518 * 14519 * returns the number of ELS/CT IOCBs to reserve 14520 **/ 14521 int 14522 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 14523 { 14524 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 14525 14526 if (phba->sli_rev == LPFC_SLI_REV4) { 14527 if (max_xri <= 100) 14528 return 10; 14529 else if (max_xri <= 256) 14530 return 25; 14531 else if (max_xri <= 512) 14532 return 50; 14533 else if (max_xri <= 1024) 14534 return 100; 14535 else if (max_xri <= 1536) 14536 return 150; 14537 else if (max_xri <= 2048) 14538 return 200; 14539 else 14540 return 250; 14541 } else 14542 return 0; 14543 } 14544 14545 /** 14546 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve 14547 * @phba: pointer to lpfc hba data structure. 14548 * 14549 * returns the number of ELS/CT + NVMET IOCBs to reserve 14550 **/ 14551 int 14552 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) 14553 { 14554 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba); 14555 14556 if (phba->nvmet_support) 14557 max_xri += LPFC_NVMET_BUF_POST; 14558 return max_xri; 14559 } 14560 14561 14562 static int 14563 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, 14564 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 14565 const struct firmware *fw) 14566 { 14567 int rc; 14568 u8 sli_family; 14569 14570 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 14571 /* Three cases: (1) FW was not supported on the detected adapter. 14572 * (2) FW update has been locked out administratively. 14573 * (3) Some other error during FW update. 
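 * They are reported to the caller as -EINVAL, -EACCES and -EIO,
 * respectively.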
14574 * In each case, an unmaskable message is written to the console 14575 * for admin diagnosis. 14576 */ 14577 if (offset == ADD_STATUS_FW_NOT_SUPPORTED || 14578 (sli_family == LPFC_SLI_INTF_FAMILY_G6 && 14579 magic_number != MAGIC_NUMBER_G6) || 14580 (sli_family == LPFC_SLI_INTF_FAMILY_G7 && 14581 magic_number != MAGIC_NUMBER_G7) || 14582 (sli_family == LPFC_SLI_INTF_FAMILY_G7P && 14583 magic_number != MAGIC_NUMBER_G7P)) { 14584 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14585 "3030 This firmware version is not supported on" 14586 " this HBA model. Device:%x Magic:%x Type:%x " 14587 "ID:%x Size %d %zd\n", 14588 phba->pcidev->device, magic_number, ftype, fid, 14589 fsize, fw->size); 14590 rc = -EINVAL; 14591 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { 14592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14593 "3021 Firmware downloads have been prohibited " 14594 "by a system configuration setting on " 14595 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14596 "%zd\n", 14597 phba->pcidev->device, magic_number, ftype, fid, 14598 fsize, fw->size); 14599 rc = -EACCES; 14600 } else { 14601 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14602 "3022 FW Download failed. Add Status x%x " 14603 "Device:%x Magic:%x Type:%x ID:%x Size %d " 14604 "%zd\n", 14605 offset, phba->pcidev->device, magic_number, 14606 ftype, fid, fsize, fw->size); 14607 rc = -EIO; 14608 } 14609 return rc; 14610 } 14611 14612 /** 14613 * lpfc_write_firmware - attempt to write a firmware image to the port 14614 * @fw: pointer to firmware image returned from request_firmware. 14615 * @context: pointer to firmware image returned from request_firmware. 14616 * 14617 **/ 14618 static void 14619 lpfc_write_firmware(const struct firmware *fw, void *context) 14620 { 14621 struct lpfc_hba *phba = (struct lpfc_hba *)context; 14622 char fwrev[FW_REV_STR_SIZE]; 14623 struct lpfc_grp_hdr *image; 14624 struct list_head dma_buffer_list; 14625 int i, rc = 0; 14626 struct lpfc_dmabuf *dmabuf, *next; 14627 uint32_t offset = 0, temp_offset = 0; 14628 uint32_t magic_number, ftype, fid, fsize; 14629 14630 /* It can be null in no-wait mode, sanity check */ 14631 if (!fw) { 14632 rc = -ENXIO; 14633 goto out; 14634 } 14635 image = (struct lpfc_grp_hdr *)fw->data; 14636 14637 magic_number = be32_to_cpu(image->magic_number); 14638 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 14639 fid = bf_get_be32(lpfc_grp_hdr_id, image); 14640 fsize = be32_to_cpu(image->size); 14641 14642 INIT_LIST_HEAD(&dma_buffer_list); 14643 lpfc_decode_firmware_rev(phba, fwrev, 1); 14644 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 14645 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, 14646 "3023 Updating Firmware, Current Version:%s " 14647 "New Version:%s\n", 14648 fwrev, image->revision); 14649 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 14650 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 14651 GFP_KERNEL); 14652 if (!dmabuf) { 14653 rc = -ENOMEM; 14654 goto release_out; 14655 } 14656 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14657 SLI4_PAGE_SIZE, 14658 &dmabuf->phys, 14659 GFP_KERNEL); 14660 if (!dmabuf->virt) { 14661 kfree(dmabuf); 14662 rc = -ENOMEM; 14663 goto release_out; 14664 } 14665 list_add_tail(&dmabuf->list, &dma_buffer_list); 14666 } 14667 while (offset < fw->size) { 14668 temp_offset = offset; 14669 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 14670 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 14671 memcpy(dmabuf->virt, 14672 fw->data + temp_offset, 14673 fw->size - 
temp_offset); 14674 temp_offset = fw->size; 14675 break; 14676 } 14677 memcpy(dmabuf->virt, fw->data + temp_offset, 14678 SLI4_PAGE_SIZE); 14679 temp_offset += SLI4_PAGE_SIZE; 14680 } 14681 rc = lpfc_wr_object(phba, &dma_buffer_list, 14682 (fw->size - offset), &offset); 14683 if (rc) { 14684 rc = lpfc_log_write_firmware_error(phba, offset, 14685 magic_number, 14686 ftype, 14687 fid, 14688 fsize, 14689 fw); 14690 goto release_out; 14691 } 14692 } 14693 rc = offset; 14694 } else 14695 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, 14696 "3029 Skipped Firmware update, Current " 14697 "Version:%s New Version:%s\n", 14698 fwrev, image->revision); 14699 14700 release_out: 14701 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 14702 list_del(&dmabuf->list); 14703 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 14704 dmabuf->virt, dmabuf->phys); 14705 kfree(dmabuf); 14706 } 14707 release_firmware(fw); 14708 out: 14709 if (rc < 0) 14710 lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI, 14711 "3062 Firmware update error, status %d.\n", rc); 14712 else 14713 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, 14714 "3024 Firmware update success: size %d.\n", rc); 14715 } 14716 14717 /** 14718 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 14719 * @phba: pointer to lpfc hba data structure. 14720 * @fw_upgrade: which firmware to update. 14721 * 14722 * This routine is called to perform Linux generic firmware upgrade on device 14723 * that supports such feature. 14724 **/ 14725 int 14726 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 14727 { 14728 char file_name[ELX_FW_NAME_SIZE] = {0}; 14729 int ret; 14730 const struct firmware *fw; 14731 14732 /* Only supported on SLI4 interface type 2 for now */ 14733 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 14734 LPFC_SLI_INTF_IF_TYPE_2) 14735 return -EPERM; 14736 14737 scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName); 14738 14739 if (fw_upgrade == INT_FW_UPGRADE) { 14740 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 14741 file_name, &phba->pcidev->dev, 14742 GFP_KERNEL, (void *)phba, 14743 lpfc_write_firmware); 14744 } else if (fw_upgrade == RUN_FW_UPGRADE) { 14745 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 14746 if (!ret) 14747 lpfc_write_firmware(fw, (void *)phba); 14748 } else { 14749 ret = -EINVAL; 14750 } 14751 14752 return ret; 14753 } 14754 14755 /** 14756 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 14757 * @pdev: pointer to PCI device 14758 * @pid: pointer to PCI device identifier 14759 * 14760 * This routine is called from the kernel's PCI subsystem to device with 14761 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14762 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 14763 * information of the device and driver to see if the driver state that it 14764 * can support this kind of device. If the match is successful, the driver 14765 * core invokes this routine. If this routine determines it can claim the HBA, 14766 * it does all the initialization that it needs to do to handle the HBA 14767 * properly. 
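 *
 * Unlike the SLI-3 probe path, interrupts are configured and enabled here
 * before the SCSI host is created, the CPU-to-EQ affinity maps are
 * initialized, and, when NVMe initiator mode is enabled, an NVMe localport
 * is registered with the nvme_fc transport.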
14768 * 14769 * Return code 14770 * 0 - driver can claim the device 14771 * negative value - driver can not claim the device 14772 **/ 14773 static int 14774 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 14775 { 14776 struct lpfc_hba *phba; 14777 struct lpfc_vport *vport = NULL; 14778 struct Scsi_Host *shost = NULL; 14779 int error; 14780 uint32_t cfg_mode, intr_mode; 14781 14782 /* Allocate memory for HBA structure */ 14783 phba = lpfc_hba_alloc(pdev); 14784 if (!phba) 14785 return -ENOMEM; 14786 14787 INIT_LIST_HEAD(&phba->poll_list); 14788 14789 /* Perform generic PCI device enabling operation */ 14790 error = lpfc_enable_pci_dev(phba); 14791 if (error) 14792 goto out_free_phba; 14793 14794 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 14795 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 14796 if (error) 14797 goto out_disable_pci_dev; 14798 14799 /* Set up SLI-4 specific device PCI memory space */ 14800 error = lpfc_sli4_pci_mem_setup(phba); 14801 if (error) { 14802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14803 "1410 Failed to set up pci memory space.\n"); 14804 goto out_disable_pci_dev; 14805 } 14806 14807 /* Set up SLI-4 Specific device driver resources */ 14808 error = lpfc_sli4_driver_resource_setup(phba); 14809 if (error) { 14810 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14811 "1412 Failed to set up driver resource.\n"); 14812 goto out_unset_pci_mem_s4; 14813 } 14814 14815 INIT_LIST_HEAD(&phba->active_rrq_list); 14816 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 14817 14818 /* Set up common device driver resources */ 14819 error = lpfc_setup_driver_resource_phase2(phba); 14820 if (error) { 14821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14822 "1414 Failed to set up driver resource.\n"); 14823 goto out_unset_driver_resource_s4; 14824 } 14825 14826 /* Get the default values for Model Name and Description */ 14827 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 14828 14829 /* Now, trying to enable interrupt and bring up the device */ 14830 cfg_mode = phba->cfg_use_msi; 14831 14832 /* Put device to a known state before enabling interrupt */ 14833 phba->pport = NULL; 14834 lpfc_stop_port(phba); 14835 14836 /* Init cpu_map array */ 14837 lpfc_cpu_map_array_init(phba); 14838 14839 /* Init hba_eq_hdl array */ 14840 lpfc_hba_eq_hdl_array_init(phba); 14841 14842 /* Configure and enable interrupt */ 14843 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 14844 if (intr_mode == LPFC_INTR_ERROR) { 14845 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14846 "0426 Failed to enable interrupt.\n"); 14847 error = -ENODEV; 14848 goto out_unset_driver_resource; 14849 } 14850 /* Default to single EQ for non-MSI-X */ 14851 if (phba->intr_type != MSIX) { 14852 phba->cfg_irq_chann = 1; 14853 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14854 if (phba->nvmet_support) 14855 phba->cfg_nvmet_mrq = 1; 14856 } 14857 } 14858 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 14859 14860 /* Create SCSI host to the physical port */ 14861 error = lpfc_create_shost(phba); 14862 if (error) { 14863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14864 "1415 Failed to create scsi host.\n"); 14865 goto out_disable_intr; 14866 } 14867 vport = phba->pport; 14868 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 14869 14870 /* Configure sysfs attributes */ 14871 error = lpfc_alloc_sysfs_attr(vport); 14872 if (error) { 14873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14874 "1416 Failed to allocate sysfs attr\n"); 14875 goto 
out_destroy_shost; 14876 } 14877 14878 /* Set up SLI-4 HBA */ 14879 if (lpfc_sli4_hba_setup(phba)) { 14880 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14881 "1421 Failed to set up hba\n"); 14882 error = -ENODEV; 14883 goto out_free_sysfs_attr; 14884 } 14885 14886 /* Log the current active interrupt mode */ 14887 phba->intr_mode = intr_mode; 14888 lpfc_log_intr_mode(phba, intr_mode); 14889 14890 /* Perform post initialization setup */ 14891 lpfc_post_init_setup(phba); 14892 14893 /* NVME support in FW earlier in the driver load corrects the 14894 * FC4 type making a check for nvme_support unnecessary. 14895 */ 14896 if (phba->nvmet_support == 0) { 14897 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 14898 /* Create NVME binding with nvme_fc_transport. This 14899 * ensures the vport is initialized. If the localport 14900 * create fails, it should not unload the driver to 14901 * support field issues. 14902 */ 14903 error = lpfc_nvme_create_localport(vport); 14904 if (error) { 14905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 14906 "6004 NVME registration " 14907 "failed, error x%x\n", 14908 error); 14909 } 14910 } 14911 } 14912 14913 /* check for firmware upgrade or downgrade */ 14914 if (phba->cfg_request_firmware_upgrade) 14915 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 14916 14917 /* Check if there are static vports to be created. */ 14918 lpfc_create_static_vport(phba); 14919 14920 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); 14921 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); 14922 14923 return 0; 14924 14925 out_free_sysfs_attr: 14926 lpfc_free_sysfs_attr(vport); 14927 out_destroy_shost: 14928 lpfc_destroy_shost(phba); 14929 out_disable_intr: 14930 lpfc_sli4_disable_intr(phba); 14931 out_unset_driver_resource: 14932 lpfc_unset_driver_resource_phase2(phba); 14933 out_unset_driver_resource_s4: 14934 lpfc_sli4_driver_resource_unset(phba); 14935 out_unset_pci_mem_s4: 14936 lpfc_sli4_pci_mem_unset(phba); 14937 out_disable_pci_dev: 14938 lpfc_disable_pci_dev(phba); 14939 if (shost) 14940 scsi_host_put(shost); 14941 out_free_phba: 14942 lpfc_hba_free(phba); 14943 return error; 14944 } 14945 14946 /** 14947 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 14948 * @pdev: pointer to PCI device 14949 * 14950 * This routine is called from the kernel's PCI subsystem to device with 14951 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 14952 * removed from PCI bus, it performs all the necessary cleanup for the HBA 14953 * device to be removed from the PCI subsystem properly. 
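 *
 * Teardown proceeds from the transport layers inward: vports are
 * terminated, the FC/SCSI host is removed, the NVMe/NVMET local ports are
 * destroyed, and only then are the SLI-4 queues, PCI resources and the
 * driver's HBA structure released.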
14954 **/ 14955 static void 14956 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 14957 { 14958 struct Scsi_Host *shost = pci_get_drvdata(pdev); 14959 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 14960 struct lpfc_vport **vports; 14961 struct lpfc_hba *phba = vport->phba; 14962 int i; 14963 14964 /* Mark the device unloading flag */ 14965 set_bit(FC_UNLOADING, &vport->load_flag); 14966 if (phba->cgn_i) 14967 lpfc_unreg_congestion_buf(phba); 14968 14969 lpfc_free_sysfs_attr(vport); 14970 14971 /* Release all the vports against this physical port */ 14972 vports = lpfc_create_vport_work_array(phba); 14973 if (vports != NULL) 14974 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 14975 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 14976 continue; 14977 fc_vport_terminate(vports[i]->fc_vport); 14978 } 14979 lpfc_destroy_vport_work_array(phba, vports); 14980 14981 /* Remove FC host with the physical port */ 14982 fc_remove_host(shost); 14983 scsi_remove_host(shost); 14984 14985 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 14986 * localports are destroyed after to cleanup all transport memory. 14987 */ 14988 lpfc_cleanup(vport); 14989 lpfc_nvmet_destroy_targetport(phba); 14990 lpfc_nvme_destroy_localport(vport); 14991 14992 /* De-allocate multi-XRI pools */ 14993 if (phba->cfg_xri_rebalancing) 14994 lpfc_destroy_multixri_pools(phba); 14995 14996 /* 14997 * Bring down the SLI Layer. This step disables all interrupts, 14998 * clears the rings, discards all mailbox commands, and resets 14999 * the HBA FCoE function. 15000 */ 15001 lpfc_debugfs_terminate(vport); 15002 15003 lpfc_stop_hba_timers(phba); 15004 spin_lock_irq(&phba->port_list_lock); 15005 list_del_init(&vport->listentry); 15006 spin_unlock_irq(&phba->port_list_lock); 15007 15008 /* Perform scsi free before driver resource_unset since scsi 15009 * buffers are released to their corresponding pools here. 15010 */ 15011 lpfc_io_free(phba); 15012 lpfc_free_iocb_list(phba); 15013 lpfc_sli4_hba_unset(phba); 15014 15015 lpfc_unset_driver_resource_phase2(phba); 15016 lpfc_sli4_driver_resource_unset(phba); 15017 15018 /* Unmap adapter Control and Doorbell registers */ 15019 lpfc_sli4_pci_mem_unset(phba); 15020 15021 /* Release PCI resources and disable device's PCI function */ 15022 scsi_host_put(shost); 15023 lpfc_disable_pci_dev(phba); 15024 15025 /* Finally, free the driver's device data structure */ 15026 lpfc_hba_free(phba); 15027 15028 return; 15029 } 15030 15031 /** 15032 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 15033 * @dev_d: pointer to device 15034 * 15035 * This routine is called from the kernel's PCI subsystem to support system 15036 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 15037 * this method, it quiesces the device by stopping the driver's worker 15038 * thread for the device, turning off device's interrupt and DMA, and bring 15039 * the device offline. Note that as the driver implements the minimum PM 15040 * requirements to a power-aware driver's PM support for suspend/resume -- all 15041 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 15042 * method call will be treated as SUSPEND and the driver will fully 15043 * reinitialize its device during resume() method call, the driver will set 15044 * device to PCI_D3hot state in PCI config space instead of setting it 15045 * according to the @msg provided by the PM. 
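 *
 * In addition to the SLI-3 behaviour, the SLI-4 suspend path also tears
 * down the SLI-4 queue set after the interrupt has been disabled.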
15046 * 15047 * Return code 15048 * 0 - driver suspended the device 15049 * Error otherwise 15050 **/ 15051 static int __maybe_unused 15052 lpfc_pci_suspend_one_s4(struct device *dev_d) 15053 { 15054 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 15055 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15056 15057 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15058 "2843 PCI device Power Management suspend.\n"); 15059 15060 /* Bring down the device */ 15061 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 15062 lpfc_offline(phba); 15063 kthread_stop(phba->worker_thread); 15064 15065 /* Disable interrupt from device */ 15066 lpfc_sli4_disable_intr(phba); 15067 lpfc_sli4_queue_destroy(phba); 15068 15069 return 0; 15070 } 15071 15072 /** 15073 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 15074 * @dev_d: pointer to device 15075 * 15076 * This routine is called from the kernel's PCI subsystem to support system 15077 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 15078 * this method, it restores the device's PCI config space state and fully 15079 * reinitializes the device and brings it online. Note that as the driver 15080 * implements the minimum PM requirements to a power-aware driver's PM for 15081 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 15082 * to the suspend() method call will be treated as SUSPEND and the driver 15083 * will fully reinitialize its device during resume() method call, the device 15084 * will be set to PCI_D0 directly in PCI config space before restoring the 15085 * state. 15086 * 15087 * Return code 15088 * 0 - driver suspended the device 15089 * Error otherwise 15090 **/ 15091 static int __maybe_unused 15092 lpfc_pci_resume_one_s4(struct device *dev_d) 15093 { 15094 struct Scsi_Host *shost = dev_get_drvdata(dev_d); 15095 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15096 uint32_t intr_mode; 15097 int error; 15098 15099 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15100 "0292 PCI device Power Management resume.\n"); 15101 15102 /* Startup the kernel thread for this host adapter. */ 15103 phba->worker_thread = kthread_run(lpfc_do_work, phba, 15104 "lpfc_worker_%d", phba->brd_no); 15105 if (IS_ERR(phba->worker_thread)) { 15106 error = PTR_ERR(phba->worker_thread); 15107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15108 "0293 PM resume failed to start worker " 15109 "thread: error=x%x.\n", error); 15110 return error; 15111 } 15112 15113 /* Configure and enable interrupt */ 15114 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15115 if (intr_mode == LPFC_INTR_ERROR) { 15116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15117 "0294 PM resume Failed to enable interrupt\n"); 15118 return -EIO; 15119 } else 15120 phba->intr_mode = intr_mode; 15121 15122 /* Restart HBA and bring it online */ 15123 lpfc_sli_brdrestart(phba); 15124 lpfc_online(phba); 15125 15126 /* Log the current active interrupt mode */ 15127 lpfc_log_intr_mode(phba, phba->intr_mode); 15128 15129 return 0; 15130 } 15131 15132 /** 15133 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 15134 * @phba: pointer to lpfc hba data structure. 15135 * 15136 * This routine is called to prepare the SLI4 device for PCI slot recover. It 15137 * aborts all the outstanding SCSI I/Os to the pci device. 
15138 **/ 15139 static void 15140 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 15141 { 15142 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15143 "2828 PCI channel I/O abort preparing for recovery\n"); 15144 /* 15145 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 15146 * and let the SCSI mid-layer to retry them to recover. 15147 */ 15148 lpfc_sli_abort_fcp_rings(phba); 15149 } 15150 15151 /** 15152 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 15153 * @phba: pointer to lpfc hba data structure. 15154 * 15155 * This routine is called to prepare the SLI4 device for PCI slot reset. It 15156 * disables the device interrupt and pci device, and aborts the internal FCP 15157 * pending I/Os. 15158 **/ 15159 static void 15160 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 15161 { 15162 int offline = pci_channel_offline(phba->pcidev); 15163 15164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15165 "2826 PCI channel disable preparing for reset offline" 15166 " %d\n", offline); 15167 15168 /* Block any management I/Os to the device */ 15169 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 15170 15171 15172 /* HBA_PCI_ERR was set in io_error_detect */ 15173 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 15174 /* Flush all driver's outstanding I/Os as we are to reset */ 15175 lpfc_sli_flush_io_rings(phba); 15176 lpfc_offline(phba); 15177 15178 /* stop all timers */ 15179 lpfc_stop_hba_timers(phba); 15180 15181 lpfc_sli4_queue_destroy(phba); 15182 /* Disable interrupt and pci device */ 15183 lpfc_sli4_disable_intr(phba); 15184 pci_disable_device(phba->pcidev); 15185 } 15186 15187 /** 15188 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 15189 * @phba: pointer to lpfc hba data structure. 15190 * 15191 * This routine is called to prepare the SLI4 device for PCI slot permanently 15192 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 15193 * pending I/Os. 15194 **/ 15195 static void 15196 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 15197 { 15198 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15199 "2827 PCI channel permanent disable for failure\n"); 15200 15201 /* Block all SCSI devices' I/Os on the host */ 15202 lpfc_scsi_dev_block(phba); 15203 15204 /* stop all timers */ 15205 lpfc_stop_hba_timers(phba); 15206 15207 /* Clean up all driver's outstanding I/Os */ 15208 lpfc_sli_flush_io_rings(phba); 15209 } 15210 15211 /** 15212 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 15213 * @pdev: pointer to PCI device. 15214 * @state: the current PCI connection state. 15215 * 15216 * This routine is called from the PCI subsystem for error handling to device 15217 * with SLI-4 interface spec. This function is called by the PCI subsystem 15218 * after a PCI bus error affecting this device has been detected. When this 15219 * function is invoked, it will need to stop all the I/Os and interrupt(s) 15220 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 15221 * for the PCI subsystem to perform proper recovery as desired. 
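 *
 * The HBA_PCI_ERR flag is set here (test_and_set_bit) so that the reset
 * preparation is performed only once if this handler is invoked more than
 * once for the same error.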
15222 * 15223 * Return codes 15224 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15225 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15226 **/ 15227 static pci_ers_result_t 15228 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 15229 { 15230 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15231 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15232 bool hba_pci_err; 15233 15234 switch (state) { 15235 case pci_channel_io_normal: 15236 /* Non-fatal error, prepare for recovery */ 15237 lpfc_sli4_prep_dev_for_recover(phba); 15238 return PCI_ERS_RESULT_CAN_RECOVER; 15239 case pci_channel_io_frozen: 15240 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); 15241 /* Fatal error, prepare for slot reset */ 15242 if (!hba_pci_err) 15243 lpfc_sli4_prep_dev_for_reset(phba); 15244 else 15245 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15246 "2832 Already handling PCI error " 15247 "state: x%x\n", state); 15248 return PCI_ERS_RESULT_NEED_RESET; 15249 case pci_channel_io_perm_failure: 15250 set_bit(HBA_PCI_ERR, &phba->bit_flags); 15251 /* Permanent failure, prepare for device down */ 15252 lpfc_sli4_prep_dev_for_perm_failure(phba); 15253 return PCI_ERS_RESULT_DISCONNECT; 15254 default: 15255 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); 15256 if (!hba_pci_err) 15257 lpfc_sli4_prep_dev_for_reset(phba); 15258 /* Unknown state, prepare and request slot reset */ 15259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15260 "2825 Unknown PCI error state: x%x\n", state); 15261 lpfc_sli4_prep_dev_for_reset(phba); 15262 return PCI_ERS_RESULT_NEED_RESET; 15263 } 15264 } 15265 15266 /** 15267 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 15268 * @pdev: pointer to PCI device. 15269 * 15270 * This routine is called from the PCI subsystem for error handling to device 15271 * with SLI-4 interface spec. It is called after PCI bus has been reset to 15272 * restart the PCI card from scratch, as if from a cold-boot. During the 15273 * PCI subsystem error recovery, after the driver returns 15274 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 15275 * recovery and then call this routine before calling the .resume method to 15276 * recover the device. This function will initialize the HBA device, enable 15277 * the interrupt, but it will just put the HBA to offline state without 15278 * passing any I/O traffic. 
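 *
 * The HBA_PCI_ERR flag set by the error-detected handler is cleared here
 * once the PCI device has been re-enabled and its config space restored.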
15279 * 15280 * Return codes 15281 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15282 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15283 */ 15284 static pci_ers_result_t 15285 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 15286 { 15287 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15288 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15289 struct lpfc_sli *psli = &phba->sli; 15290 uint32_t intr_mode; 15291 bool hba_pci_err; 15292 15293 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 15294 if (pci_enable_device_mem(pdev)) { 15295 printk(KERN_ERR "lpfc: Cannot re-enable " 15296 "PCI device after reset.\n"); 15297 return PCI_ERS_RESULT_DISCONNECT; 15298 } 15299 15300 pci_restore_state(pdev); 15301 15302 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags); 15303 if (!hba_pci_err) 15304 dev_info(&pdev->dev, 15305 "hba_pci_err was not set, recovering slot reset.\n"); 15306 /* 15307 * As the new kernel behavior of pci_restore_state() API call clears 15308 * device saved_state flag, need to save the restored state again. 15309 */ 15310 pci_save_state(pdev); 15311 15312 if (pdev->is_busmaster) 15313 pci_set_master(pdev); 15314 15315 spin_lock_irq(&phba->hbalock); 15316 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 15317 spin_unlock_irq(&phba->hbalock); 15318 15319 /* Init cpu_map array */ 15320 lpfc_cpu_map_array_init(phba); 15321 /* Configure and enable interrupt */ 15322 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 15323 if (intr_mode == LPFC_INTR_ERROR) { 15324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15325 "2824 Cannot re-enable interrupt after " 15326 "slot reset.\n"); 15327 return PCI_ERS_RESULT_DISCONNECT; 15328 } else 15329 phba->intr_mode = intr_mode; 15330 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 15331 15332 /* Log the current active interrupt mode */ 15333 lpfc_log_intr_mode(phba, phba->intr_mode); 15334 15335 return PCI_ERS_RESULT_RECOVERED; 15336 } 15337 15338 /** 15339 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 15340 * @pdev: pointer to PCI device 15341 * 15342 * This routine is called from the PCI subsystem for error handling to device 15343 * with SLI-4 interface spec. It is called when kernel error recovery tells 15344 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 15345 * error recovery. After this call, traffic can start to flow from this device 15346 * again. 15347 **/ 15348 static void 15349 lpfc_io_resume_s4(struct pci_dev *pdev) 15350 { 15351 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15352 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15353 15354 /* 15355 * In case of slot reset, as function reset is performed through 15356 * mailbox command which needs DMA to be enabled, this operation 15357 * has to be moved to the io resume phase. Taking device offline 15358 * will perform the necessary cleanup. 15359 */ 15360 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 15361 /* Perform device reset */ 15362 lpfc_sli_brdrestart(phba); 15363 /* Bring the device back online */ 15364 lpfc_online(phba); 15365 } 15366 } 15367 15368 /** 15369 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 15370 * @pdev: pointer to PCI device 15371 * @pid: pointer to PCI device identifier 15372 * 15373 * This routine is to be registered to the kernel's PCI subsystem. 
When an 15374 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 15375 * at PCI device-specific information of the device and driver to see if the 15376 * driver state that it can support this kind of device. If the match is 15377 * successful, the driver core invokes this routine. This routine dispatches 15378 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 15379 * do all the initialization that it needs to do to handle the HBA device 15380 * properly. 15381 * 15382 * Return code 15383 * 0 - driver can claim the device 15384 * negative value - driver can not claim the device 15385 **/ 15386 static int 15387 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 15388 { 15389 int rc; 15390 struct lpfc_sli_intf intf; 15391 15392 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 15393 return -ENODEV; 15394 15395 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 15396 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 15397 rc = lpfc_pci_probe_one_s4(pdev, pid); 15398 else 15399 rc = lpfc_pci_probe_one_s3(pdev, pid); 15400 15401 return rc; 15402 } 15403 15404 /** 15405 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 15406 * @pdev: pointer to PCI device 15407 * 15408 * This routine is to be registered to the kernel's PCI subsystem. When an 15409 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 15410 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 15411 * remove routine, which will perform all the necessary cleanup for the 15412 * device to be removed from the PCI subsystem properly. 15413 **/ 15414 static void 15415 lpfc_pci_remove_one(struct pci_dev *pdev) 15416 { 15417 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15418 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15419 15420 switch (phba->pci_dev_grp) { 15421 case LPFC_PCI_DEV_LP: 15422 lpfc_pci_remove_one_s3(pdev); 15423 break; 15424 case LPFC_PCI_DEV_OC: 15425 lpfc_pci_remove_one_s4(pdev); 15426 break; 15427 default: 15428 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15429 "1424 Invalid PCI device group: 0x%x\n", 15430 phba->pci_dev_grp); 15431 break; 15432 } 15433 return; 15434 } 15435 15436 /** 15437 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 15438 * @dev: pointer to device 15439 * 15440 * This routine is to be registered to the kernel's PCI subsystem to support 15441 * system Power Management (PM). When PM invokes this method, it dispatches 15442 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 15443 * suspend the device. 
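 *
 * The dispatch, like the other SLI-3/SLI-4 wrappers in this file, is keyed
 * off phba->pci_dev_grp: LPFC_PCI_DEV_LP selects the SLI-3 handler and
 * LPFC_PCI_DEV_OC selects the SLI-4 handler.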
15444 * 15445 * Return code 15446 * 0 - driver suspended the device 15447 * Error otherwise 15448 **/ 15449 static int __maybe_unused 15450 lpfc_pci_suspend_one(struct device *dev) 15451 { 15452 struct Scsi_Host *shost = dev_get_drvdata(dev); 15453 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15454 int rc = -ENODEV; 15455 15456 switch (phba->pci_dev_grp) { 15457 case LPFC_PCI_DEV_LP: 15458 rc = lpfc_pci_suspend_one_s3(dev); 15459 break; 15460 case LPFC_PCI_DEV_OC: 15461 rc = lpfc_pci_suspend_one_s4(dev); 15462 break; 15463 default: 15464 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15465 "1425 Invalid PCI device group: 0x%x\n", 15466 phba->pci_dev_grp); 15467 break; 15468 } 15469 return rc; 15470 } 15471 15472 /** 15473 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 15474 * @dev: pointer to device 15475 * 15476 * This routine is to be registered to the kernel's PCI subsystem to support 15477 * system Power Management (PM). When PM invokes this method, it dispatches 15478 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 15479 * resume the device. 15480 * 15481 * Return code 15482 * 0 - driver suspended the device 15483 * Error otherwise 15484 **/ 15485 static int __maybe_unused 15486 lpfc_pci_resume_one(struct device *dev) 15487 { 15488 struct Scsi_Host *shost = dev_get_drvdata(dev); 15489 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15490 int rc = -ENODEV; 15491 15492 switch (phba->pci_dev_grp) { 15493 case LPFC_PCI_DEV_LP: 15494 rc = lpfc_pci_resume_one_s3(dev); 15495 break; 15496 case LPFC_PCI_DEV_OC: 15497 rc = lpfc_pci_resume_one_s4(dev); 15498 break; 15499 default: 15500 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15501 "1426 Invalid PCI device group: 0x%x\n", 15502 phba->pci_dev_grp); 15503 break; 15504 } 15505 return rc; 15506 } 15507 15508 /** 15509 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 15510 * @pdev: pointer to PCI device. 15511 * @state: the current PCI connection state. 15512 * 15513 * This routine is registered to the PCI subsystem for error handling. This 15514 * function is called by the PCI subsystem after a PCI bus error affecting 15515 * this device has been detected. When this routine is invoked, it dispatches 15516 * the action to the proper SLI-3 or SLI-4 device error detected handling 15517 * routine, which will perform the proper error detected operation. 
15518 * 15519 * Return codes 15520 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 15521 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15522 **/ 15523 static pci_ers_result_t 15524 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 15525 { 15526 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15527 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15528 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15529 15530 if (phba->link_state == LPFC_HBA_ERROR && 15531 phba->hba_flag & HBA_IOQ_FLUSH) 15532 return PCI_ERS_RESULT_NEED_RESET; 15533 15534 switch (phba->pci_dev_grp) { 15535 case LPFC_PCI_DEV_LP: 15536 rc = lpfc_io_error_detected_s3(pdev, state); 15537 break; 15538 case LPFC_PCI_DEV_OC: 15539 rc = lpfc_io_error_detected_s4(pdev, state); 15540 break; 15541 default: 15542 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15543 "1427 Invalid PCI device group: 0x%x\n", 15544 phba->pci_dev_grp); 15545 break; 15546 } 15547 return rc; 15548 } 15549 15550 /** 15551 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 15552 * @pdev: pointer to PCI device. 15553 * 15554 * This routine is registered to the PCI subsystem for error handling. This 15555 * function is called after PCI bus has been reset to restart the PCI card 15556 * from scratch, as if from a cold-boot. When this routine is invoked, it 15557 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 15558 * routine, which will perform the proper device reset. 15559 * 15560 * Return codes 15561 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 15562 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 15563 **/ 15564 static pci_ers_result_t 15565 lpfc_io_slot_reset(struct pci_dev *pdev) 15566 { 15567 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15568 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15569 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15570 15571 switch (phba->pci_dev_grp) { 15572 case LPFC_PCI_DEV_LP: 15573 rc = lpfc_io_slot_reset_s3(pdev); 15574 break; 15575 case LPFC_PCI_DEV_OC: 15576 rc = lpfc_io_slot_reset_s4(pdev); 15577 break; 15578 default: 15579 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15580 "1428 Invalid PCI device group: 0x%x\n", 15581 phba->pci_dev_grp); 15582 break; 15583 } 15584 return rc; 15585 } 15586 15587 /** 15588 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 15589 * @pdev: pointer to PCI device 15590 * 15591 * This routine is registered to the PCI subsystem for error handling. It 15592 * is called when kernel error recovery tells the lpfc driver that it is 15593 * OK to resume normal PCI operation after PCI bus error recovery. When 15594 * this routine is invoked, it dispatches the action to the proper SLI-3 15595 * or SLI-4 device io_resume routine, which will resume the device operation. 
15596 **/ 15597 static void 15598 lpfc_io_resume(struct pci_dev *pdev) 15599 { 15600 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15601 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15602 15603 switch (phba->pci_dev_grp) { 15604 case LPFC_PCI_DEV_LP: 15605 lpfc_io_resume_s3(pdev); 15606 break; 15607 case LPFC_PCI_DEV_OC: 15608 lpfc_io_resume_s4(pdev); 15609 break; 15610 default: 15611 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15612 "1429 Invalid PCI device group: 0x%x\n", 15613 phba->pci_dev_grp); 15614 break; 15615 } 15616 return; 15617 } 15618 15619 /** 15620 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 15621 * @phba: pointer to lpfc hba data structure. 15622 * 15623 * This routine checks to see if OAS is supported for this adapter. If 15624 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 15625 * the enable oas flag is cleared and the pool created for OAS device data 15626 * is destroyed. 15627 * 15628 **/ 15629 static void 15630 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 15631 { 15632 15633 if (!phba->cfg_EnableXLane) 15634 return; 15635 15636 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 15637 phba->cfg_fof = 1; 15638 } else { 15639 phba->cfg_fof = 0; 15640 mempool_destroy(phba->device_data_mem_pool); 15641 phba->device_data_mem_pool = NULL; 15642 } 15643 15644 return; 15645 } 15646 15647 /** 15648 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter 15649 * @phba: pointer to lpfc hba data structure. 15650 * 15651 * This routine checks to see if RAS is supported by the adapter. Check the 15652 * function through which RAS support enablement is to be done. 15653 **/ 15654 void 15655 lpfc_sli4_ras_init(struct lpfc_hba *phba) 15656 { 15657 /* if ASIC_GEN_NUM >= 0xC) */ 15658 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 15659 LPFC_SLI_INTF_IF_TYPE_6) || 15660 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == 15661 LPFC_SLI_INTF_FAMILY_G6)) { 15662 phba->ras_fwlog.ras_hwsupport = true; 15663 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && 15664 phba->cfg_ras_fwlog_buffsize) 15665 phba->ras_fwlog.ras_enabled = true; 15666 else 15667 phba->ras_fwlog.ras_enabled = false; 15668 } else { 15669 phba->ras_fwlog.ras_hwsupport = false; 15670 } 15671 } 15672 15673 15674 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 15675 15676 static const struct pci_error_handlers lpfc_err_handler = { 15677 .error_detected = lpfc_io_error_detected, 15678 .slot_reset = lpfc_io_slot_reset, 15679 .resume = lpfc_io_resume, 15680 }; 15681 15682 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one, 15683 lpfc_pci_suspend_one, 15684 lpfc_pci_resume_one); 15685 15686 static struct pci_driver lpfc_driver = { 15687 .name = LPFC_DRIVER_NAME, 15688 .id_table = lpfc_id_table, 15689 .probe = lpfc_pci_probe_one, 15690 .remove = lpfc_pci_remove_one, 15691 .shutdown = lpfc_pci_remove_one, 15692 .driver.pm = &lpfc_pci_pm_ops_one, 15693 .err_handler = &lpfc_err_handler, 15694 }; 15695 15696 static const struct file_operations lpfc_mgmt_fop = { 15697 .owner = THIS_MODULE, 15698 }; 15699 15700 static struct miscdevice lpfc_mgmt_dev = { 15701 .minor = MISC_DYNAMIC_MINOR, 15702 .name = "lpfcmgmt", 15703 .fops = &lpfc_mgmt_fop, 15704 }; 15705 15706 /** 15707 * lpfc_init - lpfc module initialization routine 15708 * 15709 * This routine is to be invoked when the lpfc module is loaded into the 15710 * kernel. 
The special kernel macro module_init() is used to indicate the 15711 * role of this routine to the kernel as lpfc module entry point. 15712 * 15713 * Return codes 15714 * 0 - successful 15715 * -ENOMEM - FC attach transport failed 15716 * all others - failed 15717 */ 15718 static int __init 15719 lpfc_init(void) 15720 { 15721 int error = 0; 15722 15723 pr_info(LPFC_MODULE_DESC "\n"); 15724 pr_info(LPFC_COPYRIGHT "\n"); 15725 15726 error = misc_register(&lpfc_mgmt_dev); 15727 if (error) 15728 printk(KERN_ERR "Could not register lpfcmgmt device, " 15729 "misc_register returned with status %d", error); 15730 15731 error = -ENOMEM; 15732 lpfc_transport_functions.vport_create = lpfc_vport_create; 15733 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 15734 lpfc_transport_template = 15735 fc_attach_transport(&lpfc_transport_functions); 15736 if (lpfc_transport_template == NULL) 15737 goto unregister; 15738 lpfc_vport_transport_template = 15739 fc_attach_transport(&lpfc_vport_transport_functions); 15740 if (lpfc_vport_transport_template == NULL) { 15741 fc_release_transport(lpfc_transport_template); 15742 goto unregister; 15743 } 15744 lpfc_wqe_cmd_template(); 15745 lpfc_nvmet_cmd_template(); 15746 15747 /* Initialize in case vector mapping is needed */ 15748 lpfc_present_cpu = num_present_cpus(); 15749 15750 lpfc_pldv_detect = false; 15751 15752 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 15753 "lpfc/sli4:online", 15754 lpfc_cpu_online, lpfc_cpu_offline); 15755 if (error < 0) 15756 goto cpuhp_failure; 15757 lpfc_cpuhp_state = error; 15758 15759 error = pci_register_driver(&lpfc_driver); 15760 if (error) 15761 goto unwind; 15762 15763 return error; 15764 15765 unwind: 15766 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15767 cpuhp_failure: 15768 fc_release_transport(lpfc_transport_template); 15769 fc_release_transport(lpfc_vport_transport_template); 15770 unregister: 15771 misc_deregister(&lpfc_mgmt_dev); 15772 15773 return error; 15774 } 15775 15776 void lpfc_dmp_dbg(struct lpfc_hba *phba) 15777 { 15778 unsigned int start_idx; 15779 unsigned int dbg_cnt; 15780 unsigned int temp_idx; 15781 int i; 15782 int j = 0; 15783 unsigned long rem_nsec; 15784 15785 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) 15786 return; 15787 15788 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ; 15789 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt); 15790 if (!dbg_cnt) 15791 goto out; 15792 temp_idx = start_idx; 15793 if (dbg_cnt >= DBG_LOG_SZ) { 15794 dbg_cnt = DBG_LOG_SZ; 15795 temp_idx -= 1; 15796 } else { 15797 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) { 15798 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ; 15799 } else { 15800 if (start_idx < dbg_cnt) 15801 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx); 15802 else 15803 start_idx -= dbg_cnt; 15804 } 15805 } 15806 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n", 15807 start_idx, temp_idx, dbg_cnt); 15808 15809 for (i = 0; i < dbg_cnt; i++) { 15810 if ((start_idx + i) < DBG_LOG_SZ) 15811 temp_idx = (start_idx + i) % DBG_LOG_SZ; 15812 else 15813 temp_idx = j++; 15814 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC); 15815 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s", 15816 temp_idx, 15817 (unsigned long)phba->dbg_log[temp_idx].t_ns, 15818 rem_nsec / 1000, 15819 phba->dbg_log[temp_idx].log); 15820 } 15821 out: 15822 atomic_set(&phba->dbg_log_cnt, 0); 15823 atomic_set(&phba->dbg_log_dmping, 0); 15824 } 15825 15826 __printf(2, 3) 15827 void lpfc_dbg_print(struct lpfc_hba *phba, const 
char *fmt, ...) 15828 { 15829 unsigned int idx; 15830 va_list args; 15831 int dbg_dmping = atomic_read(&phba->dbg_log_dmping); 15832 struct va_format vaf; 15833 15834 15835 va_start(args, fmt); 15836 if (unlikely(dbg_dmping)) { 15837 vaf.fmt = fmt; 15838 vaf.va = &args; 15839 dev_info(&phba->pcidev->dev, "%pV", &vaf); 15840 va_end(args); 15841 return; 15842 } 15843 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) % 15844 DBG_LOG_SZ; 15845 15846 atomic_inc(&phba->dbg_log_cnt); 15847 15848 vscnprintf(phba->dbg_log[idx].log, 15849 sizeof(phba->dbg_log[idx].log), fmt, args); 15850 va_end(args); 15851 15852 phba->dbg_log[idx].t_ns = local_clock(); 15853 } 15854 15855 /** 15856 * lpfc_exit - lpfc module removal routine 15857 * 15858 * This routine is invoked when the lpfc module is removed from the kernel. 15859 * The special kernel macro module_exit() is used to indicate the role of 15860 * this routine to the kernel as lpfc module exit point. 15861 */ 15862 static void __exit 15863 lpfc_exit(void) 15864 { 15865 misc_deregister(&lpfc_mgmt_dev); 15866 pci_unregister_driver(&lpfc_driver); 15867 cpuhp_remove_multi_state(lpfc_cpuhp_state); 15868 fc_release_transport(lpfc_transport_template); 15869 fc_release_transport(lpfc_vport_transport_template); 15870 idr_destroy(&lpfc_hba_index); 15871 } 15872 15873 module_init(lpfc_init); 15874 module_exit(lpfc_exit); 15875 MODULE_LICENSE("GPL"); 15876 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 15877 MODULE_AUTHOR("Broadcom"); 15878 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 15879
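
/*
 * Illustrative sketch only (not built, and the helper name below is made
 * up for illustration): how the in-memory debug ring above is intended to
 * be used.  lpfc_dbg_print() stores a timestamped, formatted entry in
 * phba->dbg_log[], and lpfc_dmp_dbg() replays the accumulated entries to
 * the kernel log, oldest first.
 */
#if 0
static void lpfc_dbg_ring_example(struct lpfc_hba *phba)
{
	int i;

	/* Record a few entries in the ring buffer */
	for (i = 0; i < 4; i++)
		lpfc_dbg_print(phba, "example event %d on board %d\n",
			       i, phba->brd_no);

	/* Dump whatever has accumulated since the last dump */
	lpfc_dmp_dbg(phba);
}
#endif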