1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/idr.h> 26 #include <linux/interrupt.h> 27 #include <linux/module.h> 28 #include <linux/kthread.h> 29 #include <linux/pci.h> 30 #include <linux/spinlock.h> 31 #include <linux/ctype.h> 32 #include <linux/aer.h> 33 #include <linux/slab.h> 34 #include <linux/firmware.h> 35 #include <linux/miscdevice.h> 36 #include <linux/percpu.h> 37 38 #include <scsi/scsi.h> 39 #include <scsi/scsi_device.h> 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi_transport_fc.h> 42 43 #include "lpfc_hw4.h" 44 #include "lpfc_hw.h" 45 #include "lpfc_sli.h" 46 #include "lpfc_sli4.h" 47 #include "lpfc_nl.h" 48 #include "lpfc_disc.h" 49 #include "lpfc_scsi.h" 50 #include "lpfc.h" 51 #include "lpfc_logmsg.h" 52 #include "lpfc_crtn.h" 53 #include "lpfc_vport.h" 54 #include "lpfc_version.h" 55 #include "lpfc_ids.h" 56 57 char *_dump_buf_data; 58 unsigned long _dump_buf_data_order; 59 char *_dump_buf_dif; 60 unsigned long _dump_buf_dif_order; 61 spinlock_t _dump_buf_lock; 62 63 /* Used when mapping IRQ vectors in a driver centric manner */ 64 uint16_t *lpfc_used_cpu; 65 uint32_t lpfc_present_cpu; 66 67 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 68 static int lpfc_post_rcv_buf(struct lpfc_hba *); 69 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 70 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 71 static int lpfc_setup_endian_order(struct lpfc_hba *); 72 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 73 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 74 static void lpfc_init_sgl_list(struct lpfc_hba *); 75 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 76 static void lpfc_free_active_sgl(struct lpfc_hba *); 77 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 78 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 79 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 80 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 81 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 82 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 83 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 84 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 85 86 static struct scsi_transport_template *lpfc_transport_template = NULL; 87 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 88 static DEFINE_IDR(lpfc_hba_index); 89 90 /** 91 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 92 * @phba: pointer to lpfc hba data structure. 93 * 94 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 95 * mailbox command. It retrieves the revision information from the HBA and 96 * collects the Vital Product Data (VPD) about the HBA for preparing the 97 * configuration of the HBA. 98 * 99 * Return codes: 100 * 0 - success. 101 * -ERESTART - requests the SLI layer to reset the HBA and try again. 102 * Any other value - indicates an error. 103 **/ 104 int 105 lpfc_config_port_prep(struct lpfc_hba *phba) 106 { 107 lpfc_vpd_t *vp = &phba->vpd; 108 int i = 0, rc; 109 LPFC_MBOXQ_t *pmb; 110 MAILBOX_t *mb; 111 char *lpfc_vpd_data = NULL; 112 uint16_t offset = 0; 113 static char licensed[56] = 114 "key unlock for use with gnu public licensed code only\0"; 115 static int init_key = 1; 116 117 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 118 if (!pmb) { 119 phba->link_state = LPFC_HBA_ERROR; 120 return -ENOMEM; 121 } 122 123 mb = &pmb->u.mb; 124 phba->link_state = LPFC_INIT_MBX_CMDS; 125 126 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 127 if (init_key) { 128 uint32_t *ptext = (uint32_t *) licensed; 129 130 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 131 *ptext = cpu_to_be32(*ptext); 132 init_key = 0; 133 } 134 135 lpfc_read_nv(phba, pmb); 136 memset((char*)mb->un.varRDnvp.rsvd3, 0, 137 sizeof (mb->un.varRDnvp.rsvd3)); 138 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 139 sizeof (licensed)); 140 141 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 142 143 if (rc != MBX_SUCCESS) { 144 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 145 "0324 Config Port initialization " 146 "error, mbxCmd x%x READ_NVPARM, " 147 "mbxStatus x%x\n", 148 mb->mbxCommand, mb->mbxStatus); 149 mempool_free(pmb, phba->mbox_mem_pool); 150 return -ERESTART; 151 } 152 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 153 sizeof(phba->wwnn)); 154 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 155 sizeof(phba->wwpn)); 156 } 157 158 phba->sli3_options = 0x0; 159 160 /* Setup and issue mailbox READ REV command */ 161 lpfc_read_rev(phba, pmb); 162 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 163 if (rc != MBX_SUCCESS) { 164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 165 "0439 Adapter failed to init, mbxCmd x%x " 166 "READ_REV, mbxStatus x%x\n", 167 mb->mbxCommand, mb->mbxStatus); 168 mempool_free( pmb, phba->mbox_mem_pool); 169 return -ERESTART; 170 } 171 172 173 /* 174 * The value of rr must be 1 since the driver set the cv field to 1. 175 * This setting requires the FW to set all revision fields. 176 */ 177 if (mb->un.varRdRev.rr == 0) { 178 vp->rev.rBit = 0; 179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 180 "0440 Adapter failed to init, READ_REV has " 181 "missing revision information.\n"); 182 mempool_free(pmb, phba->mbox_mem_pool); 183 return -ERESTART; 184 } 185 186 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 187 mempool_free(pmb, phba->mbox_mem_pool); 188 return -EINVAL; 189 } 190 191 /* Save information as VPD data */ 192 vp->rev.rBit = 1; 193 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 194 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 195 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 196 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 197 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 198 vp->rev.biuRev = mb->un.varRdRev.biuRev; 199 vp->rev.smRev = mb->un.varRdRev.smRev; 200 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 201 vp->rev.endecRev = mb->un.varRdRev.endecRev; 202 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 203 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 204 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 205 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 206 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 207 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 208 209 /* If the sli feature level is less then 9, we must 210 * tear down all RPIs and VPIs on link down if NPIV 211 * is enabled. 212 */ 213 if (vp->rev.feaLevelHigh < 9) 214 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 215 216 if (lpfc_is_LC_HBA(phba->pcidev->device)) 217 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 218 sizeof (phba->RandomData)); 219 220 /* Get adapter VPD information */ 221 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 222 if (!lpfc_vpd_data) 223 goto out_free_mbox; 224 do { 225 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 226 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 227 228 if (rc != MBX_SUCCESS) { 229 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 230 "0441 VPD not present on adapter, " 231 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 232 mb->mbxCommand, mb->mbxStatus); 233 mb->un.varDmp.word_cnt = 0; 234 } 235 /* dump mem may return a zero when finished or we got a 236 * mailbox error, either way we are done. 237 */ 238 if (mb->un.varDmp.word_cnt == 0) 239 break; 240 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 241 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 242 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 243 lpfc_vpd_data + offset, 244 mb->un.varDmp.word_cnt); 245 offset += mb->un.varDmp.word_cnt; 246 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 247 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 248 249 kfree(lpfc_vpd_data); 250 out_free_mbox: 251 mempool_free(pmb, phba->mbox_mem_pool); 252 return 0; 253 } 254 255 /** 256 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 257 * @phba: pointer to lpfc hba data structure. 258 * @pmboxq: pointer to the driver internal queue element for mailbox command. 259 * 260 * This is the completion handler for driver's configuring asynchronous event 261 * mailbox command to the device. If the mailbox command returns successfully, 262 * it will set internal async event support flag to 1; otherwise, it will 263 * set internal async event support flag to 0. 264 **/ 265 static void 266 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 267 { 268 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 269 phba->temp_sensor_support = 1; 270 else 271 phba->temp_sensor_support = 0; 272 mempool_free(pmboxq, phba->mbox_mem_pool); 273 return; 274 } 275 276 /** 277 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 278 * @phba: pointer to lpfc hba data structure. 279 * @pmboxq: pointer to the driver internal queue element for mailbox command. 280 * 281 * This is the completion handler for dump mailbox command for getting 282 * wake up parameters. When this command complete, the response contain 283 * Option rom version of the HBA. This function translate the version number 284 * into a human readable string and store it in OptionROMVersion. 285 **/ 286 static void 287 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 288 { 289 struct prog_id *prg; 290 uint32_t prog_id_word; 291 char dist = ' '; 292 /* character array used for decoding dist type. */ 293 char dist_char[] = "nabx"; 294 295 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 296 mempool_free(pmboxq, phba->mbox_mem_pool); 297 return; 298 } 299 300 prg = (struct prog_id *) &prog_id_word; 301 302 /* word 7 contain option rom version */ 303 prog_id_word = pmboxq->u.mb.un.varWords[7]; 304 305 /* Decode the Option rom version word to a readable string */ 306 if (prg->dist < 4) 307 dist = dist_char[prg->dist]; 308 309 if ((prg->dist == 3) && (prg->num == 0)) 310 snprintf(phba->OptionROMVersion, 32, "%d.%d%d", 311 prg->ver, prg->rev, prg->lev); 312 else 313 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d", 314 prg->ver, prg->rev, prg->lev, 315 dist, prg->num); 316 mempool_free(pmboxq, phba->mbox_mem_pool); 317 return; 318 } 319 320 /** 321 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 322 * cfg_soft_wwnn, cfg_soft_wwpn 323 * @vport: pointer to lpfc vport data structure. 324 * 325 * 326 * Return codes 327 * None. 328 **/ 329 void 330 lpfc_update_vport_wwn(struct lpfc_vport *vport) 331 { 332 /* If the soft name exists then update it using the service params */ 333 if (vport->phba->cfg_soft_wwnn) 334 u64_to_wwn(vport->phba->cfg_soft_wwnn, 335 vport->fc_sparam.nodeName.u.wwn); 336 if (vport->phba->cfg_soft_wwpn) 337 u64_to_wwn(vport->phba->cfg_soft_wwpn, 338 vport->fc_sparam.portName.u.wwn); 339 340 /* 341 * If the name is empty or there exists a soft name 342 * then copy the service params name, otherwise use the fc name 343 */ 344 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 345 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 346 sizeof(struct lpfc_name)); 347 else 348 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 349 sizeof(struct lpfc_name)); 350 351 if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn) 352 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 353 sizeof(struct lpfc_name)); 354 else 355 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 356 sizeof(struct lpfc_name)); 357 } 358 359 /** 360 * lpfc_config_port_post - Perform lpfc initialization after config port 361 * @phba: pointer to lpfc hba data structure. 362 * 363 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 364 * command call. It performs all internal resource and state setups on the 365 * port: post IOCB buffers, enable appropriate host interrupt attentions, 366 * ELS ring timers, etc. 367 * 368 * Return codes 369 * 0 - success. 370 * Any other value - error. 371 **/ 372 int 373 lpfc_config_port_post(struct lpfc_hba *phba) 374 { 375 struct lpfc_vport *vport = phba->pport; 376 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 377 LPFC_MBOXQ_t *pmb; 378 MAILBOX_t *mb; 379 struct lpfc_dmabuf *mp; 380 struct lpfc_sli *psli = &phba->sli; 381 uint32_t status, timeout; 382 int i, j; 383 int rc; 384 385 spin_lock_irq(&phba->hbalock); 386 /* 387 * If the Config port completed correctly the HBA is not 388 * over heated any more. 389 */ 390 if (phba->over_temp_state == HBA_OVER_TEMP) 391 phba->over_temp_state = HBA_NORMAL_TEMP; 392 spin_unlock_irq(&phba->hbalock); 393 394 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 395 if (!pmb) { 396 phba->link_state = LPFC_HBA_ERROR; 397 return -ENOMEM; 398 } 399 mb = &pmb->u.mb; 400 401 /* Get login parameters for NID. */ 402 rc = lpfc_read_sparam(phba, pmb, 0); 403 if (rc) { 404 mempool_free(pmb, phba->mbox_mem_pool); 405 return -ENOMEM; 406 } 407 408 pmb->vport = vport; 409 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 411 "0448 Adapter failed init, mbxCmd x%x " 412 "READ_SPARM mbxStatus x%x\n", 413 mb->mbxCommand, mb->mbxStatus); 414 phba->link_state = LPFC_HBA_ERROR; 415 mp = (struct lpfc_dmabuf *) pmb->context1; 416 mempool_free(pmb, phba->mbox_mem_pool); 417 lpfc_mbuf_free(phba, mp->virt, mp->phys); 418 kfree(mp); 419 return -EIO; 420 } 421 422 mp = (struct lpfc_dmabuf *) pmb->context1; 423 424 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 425 lpfc_mbuf_free(phba, mp->virt, mp->phys); 426 kfree(mp); 427 pmb->context1 = NULL; 428 lpfc_update_vport_wwn(vport); 429 430 /* Update the fc_host data structures with new wwn. */ 431 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 432 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 433 fc_host_max_npiv_vports(shost) = phba->max_vpi; 434 435 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 436 /* This should be consolidated into parse_vpd ? - mr */ 437 if (phba->SerialNumber[0] == 0) { 438 uint8_t *outptr; 439 440 outptr = &vport->fc_nodename.u.s.IEEE[0]; 441 for (i = 0; i < 12; i++) { 442 status = *outptr++; 443 j = ((status & 0xf0) >> 4); 444 if (j <= 9) 445 phba->SerialNumber[i] = 446 (char)((uint8_t) 0x30 + (uint8_t) j); 447 else 448 phba->SerialNumber[i] = 449 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 450 i++; 451 j = (status & 0xf); 452 if (j <= 9) 453 phba->SerialNumber[i] = 454 (char)((uint8_t) 0x30 + (uint8_t) j); 455 else 456 phba->SerialNumber[i] = 457 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 458 } 459 } 460 461 lpfc_read_config(phba, pmb); 462 pmb->vport = vport; 463 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 465 "0453 Adapter failed to init, mbxCmd x%x " 466 "READ_CONFIG, mbxStatus x%x\n", 467 mb->mbxCommand, mb->mbxStatus); 468 phba->link_state = LPFC_HBA_ERROR; 469 mempool_free( pmb, phba->mbox_mem_pool); 470 return -EIO; 471 } 472 473 /* Check if the port is disabled */ 474 lpfc_sli_read_link_ste(phba); 475 476 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 477 i = (mb->un.varRdConfig.max_xri + 1); 478 if (phba->cfg_hba_queue_depth > i) { 479 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 480 "3359 HBA queue depth changed from %d to %d\n", 481 phba->cfg_hba_queue_depth, i); 482 phba->cfg_hba_queue_depth = i; 483 } 484 485 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 486 i = (mb->un.varRdConfig.max_xri >> 3); 487 if (phba->pport->cfg_lun_queue_depth > i) { 488 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 489 "3360 LUN queue depth changed from %d to %d\n", 490 phba->pport->cfg_lun_queue_depth, i); 491 phba->pport->cfg_lun_queue_depth = i; 492 } 493 494 phba->lmt = mb->un.varRdConfig.lmt; 495 496 /* Get the default values for Model Name and Description */ 497 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 498 499 phba->link_state = LPFC_LINK_DOWN; 500 501 /* Only process IOCBs on ELS ring till hba_state is READY */ 502 if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr) 503 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 504 if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr) 505 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 506 if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr) 507 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 508 509 /* Post receive buffers for desired rings */ 510 if (phba->sli_rev != 3) 511 lpfc_post_rcv_buf(phba); 512 513 /* 514 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 515 */ 516 if (phba->intr_type == MSIX) { 517 rc = lpfc_config_msi(phba, pmb); 518 if (rc) { 519 mempool_free(pmb, phba->mbox_mem_pool); 520 return -EIO; 521 } 522 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 523 if (rc != MBX_SUCCESS) { 524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 525 "0352 Config MSI mailbox command " 526 "failed, mbxCmd x%x, mbxStatus x%x\n", 527 pmb->u.mb.mbxCommand, 528 pmb->u.mb.mbxStatus); 529 mempool_free(pmb, phba->mbox_mem_pool); 530 return -EIO; 531 } 532 } 533 534 spin_lock_irq(&phba->hbalock); 535 /* Initialize ERATT handling flag */ 536 phba->hba_flag &= ~HBA_ERATT_HANDLED; 537 538 /* Enable appropriate host interrupts */ 539 if (lpfc_readl(phba->HCregaddr, &status)) { 540 spin_unlock_irq(&phba->hbalock); 541 return -EIO; 542 } 543 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 544 if (psli->num_rings > 0) 545 status |= HC_R0INT_ENA; 546 if (psli->num_rings > 1) 547 status |= HC_R1INT_ENA; 548 if (psli->num_rings > 2) 549 status |= HC_R2INT_ENA; 550 if (psli->num_rings > 3) 551 status |= HC_R3INT_ENA; 552 553 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 554 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 555 status &= ~(HC_R0INT_ENA); 556 557 writel(status, phba->HCregaddr); 558 readl(phba->HCregaddr); /* flush */ 559 spin_unlock_irq(&phba->hbalock); 560 561 /* Set up ring-0 (ELS) timer */ 562 timeout = phba->fc_ratov * 2; 563 mod_timer(&vport->els_tmofunc, 564 jiffies + msecs_to_jiffies(1000 * timeout)); 565 /* Set up heart beat (HB) timer */ 566 mod_timer(&phba->hb_tmofunc, 567 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 568 phba->hb_outstanding = 0; 569 phba->last_completion_time = jiffies; 570 /* Set up error attention (ERATT) polling timer */ 571 mod_timer(&phba->eratt_poll, 572 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 573 574 if (phba->hba_flag & LINK_DISABLED) { 575 lpfc_printf_log(phba, 576 KERN_ERR, LOG_INIT, 577 "2598 Adapter Link is disabled.\n"); 578 lpfc_down_link(phba, pmb); 579 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 580 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 581 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 582 lpfc_printf_log(phba, 583 KERN_ERR, LOG_INIT, 584 "2599 Adapter failed to issue DOWN_LINK" 585 " mbox command rc 0x%x\n", rc); 586 587 mempool_free(pmb, phba->mbox_mem_pool); 588 return -EIO; 589 } 590 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 591 mempool_free(pmb, phba->mbox_mem_pool); 592 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 593 if (rc) 594 return rc; 595 } 596 /* MBOX buffer will be freed in mbox compl */ 597 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 598 if (!pmb) { 599 phba->link_state = LPFC_HBA_ERROR; 600 return -ENOMEM; 601 } 602 603 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 604 pmb->mbox_cmpl = lpfc_config_async_cmpl; 605 pmb->vport = phba->pport; 606 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 607 608 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 609 lpfc_printf_log(phba, 610 KERN_ERR, 611 LOG_INIT, 612 "0456 Adapter failed to issue " 613 "ASYNCEVT_ENABLE mbox status x%x\n", 614 rc); 615 mempool_free(pmb, phba->mbox_mem_pool); 616 } 617 618 /* Get Option rom version */ 619 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 620 if (!pmb) { 621 phba->link_state = LPFC_HBA_ERROR; 622 return -ENOMEM; 623 } 624 625 lpfc_dump_wakeup_param(phba, pmb); 626 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 627 pmb->vport = phba->pport; 628 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 629 630 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 632 "to get Option ROM version status x%x\n", rc); 633 mempool_free(pmb, phba->mbox_mem_pool); 634 } 635 636 return 0; 637 } 638 639 /** 640 * lpfc_hba_init_link - Initialize the FC link 641 * @phba: pointer to lpfc hba data structure. 642 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 643 * 644 * This routine will issue the INIT_LINK mailbox command call. 645 * It is available to other drivers through the lpfc_hba data 646 * structure for use as a delayed link up mechanism with the 647 * module parameter lpfc_suppress_link_up. 648 * 649 * Return code 650 * 0 - success 651 * Any other value - error 652 **/ 653 static int 654 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 655 { 656 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 657 } 658 659 /** 660 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 661 * @phba: pointer to lpfc hba data structure. 662 * @fc_topology: desired fc topology. 663 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 664 * 665 * This routine will issue the INIT_LINK mailbox command call. 666 * It is available to other drivers through the lpfc_hba data 667 * structure for use as a delayed link up mechanism with the 668 * module parameter lpfc_suppress_link_up. 669 * 670 * Return code 671 * 0 - success 672 * Any other value - error 673 **/ 674 int 675 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 676 uint32_t flag) 677 { 678 struct lpfc_vport *vport = phba->pport; 679 LPFC_MBOXQ_t *pmb; 680 MAILBOX_t *mb; 681 int rc; 682 683 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 684 if (!pmb) { 685 phba->link_state = LPFC_HBA_ERROR; 686 return -ENOMEM; 687 } 688 mb = &pmb->u.mb; 689 pmb->vport = vport; 690 691 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 692 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 693 !(phba->lmt & LMT_1Gb)) || 694 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 695 !(phba->lmt & LMT_2Gb)) || 696 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 697 !(phba->lmt & LMT_4Gb)) || 698 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 699 !(phba->lmt & LMT_8Gb)) || 700 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 701 !(phba->lmt & LMT_10Gb)) || 702 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 703 !(phba->lmt & LMT_16Gb)) || 704 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 705 !(phba->lmt & LMT_32Gb))) { 706 /* Reset link speed to auto */ 707 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 708 "1302 Invalid speed for this board:%d " 709 "Reset link speed to auto.\n", 710 phba->cfg_link_speed); 711 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 712 } 713 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 714 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 715 if (phba->sli_rev < LPFC_SLI_REV4) 716 lpfc_set_loopback_flag(phba); 717 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 718 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 720 "0498 Adapter failed to init, mbxCmd x%x " 721 "INIT_LINK, mbxStatus x%x\n", 722 mb->mbxCommand, mb->mbxStatus); 723 if (phba->sli_rev <= LPFC_SLI_REV3) { 724 /* Clear all interrupt enable conditions */ 725 writel(0, phba->HCregaddr); 726 readl(phba->HCregaddr); /* flush */ 727 /* Clear all pending interrupts */ 728 writel(0xffffffff, phba->HAregaddr); 729 readl(phba->HAregaddr); /* flush */ 730 } 731 phba->link_state = LPFC_HBA_ERROR; 732 if (rc != MBX_BUSY || flag == MBX_POLL) 733 mempool_free(pmb, phba->mbox_mem_pool); 734 return -EIO; 735 } 736 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 737 if (flag == MBX_POLL) 738 mempool_free(pmb, phba->mbox_mem_pool); 739 740 return 0; 741 } 742 743 /** 744 * lpfc_hba_down_link - this routine downs the FC link 745 * @phba: pointer to lpfc hba data structure. 746 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 747 * 748 * This routine will issue the DOWN_LINK mailbox command call. 749 * It is available to other drivers through the lpfc_hba data 750 * structure for use to stop the link. 751 * 752 * Return code 753 * 0 - success 754 * Any other value - error 755 **/ 756 static int 757 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 758 { 759 LPFC_MBOXQ_t *pmb; 760 int rc; 761 762 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 763 if (!pmb) { 764 phba->link_state = LPFC_HBA_ERROR; 765 return -ENOMEM; 766 } 767 768 lpfc_printf_log(phba, 769 KERN_ERR, LOG_INIT, 770 "0491 Adapter Link is disabled.\n"); 771 lpfc_down_link(phba, pmb); 772 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 773 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 774 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 775 lpfc_printf_log(phba, 776 KERN_ERR, LOG_INIT, 777 "2522 Adapter failed to issue DOWN_LINK" 778 " mbox command rc 0x%x\n", rc); 779 780 mempool_free(pmb, phba->mbox_mem_pool); 781 return -EIO; 782 } 783 if (flag == MBX_POLL) 784 mempool_free(pmb, phba->mbox_mem_pool); 785 786 return 0; 787 } 788 789 /** 790 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 791 * @phba: pointer to lpfc HBA data structure. 792 * 793 * This routine will do LPFC uninitialization before the HBA is reset when 794 * bringing down the SLI Layer. 795 * 796 * Return codes 797 * 0 - success. 798 * Any other value - error. 799 **/ 800 int 801 lpfc_hba_down_prep(struct lpfc_hba *phba) 802 { 803 struct lpfc_vport **vports; 804 int i; 805 806 if (phba->sli_rev <= LPFC_SLI_REV3) { 807 /* Disable interrupts */ 808 writel(0, phba->HCregaddr); 809 readl(phba->HCregaddr); /* flush */ 810 } 811 812 if (phba->pport->load_flag & FC_UNLOADING) 813 lpfc_cleanup_discovery_resources(phba->pport); 814 else { 815 vports = lpfc_create_vport_work_array(phba); 816 if (vports != NULL) 817 for (i = 0; i <= phba->max_vports && 818 vports[i] != NULL; i++) 819 lpfc_cleanup_discovery_resources(vports[i]); 820 lpfc_destroy_vport_work_array(phba, vports); 821 } 822 return 0; 823 } 824 825 /** 826 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 827 * rspiocb which got deferred 828 * 829 * @phba: pointer to lpfc HBA data structure. 830 * 831 * This routine will cleanup completed slow path events after HBA is reset 832 * when bringing down the SLI Layer. 833 * 834 * 835 * Return codes 836 * void. 837 **/ 838 static void 839 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 840 { 841 struct lpfc_iocbq *rspiocbq; 842 struct hbq_dmabuf *dmabuf; 843 struct lpfc_cq_event *cq_event; 844 845 spin_lock_irq(&phba->hbalock); 846 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 847 spin_unlock_irq(&phba->hbalock); 848 849 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 850 /* Get the response iocb from the head of work queue */ 851 spin_lock_irq(&phba->hbalock); 852 list_remove_head(&phba->sli4_hba.sp_queue_event, 853 cq_event, struct lpfc_cq_event, list); 854 spin_unlock_irq(&phba->hbalock); 855 856 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 857 case CQE_CODE_COMPL_WQE: 858 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 859 cq_event); 860 lpfc_sli_release_iocbq(phba, rspiocbq); 861 break; 862 case CQE_CODE_RECEIVE: 863 case CQE_CODE_RECEIVE_V1: 864 dmabuf = container_of(cq_event, struct hbq_dmabuf, 865 cq_event); 866 lpfc_in_buf_free(phba, &dmabuf->dbuf); 867 } 868 } 869 } 870 871 /** 872 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 873 * @phba: pointer to lpfc HBA data structure. 874 * 875 * This routine will cleanup posted ELS buffers after the HBA is reset 876 * when bringing down the SLI Layer. 877 * 878 * 879 * Return codes 880 * void. 881 **/ 882 static void 883 lpfc_hba_free_post_buf(struct lpfc_hba *phba) 884 { 885 struct lpfc_sli *psli = &phba->sli; 886 struct lpfc_sli_ring *pring; 887 struct lpfc_dmabuf *mp, *next_mp; 888 LIST_HEAD(buflist); 889 int count; 890 891 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 892 lpfc_sli_hbqbuf_free_all(phba); 893 else { 894 /* Cleanup preposted buffers on the ELS ring */ 895 pring = &psli->ring[LPFC_ELS_RING]; 896 spin_lock_irq(&phba->hbalock); 897 list_splice_init(&pring->postbufq, &buflist); 898 spin_unlock_irq(&phba->hbalock); 899 900 count = 0; 901 list_for_each_entry_safe(mp, next_mp, &buflist, list) { 902 list_del(&mp->list); 903 count++; 904 lpfc_mbuf_free(phba, mp->virt, mp->phys); 905 kfree(mp); 906 } 907 908 spin_lock_irq(&phba->hbalock); 909 pring->postbufq_cnt -= count; 910 spin_unlock_irq(&phba->hbalock); 911 } 912 } 913 914 /** 915 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset 916 * @phba: pointer to lpfc HBA data structure. 917 * 918 * This routine will cleanup the txcmplq after the HBA is reset when bringing 919 * down the SLI Layer. 920 * 921 * Return codes 922 * void 923 **/ 924 static void 925 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) 926 { 927 struct lpfc_sli *psli = &phba->sli; 928 struct lpfc_sli_ring *pring; 929 LIST_HEAD(completions); 930 int i; 931 932 for (i = 0; i < psli->num_rings; i++) { 933 pring = &psli->ring[i]; 934 if (phba->sli_rev >= LPFC_SLI_REV4) 935 spin_lock_irq(&pring->ring_lock); 936 else 937 spin_lock_irq(&phba->hbalock); 938 /* At this point in time the HBA is either reset or DOA. Either 939 * way, nothing should be on txcmplq as it will NEVER complete. 940 */ 941 list_splice_init(&pring->txcmplq, &completions); 942 pring->txcmplq_cnt = 0; 943 944 if (phba->sli_rev >= LPFC_SLI_REV4) 945 spin_unlock_irq(&pring->ring_lock); 946 else 947 spin_unlock_irq(&phba->hbalock); 948 949 /* Cancel all the IOCBs from the completions list */ 950 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 951 IOERR_SLI_ABORTED); 952 lpfc_sli_abort_iocb_ring(phba, pring); 953 } 954 } 955 956 /** 957 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 958 int i; 959 * @phba: pointer to lpfc HBA data structure. 960 * 961 * This routine will do uninitialization after the HBA is reset when bring 962 * down the SLI Layer. 963 * 964 * Return codes 965 * 0 - success. 966 * Any other value - error. 967 **/ 968 static int 969 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 970 { 971 lpfc_hba_free_post_buf(phba); 972 lpfc_hba_clean_txcmplq(phba); 973 return 0; 974 } 975 976 /** 977 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 978 * @phba: pointer to lpfc HBA data structure. 979 * 980 * This routine will do uninitialization after the HBA is reset when bring 981 * down the SLI Layer. 982 * 983 * Return codes 984 * 0 - success. 985 * Any other value - error. 986 **/ 987 static int 988 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 989 { 990 struct lpfc_scsi_buf *psb, *psb_next; 991 LIST_HEAD(aborts); 992 unsigned long iflag = 0; 993 struct lpfc_sglq *sglq_entry = NULL; 994 struct lpfc_sli *psli = &phba->sli; 995 struct lpfc_sli_ring *pring; 996 997 lpfc_hba_free_post_buf(phba); 998 lpfc_hba_clean_txcmplq(phba); 999 pring = &psli->ring[LPFC_ELS_RING]; 1000 1001 /* At this point in time the HBA is either reset or DOA. Either 1002 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1003 * on the lpfc_sgl_list so that it can either be freed if the 1004 * driver is unloading or reposted if the driver is restarting 1005 * the port. 1006 */ 1007 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ 1008 /* scsl_buf_list */ 1009 /* abts_sgl_list_lock required because worker thread uses this 1010 * list. 1011 */ 1012 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 1013 list_for_each_entry(sglq_entry, 1014 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1015 sglq_entry->state = SGL_FREED; 1016 1017 spin_lock(&pring->ring_lock); 1018 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1019 &phba->sli4_hba.lpfc_sgl_list); 1020 spin_unlock(&pring->ring_lock); 1021 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 1022 /* abts_scsi_buf_list_lock required because worker thread uses this 1023 * list. 1024 */ 1025 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 1026 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 1027 &aborts); 1028 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 1029 spin_unlock_irq(&phba->hbalock); 1030 1031 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1032 psb->pCmd = NULL; 1033 psb->status = IOSTAT_SUCCESS; 1034 } 1035 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1036 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); 1037 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1038 1039 lpfc_sli4_free_sp_events(phba); 1040 return 0; 1041 } 1042 1043 /** 1044 * lpfc_hba_down_post - Wrapper func for hba down post routine 1045 * @phba: pointer to lpfc HBA data structure. 1046 * 1047 * This routine wraps the actual SLI3 or SLI4 routine for performing 1048 * uninitialization after the HBA is reset when bring down the SLI Layer. 1049 * 1050 * Return codes 1051 * 0 - success. 1052 * Any other value - error. 1053 **/ 1054 int 1055 lpfc_hba_down_post(struct lpfc_hba *phba) 1056 { 1057 return (*phba->lpfc_hba_down_post)(phba); 1058 } 1059 1060 /** 1061 * lpfc_hb_timeout - The HBA-timer timeout handler 1062 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1063 * 1064 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1065 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1066 * work-port-events bitmap and the worker thread is notified. This timeout 1067 * event will be used by the worker thread to invoke the actual timeout 1068 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1069 * be performed in the timeout handler and the HBA timeout event bit shall 1070 * be cleared by the worker thread after it has taken the event bitmap out. 1071 **/ 1072 static void 1073 lpfc_hb_timeout(unsigned long ptr) 1074 { 1075 struct lpfc_hba *phba; 1076 uint32_t tmo_posted; 1077 unsigned long iflag; 1078 1079 phba = (struct lpfc_hba *)ptr; 1080 1081 /* Check for heart beat timeout conditions */ 1082 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1083 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1084 if (!tmo_posted) 1085 phba->pport->work_port_events |= WORKER_HB_TMO; 1086 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1087 1088 /* Tell the worker thread there is work to do */ 1089 if (!tmo_posted) 1090 lpfc_worker_wake_up(phba); 1091 return; 1092 } 1093 1094 /** 1095 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1096 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1097 * 1098 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1099 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1100 * work-port-events bitmap and the worker thread is notified. This timeout 1101 * event will be used by the worker thread to invoke the actual timeout 1102 * handler routine, lpfc_rrq_handler. Any periodical operations will 1103 * be performed in the timeout handler and the RRQ timeout event bit shall 1104 * be cleared by the worker thread after it has taken the event bitmap out. 1105 **/ 1106 static void 1107 lpfc_rrq_timeout(unsigned long ptr) 1108 { 1109 struct lpfc_hba *phba; 1110 unsigned long iflag; 1111 1112 phba = (struct lpfc_hba *)ptr; 1113 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1114 if (!(phba->pport->load_flag & FC_UNLOADING)) 1115 phba->hba_flag |= HBA_RRQ_ACTIVE; 1116 else 1117 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1118 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1119 1120 if (!(phba->pport->load_flag & FC_UNLOADING)) 1121 lpfc_worker_wake_up(phba); 1122 } 1123 1124 /** 1125 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1126 * @phba: pointer to lpfc hba data structure. 1127 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1128 * 1129 * This is the callback function to the lpfc heart-beat mailbox command. 1130 * If configured, the lpfc driver issues the heart-beat mailbox command to 1131 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1132 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1133 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1134 * heart-beat outstanding state. Once the mailbox command comes back and 1135 * no error conditions detected, the heart-beat mailbox command timer is 1136 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1137 * state is cleared for the next heart-beat. If the timer expired with the 1138 * heart-beat outstanding state set, the driver will put the HBA offline. 1139 **/ 1140 static void 1141 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1142 { 1143 unsigned long drvr_flag; 1144 1145 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1146 phba->hb_outstanding = 0; 1147 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1148 1149 /* Check and reset heart-beat timer is necessary */ 1150 mempool_free(pmboxq, phba->mbox_mem_pool); 1151 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1152 !(phba->link_state == LPFC_HBA_ERROR) && 1153 !(phba->pport->load_flag & FC_UNLOADING)) 1154 mod_timer(&phba->hb_tmofunc, 1155 jiffies + 1156 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1157 return; 1158 } 1159 1160 /** 1161 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1162 * @phba: pointer to lpfc hba data structure. 1163 * 1164 * This is the actual HBA-timer timeout handler to be invoked by the worker 1165 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1166 * handler performs any periodic operations needed for the device. If such 1167 * periodic event has already been attended to either in the interrupt handler 1168 * or by processing slow-ring or fast-ring events within the HBA-timer 1169 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1170 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1171 * is configured and there is no heart-beat mailbox command outstanding, a 1172 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1173 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1174 * to offline. 1175 **/ 1176 void 1177 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1178 { 1179 struct lpfc_vport **vports; 1180 LPFC_MBOXQ_t *pmboxq; 1181 struct lpfc_dmabuf *buf_ptr; 1182 int retval, i; 1183 struct lpfc_sli *psli = &phba->sli; 1184 LIST_HEAD(completions); 1185 1186 vports = lpfc_create_vport_work_array(phba); 1187 if (vports != NULL) 1188 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1189 lpfc_rcv_seq_check_edtov(vports[i]); 1190 lpfc_fdmi_num_disc_check(vports[i]); 1191 } 1192 lpfc_destroy_vport_work_array(phba, vports); 1193 1194 if ((phba->link_state == LPFC_HBA_ERROR) || 1195 (phba->pport->load_flag & FC_UNLOADING) || 1196 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1197 return; 1198 1199 spin_lock_irq(&phba->pport->work_port_lock); 1200 1201 if (time_after(phba->last_completion_time + 1202 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1203 jiffies)) { 1204 spin_unlock_irq(&phba->pport->work_port_lock); 1205 if (!phba->hb_outstanding) 1206 mod_timer(&phba->hb_tmofunc, 1207 jiffies + 1208 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1209 else 1210 mod_timer(&phba->hb_tmofunc, 1211 jiffies + 1212 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1213 return; 1214 } 1215 spin_unlock_irq(&phba->pport->work_port_lock); 1216 1217 if (phba->elsbuf_cnt && 1218 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1219 spin_lock_irq(&phba->hbalock); 1220 list_splice_init(&phba->elsbuf, &completions); 1221 phba->elsbuf_cnt = 0; 1222 phba->elsbuf_prev_cnt = 0; 1223 spin_unlock_irq(&phba->hbalock); 1224 1225 while (!list_empty(&completions)) { 1226 list_remove_head(&completions, buf_ptr, 1227 struct lpfc_dmabuf, list); 1228 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1229 kfree(buf_ptr); 1230 } 1231 } 1232 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1233 1234 /* If there is no heart beat outstanding, issue a heartbeat command */ 1235 if (phba->cfg_enable_hba_heartbeat) { 1236 if (!phba->hb_outstanding) { 1237 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1238 (list_empty(&psli->mboxq))) { 1239 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1240 GFP_KERNEL); 1241 if (!pmboxq) { 1242 mod_timer(&phba->hb_tmofunc, 1243 jiffies + 1244 msecs_to_jiffies(1000 * 1245 LPFC_HB_MBOX_INTERVAL)); 1246 return; 1247 } 1248 1249 lpfc_heart_beat(phba, pmboxq); 1250 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1251 pmboxq->vport = phba->pport; 1252 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1253 MBX_NOWAIT); 1254 1255 if (retval != MBX_BUSY && 1256 retval != MBX_SUCCESS) { 1257 mempool_free(pmboxq, 1258 phba->mbox_mem_pool); 1259 mod_timer(&phba->hb_tmofunc, 1260 jiffies + 1261 msecs_to_jiffies(1000 * 1262 LPFC_HB_MBOX_INTERVAL)); 1263 return; 1264 } 1265 phba->skipped_hb = 0; 1266 phba->hb_outstanding = 1; 1267 } else if (time_before_eq(phba->last_completion_time, 1268 phba->skipped_hb)) { 1269 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1270 "2857 Last completion time not " 1271 " updated in %d ms\n", 1272 jiffies_to_msecs(jiffies 1273 - phba->last_completion_time)); 1274 } else 1275 phba->skipped_hb = jiffies; 1276 1277 mod_timer(&phba->hb_tmofunc, 1278 jiffies + 1279 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1280 return; 1281 } else { 1282 /* 1283 * If heart beat timeout called with hb_outstanding set 1284 * we need to give the hb mailbox cmd a chance to 1285 * complete or TMO. 1286 */ 1287 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1288 "0459 Adapter heartbeat still out" 1289 "standing:last compl time was %d ms.\n", 1290 jiffies_to_msecs(jiffies 1291 - phba->last_completion_time)); 1292 mod_timer(&phba->hb_tmofunc, 1293 jiffies + 1294 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1295 } 1296 } else { 1297 mod_timer(&phba->hb_tmofunc, 1298 jiffies + 1299 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1300 } 1301 } 1302 1303 /** 1304 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1305 * @phba: pointer to lpfc hba data structure. 1306 * 1307 * This routine is called to bring the HBA offline when HBA hardware error 1308 * other than Port Error 6 has been detected. 1309 **/ 1310 static void 1311 lpfc_offline_eratt(struct lpfc_hba *phba) 1312 { 1313 struct lpfc_sli *psli = &phba->sli; 1314 1315 spin_lock_irq(&phba->hbalock); 1316 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1317 spin_unlock_irq(&phba->hbalock); 1318 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1319 1320 lpfc_offline(phba); 1321 lpfc_reset_barrier(phba); 1322 spin_lock_irq(&phba->hbalock); 1323 lpfc_sli_brdreset(phba); 1324 spin_unlock_irq(&phba->hbalock); 1325 lpfc_hba_down_post(phba); 1326 lpfc_sli_brdready(phba, HS_MBRDY); 1327 lpfc_unblock_mgmt_io(phba); 1328 phba->link_state = LPFC_HBA_ERROR; 1329 return; 1330 } 1331 1332 /** 1333 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1334 * @phba: pointer to lpfc hba data structure. 1335 * 1336 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1337 * other than Port Error 6 has been detected. 1338 **/ 1339 void 1340 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1341 { 1342 spin_lock_irq(&phba->hbalock); 1343 phba->link_state = LPFC_HBA_ERROR; 1344 spin_unlock_irq(&phba->hbalock); 1345 1346 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1347 lpfc_offline(phba); 1348 lpfc_hba_down_post(phba); 1349 lpfc_unblock_mgmt_io(phba); 1350 } 1351 1352 /** 1353 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1354 * @phba: pointer to lpfc hba data structure. 1355 * 1356 * This routine is invoked to handle the deferred HBA hardware error 1357 * conditions. This type of error is indicated by HBA by setting ER1 1358 * and another ER bit in the host status register. The driver will 1359 * wait until the ER1 bit clears before handling the error condition. 1360 **/ 1361 static void 1362 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1363 { 1364 uint32_t old_host_status = phba->work_hs; 1365 struct lpfc_sli *psli = &phba->sli; 1366 1367 /* If the pci channel is offline, ignore possible errors, 1368 * since we cannot communicate with the pci card anyway. 1369 */ 1370 if (pci_channel_offline(phba->pcidev)) { 1371 spin_lock_irq(&phba->hbalock); 1372 phba->hba_flag &= ~DEFER_ERATT; 1373 spin_unlock_irq(&phba->hbalock); 1374 return; 1375 } 1376 1377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1378 "0479 Deferred Adapter Hardware Error " 1379 "Data: x%x x%x x%x\n", 1380 phba->work_hs, 1381 phba->work_status[0], phba->work_status[1]); 1382 1383 spin_lock_irq(&phba->hbalock); 1384 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1385 spin_unlock_irq(&phba->hbalock); 1386 1387 1388 /* 1389 * Firmware stops when it triggred erratt. That could cause the I/Os 1390 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1391 * SCSI layer retry it after re-establishing link. 1392 */ 1393 lpfc_sli_abort_fcp_rings(phba); 1394 1395 /* 1396 * There was a firmware error. Take the hba offline and then 1397 * attempt to restart it. 1398 */ 1399 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 1400 lpfc_offline(phba); 1401 1402 /* Wait for the ER1 bit to clear.*/ 1403 while (phba->work_hs & HS_FFER1) { 1404 msleep(100); 1405 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1406 phba->work_hs = UNPLUG_ERR ; 1407 break; 1408 } 1409 /* If driver is unloading let the worker thread continue */ 1410 if (phba->pport->load_flag & FC_UNLOADING) { 1411 phba->work_hs = 0; 1412 break; 1413 } 1414 } 1415 1416 /* 1417 * This is to ptrotect against a race condition in which 1418 * first write to the host attention register clear the 1419 * host status register. 1420 */ 1421 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1422 phba->work_hs = old_host_status & ~HS_FFER1; 1423 1424 spin_lock_irq(&phba->hbalock); 1425 phba->hba_flag &= ~DEFER_ERATT; 1426 spin_unlock_irq(&phba->hbalock); 1427 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1428 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1429 } 1430 1431 static void 1432 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1433 { 1434 struct lpfc_board_event_header board_event; 1435 struct Scsi_Host *shost; 1436 1437 board_event.event_type = FC_REG_BOARD_EVENT; 1438 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1439 shost = lpfc_shost_from_vport(phba->pport); 1440 fc_host_post_vendor_event(shost, fc_get_event_number(), 1441 sizeof(board_event), 1442 (char *) &board_event, 1443 LPFC_NL_VENDOR_ID); 1444 } 1445 1446 /** 1447 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1448 * @phba: pointer to lpfc hba data structure. 1449 * 1450 * This routine is invoked to handle the following HBA hardware error 1451 * conditions: 1452 * 1 - HBA error attention interrupt 1453 * 2 - DMA ring index out of range 1454 * 3 - Mailbox command came back as unknown 1455 **/ 1456 static void 1457 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1458 { 1459 struct lpfc_vport *vport = phba->pport; 1460 struct lpfc_sli *psli = &phba->sli; 1461 uint32_t event_data; 1462 unsigned long temperature; 1463 struct temp_event temp_event_data; 1464 struct Scsi_Host *shost; 1465 1466 /* If the pci channel is offline, ignore possible errors, 1467 * since we cannot communicate with the pci card anyway. 1468 */ 1469 if (pci_channel_offline(phba->pcidev)) { 1470 spin_lock_irq(&phba->hbalock); 1471 phba->hba_flag &= ~DEFER_ERATT; 1472 spin_unlock_irq(&phba->hbalock); 1473 return; 1474 } 1475 1476 /* If resets are disabled then leave the HBA alone and return */ 1477 if (!phba->cfg_enable_hba_reset) 1478 return; 1479 1480 /* Send an internal error event to mgmt application */ 1481 lpfc_board_errevt_to_mgmt(phba); 1482 1483 if (phba->hba_flag & DEFER_ERATT) 1484 lpfc_handle_deferred_eratt(phba); 1485 1486 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1487 if (phba->work_hs & HS_FFER6) 1488 /* Re-establishing Link */ 1489 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1490 "1301 Re-establishing Link " 1491 "Data: x%x x%x x%x\n", 1492 phba->work_hs, phba->work_status[0], 1493 phba->work_status[1]); 1494 if (phba->work_hs & HS_FFER8) 1495 /* Device Zeroization */ 1496 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1497 "2861 Host Authentication device " 1498 "zeroization Data:x%x x%x x%x\n", 1499 phba->work_hs, phba->work_status[0], 1500 phba->work_status[1]); 1501 1502 spin_lock_irq(&phba->hbalock); 1503 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1504 spin_unlock_irq(&phba->hbalock); 1505 1506 /* 1507 * Firmware stops when it triggled erratt with HS_FFER6. 1508 * That could cause the I/Os dropped by the firmware. 1509 * Error iocb (I/O) on txcmplq and let the SCSI layer 1510 * retry it after re-establishing link. 1511 */ 1512 lpfc_sli_abort_fcp_rings(phba); 1513 1514 /* 1515 * There was a firmware error. Take the hba offline and then 1516 * attempt to restart it. 1517 */ 1518 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1519 lpfc_offline(phba); 1520 lpfc_sli_brdrestart(phba); 1521 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1522 lpfc_unblock_mgmt_io(phba); 1523 return; 1524 } 1525 lpfc_unblock_mgmt_io(phba); 1526 } else if (phba->work_hs & HS_CRIT_TEMP) { 1527 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1528 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1529 temp_event_data.event_code = LPFC_CRIT_TEMP; 1530 temp_event_data.data = (uint32_t)temperature; 1531 1532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1533 "0406 Adapter maximum temperature exceeded " 1534 "(%ld), taking this port offline " 1535 "Data: x%x x%x x%x\n", 1536 temperature, phba->work_hs, 1537 phba->work_status[0], phba->work_status[1]); 1538 1539 shost = lpfc_shost_from_vport(phba->pport); 1540 fc_host_post_vendor_event(shost, fc_get_event_number(), 1541 sizeof(temp_event_data), 1542 (char *) &temp_event_data, 1543 SCSI_NL_VID_TYPE_PCI 1544 | PCI_VENDOR_ID_EMULEX); 1545 1546 spin_lock_irq(&phba->hbalock); 1547 phba->over_temp_state = HBA_OVER_TEMP; 1548 spin_unlock_irq(&phba->hbalock); 1549 lpfc_offline_eratt(phba); 1550 1551 } else { 1552 /* The if clause above forces this code path when the status 1553 * failure is a value other than FFER6. Do not call the offline 1554 * twice. This is the adapter hardware error path. 1555 */ 1556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1557 "0457 Adapter Hardware Error " 1558 "Data: x%x x%x x%x\n", 1559 phba->work_hs, 1560 phba->work_status[0], phba->work_status[1]); 1561 1562 event_data = FC_REG_DUMP_EVENT; 1563 shost = lpfc_shost_from_vport(vport); 1564 fc_host_post_vendor_event(shost, fc_get_event_number(), 1565 sizeof(event_data), (char *) &event_data, 1566 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1567 1568 lpfc_offline_eratt(phba); 1569 } 1570 return; 1571 } 1572 1573 /** 1574 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1575 * @phba: pointer to lpfc hba data structure. 1576 * @mbx_action: flag for mailbox shutdown action. 1577 * 1578 * This routine is invoked to perform an SLI4 port PCI function reset in 1579 * response to port status register polling attention. It waits for port 1580 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1581 * During this process, interrupt vectors are freed and later requested 1582 * for handling possible port resource change. 1583 **/ 1584 static int 1585 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1586 bool en_rn_msg) 1587 { 1588 int rc; 1589 uint32_t intr_mode; 1590 1591 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1592 LPFC_SLI_INTF_IF_TYPE_2) { 1593 /* 1594 * On error status condition, driver need to wait for port 1595 * ready before performing reset. 1596 */ 1597 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1598 if (rc) 1599 return rc; 1600 } 1601 1602 /* need reset: attempt for port recovery */ 1603 if (en_rn_msg) 1604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1605 "2887 Reset Needed: Attempting Port " 1606 "Recovery...\n"); 1607 lpfc_offline_prep(phba, mbx_action); 1608 lpfc_offline(phba); 1609 /* release interrupt for possible resource change */ 1610 lpfc_sli4_disable_intr(phba); 1611 lpfc_sli_brdrestart(phba); 1612 /* request and enable interrupt */ 1613 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1614 if (intr_mode == LPFC_INTR_ERROR) { 1615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1616 "3175 Failed to enable interrupt\n"); 1617 return -EIO; 1618 } 1619 phba->intr_mode = intr_mode; 1620 rc = lpfc_online(phba); 1621 if (rc == 0) 1622 lpfc_unblock_mgmt_io(phba); 1623 1624 return rc; 1625 } 1626 1627 /** 1628 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1629 * @phba: pointer to lpfc hba data structure. 1630 * 1631 * This routine is invoked to handle the SLI4 HBA hardware error attention 1632 * conditions. 1633 **/ 1634 static void 1635 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1636 { 1637 struct lpfc_vport *vport = phba->pport; 1638 uint32_t event_data; 1639 struct Scsi_Host *shost; 1640 uint32_t if_type; 1641 struct lpfc_register portstat_reg = {0}; 1642 uint32_t reg_err1, reg_err2; 1643 uint32_t uerrlo_reg, uemasklo_reg; 1644 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1645 bool en_rn_msg = true; 1646 struct temp_event temp_event_data; 1647 struct lpfc_register portsmphr_reg; 1648 int rc, i; 1649 1650 /* If the pci channel is offline, ignore possible errors, since 1651 * we cannot communicate with the pci card anyway. 1652 */ 1653 if (pci_channel_offline(phba->pcidev)) 1654 return; 1655 1656 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1657 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1658 switch (if_type) { 1659 case LPFC_SLI_INTF_IF_TYPE_0: 1660 pci_rd_rc1 = lpfc_readl( 1661 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1662 &uerrlo_reg); 1663 pci_rd_rc2 = lpfc_readl( 1664 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1665 &uemasklo_reg); 1666 /* consider PCI bus read error as pci_channel_offline */ 1667 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1668 return; 1669 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1670 lpfc_sli4_offline_eratt(phba); 1671 return; 1672 } 1673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1674 "7623 Checking UE recoverable"); 1675 1676 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1677 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1678 &portsmphr_reg.word0)) 1679 continue; 1680 1681 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1682 &portsmphr_reg); 1683 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1684 LPFC_PORT_SEM_UE_RECOVERABLE) 1685 break; 1686 /*Sleep for 1Sec, before checking SEMAPHORE */ 1687 msleep(1000); 1688 } 1689 1690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1691 "4827 smphr_port_status x%x : Waited %dSec", 1692 smphr_port_status, i); 1693 1694 /* Recoverable UE, reset the HBA device */ 1695 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1696 LPFC_PORT_SEM_UE_RECOVERABLE) { 1697 for (i = 0; i < 20; i++) { 1698 msleep(1000); 1699 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1700 &portsmphr_reg.word0) && 1701 (LPFC_POST_STAGE_PORT_READY == 1702 bf_get(lpfc_port_smphr_port_status, 1703 &portsmphr_reg))) { 1704 rc = lpfc_sli4_port_sta_fn_reset(phba, 1705 LPFC_MBX_NO_WAIT, en_rn_msg); 1706 if (rc == 0) 1707 return; 1708 lpfc_printf_log(phba, 1709 KERN_ERR, LOG_INIT, 1710 "4215 Failed to recover UE"); 1711 break; 1712 } 1713 } 1714 } 1715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1716 "7624 Firmware not ready: Failing UE recovery," 1717 " waited %dSec", i); 1718 lpfc_sli4_offline_eratt(phba); 1719 break; 1720 1721 case LPFC_SLI_INTF_IF_TYPE_2: 1722 pci_rd_rc1 = lpfc_readl( 1723 phba->sli4_hba.u.if_type2.STATUSregaddr, 1724 &portstat_reg.word0); 1725 /* consider PCI bus read error as pci_channel_offline */ 1726 if (pci_rd_rc1 == -EIO) { 1727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1728 "3151 PCI bus read access failure: x%x\n", 1729 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1730 return; 1731 } 1732 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1733 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1734 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1736 "2889 Port Overtemperature event, " 1737 "taking port offline Data: x%x x%x\n", 1738 reg_err1, reg_err2); 1739 1740 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1741 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1742 temp_event_data.event_code = LPFC_CRIT_TEMP; 1743 temp_event_data.data = 0xFFFFFFFF; 1744 1745 shost = lpfc_shost_from_vport(phba->pport); 1746 fc_host_post_vendor_event(shost, fc_get_event_number(), 1747 sizeof(temp_event_data), 1748 (char *)&temp_event_data, 1749 SCSI_NL_VID_TYPE_PCI 1750 | PCI_VENDOR_ID_EMULEX); 1751 1752 spin_lock_irq(&phba->hbalock); 1753 phba->over_temp_state = HBA_OVER_TEMP; 1754 spin_unlock_irq(&phba->hbalock); 1755 lpfc_sli4_offline_eratt(phba); 1756 return; 1757 } 1758 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1759 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { 1760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1761 "3143 Port Down: Firmware Update " 1762 "Detected\n"); 1763 en_rn_msg = false; 1764 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1765 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1767 "3144 Port Down: Debug Dump\n"); 1768 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1769 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1771 "3145 Port Down: Provisioning\n"); 1772 1773 /* If resets are disabled then leave the HBA alone and return */ 1774 if (!phba->cfg_enable_hba_reset) 1775 return; 1776 1777 /* Check port status register for function reset */ 1778 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1779 en_rn_msg); 1780 if (rc == 0) { 1781 /* don't report event on forced debug dump */ 1782 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1783 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1784 return; 1785 else 1786 break; 1787 } 1788 /* fall through for not able to recover */ 1789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1790 "3152 Unrecoverable error, bring the port " 1791 "offline\n"); 1792 lpfc_sli4_offline_eratt(phba); 1793 break; 1794 case LPFC_SLI_INTF_IF_TYPE_1: 1795 default: 1796 break; 1797 } 1798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1799 "3123 Report dump event to upper layer\n"); 1800 /* Send an internal error event to mgmt application */ 1801 lpfc_board_errevt_to_mgmt(phba); 1802 1803 event_data = FC_REG_DUMP_EVENT; 1804 shost = lpfc_shost_from_vport(vport); 1805 fc_host_post_vendor_event(shost, fc_get_event_number(), 1806 sizeof(event_data), (char *) &event_data, 1807 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1808 } 1809 1810 /** 1811 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1812 * @phba: pointer to lpfc HBA data structure. 1813 * 1814 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1815 * routine from the API jump table function pointer from the lpfc_hba struct. 1816 * 1817 * Return codes 1818 * 0 - success. 1819 * Any other value - error. 1820 **/ 1821 void 1822 lpfc_handle_eratt(struct lpfc_hba *phba) 1823 { 1824 (*phba->lpfc_handle_eratt)(phba); 1825 } 1826 1827 /** 1828 * lpfc_handle_latt - The HBA link event handler 1829 * @phba: pointer to lpfc hba data structure. 1830 * 1831 * This routine is invoked from the worker thread to handle a HBA host 1832 * attention link event. 1833 **/ 1834 void 1835 lpfc_handle_latt(struct lpfc_hba *phba) 1836 { 1837 struct lpfc_vport *vport = phba->pport; 1838 struct lpfc_sli *psli = &phba->sli; 1839 LPFC_MBOXQ_t *pmb; 1840 volatile uint32_t control; 1841 struct lpfc_dmabuf *mp; 1842 int rc = 0; 1843 1844 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1845 if (!pmb) { 1846 rc = 1; 1847 goto lpfc_handle_latt_err_exit; 1848 } 1849 1850 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1851 if (!mp) { 1852 rc = 2; 1853 goto lpfc_handle_latt_free_pmb; 1854 } 1855 1856 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1857 if (!mp->virt) { 1858 rc = 3; 1859 goto lpfc_handle_latt_free_mp; 1860 } 1861 1862 /* Cleanup any outstanding ELS commands */ 1863 lpfc_els_flush_all_cmd(phba); 1864 1865 psli->slistat.link_event++; 1866 lpfc_read_topology(phba, pmb, mp); 1867 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 1868 pmb->vport = vport; 1869 /* Block ELS IOCBs until we have processed this mbox command */ 1870 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1871 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1872 if (rc == MBX_NOT_FINISHED) { 1873 rc = 4; 1874 goto lpfc_handle_latt_free_mbuf; 1875 } 1876 1877 /* Clear Link Attention in HA REG */ 1878 spin_lock_irq(&phba->hbalock); 1879 writel(HA_LATT, phba->HAregaddr); 1880 readl(phba->HAregaddr); /* flush */ 1881 spin_unlock_irq(&phba->hbalock); 1882 1883 return; 1884 1885 lpfc_handle_latt_free_mbuf: 1886 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1887 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1888 lpfc_handle_latt_free_mp: 1889 kfree(mp); 1890 lpfc_handle_latt_free_pmb: 1891 mempool_free(pmb, phba->mbox_mem_pool); 1892 lpfc_handle_latt_err_exit: 1893 /* Enable Link attention interrupts */ 1894 spin_lock_irq(&phba->hbalock); 1895 psli->sli_flag |= LPFC_PROCESS_LA; 1896 control = readl(phba->HCregaddr); 1897 control |= HC_LAINT_ENA; 1898 writel(control, phba->HCregaddr); 1899 readl(phba->HCregaddr); /* flush */ 1900 1901 /* Clear Link Attention in HA REG */ 1902 writel(HA_LATT, phba->HAregaddr); 1903 readl(phba->HAregaddr); /* flush */ 1904 spin_unlock_irq(&phba->hbalock); 1905 lpfc_linkdown(phba); 1906 phba->link_state = LPFC_HBA_ERROR; 1907 1908 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1909 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1910 1911 return; 1912 } 1913 1914 /** 1915 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1916 * @phba: pointer to lpfc hba data structure. 1917 * @vpd: pointer to the vital product data. 1918 * @len: length of the vital product data in bytes. 1919 * 1920 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1921 * an array of characters. In this routine, the ModelName, ProgramType, and 1922 * ModelDesc, etc. fields of the phba data structure will be populated. 1923 * 1924 * Return codes 1925 * 0 - pointer to the VPD passed in is NULL 1926 * 1 - success 1927 **/ 1928 int 1929 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1930 { 1931 uint8_t lenlo, lenhi; 1932 int Length; 1933 int i, j; 1934 int finished = 0; 1935 int index = 0; 1936 1937 if (!vpd) 1938 return 0; 1939 1940 /* Vital Product */ 1941 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1942 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1943 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1944 (uint32_t) vpd[3]); 1945 while (!finished && (index < (len - 4))) { 1946 switch (vpd[index]) { 1947 case 0x82: 1948 case 0x91: 1949 index += 1; 1950 lenlo = vpd[index]; 1951 index += 1; 1952 lenhi = vpd[index]; 1953 index += 1; 1954 i = ((((unsigned short)lenhi) << 8) + lenlo); 1955 index += i; 1956 break; 1957 case 0x90: 1958 index += 1; 1959 lenlo = vpd[index]; 1960 index += 1; 1961 lenhi = vpd[index]; 1962 index += 1; 1963 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1964 if (Length > len - index) 1965 Length = len - index; 1966 while (Length > 0) { 1967 /* Look for Serial Number */ 1968 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1969 index += 2; 1970 i = vpd[index]; 1971 index += 1; 1972 j = 0; 1973 Length -= (3+i); 1974 while(i--) { 1975 phba->SerialNumber[j++] = vpd[index++]; 1976 if (j == 31) 1977 break; 1978 } 1979 phba->SerialNumber[j] = 0; 1980 continue; 1981 } 1982 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1983 phba->vpd_flag |= VPD_MODEL_DESC; 1984 index += 2; 1985 i = vpd[index]; 1986 index += 1; 1987 j = 0; 1988 Length -= (3+i); 1989 while(i--) { 1990 phba->ModelDesc[j++] = vpd[index++]; 1991 if (j == 255) 1992 break; 1993 } 1994 phba->ModelDesc[j] = 0; 1995 continue; 1996 } 1997 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1998 phba->vpd_flag |= VPD_MODEL_NAME; 1999 index += 2; 2000 i = vpd[index]; 2001 index += 1; 2002 j = 0; 2003 Length -= (3+i); 2004 while(i--) { 2005 phba->ModelName[j++] = vpd[index++]; 2006 if (j == 79) 2007 break; 2008 } 2009 phba->ModelName[j] = 0; 2010 continue; 2011 } 2012 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2013 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2014 index += 2; 2015 i = vpd[index]; 2016 index += 1; 2017 j = 0; 2018 Length -= (3+i); 2019 while(i--) { 2020 phba->ProgramType[j++] = vpd[index++]; 2021 if (j == 255) 2022 break; 2023 } 2024 phba->ProgramType[j] = 0; 2025 continue; 2026 } 2027 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2028 phba->vpd_flag |= VPD_PORT; 2029 index += 2; 2030 i = vpd[index]; 2031 index += 1; 2032 j = 0; 2033 Length -= (3+i); 2034 while(i--) { 2035 if ((phba->sli_rev == LPFC_SLI_REV4) && 2036 (phba->sli4_hba.pport_name_sta == 2037 LPFC_SLI4_PPNAME_GET)) { 2038 j++; 2039 index++; 2040 } else 2041 phba->Port[j++] = vpd[index++]; 2042 if (j == 19) 2043 break; 2044 } 2045 if ((phba->sli_rev != LPFC_SLI_REV4) || 2046 (phba->sli4_hba.pport_name_sta == 2047 LPFC_SLI4_PPNAME_NON)) 2048 phba->Port[j] = 0; 2049 continue; 2050 } 2051 else { 2052 index += 2; 2053 i = vpd[index]; 2054 index += 1; 2055 index += i; 2056 Length -= (3 + i); 2057 } 2058 } 2059 finished = 0; 2060 break; 2061 case 0x78: 2062 finished = 1; 2063 break; 2064 default: 2065 index ++; 2066 break; 2067 } 2068 } 2069 2070 return(1); 2071 } 2072 2073 /** 2074 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2075 * @phba: pointer to lpfc hba data structure. 2076 * @mdp: pointer to the data structure to hold the derived model name. 2077 * @descp: pointer to the data structure to hold the derived description. 2078 * 2079 * This routine retrieves HBA's description based on its registered PCI device 2080 * ID. The @descp passed into this function points to an array of 256 chars. It 2081 * shall be returned with the model name, maximum speed, and the host bus type. 2082 * The @mdp passed into this function points to an array of 80 chars. When the 2083 * function returns, the @mdp will be filled with the model name. 2084 **/ 2085 static void 2086 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2087 { 2088 lpfc_vpd_t *vp; 2089 uint16_t dev_id = phba->pcidev->device; 2090 int max_speed; 2091 int GE = 0; 2092 int oneConnect = 0; /* default is not a oneConnect */ 2093 struct { 2094 char *name; 2095 char *bus; 2096 char *function; 2097 } m = {"<Unknown>", "", ""}; 2098 2099 if (mdp && mdp[0] != '\0' 2100 && descp && descp[0] != '\0') 2101 return; 2102 2103 if (phba->lmt & LMT_32Gb) 2104 max_speed = 32; 2105 else if (phba->lmt & LMT_16Gb) 2106 max_speed = 16; 2107 else if (phba->lmt & LMT_10Gb) 2108 max_speed = 10; 2109 else if (phba->lmt & LMT_8Gb) 2110 max_speed = 8; 2111 else if (phba->lmt & LMT_4Gb) 2112 max_speed = 4; 2113 else if (phba->lmt & LMT_2Gb) 2114 max_speed = 2; 2115 else if (phba->lmt & LMT_1Gb) 2116 max_speed = 1; 2117 else 2118 max_speed = 0; 2119 2120 vp = &phba->vpd; 2121 2122 switch (dev_id) { 2123 case PCI_DEVICE_ID_FIREFLY: 2124 m = (typeof(m)){"LP6000", "PCI", 2125 "Obsolete, Unsupported Fibre Channel Adapter"}; 2126 break; 2127 case PCI_DEVICE_ID_SUPERFLY: 2128 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2129 m = (typeof(m)){"LP7000", "PCI", ""}; 2130 else 2131 m = (typeof(m)){"LP7000E", "PCI", ""}; 2132 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2133 break; 2134 case PCI_DEVICE_ID_DRAGONFLY: 2135 m = (typeof(m)){"LP8000", "PCI", 2136 "Obsolete, Unsupported Fibre Channel Adapter"}; 2137 break; 2138 case PCI_DEVICE_ID_CENTAUR: 2139 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2140 m = (typeof(m)){"LP9002", "PCI", ""}; 2141 else 2142 m = (typeof(m)){"LP9000", "PCI", ""}; 2143 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2144 break; 2145 case PCI_DEVICE_ID_RFLY: 2146 m = (typeof(m)){"LP952", "PCI", 2147 "Obsolete, Unsupported Fibre Channel Adapter"}; 2148 break; 2149 case PCI_DEVICE_ID_PEGASUS: 2150 m = (typeof(m)){"LP9802", "PCI-X", 2151 "Obsolete, Unsupported Fibre Channel Adapter"}; 2152 break; 2153 case PCI_DEVICE_ID_THOR: 2154 m = (typeof(m)){"LP10000", "PCI-X", 2155 "Obsolete, Unsupported Fibre Channel Adapter"}; 2156 break; 2157 case PCI_DEVICE_ID_VIPER: 2158 m = (typeof(m)){"LPX1000", "PCI-X", 2159 "Obsolete, Unsupported Fibre Channel Adapter"}; 2160 break; 2161 case PCI_DEVICE_ID_PFLY: 2162 m = (typeof(m)){"LP982", "PCI-X", 2163 "Obsolete, Unsupported Fibre Channel Adapter"}; 2164 break; 2165 case PCI_DEVICE_ID_TFLY: 2166 m = (typeof(m)){"LP1050", "PCI-X", 2167 "Obsolete, Unsupported Fibre Channel Adapter"}; 2168 break; 2169 case PCI_DEVICE_ID_HELIOS: 2170 m = (typeof(m)){"LP11000", "PCI-X2", 2171 "Obsolete, Unsupported Fibre Channel Adapter"}; 2172 break; 2173 case PCI_DEVICE_ID_HELIOS_SCSP: 2174 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2175 "Obsolete, Unsupported Fibre Channel Adapter"}; 2176 break; 2177 case PCI_DEVICE_ID_HELIOS_DCSP: 2178 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2179 "Obsolete, Unsupported Fibre Channel Adapter"}; 2180 break; 2181 case PCI_DEVICE_ID_NEPTUNE: 2182 m = (typeof(m)){"LPe1000", "PCIe", 2183 "Obsolete, Unsupported Fibre Channel Adapter"}; 2184 break; 2185 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2186 m = (typeof(m)){"LPe1000-SP", "PCIe", 2187 "Obsolete, Unsupported Fibre Channel Adapter"}; 2188 break; 2189 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2190 m = (typeof(m)){"LPe1002-SP", "PCIe", 2191 "Obsolete, Unsupported Fibre Channel Adapter"}; 2192 break; 2193 case PCI_DEVICE_ID_BMID: 2194 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2195 break; 2196 case PCI_DEVICE_ID_BSMB: 2197 m = (typeof(m)){"LP111", "PCI-X2", 2198 "Obsolete, Unsupported Fibre Channel Adapter"}; 2199 break; 2200 case PCI_DEVICE_ID_ZEPHYR: 2201 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2202 break; 2203 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2204 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2205 break; 2206 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2207 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2208 GE = 1; 2209 break; 2210 case PCI_DEVICE_ID_ZMID: 2211 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2212 break; 2213 case PCI_DEVICE_ID_ZSMB: 2214 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2215 break; 2216 case PCI_DEVICE_ID_LP101: 2217 m = (typeof(m)){"LP101", "PCI-X", 2218 "Obsolete, Unsupported Fibre Channel Adapter"}; 2219 break; 2220 case PCI_DEVICE_ID_LP10000S: 2221 m = (typeof(m)){"LP10000-S", "PCI", 2222 "Obsolete, Unsupported Fibre Channel Adapter"}; 2223 break; 2224 case PCI_DEVICE_ID_LP11000S: 2225 m = (typeof(m)){"LP11000-S", "PCI-X2", 2226 "Obsolete, Unsupported Fibre Channel Adapter"}; 2227 break; 2228 case PCI_DEVICE_ID_LPE11000S: 2229 m = (typeof(m)){"LPe11000-S", "PCIe", 2230 "Obsolete, Unsupported Fibre Channel Adapter"}; 2231 break; 2232 case PCI_DEVICE_ID_SAT: 2233 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2234 break; 2235 case PCI_DEVICE_ID_SAT_MID: 2236 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2237 break; 2238 case PCI_DEVICE_ID_SAT_SMB: 2239 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2240 break; 2241 case PCI_DEVICE_ID_SAT_DCSP: 2242 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2243 break; 2244 case PCI_DEVICE_ID_SAT_SCSP: 2245 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2246 break; 2247 case PCI_DEVICE_ID_SAT_S: 2248 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2249 break; 2250 case PCI_DEVICE_ID_HORNET: 2251 m = (typeof(m)){"LP21000", "PCIe", 2252 "Obsolete, Unsupported FCoE Adapter"}; 2253 GE = 1; 2254 break; 2255 case PCI_DEVICE_ID_PROTEUS_VF: 2256 m = (typeof(m)){"LPev12000", "PCIe IOV", 2257 "Obsolete, Unsupported Fibre Channel Adapter"}; 2258 break; 2259 case PCI_DEVICE_ID_PROTEUS_PF: 2260 m = (typeof(m)){"LPev12000", "PCIe IOV", 2261 "Obsolete, Unsupported Fibre Channel Adapter"}; 2262 break; 2263 case PCI_DEVICE_ID_PROTEUS_S: 2264 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2265 "Obsolete, Unsupported Fibre Channel Adapter"}; 2266 break; 2267 case PCI_DEVICE_ID_TIGERSHARK: 2268 oneConnect = 1; 2269 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2270 break; 2271 case PCI_DEVICE_ID_TOMCAT: 2272 oneConnect = 1; 2273 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2274 break; 2275 case PCI_DEVICE_ID_FALCON: 2276 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2277 "EmulexSecure Fibre"}; 2278 break; 2279 case PCI_DEVICE_ID_BALIUS: 2280 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2281 "Obsolete, Unsupported Fibre Channel Adapter"}; 2282 break; 2283 case PCI_DEVICE_ID_LANCER_FC: 2284 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2285 break; 2286 case PCI_DEVICE_ID_LANCER_FC_VF: 2287 m = (typeof(m)){"LPe16000", "PCIe", 2288 "Obsolete, Unsupported Fibre Channel Adapter"}; 2289 break; 2290 case PCI_DEVICE_ID_LANCER_FCOE: 2291 oneConnect = 1; 2292 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2293 break; 2294 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2295 oneConnect = 1; 2296 m = (typeof(m)){"OCe15100", "PCIe", 2297 "Obsolete, Unsupported FCoE"}; 2298 break; 2299 case PCI_DEVICE_ID_LANCER_G6_FC: 2300 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2301 break; 2302 case PCI_DEVICE_ID_SKYHAWK: 2303 case PCI_DEVICE_ID_SKYHAWK_VF: 2304 oneConnect = 1; 2305 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2306 break; 2307 default: 2308 m = (typeof(m)){"Unknown", "", ""}; 2309 break; 2310 } 2311 2312 if (mdp && mdp[0] == '\0') 2313 snprintf(mdp, 79,"%s", m.name); 2314 /* 2315 * oneConnect hba requires special processing, they are all initiators 2316 * and we put the port number on the end 2317 */ 2318 if (descp && descp[0] == '\0') { 2319 if (oneConnect) 2320 snprintf(descp, 255, 2321 "Emulex OneConnect %s, %s Initiator %s", 2322 m.name, m.function, 2323 phba->Port); 2324 else if (max_speed == 0) 2325 snprintf(descp, 255, 2326 "Emulex %s %s %s", 2327 m.name, m.bus, m.function); 2328 else 2329 snprintf(descp, 255, 2330 "Emulex %s %d%s %s %s", 2331 m.name, max_speed, (GE) ? "GE" : "Gb", 2332 m.bus, m.function); 2333 } 2334 } 2335 2336 /** 2337 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2338 * @phba: pointer to lpfc hba data structure. 2339 * @pring: pointer to a IOCB ring. 2340 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2341 * 2342 * This routine posts a given number of IOCBs with the associated DMA buffer 2343 * descriptors specified by the cnt argument to the given IOCB ring. 2344 * 2345 * Return codes 2346 * The number of IOCBs NOT able to be posted to the IOCB ring. 2347 **/ 2348 int 2349 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2350 { 2351 IOCB_t *icmd; 2352 struct lpfc_iocbq *iocb; 2353 struct lpfc_dmabuf *mp1, *mp2; 2354 2355 cnt += pring->missbufcnt; 2356 2357 /* While there are buffers to post */ 2358 while (cnt > 0) { 2359 /* Allocate buffer for command iocb */ 2360 iocb = lpfc_sli_get_iocbq(phba); 2361 if (iocb == NULL) { 2362 pring->missbufcnt = cnt; 2363 return cnt; 2364 } 2365 icmd = &iocb->iocb; 2366 2367 /* 2 buffers can be posted per command */ 2368 /* Allocate buffer to post */ 2369 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2370 if (mp1) 2371 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2372 if (!mp1 || !mp1->virt) { 2373 kfree(mp1); 2374 lpfc_sli_release_iocbq(phba, iocb); 2375 pring->missbufcnt = cnt; 2376 return cnt; 2377 } 2378 2379 INIT_LIST_HEAD(&mp1->list); 2380 /* Allocate buffer to post */ 2381 if (cnt > 1) { 2382 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2383 if (mp2) 2384 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2385 &mp2->phys); 2386 if (!mp2 || !mp2->virt) { 2387 kfree(mp2); 2388 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2389 kfree(mp1); 2390 lpfc_sli_release_iocbq(phba, iocb); 2391 pring->missbufcnt = cnt; 2392 return cnt; 2393 } 2394 2395 INIT_LIST_HEAD(&mp2->list); 2396 } else { 2397 mp2 = NULL; 2398 } 2399 2400 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2401 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2402 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2403 icmd->ulpBdeCount = 1; 2404 cnt--; 2405 if (mp2) { 2406 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2407 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2408 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2409 cnt--; 2410 icmd->ulpBdeCount = 2; 2411 } 2412 2413 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2414 icmd->ulpLe = 1; 2415 2416 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2417 IOCB_ERROR) { 2418 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2419 kfree(mp1); 2420 cnt++; 2421 if (mp2) { 2422 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2423 kfree(mp2); 2424 cnt++; 2425 } 2426 lpfc_sli_release_iocbq(phba, iocb); 2427 pring->missbufcnt = cnt; 2428 return cnt; 2429 } 2430 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2431 if (mp2) 2432 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2433 } 2434 pring->missbufcnt = 0; 2435 return 0; 2436 } 2437 2438 /** 2439 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2440 * @phba: pointer to lpfc hba data structure. 2441 * 2442 * This routine posts initial receive IOCB buffers to the ELS ring. The 2443 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2444 * set to 64 IOCBs. 2445 * 2446 * Return codes 2447 * 0 - success (currently always success) 2448 **/ 2449 static int 2450 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2451 { 2452 struct lpfc_sli *psli = &phba->sli; 2453 2454 /* Ring 0, ELS / CT buffers */ 2455 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2456 /* Ring 2 - FCP no buffers needed */ 2457 2458 return 0; 2459 } 2460 2461 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2462 2463 /** 2464 * lpfc_sha_init - Set up initial array of hash table entries 2465 * @HashResultPointer: pointer to an array as hash table. 2466 * 2467 * This routine sets up the initial values to the array of hash table entries 2468 * for the LC HBAs. 2469 **/ 2470 static void 2471 lpfc_sha_init(uint32_t * HashResultPointer) 2472 { 2473 HashResultPointer[0] = 0x67452301; 2474 HashResultPointer[1] = 0xEFCDAB89; 2475 HashResultPointer[2] = 0x98BADCFE; 2476 HashResultPointer[3] = 0x10325476; 2477 HashResultPointer[4] = 0xC3D2E1F0; 2478 } 2479 2480 /** 2481 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2482 * @HashResultPointer: pointer to an initial/result hash table. 2483 * @HashWorkingPointer: pointer to an working hash table. 2484 * 2485 * This routine iterates an initial hash table pointed by @HashResultPointer 2486 * with the values from the working hash table pointeed by @HashWorkingPointer. 2487 * The results are putting back to the initial hash table, returned through 2488 * the @HashResultPointer as the result hash table. 2489 **/ 2490 static void 2491 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2492 { 2493 int t; 2494 uint32_t TEMP; 2495 uint32_t A, B, C, D, E; 2496 t = 16; 2497 do { 2498 HashWorkingPointer[t] = 2499 S(1, 2500 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2501 8] ^ 2502 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2503 } while (++t <= 79); 2504 t = 0; 2505 A = HashResultPointer[0]; 2506 B = HashResultPointer[1]; 2507 C = HashResultPointer[2]; 2508 D = HashResultPointer[3]; 2509 E = HashResultPointer[4]; 2510 2511 do { 2512 if (t < 20) { 2513 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2514 } else if (t < 40) { 2515 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2516 } else if (t < 60) { 2517 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2518 } else { 2519 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2520 } 2521 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2522 E = D; 2523 D = C; 2524 C = S(30, B); 2525 B = A; 2526 A = TEMP; 2527 } while (++t <= 79); 2528 2529 HashResultPointer[0] += A; 2530 HashResultPointer[1] += B; 2531 HashResultPointer[2] += C; 2532 HashResultPointer[3] += D; 2533 HashResultPointer[4] += E; 2534 2535 } 2536 2537 /** 2538 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2539 * @RandomChallenge: pointer to the entry of host challenge random number array. 2540 * @HashWorking: pointer to the entry of the working hash array. 2541 * 2542 * This routine calculates the working hash array referred by @HashWorking 2543 * from the challenge random numbers associated with the host, referred by 2544 * @RandomChallenge. The result is put into the entry of the working hash 2545 * array and returned by reference through @HashWorking. 2546 **/ 2547 static void 2548 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2549 { 2550 *HashWorking = (*RandomChallenge ^ *HashWorking); 2551 } 2552 2553 /** 2554 * lpfc_hba_init - Perform special handling for LC HBA initialization 2555 * @phba: pointer to lpfc hba data structure. 2556 * @hbainit: pointer to an array of unsigned 32-bit integers. 2557 * 2558 * This routine performs the special handling for LC HBA initialization. 2559 **/ 2560 void 2561 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2562 { 2563 int t; 2564 uint32_t *HashWorking; 2565 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2566 2567 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2568 if (!HashWorking) 2569 return; 2570 2571 HashWorking[0] = HashWorking[78] = *pwwnn++; 2572 HashWorking[1] = HashWorking[79] = *pwwnn; 2573 2574 for (t = 0; t < 7; t++) 2575 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2576 2577 lpfc_sha_init(hbainit); 2578 lpfc_sha_iterate(hbainit, HashWorking); 2579 kfree(HashWorking); 2580 } 2581 2582 /** 2583 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2584 * @vport: pointer to a virtual N_Port data structure. 2585 * 2586 * This routine performs the necessary cleanups before deleting the @vport. 2587 * It invokes the discovery state machine to perform necessary state 2588 * transitions and to release the ndlps associated with the @vport. Note, 2589 * the physical port is treated as @vport 0. 2590 **/ 2591 void 2592 lpfc_cleanup(struct lpfc_vport *vport) 2593 { 2594 struct lpfc_hba *phba = vport->phba; 2595 struct lpfc_nodelist *ndlp, *next_ndlp; 2596 int i = 0; 2597 2598 if (phba->link_state > LPFC_LINK_DOWN) 2599 lpfc_port_link_failure(vport); 2600 2601 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2602 if (!NLP_CHK_NODE_ACT(ndlp)) { 2603 ndlp = lpfc_enable_node(vport, ndlp, 2604 NLP_STE_UNUSED_NODE); 2605 if (!ndlp) 2606 continue; 2607 spin_lock_irq(&phba->ndlp_lock); 2608 NLP_SET_FREE_REQ(ndlp); 2609 spin_unlock_irq(&phba->ndlp_lock); 2610 /* Trigger the release of the ndlp memory */ 2611 lpfc_nlp_put(ndlp); 2612 continue; 2613 } 2614 spin_lock_irq(&phba->ndlp_lock); 2615 if (NLP_CHK_FREE_REQ(ndlp)) { 2616 /* The ndlp should not be in memory free mode already */ 2617 spin_unlock_irq(&phba->ndlp_lock); 2618 continue; 2619 } else 2620 /* Indicate request for freeing ndlp memory */ 2621 NLP_SET_FREE_REQ(ndlp); 2622 spin_unlock_irq(&phba->ndlp_lock); 2623 2624 if (vport->port_type != LPFC_PHYSICAL_PORT && 2625 ndlp->nlp_DID == Fabric_DID) { 2626 /* Just free up ndlp with Fabric_DID for vports */ 2627 lpfc_nlp_put(ndlp); 2628 continue; 2629 } 2630 2631 /* take care of nodes in unused state before the state 2632 * machine taking action. 2633 */ 2634 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2635 lpfc_nlp_put(ndlp); 2636 continue; 2637 } 2638 2639 if (ndlp->nlp_type & NLP_FABRIC) 2640 lpfc_disc_state_machine(vport, ndlp, NULL, 2641 NLP_EVT_DEVICE_RECOVERY); 2642 2643 lpfc_disc_state_machine(vport, ndlp, NULL, 2644 NLP_EVT_DEVICE_RM); 2645 } 2646 2647 /* At this point, ALL ndlp's should be gone 2648 * because of the previous NLP_EVT_DEVICE_RM. 2649 * Lets wait for this to happen, if needed. 2650 */ 2651 while (!list_empty(&vport->fc_nodes)) { 2652 if (i++ > 3000) { 2653 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2654 "0233 Nodelist not empty\n"); 2655 list_for_each_entry_safe(ndlp, next_ndlp, 2656 &vport->fc_nodes, nlp_listp) { 2657 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2658 LOG_NODE, 2659 "0282 did:x%x ndlp:x%p " 2660 "usgmap:x%x refcnt:%d\n", 2661 ndlp->nlp_DID, (void *)ndlp, 2662 ndlp->nlp_usg_map, 2663 atomic_read( 2664 &ndlp->kref.refcount)); 2665 } 2666 break; 2667 } 2668 2669 /* Wait for any activity on ndlps to settle */ 2670 msleep(10); 2671 } 2672 lpfc_cleanup_vports_rrqs(vport, NULL); 2673 } 2674 2675 /** 2676 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2677 * @vport: pointer to a virtual N_Port data structure. 2678 * 2679 * This routine stops all the timers associated with a @vport. This function 2680 * is invoked before disabling or deleting a @vport. Note that the physical 2681 * port is treated as @vport 0. 2682 **/ 2683 void 2684 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2685 { 2686 del_timer_sync(&vport->els_tmofunc); 2687 del_timer_sync(&vport->delayed_disc_tmo); 2688 lpfc_can_disctmo(vport); 2689 return; 2690 } 2691 2692 /** 2693 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2694 * @phba: pointer to lpfc hba data structure. 2695 * 2696 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2697 * caller of this routine should already hold the host lock. 2698 **/ 2699 void 2700 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2701 { 2702 /* Clear pending FCF rediscovery wait flag */ 2703 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2704 2705 /* Now, try to stop the timer */ 2706 del_timer(&phba->fcf.redisc_wait); 2707 } 2708 2709 /** 2710 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2711 * @phba: pointer to lpfc hba data structure. 2712 * 2713 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2714 * checks whether the FCF rediscovery wait timer is pending with the host 2715 * lock held before proceeding with disabling the timer and clearing the 2716 * wait timer pendig flag. 2717 **/ 2718 void 2719 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2720 { 2721 spin_lock_irq(&phba->hbalock); 2722 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2723 /* FCF rediscovery timer already fired or stopped */ 2724 spin_unlock_irq(&phba->hbalock); 2725 return; 2726 } 2727 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2728 /* Clear failover in progress flags */ 2729 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2730 spin_unlock_irq(&phba->hbalock); 2731 } 2732 2733 /** 2734 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2735 * @phba: pointer to lpfc hba data structure. 2736 * 2737 * This routine stops all the timers associated with a HBA. This function is 2738 * invoked before either putting a HBA offline or unloading the driver. 2739 **/ 2740 void 2741 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2742 { 2743 lpfc_stop_vport_timers(phba->pport); 2744 del_timer_sync(&phba->sli.mbox_tmo); 2745 del_timer_sync(&phba->fabric_block_timer); 2746 del_timer_sync(&phba->eratt_poll); 2747 del_timer_sync(&phba->hb_tmofunc); 2748 if (phba->sli_rev == LPFC_SLI_REV4) { 2749 del_timer_sync(&phba->rrq_tmr); 2750 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2751 } 2752 phba->hb_outstanding = 0; 2753 2754 switch (phba->pci_dev_grp) { 2755 case LPFC_PCI_DEV_LP: 2756 /* Stop any LightPulse device specific driver timers */ 2757 del_timer_sync(&phba->fcp_poll_timer); 2758 break; 2759 case LPFC_PCI_DEV_OC: 2760 /* Stop any OneConnect device sepcific driver timers */ 2761 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2762 break; 2763 default: 2764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2765 "0297 Invalid device group (x%x)\n", 2766 phba->pci_dev_grp); 2767 break; 2768 } 2769 return; 2770 } 2771 2772 /** 2773 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2774 * @phba: pointer to lpfc hba data structure. 2775 * 2776 * This routine marks a HBA's management interface as blocked. Once the HBA's 2777 * management interface is marked as blocked, all the user space access to 2778 * the HBA, whether they are from sysfs interface or libdfc interface will 2779 * all be blocked. The HBA is set to block the management interface when the 2780 * driver prepares the HBA interface for online or offline. 2781 **/ 2782 static void 2783 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) 2784 { 2785 unsigned long iflag; 2786 uint8_t actcmd = MBX_HEARTBEAT; 2787 unsigned long timeout; 2788 2789 spin_lock_irqsave(&phba->hbalock, iflag); 2790 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2791 spin_unlock_irqrestore(&phba->hbalock, iflag); 2792 if (mbx_action == LPFC_MBX_NO_WAIT) 2793 return; 2794 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 2795 spin_lock_irqsave(&phba->hbalock, iflag); 2796 if (phba->sli.mbox_active) { 2797 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2798 /* Determine how long we might wait for the active mailbox 2799 * command to be gracefully completed by firmware. 2800 */ 2801 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2802 phba->sli.mbox_active) * 1000) + jiffies; 2803 } 2804 spin_unlock_irqrestore(&phba->hbalock, iflag); 2805 2806 /* Wait for the outstnading mailbox command to complete */ 2807 while (phba->sli.mbox_active) { 2808 /* Check active mailbox complete status every 2ms */ 2809 msleep(2); 2810 if (time_after(jiffies, timeout)) { 2811 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2812 "2813 Mgmt IO is Blocked %x " 2813 "- mbox cmd %x still active\n", 2814 phba->sli.sli_flag, actcmd); 2815 break; 2816 } 2817 } 2818 } 2819 2820 /** 2821 * lpfc_sli4_node_prep - Assign RPIs for active nodes. 2822 * @phba: pointer to lpfc hba data structure. 2823 * 2824 * Allocate RPIs for all active remote nodes. This is needed whenever 2825 * an SLI4 adapter is reset and the driver is not unloading. Its purpose 2826 * is to fixup the temporary rpi assignments. 2827 **/ 2828 void 2829 lpfc_sli4_node_prep(struct lpfc_hba *phba) 2830 { 2831 struct lpfc_nodelist *ndlp, *next_ndlp; 2832 struct lpfc_vport **vports; 2833 int i; 2834 2835 if (phba->sli_rev != LPFC_SLI_REV4) 2836 return; 2837 2838 vports = lpfc_create_vport_work_array(phba); 2839 if (vports != NULL) { 2840 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2841 if (vports[i]->load_flag & FC_UNLOADING) 2842 continue; 2843 2844 list_for_each_entry_safe(ndlp, next_ndlp, 2845 &vports[i]->fc_nodes, 2846 nlp_listp) { 2847 if (NLP_CHK_NODE_ACT(ndlp)) { 2848 ndlp->nlp_rpi = 2849 lpfc_sli4_alloc_rpi(phba); 2850 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 2851 LOG_NODE, 2852 "0009 rpi:%x DID:%x " 2853 "flg:%x map:%x %p\n", 2854 ndlp->nlp_rpi, 2855 ndlp->nlp_DID, 2856 ndlp->nlp_flag, 2857 ndlp->nlp_usg_map, 2858 ndlp); 2859 } 2860 } 2861 } 2862 } 2863 lpfc_destroy_vport_work_array(phba, vports); 2864 } 2865 2866 /** 2867 * lpfc_online - Initialize and bring a HBA online 2868 * @phba: pointer to lpfc hba data structure. 2869 * 2870 * This routine initializes the HBA and brings a HBA online. During this 2871 * process, the management interface is blocked to prevent user space access 2872 * to the HBA interfering with the driver initialization. 2873 * 2874 * Return codes 2875 * 0 - successful 2876 * 1 - failed 2877 **/ 2878 int 2879 lpfc_online(struct lpfc_hba *phba) 2880 { 2881 struct lpfc_vport *vport; 2882 struct lpfc_vport **vports; 2883 int i; 2884 bool vpis_cleared = false; 2885 2886 if (!phba) 2887 return 0; 2888 vport = phba->pport; 2889 2890 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2891 return 0; 2892 2893 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2894 "0458 Bring Adapter online\n"); 2895 2896 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 2897 2898 if (!lpfc_sli_queue_setup(phba)) { 2899 lpfc_unblock_mgmt_io(phba); 2900 return 1; 2901 } 2902 2903 if (phba->sli_rev == LPFC_SLI_REV4) { 2904 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2905 lpfc_unblock_mgmt_io(phba); 2906 return 1; 2907 } 2908 spin_lock_irq(&phba->hbalock); 2909 if (!phba->sli4_hba.max_cfg_param.vpi_used) 2910 vpis_cleared = true; 2911 spin_unlock_irq(&phba->hbalock); 2912 } else { 2913 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2914 lpfc_unblock_mgmt_io(phba); 2915 return 1; 2916 } 2917 } 2918 2919 vports = lpfc_create_vport_work_array(phba); 2920 if (vports != NULL) { 2921 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2922 struct Scsi_Host *shost; 2923 shost = lpfc_shost_from_vport(vports[i]); 2924 spin_lock_irq(shost->host_lock); 2925 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2926 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2927 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2928 if (phba->sli_rev == LPFC_SLI_REV4) { 2929 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2930 if ((vpis_cleared) && 2931 (vports[i]->port_type != 2932 LPFC_PHYSICAL_PORT)) 2933 vports[i]->vpi = 0; 2934 } 2935 spin_unlock_irq(shost->host_lock); 2936 } 2937 } 2938 lpfc_destroy_vport_work_array(phba, vports); 2939 2940 lpfc_unblock_mgmt_io(phba); 2941 return 0; 2942 } 2943 2944 /** 2945 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2946 * @phba: pointer to lpfc hba data structure. 2947 * 2948 * This routine marks a HBA's management interface as not blocked. Once the 2949 * HBA's management interface is marked as not blocked, all the user space 2950 * access to the HBA, whether they are from sysfs interface or libdfc 2951 * interface will be allowed. The HBA is set to block the management interface 2952 * when the driver prepares the HBA interface for online or offline and then 2953 * set to unblock the management interface afterwards. 2954 **/ 2955 void 2956 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2957 { 2958 unsigned long iflag; 2959 2960 spin_lock_irqsave(&phba->hbalock, iflag); 2961 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2962 spin_unlock_irqrestore(&phba->hbalock, iflag); 2963 } 2964 2965 /** 2966 * lpfc_offline_prep - Prepare a HBA to be brought offline 2967 * @phba: pointer to lpfc hba data structure. 2968 * 2969 * This routine is invoked to prepare a HBA to be brought offline. It performs 2970 * unregistration login to all the nodes on all vports and flushes the mailbox 2971 * queue to make it ready to be brought offline. 2972 **/ 2973 void 2974 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 2975 { 2976 struct lpfc_vport *vport = phba->pport; 2977 struct lpfc_nodelist *ndlp, *next_ndlp; 2978 struct lpfc_vport **vports; 2979 struct Scsi_Host *shost; 2980 int i; 2981 2982 if (vport->fc_flag & FC_OFFLINE_MODE) 2983 return; 2984 2985 lpfc_block_mgmt_io(phba, mbx_action); 2986 2987 lpfc_linkdown(phba); 2988 2989 /* Issue an unreg_login to all nodes on all vports */ 2990 vports = lpfc_create_vport_work_array(phba); 2991 if (vports != NULL) { 2992 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2993 if (vports[i]->load_flag & FC_UNLOADING) 2994 continue; 2995 shost = lpfc_shost_from_vport(vports[i]); 2996 spin_lock_irq(shost->host_lock); 2997 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2998 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2999 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3000 spin_unlock_irq(shost->host_lock); 3001 3002 shost = lpfc_shost_from_vport(vports[i]); 3003 list_for_each_entry_safe(ndlp, next_ndlp, 3004 &vports[i]->fc_nodes, 3005 nlp_listp) { 3006 if (!NLP_CHK_NODE_ACT(ndlp)) 3007 continue; 3008 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3009 continue; 3010 if (ndlp->nlp_type & NLP_FABRIC) { 3011 lpfc_disc_state_machine(vports[i], ndlp, 3012 NULL, NLP_EVT_DEVICE_RECOVERY); 3013 lpfc_disc_state_machine(vports[i], ndlp, 3014 NULL, NLP_EVT_DEVICE_RM); 3015 } 3016 spin_lock_irq(shost->host_lock); 3017 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3018 spin_unlock_irq(shost->host_lock); 3019 /* 3020 * Whenever an SLI4 port goes offline, free the 3021 * RPI. Get a new RPI when the adapter port 3022 * comes back online. 3023 */ 3024 if (phba->sli_rev == LPFC_SLI_REV4) { 3025 lpfc_printf_vlog(ndlp->vport, 3026 KERN_INFO, LOG_NODE, 3027 "0011 lpfc_offline: " 3028 "ndlp:x%p did %x " 3029 "usgmap:x%x rpi:%x\n", 3030 ndlp, ndlp->nlp_DID, 3031 ndlp->nlp_usg_map, 3032 ndlp->nlp_rpi); 3033 3034 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3035 } 3036 lpfc_unreg_rpi(vports[i], ndlp); 3037 } 3038 } 3039 } 3040 lpfc_destroy_vport_work_array(phba, vports); 3041 3042 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3043 } 3044 3045 /** 3046 * lpfc_offline - Bring a HBA offline 3047 * @phba: pointer to lpfc hba data structure. 3048 * 3049 * This routine actually brings a HBA offline. It stops all the timers 3050 * associated with the HBA, brings down the SLI layer, and eventually 3051 * marks the HBA as in offline state for the upper layer protocol. 3052 **/ 3053 void 3054 lpfc_offline(struct lpfc_hba *phba) 3055 { 3056 struct Scsi_Host *shost; 3057 struct lpfc_vport **vports; 3058 int i; 3059 3060 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3061 return; 3062 3063 /* stop port and all timers associated with this hba */ 3064 lpfc_stop_port(phba); 3065 vports = lpfc_create_vport_work_array(phba); 3066 if (vports != NULL) 3067 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3068 lpfc_stop_vport_timers(vports[i]); 3069 lpfc_destroy_vport_work_array(phba, vports); 3070 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3071 "0460 Bring Adapter offline\n"); 3072 /* Bring down the SLI Layer and cleanup. The HBA is offline 3073 now. */ 3074 lpfc_sli_hba_down(phba); 3075 spin_lock_irq(&phba->hbalock); 3076 phba->work_ha = 0; 3077 spin_unlock_irq(&phba->hbalock); 3078 vports = lpfc_create_vport_work_array(phba); 3079 if (vports != NULL) 3080 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3081 shost = lpfc_shost_from_vport(vports[i]); 3082 spin_lock_irq(shost->host_lock); 3083 vports[i]->work_port_events = 0; 3084 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3085 spin_unlock_irq(shost->host_lock); 3086 } 3087 lpfc_destroy_vport_work_array(phba, vports); 3088 } 3089 3090 /** 3091 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3092 * @phba: pointer to lpfc hba data structure. 3093 * 3094 * This routine is to free all the SCSI buffers and IOCBs from the driver 3095 * list back to kernel. It is called from lpfc_pci_remove_one to free 3096 * the internal resources before the device is removed from the system. 3097 **/ 3098 static void 3099 lpfc_scsi_free(struct lpfc_hba *phba) 3100 { 3101 struct lpfc_scsi_buf *sb, *sb_next; 3102 struct lpfc_iocbq *io, *io_next; 3103 3104 spin_lock_irq(&phba->hbalock); 3105 3106 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3107 3108 spin_lock(&phba->scsi_buf_list_put_lock); 3109 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3110 list) { 3111 list_del(&sb->list); 3112 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 3113 sb->dma_handle); 3114 kfree(sb); 3115 phba->total_scsi_bufs--; 3116 } 3117 spin_unlock(&phba->scsi_buf_list_put_lock); 3118 3119 spin_lock(&phba->scsi_buf_list_get_lock); 3120 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3121 list) { 3122 list_del(&sb->list); 3123 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 3124 sb->dma_handle); 3125 kfree(sb); 3126 phba->total_scsi_bufs--; 3127 } 3128 spin_unlock(&phba->scsi_buf_list_get_lock); 3129 3130 /* Release all the lpfc_iocbq entries maintained by this host. */ 3131 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 3132 list_del(&io->list); 3133 kfree(io); 3134 phba->total_iocbq_bufs--; 3135 } 3136 3137 spin_unlock_irq(&phba->hbalock); 3138 } 3139 3140 /** 3141 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping 3142 * @phba: pointer to lpfc hba data structure. 3143 * 3144 * This routine first calculates the sizes of the current els and allocated 3145 * scsi sgl lists, and then goes through all sgls to updates the physical 3146 * XRIs assigned due to port function reset. During port initialization, the 3147 * current els and allocated scsi sgl lists are 0s. 3148 * 3149 * Return codes 3150 * 0 - successful (for now, it always returns 0) 3151 **/ 3152 int 3153 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) 3154 { 3155 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3156 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL; 3157 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt; 3158 LIST_HEAD(els_sgl_list); 3159 LIST_HEAD(scsi_sgl_list); 3160 int rc; 3161 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3162 3163 /* 3164 * update on pci function's els xri-sgl list 3165 */ 3166 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3167 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3168 /* els xri-sgl expanded */ 3169 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3170 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3171 "3157 ELS xri-sgl count increased from " 3172 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3173 els_xri_cnt); 3174 /* allocate the additional els sgls */ 3175 for (i = 0; i < xri_cnt; i++) { 3176 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3177 GFP_KERNEL); 3178 if (sglq_entry == NULL) { 3179 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3180 "2562 Failure to allocate an " 3181 "ELS sgl entry:%d\n", i); 3182 rc = -ENOMEM; 3183 goto out_free_mem; 3184 } 3185 sglq_entry->buff_type = GEN_BUFF_TYPE; 3186 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3187 &sglq_entry->phys); 3188 if (sglq_entry->virt == NULL) { 3189 kfree(sglq_entry); 3190 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3191 "2563 Failure to allocate an " 3192 "ELS mbuf:%d\n", i); 3193 rc = -ENOMEM; 3194 goto out_free_mem; 3195 } 3196 sglq_entry->sgl = sglq_entry->virt; 3197 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3198 sglq_entry->state = SGL_FREED; 3199 list_add_tail(&sglq_entry->list, &els_sgl_list); 3200 } 3201 spin_lock_irq(&phba->hbalock); 3202 spin_lock(&pring->ring_lock); 3203 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 3204 spin_unlock(&pring->ring_lock); 3205 spin_unlock_irq(&phba->hbalock); 3206 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3207 /* els xri-sgl shrinked */ 3208 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3209 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3210 "3158 ELS xri-sgl count decreased from " 3211 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3212 els_xri_cnt); 3213 spin_lock_irq(&phba->hbalock); 3214 spin_lock(&pring->ring_lock); 3215 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); 3216 spin_unlock(&pring->ring_lock); 3217 spin_unlock_irq(&phba->hbalock); 3218 /* release extra els sgls from list */ 3219 for (i = 0; i < xri_cnt; i++) { 3220 list_remove_head(&els_sgl_list, 3221 sglq_entry, struct lpfc_sglq, list); 3222 if (sglq_entry) { 3223 lpfc_mbuf_free(phba, sglq_entry->virt, 3224 sglq_entry->phys); 3225 kfree(sglq_entry); 3226 } 3227 } 3228 spin_lock_irq(&phba->hbalock); 3229 spin_lock(&pring->ring_lock); 3230 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 3231 spin_unlock(&pring->ring_lock); 3232 spin_unlock_irq(&phba->hbalock); 3233 } else 3234 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3235 "3163 ELS xri-sgl count unchanged: %d\n", 3236 els_xri_cnt); 3237 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3238 3239 /* update xris to els sgls on the list */ 3240 sglq_entry = NULL; 3241 sglq_entry_next = NULL; 3242 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3243 &phba->sli4_hba.lpfc_sgl_list, list) { 3244 lxri = lpfc_sli4_next_xritag(phba); 3245 if (lxri == NO_XRI) { 3246 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3247 "2400 Failed to allocate xri for " 3248 "ELS sgl\n"); 3249 rc = -ENOMEM; 3250 goto out_free_mem; 3251 } 3252 sglq_entry->sli4_lxritag = lxri; 3253 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3254 } 3255 3256 /* 3257 * update on pci function's allocated scsi xri-sgl list 3258 */ 3259 phba->total_scsi_bufs = 0; 3260 3261 /* maximum number of xris available for scsi buffers */ 3262 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - 3263 els_xri_cnt; 3264 3265 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3266 "2401 Current allocated SCSI xri-sgl count:%d, " 3267 "maximum SCSI xri count:%d\n", 3268 phba->sli4_hba.scsi_xri_cnt, 3269 phba->sli4_hba.scsi_xri_max); 3270 3271 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3272 spin_lock(&phba->scsi_buf_list_put_lock); 3273 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); 3274 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); 3275 spin_unlock(&phba->scsi_buf_list_put_lock); 3276 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3277 3278 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3279 /* max scsi xri shrinked below the allocated scsi buffers */ 3280 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - 3281 phba->sli4_hba.scsi_xri_max; 3282 /* release the extra allocated scsi buffers */ 3283 for (i = 0; i < scsi_xri_cnt; i++) { 3284 list_remove_head(&scsi_sgl_list, psb, 3285 struct lpfc_scsi_buf, list); 3286 if (psb) { 3287 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 3288 psb->data, psb->dma_handle); 3289 kfree(psb); 3290 } 3291 } 3292 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3293 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3294 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3295 } 3296 3297 /* update xris associated to remaining allocated scsi buffers */ 3298 psb = NULL; 3299 psb_next = NULL; 3300 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { 3301 lxri = lpfc_sli4_next_xritag(phba); 3302 if (lxri == NO_XRI) { 3303 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3304 "2560 Failed to allocate xri for " 3305 "scsi buffer\n"); 3306 rc = -ENOMEM; 3307 goto out_free_mem; 3308 } 3309 psb->cur_iocbq.sli4_lxritag = lxri; 3310 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3311 } 3312 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3313 spin_lock(&phba->scsi_buf_list_put_lock); 3314 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); 3315 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 3316 spin_unlock(&phba->scsi_buf_list_put_lock); 3317 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3318 3319 return 0; 3320 3321 out_free_mem: 3322 lpfc_free_els_sgl_list(phba); 3323 lpfc_scsi_free(phba); 3324 return rc; 3325 } 3326 3327 /** 3328 * lpfc_create_port - Create an FC port 3329 * @phba: pointer to lpfc hba data structure. 3330 * @instance: a unique integer ID to this FC port. 3331 * @dev: pointer to the device data structure. 3332 * 3333 * This routine creates a FC port for the upper layer protocol. The FC port 3334 * can be created on top of either a physical port or a virtual port provided 3335 * by the HBA. This routine also allocates a SCSI host data structure (shost) 3336 * and associates the FC port created before adding the shost into the SCSI 3337 * layer. 3338 * 3339 * Return codes 3340 * @vport - pointer to the virtual N_Port data structure. 3341 * NULL - port create failed. 3342 **/ 3343 struct lpfc_vport * 3344 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 3345 { 3346 struct lpfc_vport *vport; 3347 struct Scsi_Host *shost; 3348 int error = 0; 3349 3350 if (dev != &phba->pcidev->dev) { 3351 shost = scsi_host_alloc(&lpfc_vport_template, 3352 sizeof(struct lpfc_vport)); 3353 } else { 3354 if (phba->sli_rev == LPFC_SLI_REV4) 3355 shost = scsi_host_alloc(&lpfc_template, 3356 sizeof(struct lpfc_vport)); 3357 else 3358 shost = scsi_host_alloc(&lpfc_template_s3, 3359 sizeof(struct lpfc_vport)); 3360 } 3361 if (!shost) 3362 goto out; 3363 3364 vport = (struct lpfc_vport *) shost->hostdata; 3365 vport->phba = phba; 3366 vport->load_flag |= FC_LOADING; 3367 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3368 vport->fc_rscn_flush = 0; 3369 3370 lpfc_get_vport_cfgparam(vport); 3371 shost->unique_id = instance; 3372 shost->max_id = LPFC_MAX_TARGET; 3373 shost->max_lun = vport->cfg_max_luns; 3374 shost->this_id = -1; 3375 shost->max_cmd_len = 16; 3376 shost->nr_hw_queues = phba->cfg_fcp_io_channel; 3377 if (phba->sli_rev == LPFC_SLI_REV4) { 3378 shost->dma_boundary = 3379 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3380 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 3381 } 3382 3383 /* 3384 * Set initial can_queue value since 0 is no longer supported and 3385 * scsi_add_host will fail. This will be adjusted later based on the 3386 * max xri value determined in hba setup. 3387 */ 3388 shost->can_queue = phba->cfg_hba_queue_depth - 10; 3389 if (dev != &phba->pcidev->dev) { 3390 shost->transportt = lpfc_vport_transport_template; 3391 vport->port_type = LPFC_NPIV_PORT; 3392 } else { 3393 shost->transportt = lpfc_transport_template; 3394 vport->port_type = LPFC_PHYSICAL_PORT; 3395 } 3396 3397 /* Initialize all internally managed lists. */ 3398 INIT_LIST_HEAD(&vport->fc_nodes); 3399 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3400 spin_lock_init(&vport->work_port_lock); 3401 3402 init_timer(&vport->fc_disctmo); 3403 vport->fc_disctmo.function = lpfc_disc_timeout; 3404 vport->fc_disctmo.data = (unsigned long)vport; 3405 3406 init_timer(&vport->els_tmofunc); 3407 vport->els_tmofunc.function = lpfc_els_timeout; 3408 vport->els_tmofunc.data = (unsigned long)vport; 3409 3410 init_timer(&vport->delayed_disc_tmo); 3411 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 3412 vport->delayed_disc_tmo.data = (unsigned long)vport; 3413 3414 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3415 if (error) 3416 goto out_put_shost; 3417 3418 spin_lock_irq(&phba->hbalock); 3419 list_add_tail(&vport->listentry, &phba->port_list); 3420 spin_unlock_irq(&phba->hbalock); 3421 return vport; 3422 3423 out_put_shost: 3424 scsi_host_put(shost); 3425 out: 3426 return NULL; 3427 } 3428 3429 /** 3430 * destroy_port - destroy an FC port 3431 * @vport: pointer to an lpfc virtual N_Port data structure. 3432 * 3433 * This routine destroys a FC port from the upper layer protocol. All the 3434 * resources associated with the port are released. 3435 **/ 3436 void 3437 destroy_port(struct lpfc_vport *vport) 3438 { 3439 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3440 struct lpfc_hba *phba = vport->phba; 3441 3442 lpfc_debugfs_terminate(vport); 3443 fc_remove_host(shost); 3444 scsi_remove_host(shost); 3445 3446 spin_lock_irq(&phba->hbalock); 3447 list_del_init(&vport->listentry); 3448 spin_unlock_irq(&phba->hbalock); 3449 3450 lpfc_cleanup(vport); 3451 return; 3452 } 3453 3454 /** 3455 * lpfc_get_instance - Get a unique integer ID 3456 * 3457 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 3458 * uses the kernel idr facility to perform the task. 3459 * 3460 * Return codes: 3461 * instance - a unique integer ID allocated as the new instance. 3462 * -1 - lpfc get instance failed. 3463 **/ 3464 int 3465 lpfc_get_instance(void) 3466 { 3467 int ret; 3468 3469 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 3470 return ret < 0 ? -1 : ret; 3471 } 3472 3473 /** 3474 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 3475 * @shost: pointer to SCSI host data structure. 3476 * @time: elapsed time of the scan in jiffies. 3477 * 3478 * This routine is called by the SCSI layer with a SCSI host to determine 3479 * whether the scan host is finished. 3480 * 3481 * Note: there is no scan_start function as adapter initialization will have 3482 * asynchronously kicked off the link initialization. 3483 * 3484 * Return codes 3485 * 0 - SCSI host scan is not over yet. 3486 * 1 - SCSI host scan is over. 3487 **/ 3488 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 3489 { 3490 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3491 struct lpfc_hba *phba = vport->phba; 3492 int stat = 0; 3493 3494 spin_lock_irq(shost->host_lock); 3495 3496 if (vport->load_flag & FC_UNLOADING) { 3497 stat = 1; 3498 goto finished; 3499 } 3500 if (time >= msecs_to_jiffies(30 * 1000)) { 3501 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3502 "0461 Scanning longer than 30 " 3503 "seconds. Continuing initialization\n"); 3504 stat = 1; 3505 goto finished; 3506 } 3507 if (time >= msecs_to_jiffies(15 * 1000) && 3508 phba->link_state <= LPFC_LINK_DOWN) { 3509 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3510 "0465 Link down longer than 15 " 3511 "seconds. Continuing initialization\n"); 3512 stat = 1; 3513 goto finished; 3514 } 3515 3516 if (vport->port_state != LPFC_VPORT_READY) 3517 goto finished; 3518 if (vport->num_disc_nodes || vport->fc_prli_sent) 3519 goto finished; 3520 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 3521 goto finished; 3522 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 3523 goto finished; 3524 3525 stat = 1; 3526 3527 finished: 3528 spin_unlock_irq(shost->host_lock); 3529 return stat; 3530 } 3531 3532 /** 3533 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 3534 * @shost: pointer to SCSI host data structure. 3535 * 3536 * This routine initializes a given SCSI host attributes on a FC port. The 3537 * SCSI host can be either on top of a physical port or a virtual port. 3538 **/ 3539 void lpfc_host_attrib_init(struct Scsi_Host *shost) 3540 { 3541 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3542 struct lpfc_hba *phba = vport->phba; 3543 /* 3544 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 3545 */ 3546 3547 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 3548 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3549 fc_host_supported_classes(shost) = FC_COS_CLASS3; 3550 3551 memset(fc_host_supported_fc4s(shost), 0, 3552 sizeof(fc_host_supported_fc4s(shost))); 3553 fc_host_supported_fc4s(shost)[2] = 1; 3554 fc_host_supported_fc4s(shost)[7] = 1; 3555 3556 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 3557 sizeof fc_host_symbolic_name(shost)); 3558 3559 fc_host_supported_speeds(shost) = 0; 3560 if (phba->lmt & LMT_32Gb) 3561 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 3562 if (phba->lmt & LMT_16Gb) 3563 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 3564 if (phba->lmt & LMT_10Gb) 3565 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 3566 if (phba->lmt & LMT_8Gb) 3567 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 3568 if (phba->lmt & LMT_4Gb) 3569 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 3570 if (phba->lmt & LMT_2Gb) 3571 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 3572 if (phba->lmt & LMT_1Gb) 3573 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 3574 3575 fc_host_maxframe_size(shost) = 3576 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 3577 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 3578 3579 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 3580 3581 /* This value is also unchanging */ 3582 memset(fc_host_active_fc4s(shost), 0, 3583 sizeof(fc_host_active_fc4s(shost))); 3584 fc_host_active_fc4s(shost)[2] = 1; 3585 fc_host_active_fc4s(shost)[7] = 1; 3586 3587 fc_host_max_npiv_vports(shost) = phba->max_vpi; 3588 spin_lock_irq(shost->host_lock); 3589 vport->load_flag &= ~FC_LOADING; 3590 spin_unlock_irq(shost->host_lock); 3591 } 3592 3593 /** 3594 * lpfc_stop_port_s3 - Stop SLI3 device port 3595 * @phba: pointer to lpfc hba data structure. 3596 * 3597 * This routine is invoked to stop an SLI3 device port, it stops the device 3598 * from generating interrupts and stops the device driver's timers for the 3599 * device. 3600 **/ 3601 static void 3602 lpfc_stop_port_s3(struct lpfc_hba *phba) 3603 { 3604 /* Clear all interrupt enable conditions */ 3605 writel(0, phba->HCregaddr); 3606 readl(phba->HCregaddr); /* flush */ 3607 /* Clear all pending interrupts */ 3608 writel(0xffffffff, phba->HAregaddr); 3609 readl(phba->HAregaddr); /* flush */ 3610 3611 /* Reset some HBA SLI setup states */ 3612 lpfc_stop_hba_timers(phba); 3613 phba->pport->work_port_events = 0; 3614 } 3615 3616 /** 3617 * lpfc_stop_port_s4 - Stop SLI4 device port 3618 * @phba: pointer to lpfc hba data structure. 3619 * 3620 * This routine is invoked to stop an SLI4 device port, it stops the device 3621 * from generating interrupts and stops the device driver's timers for the 3622 * device. 3623 **/ 3624 static void 3625 lpfc_stop_port_s4(struct lpfc_hba *phba) 3626 { 3627 /* Reset some HBA SLI4 setup states */ 3628 lpfc_stop_hba_timers(phba); 3629 phba->pport->work_port_events = 0; 3630 phba->sli4_hba.intr_enable = 0; 3631 } 3632 3633 /** 3634 * lpfc_stop_port - Wrapper function for stopping hba port 3635 * @phba: Pointer to HBA context object. 3636 * 3637 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 3638 * the API jump table function pointer from the lpfc_hba struct. 3639 **/ 3640 void 3641 lpfc_stop_port(struct lpfc_hba *phba) 3642 { 3643 phba->lpfc_stop_port(phba); 3644 } 3645 3646 /** 3647 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 3648 * @phba: Pointer to hba for which this call is being executed. 3649 * 3650 * This routine starts the timer waiting for the FCF rediscovery to complete. 3651 **/ 3652 void 3653 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 3654 { 3655 unsigned long fcf_redisc_wait_tmo = 3656 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 3657 /* Start fcf rediscovery wait period timer */ 3658 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 3659 spin_lock_irq(&phba->hbalock); 3660 /* Allow action to new fcf asynchronous event */ 3661 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 3662 /* Mark the FCF rediscovery pending state */ 3663 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 3664 spin_unlock_irq(&phba->hbalock); 3665 } 3666 3667 /** 3668 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 3669 * @ptr: Map to lpfc_hba data structure pointer. 3670 * 3671 * This routine is invoked when waiting for FCF table rediscover has been 3672 * timed out. If new FCF record(s) has (have) been discovered during the 3673 * wait period, a new FCF event shall be added to the FCOE async event 3674 * list, and then worker thread shall be waked up for processing from the 3675 * worker thread context. 3676 **/ 3677 static void 3678 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 3679 { 3680 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 3681 3682 /* Don't send FCF rediscovery event if timer cancelled */ 3683 spin_lock_irq(&phba->hbalock); 3684 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3685 spin_unlock_irq(&phba->hbalock); 3686 return; 3687 } 3688 /* Clear FCF rediscovery timer pending flag */ 3689 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3690 /* FCF rediscovery event to worker thread */ 3691 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 3692 spin_unlock_irq(&phba->hbalock); 3693 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3694 "2776 FCF rediscover quiescent timer expired\n"); 3695 /* wake up worker thread */ 3696 lpfc_worker_wake_up(phba); 3697 } 3698 3699 /** 3700 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3701 * @phba: pointer to lpfc hba data structure. 3702 * @acqe_link: pointer to the async link completion queue entry. 3703 * 3704 * This routine is to parse the SLI4 link-attention link fault code and 3705 * translate it into the base driver's read link attention mailbox command 3706 * status. 3707 * 3708 * Return: Link-attention status in terms of base driver's coding. 3709 **/ 3710 static uint16_t 3711 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3712 struct lpfc_acqe_link *acqe_link) 3713 { 3714 uint16_t latt_fault; 3715 3716 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3717 case LPFC_ASYNC_LINK_FAULT_NONE: 3718 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3719 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3720 latt_fault = 0; 3721 break; 3722 default: 3723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3724 "0398 Invalid link fault code: x%x\n", 3725 bf_get(lpfc_acqe_link_fault, acqe_link)); 3726 latt_fault = MBXERR_ERROR; 3727 break; 3728 } 3729 return latt_fault; 3730 } 3731 3732 /** 3733 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3734 * @phba: pointer to lpfc hba data structure. 3735 * @acqe_link: pointer to the async link completion queue entry. 3736 * 3737 * This routine is to parse the SLI4 link attention type and translate it 3738 * into the base driver's link attention type coding. 3739 * 3740 * Return: Link attention type in terms of base driver's coding. 3741 **/ 3742 static uint8_t 3743 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3744 struct lpfc_acqe_link *acqe_link) 3745 { 3746 uint8_t att_type; 3747 3748 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3749 case LPFC_ASYNC_LINK_STATUS_DOWN: 3750 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3751 att_type = LPFC_ATT_LINK_DOWN; 3752 break; 3753 case LPFC_ASYNC_LINK_STATUS_UP: 3754 /* Ignore physical link up events - wait for logical link up */ 3755 att_type = LPFC_ATT_RESERVED; 3756 break; 3757 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3758 att_type = LPFC_ATT_LINK_UP; 3759 break; 3760 default: 3761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3762 "0399 Invalid link attention type: x%x\n", 3763 bf_get(lpfc_acqe_link_status, acqe_link)); 3764 att_type = LPFC_ATT_RESERVED; 3765 break; 3766 } 3767 return att_type; 3768 } 3769 3770 /** 3771 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 3772 * @phba: pointer to lpfc hba data structure. 3773 * 3774 * This routine is to get an SLI3 FC port's link speed in Mbps. 3775 * 3776 * Return: link speed in terms of Mbps. 3777 **/ 3778 uint32_t 3779 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 3780 { 3781 uint32_t link_speed; 3782 3783 if (!lpfc_is_link_up(phba)) 3784 return 0; 3785 3786 if (phba->sli_rev <= LPFC_SLI_REV3) { 3787 switch (phba->fc_linkspeed) { 3788 case LPFC_LINK_SPEED_1GHZ: 3789 link_speed = 1000; 3790 break; 3791 case LPFC_LINK_SPEED_2GHZ: 3792 link_speed = 2000; 3793 break; 3794 case LPFC_LINK_SPEED_4GHZ: 3795 link_speed = 4000; 3796 break; 3797 case LPFC_LINK_SPEED_8GHZ: 3798 link_speed = 8000; 3799 break; 3800 case LPFC_LINK_SPEED_10GHZ: 3801 link_speed = 10000; 3802 break; 3803 case LPFC_LINK_SPEED_16GHZ: 3804 link_speed = 16000; 3805 break; 3806 default: 3807 link_speed = 0; 3808 } 3809 } else { 3810 if (phba->sli4_hba.link_state.logical_speed) 3811 link_speed = 3812 phba->sli4_hba.link_state.logical_speed; 3813 else 3814 link_speed = phba->sli4_hba.link_state.speed; 3815 } 3816 return link_speed; 3817 } 3818 3819 /** 3820 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 3821 * @phba: pointer to lpfc hba data structure. 3822 * @evt_code: asynchronous event code. 3823 * @speed_code: asynchronous event link speed code. 3824 * 3825 * This routine is to parse the giving SLI4 async event link speed code into 3826 * value of Mbps for the link speed. 3827 * 3828 * Return: link speed in terms of Mbps. 3829 **/ 3830 static uint32_t 3831 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 3832 uint8_t speed_code) 3833 { 3834 uint32_t port_speed; 3835 3836 switch (evt_code) { 3837 case LPFC_TRAILER_CODE_LINK: 3838 switch (speed_code) { 3839 case LPFC_ASYNC_LINK_SPEED_ZERO: 3840 port_speed = 0; 3841 break; 3842 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3843 port_speed = 10; 3844 break; 3845 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3846 port_speed = 100; 3847 break; 3848 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3849 port_speed = 1000; 3850 break; 3851 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3852 port_speed = 10000; 3853 break; 3854 case LPFC_ASYNC_LINK_SPEED_20GBPS: 3855 port_speed = 20000; 3856 break; 3857 case LPFC_ASYNC_LINK_SPEED_25GBPS: 3858 port_speed = 25000; 3859 break; 3860 case LPFC_ASYNC_LINK_SPEED_40GBPS: 3861 port_speed = 40000; 3862 break; 3863 default: 3864 port_speed = 0; 3865 } 3866 break; 3867 case LPFC_TRAILER_CODE_FC: 3868 switch (speed_code) { 3869 case LPFC_FC_LA_SPEED_UNKNOWN: 3870 port_speed = 0; 3871 break; 3872 case LPFC_FC_LA_SPEED_1G: 3873 port_speed = 1000; 3874 break; 3875 case LPFC_FC_LA_SPEED_2G: 3876 port_speed = 2000; 3877 break; 3878 case LPFC_FC_LA_SPEED_4G: 3879 port_speed = 4000; 3880 break; 3881 case LPFC_FC_LA_SPEED_8G: 3882 port_speed = 8000; 3883 break; 3884 case LPFC_FC_LA_SPEED_10G: 3885 port_speed = 10000; 3886 break; 3887 case LPFC_FC_LA_SPEED_16G: 3888 port_speed = 16000; 3889 break; 3890 case LPFC_FC_LA_SPEED_32G: 3891 port_speed = 32000; 3892 break; 3893 default: 3894 port_speed = 0; 3895 } 3896 break; 3897 default: 3898 port_speed = 0; 3899 } 3900 return port_speed; 3901 } 3902 3903 /** 3904 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3905 * @phba: pointer to lpfc hba data structure. 3906 * @acqe_link: pointer to the async link completion queue entry. 3907 * 3908 * This routine is to handle the SLI4 asynchronous FCoE link event. 3909 **/ 3910 static void 3911 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3912 struct lpfc_acqe_link *acqe_link) 3913 { 3914 struct lpfc_dmabuf *mp; 3915 LPFC_MBOXQ_t *pmb; 3916 MAILBOX_t *mb; 3917 struct lpfc_mbx_read_top *la; 3918 uint8_t att_type; 3919 int rc; 3920 3921 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3922 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3923 return; 3924 phba->fcoe_eventtag = acqe_link->event_tag; 3925 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3926 if (!pmb) { 3927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3928 "0395 The mboxq allocation failed\n"); 3929 return; 3930 } 3931 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3932 if (!mp) { 3933 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3934 "0396 The lpfc_dmabuf allocation failed\n"); 3935 goto out_free_pmb; 3936 } 3937 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3938 if (!mp->virt) { 3939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3940 "0397 The mbuf allocation failed\n"); 3941 goto out_free_dmabuf; 3942 } 3943 3944 /* Cleanup any outstanding ELS commands */ 3945 lpfc_els_flush_all_cmd(phba); 3946 3947 /* Block ELS IOCBs until we have done process link event */ 3948 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3949 3950 /* Update link event statistics */ 3951 phba->sli.slistat.link_event++; 3952 3953 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3954 lpfc_read_topology(phba, pmb, mp); 3955 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3956 pmb->vport = phba->pport; 3957 3958 /* Keep the link status for extra SLI4 state machine reference */ 3959 phba->sli4_hba.link_state.speed = 3960 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 3961 bf_get(lpfc_acqe_link_speed, acqe_link)); 3962 phba->sli4_hba.link_state.duplex = 3963 bf_get(lpfc_acqe_link_duplex, acqe_link); 3964 phba->sli4_hba.link_state.status = 3965 bf_get(lpfc_acqe_link_status, acqe_link); 3966 phba->sli4_hba.link_state.type = 3967 bf_get(lpfc_acqe_link_type, acqe_link); 3968 phba->sli4_hba.link_state.number = 3969 bf_get(lpfc_acqe_link_number, acqe_link); 3970 phba->sli4_hba.link_state.fault = 3971 bf_get(lpfc_acqe_link_fault, acqe_link); 3972 phba->sli4_hba.link_state.logical_speed = 3973 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 3974 3975 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3976 "2900 Async FC/FCoE Link event - Speed:%dGBit " 3977 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 3978 "Logical speed:%dMbps Fault:%d\n", 3979 phba->sli4_hba.link_state.speed, 3980 phba->sli4_hba.link_state.topology, 3981 phba->sli4_hba.link_state.status, 3982 phba->sli4_hba.link_state.type, 3983 phba->sli4_hba.link_state.number, 3984 phba->sli4_hba.link_state.logical_speed, 3985 phba->sli4_hba.link_state.fault); 3986 /* 3987 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3988 * topology info. Note: Optional for non FC-AL ports. 3989 */ 3990 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3991 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3992 if (rc == MBX_NOT_FINISHED) 3993 goto out_free_dmabuf; 3994 return; 3995 } 3996 /* 3997 * For FCoE Mode: fill in all the topology information we need and call 3998 * the READ_TOPOLOGY completion routine to continue without actually 3999 * sending the READ_TOPOLOGY mailbox command to the port. 4000 */ 4001 /* Parse and translate status field */ 4002 mb = &pmb->u.mb; 4003 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 4004 4005 /* Parse and translate link attention fields */ 4006 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 4007 la->eventTag = acqe_link->event_tag; 4008 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 4009 bf_set(lpfc_mbx_read_top_link_spd, la, 4010 (bf_get(lpfc_acqe_link_speed, acqe_link))); 4011 4012 /* Fake the the following irrelvant fields */ 4013 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 4014 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 4015 bf_set(lpfc_mbx_read_top_il, la, 0); 4016 bf_set(lpfc_mbx_read_top_pb, la, 0); 4017 bf_set(lpfc_mbx_read_top_fa, la, 0); 4018 bf_set(lpfc_mbx_read_top_mm, la, 0); 4019 4020 /* Invoke the lpfc_handle_latt mailbox command callback function */ 4021 lpfc_mbx_cmpl_read_topology(phba, pmb); 4022 4023 return; 4024 4025 out_free_dmabuf: 4026 kfree(mp); 4027 out_free_pmb: 4028 mempool_free(pmb, phba->mbox_mem_pool); 4029 } 4030 4031 /** 4032 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 4033 * @phba: pointer to lpfc hba data structure. 4034 * @acqe_fc: pointer to the async fc completion queue entry. 4035 * 4036 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 4037 * that the event was received and then issue a read_topology mailbox command so 4038 * that the rest of the driver will treat it the same as SLI3. 4039 **/ 4040 static void 4041 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 4042 { 4043 struct lpfc_dmabuf *mp; 4044 LPFC_MBOXQ_t *pmb; 4045 MAILBOX_t *mb; 4046 struct lpfc_mbx_read_top *la; 4047 int rc; 4048 4049 if (bf_get(lpfc_trailer_type, acqe_fc) != 4050 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 4051 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4052 "2895 Non FC link Event detected.(%d)\n", 4053 bf_get(lpfc_trailer_type, acqe_fc)); 4054 return; 4055 } 4056 /* Keep the link status for extra SLI4 state machine reference */ 4057 phba->sli4_hba.link_state.speed = 4058 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 4059 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 4060 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 4061 phba->sli4_hba.link_state.topology = 4062 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 4063 phba->sli4_hba.link_state.status = 4064 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 4065 phba->sli4_hba.link_state.type = 4066 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 4067 phba->sli4_hba.link_state.number = 4068 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 4069 phba->sli4_hba.link_state.fault = 4070 bf_get(lpfc_acqe_link_fault, acqe_fc); 4071 phba->sli4_hba.link_state.logical_speed = 4072 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 4073 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4074 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 4075 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 4076 "%dMbps Fault:%d\n", 4077 phba->sli4_hba.link_state.speed, 4078 phba->sli4_hba.link_state.topology, 4079 phba->sli4_hba.link_state.status, 4080 phba->sli4_hba.link_state.type, 4081 phba->sli4_hba.link_state.number, 4082 phba->sli4_hba.link_state.logical_speed, 4083 phba->sli4_hba.link_state.fault); 4084 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4085 if (!pmb) { 4086 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4087 "2897 The mboxq allocation failed\n"); 4088 return; 4089 } 4090 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4091 if (!mp) { 4092 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4093 "2898 The lpfc_dmabuf allocation failed\n"); 4094 goto out_free_pmb; 4095 } 4096 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4097 if (!mp->virt) { 4098 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4099 "2899 The mbuf allocation failed\n"); 4100 goto out_free_dmabuf; 4101 } 4102 4103 /* Cleanup any outstanding ELS commands */ 4104 lpfc_els_flush_all_cmd(phba); 4105 4106 /* Block ELS IOCBs until we have done process link event */ 4107 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 4108 4109 /* Update link event statistics */ 4110 phba->sli.slistat.link_event++; 4111 4112 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4113 lpfc_read_topology(phba, pmb, mp); 4114 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4115 pmb->vport = phba->pport; 4116 4117 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 4118 /* Parse and translate status field */ 4119 mb = &pmb->u.mb; 4120 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, 4121 (void *)acqe_fc); 4122 4123 /* Parse and translate link attention fields */ 4124 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 4125 la->eventTag = acqe_fc->event_tag; 4126 bf_set(lpfc_mbx_read_top_att_type, la, 4127 LPFC_FC_LA_TYPE_LINK_DOWN); 4128 4129 /* Invoke the mailbox command callback function */ 4130 lpfc_mbx_cmpl_read_topology(phba, pmb); 4131 4132 return; 4133 } 4134 4135 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4136 if (rc == MBX_NOT_FINISHED) 4137 goto out_free_dmabuf; 4138 return; 4139 4140 out_free_dmabuf: 4141 kfree(mp); 4142 out_free_pmb: 4143 mempool_free(pmb, phba->mbox_mem_pool); 4144 } 4145 4146 /** 4147 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 4148 * @phba: pointer to lpfc hba data structure. 4149 * @acqe_fc: pointer to the async SLI completion queue entry. 4150 * 4151 * This routine is to handle the SLI4 asynchronous SLI events. 4152 **/ 4153 static void 4154 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 4155 { 4156 char port_name; 4157 char message[128]; 4158 uint8_t status; 4159 uint8_t evt_type; 4160 uint8_t operational = 0; 4161 struct temp_event temp_event_data; 4162 struct lpfc_acqe_misconfigured_event *misconfigured; 4163 struct Scsi_Host *shost; 4164 4165 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 4166 4167 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4168 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 4169 "x%08x SLI Event Type:%d\n", 4170 acqe_sli->event_data1, acqe_sli->event_data2, 4171 evt_type); 4172 4173 port_name = phba->Port[0]; 4174 if (port_name == 0x00) 4175 port_name = '?'; /* get port name is empty */ 4176 4177 switch (evt_type) { 4178 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 4179 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4180 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 4181 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4182 4183 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4184 "3190 Over Temperature:%d Celsius- Port Name %c\n", 4185 acqe_sli->event_data1, port_name); 4186 4187 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 4188 shost = lpfc_shost_from_vport(phba->pport); 4189 fc_host_post_vendor_event(shost, fc_get_event_number(), 4190 sizeof(temp_event_data), 4191 (char *)&temp_event_data, 4192 SCSI_NL_VID_TYPE_PCI 4193 | PCI_VENDOR_ID_EMULEX); 4194 break; 4195 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 4196 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4197 temp_event_data.event_code = LPFC_NORMAL_TEMP; 4198 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4199 4200 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4201 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 4202 acqe_sli->event_data1, port_name); 4203 4204 shost = lpfc_shost_from_vport(phba->pport); 4205 fc_host_post_vendor_event(shost, fc_get_event_number(), 4206 sizeof(temp_event_data), 4207 (char *)&temp_event_data, 4208 SCSI_NL_VID_TYPE_PCI 4209 | PCI_VENDOR_ID_EMULEX); 4210 break; 4211 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 4212 misconfigured = (struct lpfc_acqe_misconfigured_event *) 4213 &acqe_sli->event_data1; 4214 4215 /* fetch the status for this port */ 4216 switch (phba->sli4_hba.lnk_info.lnk_no) { 4217 case LPFC_LINK_NUMBER_0: 4218 status = bf_get(lpfc_sli_misconfigured_port0_state, 4219 &misconfigured->theEvent); 4220 operational = bf_get(lpfc_sli_misconfigured_port0_op, 4221 &misconfigured->theEvent); 4222 break; 4223 case LPFC_LINK_NUMBER_1: 4224 status = bf_get(lpfc_sli_misconfigured_port1_state, 4225 &misconfigured->theEvent); 4226 operational = bf_get(lpfc_sli_misconfigured_port1_op, 4227 &misconfigured->theEvent); 4228 break; 4229 case LPFC_LINK_NUMBER_2: 4230 status = bf_get(lpfc_sli_misconfigured_port2_state, 4231 &misconfigured->theEvent); 4232 operational = bf_get(lpfc_sli_misconfigured_port2_op, 4233 &misconfigured->theEvent); 4234 break; 4235 case LPFC_LINK_NUMBER_3: 4236 status = bf_get(lpfc_sli_misconfigured_port3_state, 4237 &misconfigured->theEvent); 4238 operational = bf_get(lpfc_sli_misconfigured_port3_op, 4239 &misconfigured->theEvent); 4240 break; 4241 default: 4242 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4243 "3296 " 4244 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 4245 "event: Invalid link %d", 4246 phba->sli4_hba.lnk_info.lnk_no); 4247 return; 4248 } 4249 4250 /* Skip if optic state unchanged */ 4251 if (phba->sli4_hba.lnk_info.optic_state == status) 4252 return; 4253 4254 switch (status) { 4255 case LPFC_SLI_EVENT_STATUS_VALID: 4256 sprintf(message, "Physical Link is functional"); 4257 break; 4258 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 4259 sprintf(message, "Optics faulted/incorrectly " 4260 "installed/not installed - Reseat optics, " 4261 "if issue not resolved, replace."); 4262 break; 4263 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 4264 sprintf(message, 4265 "Optics of two types installed - Remove one " 4266 "optic or install matching pair of optics."); 4267 break; 4268 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 4269 sprintf(message, "Incompatible optics - Replace with " 4270 "compatible optics for card to function."); 4271 break; 4272 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 4273 sprintf(message, "Unqualified optics - Replace with " 4274 "Avago optics for Warranty and Technical " 4275 "Support - Link is%s operational", 4276 (operational) ? "" : " not"); 4277 break; 4278 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 4279 sprintf(message, "Uncertified optics - Replace with " 4280 "Avago-certified optics to enable link " 4281 "operation - Link is%s operational", 4282 (operational) ? "" : " not"); 4283 break; 4284 default: 4285 /* firmware is reporting a status we don't know about */ 4286 sprintf(message, "Unknown event status x%02x", status); 4287 break; 4288 } 4289 phba->sli4_hba.lnk_info.optic_state = status; 4290 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4291 "3176 Port Name %c %s\n", port_name, message); 4292 break; 4293 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 4294 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4295 "3192 Remote DPort Test Initiated - " 4296 "Event Data1:x%08x Event Data2: x%08x\n", 4297 acqe_sli->event_data1, acqe_sli->event_data2); 4298 break; 4299 default: 4300 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4301 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 4302 "x%08x SLI Event Type:%d\n", 4303 acqe_sli->event_data1, acqe_sli->event_data2, 4304 evt_type); 4305 break; 4306 } 4307 } 4308 4309 /** 4310 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 4311 * @vport: pointer to vport data structure. 4312 * 4313 * This routine is to perform Clear Virtual Link (CVL) on a vport in 4314 * response to a CVL event. 4315 * 4316 * Return the pointer to the ndlp with the vport if successful, otherwise 4317 * return NULL. 4318 **/ 4319 static struct lpfc_nodelist * 4320 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 4321 { 4322 struct lpfc_nodelist *ndlp; 4323 struct Scsi_Host *shost; 4324 struct lpfc_hba *phba; 4325 4326 if (!vport) 4327 return NULL; 4328 phba = vport->phba; 4329 if (!phba) 4330 return NULL; 4331 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4332 if (!ndlp) { 4333 /* Cannot find existing Fabric ndlp, so allocate a new one */ 4334 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 4335 if (!ndlp) 4336 return 0; 4337 lpfc_nlp_init(vport, ndlp, Fabric_DID); 4338 /* Set the node type */ 4339 ndlp->nlp_type |= NLP_FABRIC; 4340 /* Put ndlp onto node list */ 4341 lpfc_enqueue_node(vport, ndlp); 4342 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 4343 /* re-setup ndlp without removing from node list */ 4344 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 4345 if (!ndlp) 4346 return 0; 4347 } 4348 if ((phba->pport->port_state < LPFC_FLOGI) && 4349 (phba->pport->port_state != LPFC_VPORT_FAILED)) 4350 return NULL; 4351 /* If virtual link is not yet instantiated ignore CVL */ 4352 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 4353 && (vport->port_state != LPFC_VPORT_FAILED)) 4354 return NULL; 4355 shost = lpfc_shost_from_vport(vport); 4356 if (!shost) 4357 return NULL; 4358 lpfc_linkdown_port(vport); 4359 lpfc_cleanup_pending_mbox(vport); 4360 spin_lock_irq(shost->host_lock); 4361 vport->fc_flag |= FC_VPORT_CVL_RCVD; 4362 spin_unlock_irq(shost->host_lock); 4363 4364 return ndlp; 4365 } 4366 4367 /** 4368 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 4369 * @vport: pointer to lpfc hba data structure. 4370 * 4371 * This routine is to perform Clear Virtual Link (CVL) on all vports in 4372 * response to a FCF dead event. 4373 **/ 4374 static void 4375 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 4376 { 4377 struct lpfc_vport **vports; 4378 int i; 4379 4380 vports = lpfc_create_vport_work_array(phba); 4381 if (vports) 4382 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 4383 lpfc_sli4_perform_vport_cvl(vports[i]); 4384 lpfc_destroy_vport_work_array(phba, vports); 4385 } 4386 4387 /** 4388 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 4389 * @phba: pointer to lpfc hba data structure. 4390 * @acqe_link: pointer to the async fcoe completion queue entry. 4391 * 4392 * This routine is to handle the SLI4 asynchronous fcoe event. 4393 **/ 4394 static void 4395 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 4396 struct lpfc_acqe_fip *acqe_fip) 4397 { 4398 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 4399 int rc; 4400 struct lpfc_vport *vport; 4401 struct lpfc_nodelist *ndlp; 4402 struct Scsi_Host *shost; 4403 int active_vlink_present; 4404 struct lpfc_vport **vports; 4405 int i; 4406 4407 phba->fc_eventTag = acqe_fip->event_tag; 4408 phba->fcoe_eventtag = acqe_fip->event_tag; 4409 switch (event_type) { 4410 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 4411 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 4412 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 4413 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4414 LOG_DISCOVERY, 4415 "2546 New FCF event, evt_tag:x%x, " 4416 "index:x%x\n", 4417 acqe_fip->event_tag, 4418 acqe_fip->index); 4419 else 4420 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 4421 LOG_DISCOVERY, 4422 "2788 FCF param modified event, " 4423 "evt_tag:x%x, index:x%x\n", 4424 acqe_fip->event_tag, 4425 acqe_fip->index); 4426 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4427 /* 4428 * During period of FCF discovery, read the FCF 4429 * table record indexed by the event to update 4430 * FCF roundrobin failover eligible FCF bmask. 4431 */ 4432 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 4433 LOG_DISCOVERY, 4434 "2779 Read FCF (x%x) for updating " 4435 "roundrobin FCF failover bmask\n", 4436 acqe_fip->index); 4437 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 4438 } 4439 4440 /* If the FCF discovery is in progress, do nothing. */ 4441 spin_lock_irq(&phba->hbalock); 4442 if (phba->hba_flag & FCF_TS_INPROG) { 4443 spin_unlock_irq(&phba->hbalock); 4444 break; 4445 } 4446 /* If fast FCF failover rescan event is pending, do nothing */ 4447 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 4448 spin_unlock_irq(&phba->hbalock); 4449 break; 4450 } 4451 4452 /* If the FCF has been in discovered state, do nothing. */ 4453 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 4454 spin_unlock_irq(&phba->hbalock); 4455 break; 4456 } 4457 spin_unlock_irq(&phba->hbalock); 4458 4459 /* Otherwise, scan the entire FCF table and re-discover SAN */ 4460 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4461 "2770 Start FCF table scan per async FCF " 4462 "event, evt_tag:x%x, index:x%x\n", 4463 acqe_fip->event_tag, acqe_fip->index); 4464 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 4465 LPFC_FCOE_FCF_GET_FIRST); 4466 if (rc) 4467 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4468 "2547 Issue FCF scan read FCF mailbox " 4469 "command failed (x%x)\n", rc); 4470 break; 4471 4472 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 4473 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4474 "2548 FCF Table full count 0x%x tag 0x%x\n", 4475 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 4476 acqe_fip->event_tag); 4477 break; 4478 4479 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 4480 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4481 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4482 "2549 FCF (x%x) disconnected from network, " 4483 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 4484 /* 4485 * If we are in the middle of FCF failover process, clear 4486 * the corresponding FCF bit in the roundrobin bitmap. 4487 */ 4488 spin_lock_irq(&phba->hbalock); 4489 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 4490 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 4491 spin_unlock_irq(&phba->hbalock); 4492 /* Update FLOGI FCF failover eligible FCF bmask */ 4493 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 4494 break; 4495 } 4496 spin_unlock_irq(&phba->hbalock); 4497 4498 /* If the event is not for currently used fcf do nothing */ 4499 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 4500 break; 4501 4502 /* 4503 * Otherwise, request the port to rediscover the entire FCF 4504 * table for a fast recovery from case that the current FCF 4505 * is no longer valid as we are not in the middle of FCF 4506 * failover process already. 4507 */ 4508 spin_lock_irq(&phba->hbalock); 4509 /* Mark the fast failover process in progress */ 4510 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 4511 spin_unlock_irq(&phba->hbalock); 4512 4513 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4514 "2771 Start FCF fast failover process due to " 4515 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 4516 "\n", acqe_fip->event_tag, acqe_fip->index); 4517 rc = lpfc_sli4_redisc_fcf_table(phba); 4518 if (rc) { 4519 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4520 LOG_DISCOVERY, 4521 "2772 Issue FCF rediscover mabilbox " 4522 "command failed, fail through to FCF " 4523 "dead event\n"); 4524 spin_lock_irq(&phba->hbalock); 4525 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 4526 spin_unlock_irq(&phba->hbalock); 4527 /* 4528 * Last resort will fail over by treating this 4529 * as a link down to FCF registration. 4530 */ 4531 lpfc_sli4_fcf_dead_failthrough(phba); 4532 } else { 4533 /* Reset FCF roundrobin bmask for new discovery */ 4534 lpfc_sli4_clear_fcf_rr_bmask(phba); 4535 /* 4536 * Handling fast FCF failover to a DEAD FCF event is 4537 * considered equalivant to receiving CVL to all vports. 4538 */ 4539 lpfc_sli4_perform_all_vport_cvl(phba); 4540 } 4541 break; 4542 case LPFC_FIP_EVENT_TYPE_CVL: 4543 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4544 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4545 "2718 Clear Virtual Link Received for VPI 0x%x" 4546 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 4547 4548 vport = lpfc_find_vport_by_vpid(phba, 4549 acqe_fip->index); 4550 ndlp = lpfc_sli4_perform_vport_cvl(vport); 4551 if (!ndlp) 4552 break; 4553 active_vlink_present = 0; 4554 4555 vports = lpfc_create_vport_work_array(phba); 4556 if (vports) { 4557 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 4558 i++) { 4559 if ((!(vports[i]->fc_flag & 4560 FC_VPORT_CVL_RCVD)) && 4561 (vports[i]->port_state > LPFC_FDISC)) { 4562 active_vlink_present = 1; 4563 break; 4564 } 4565 } 4566 lpfc_destroy_vport_work_array(phba, vports); 4567 } 4568 4569 /* 4570 * Don't re-instantiate if vport is marked for deletion. 4571 * If we are here first then vport_delete is going to wait 4572 * for discovery to complete. 4573 */ 4574 if (!(vport->load_flag & FC_UNLOADING) && 4575 active_vlink_present) { 4576 /* 4577 * If there are other active VLinks present, 4578 * re-instantiate the Vlink using FDISC. 4579 */ 4580 mod_timer(&ndlp->nlp_delayfunc, 4581 jiffies + msecs_to_jiffies(1000)); 4582 shost = lpfc_shost_from_vport(vport); 4583 spin_lock_irq(shost->host_lock); 4584 ndlp->nlp_flag |= NLP_DELAY_TMO; 4585 spin_unlock_irq(shost->host_lock); 4586 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 4587 vport->port_state = LPFC_FDISC; 4588 } else { 4589 /* 4590 * Otherwise, we request port to rediscover 4591 * the entire FCF table for a fast recovery 4592 * from possible case that the current FCF 4593 * is no longer valid if we are not already 4594 * in the FCF failover process. 4595 */ 4596 spin_lock_irq(&phba->hbalock); 4597 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4598 spin_unlock_irq(&phba->hbalock); 4599 break; 4600 } 4601 /* Mark the fast failover process in progress */ 4602 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 4603 spin_unlock_irq(&phba->hbalock); 4604 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 4605 LOG_DISCOVERY, 4606 "2773 Start FCF failover per CVL, " 4607 "evt_tag:x%x\n", acqe_fip->event_tag); 4608 rc = lpfc_sli4_redisc_fcf_table(phba); 4609 if (rc) { 4610 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4611 LOG_DISCOVERY, 4612 "2774 Issue FCF rediscover " 4613 "mabilbox command failed, " 4614 "through to CVL event\n"); 4615 spin_lock_irq(&phba->hbalock); 4616 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 4617 spin_unlock_irq(&phba->hbalock); 4618 /* 4619 * Last resort will be re-try on the 4620 * the current registered FCF entry. 4621 */ 4622 lpfc_retry_pport_discovery(phba); 4623 } else 4624 /* 4625 * Reset FCF roundrobin bmask for new 4626 * discovery. 4627 */ 4628 lpfc_sli4_clear_fcf_rr_bmask(phba); 4629 } 4630 break; 4631 default: 4632 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4633 "0288 Unknown FCoE event type 0x%x event tag " 4634 "0x%x\n", event_type, acqe_fip->event_tag); 4635 break; 4636 } 4637 } 4638 4639 /** 4640 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 4641 * @phba: pointer to lpfc hba data structure. 4642 * @acqe_link: pointer to the async dcbx completion queue entry. 4643 * 4644 * This routine is to handle the SLI4 asynchronous dcbx event. 4645 **/ 4646 static void 4647 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 4648 struct lpfc_acqe_dcbx *acqe_dcbx) 4649 { 4650 phba->fc_eventTag = acqe_dcbx->event_tag; 4651 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4652 "0290 The SLI4 DCBX asynchronous event is not " 4653 "handled yet\n"); 4654 } 4655 4656 /** 4657 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 4658 * @phba: pointer to lpfc hba data structure. 4659 * @acqe_link: pointer to the async grp5 completion queue entry. 4660 * 4661 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 4662 * is an asynchronous notified of a logical link speed change. The Port 4663 * reports the logical link speed in units of 10Mbps. 4664 **/ 4665 static void 4666 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 4667 struct lpfc_acqe_grp5 *acqe_grp5) 4668 { 4669 uint16_t prev_ll_spd; 4670 4671 phba->fc_eventTag = acqe_grp5->event_tag; 4672 phba->fcoe_eventtag = acqe_grp5->event_tag; 4673 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 4674 phba->sli4_hba.link_state.logical_speed = 4675 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 4676 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4677 "2789 GRP5 Async Event: Updating logical link speed " 4678 "from %dMbps to %dMbps\n", prev_ll_spd, 4679 phba->sli4_hba.link_state.logical_speed); 4680 } 4681 4682 /** 4683 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 4684 * @phba: pointer to lpfc hba data structure. 4685 * 4686 * This routine is invoked by the worker thread to process all the pending 4687 * SLI4 asynchronous events. 4688 **/ 4689 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 4690 { 4691 struct lpfc_cq_event *cq_event; 4692 4693 /* First, declare the async event has been handled */ 4694 spin_lock_irq(&phba->hbalock); 4695 phba->hba_flag &= ~ASYNC_EVENT; 4696 spin_unlock_irq(&phba->hbalock); 4697 /* Now, handle all the async events */ 4698 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 4699 /* Get the first event from the head of the event queue */ 4700 spin_lock_irq(&phba->hbalock); 4701 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 4702 cq_event, struct lpfc_cq_event, list); 4703 spin_unlock_irq(&phba->hbalock); 4704 /* Process the asynchronous event */ 4705 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 4706 case LPFC_TRAILER_CODE_LINK: 4707 lpfc_sli4_async_link_evt(phba, 4708 &cq_event->cqe.acqe_link); 4709 break; 4710 case LPFC_TRAILER_CODE_FCOE: 4711 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 4712 break; 4713 case LPFC_TRAILER_CODE_DCBX: 4714 lpfc_sli4_async_dcbx_evt(phba, 4715 &cq_event->cqe.acqe_dcbx); 4716 break; 4717 case LPFC_TRAILER_CODE_GRP5: 4718 lpfc_sli4_async_grp5_evt(phba, 4719 &cq_event->cqe.acqe_grp5); 4720 break; 4721 case LPFC_TRAILER_CODE_FC: 4722 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 4723 break; 4724 case LPFC_TRAILER_CODE_SLI: 4725 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 4726 break; 4727 default: 4728 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4729 "1804 Invalid asynchrous event code: " 4730 "x%x\n", bf_get(lpfc_trailer_code, 4731 &cq_event->cqe.mcqe_cmpl)); 4732 break; 4733 } 4734 /* Free the completion event processed to the free pool */ 4735 lpfc_sli4_cq_event_release(phba, cq_event); 4736 } 4737 } 4738 4739 /** 4740 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 4741 * @phba: pointer to lpfc hba data structure. 4742 * 4743 * This routine is invoked by the worker thread to process FCF table 4744 * rediscovery pending completion event. 4745 **/ 4746 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 4747 { 4748 int rc; 4749 4750 spin_lock_irq(&phba->hbalock); 4751 /* Clear FCF rediscovery timeout event */ 4752 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 4753 /* Clear driver fast failover FCF record flag */ 4754 phba->fcf.failover_rec.flag = 0; 4755 /* Set state for FCF fast failover */ 4756 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 4757 spin_unlock_irq(&phba->hbalock); 4758 4759 /* Scan FCF table from the first entry to re-discover SAN */ 4760 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4761 "2777 Start post-quiescent FCF table scan\n"); 4762 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 4763 if (rc) 4764 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4765 "2747 Issue FCF scan read FCF mailbox " 4766 "command failed 0x%x\n", rc); 4767 } 4768 4769 /** 4770 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 4771 * @phba: pointer to lpfc hba data structure. 4772 * @dev_grp: The HBA PCI-Device group number. 4773 * 4774 * This routine is invoked to set up the per HBA PCI-Device group function 4775 * API jump table entries. 4776 * 4777 * Return: 0 if success, otherwise -ENODEV 4778 **/ 4779 int 4780 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4781 { 4782 int rc; 4783 4784 /* Set up lpfc PCI-device group */ 4785 phba->pci_dev_grp = dev_grp; 4786 4787 /* The LPFC_PCI_DEV_OC uses SLI4 */ 4788 if (dev_grp == LPFC_PCI_DEV_OC) 4789 phba->sli_rev = LPFC_SLI_REV4; 4790 4791 /* Set up device INIT API function jump table */ 4792 rc = lpfc_init_api_table_setup(phba, dev_grp); 4793 if (rc) 4794 return -ENODEV; 4795 /* Set up SCSI API function jump table */ 4796 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 4797 if (rc) 4798 return -ENODEV; 4799 /* Set up SLI API function jump table */ 4800 rc = lpfc_sli_api_table_setup(phba, dev_grp); 4801 if (rc) 4802 return -ENODEV; 4803 /* Set up MBOX API function jump table */ 4804 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 4805 if (rc) 4806 return -ENODEV; 4807 4808 return 0; 4809 } 4810 4811 /** 4812 * lpfc_log_intr_mode - Log the active interrupt mode 4813 * @phba: pointer to lpfc hba data structure. 4814 * @intr_mode: active interrupt mode adopted. 4815 * 4816 * This routine it invoked to log the currently used active interrupt mode 4817 * to the device. 4818 **/ 4819 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 4820 { 4821 switch (intr_mode) { 4822 case 0: 4823 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4824 "0470 Enable INTx interrupt mode.\n"); 4825 break; 4826 case 1: 4827 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4828 "0481 Enabled MSI interrupt mode.\n"); 4829 break; 4830 case 2: 4831 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4832 "0480 Enabled MSI-X interrupt mode.\n"); 4833 break; 4834 default: 4835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4836 "0482 Illegal interrupt mode.\n"); 4837 break; 4838 } 4839 return; 4840 } 4841 4842 /** 4843 * lpfc_enable_pci_dev - Enable a generic PCI device. 4844 * @phba: pointer to lpfc hba data structure. 4845 * 4846 * This routine is invoked to enable the PCI device that is common to all 4847 * PCI devices. 4848 * 4849 * Return codes 4850 * 0 - successful 4851 * other values - error 4852 **/ 4853 static int 4854 lpfc_enable_pci_dev(struct lpfc_hba *phba) 4855 { 4856 struct pci_dev *pdev; 4857 4858 /* Obtain PCI device reference */ 4859 if (!phba->pcidev) 4860 goto out_error; 4861 else 4862 pdev = phba->pcidev; 4863 /* Enable PCI device */ 4864 if (pci_enable_device_mem(pdev)) 4865 goto out_error; 4866 /* Request PCI resource for the device */ 4867 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 4868 goto out_disable_device; 4869 /* Set up device as PCI master and save state for EEH */ 4870 pci_set_master(pdev); 4871 pci_try_set_mwi(pdev); 4872 pci_save_state(pdev); 4873 4874 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 4875 if (pci_is_pcie(pdev)) 4876 pdev->needs_freset = 1; 4877 4878 return 0; 4879 4880 out_disable_device: 4881 pci_disable_device(pdev); 4882 out_error: 4883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4884 "1401 Failed to enable pci device\n"); 4885 return -ENODEV; 4886 } 4887 4888 /** 4889 * lpfc_disable_pci_dev - Disable a generic PCI device. 4890 * @phba: pointer to lpfc hba data structure. 4891 * 4892 * This routine is invoked to disable the PCI device that is common to all 4893 * PCI devices. 4894 **/ 4895 static void 4896 lpfc_disable_pci_dev(struct lpfc_hba *phba) 4897 { 4898 struct pci_dev *pdev; 4899 4900 /* Obtain PCI device reference */ 4901 if (!phba->pcidev) 4902 return; 4903 else 4904 pdev = phba->pcidev; 4905 /* Release PCI resource and disable PCI device */ 4906 pci_release_mem_regions(pdev); 4907 pci_disable_device(pdev); 4908 4909 return; 4910 } 4911 4912 /** 4913 * lpfc_reset_hba - Reset a hba 4914 * @phba: pointer to lpfc hba data structure. 4915 * 4916 * This routine is invoked to reset a hba device. It brings the HBA 4917 * offline, performs a board restart, and then brings the board back 4918 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 4919 * on outstanding mailbox commands. 4920 **/ 4921 void 4922 lpfc_reset_hba(struct lpfc_hba *phba) 4923 { 4924 /* If resets are disabled then set error state and return. */ 4925 if (!phba->cfg_enable_hba_reset) { 4926 phba->link_state = LPFC_HBA_ERROR; 4927 return; 4928 } 4929 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 4930 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 4931 else 4932 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 4933 lpfc_offline(phba); 4934 lpfc_sli_brdrestart(phba); 4935 lpfc_online(phba); 4936 lpfc_unblock_mgmt_io(phba); 4937 } 4938 4939 /** 4940 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 4941 * @phba: pointer to lpfc hba data structure. 4942 * 4943 * This function enables the PCI SR-IOV virtual functions to a physical 4944 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 4945 * enable the number of virtual functions to the physical function. As 4946 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 4947 * API call does not considered as an error condition for most of the device. 4948 **/ 4949 uint16_t 4950 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 4951 { 4952 struct pci_dev *pdev = phba->pcidev; 4953 uint16_t nr_virtfn; 4954 int pos; 4955 4956 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4957 if (pos == 0) 4958 return 0; 4959 4960 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 4961 return nr_virtfn; 4962 } 4963 4964 /** 4965 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 4966 * @phba: pointer to lpfc hba data structure. 4967 * @nr_vfn: number of virtual functions to be enabled. 4968 * 4969 * This function enables the PCI SR-IOV virtual functions to a physical 4970 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 4971 * enable the number of virtual functions to the physical function. As 4972 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 4973 * API call does not considered as an error condition for most of the device. 4974 **/ 4975 int 4976 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4977 { 4978 struct pci_dev *pdev = phba->pcidev; 4979 uint16_t max_nr_vfn; 4980 int rc; 4981 4982 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 4983 if (nr_vfn > max_nr_vfn) { 4984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4985 "3057 Requested vfs (%d) greater than " 4986 "supported vfs (%d)", nr_vfn, max_nr_vfn); 4987 return -EINVAL; 4988 } 4989 4990 rc = pci_enable_sriov(pdev, nr_vfn); 4991 if (rc) { 4992 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4993 "2806 Failed to enable sriov on this device " 4994 "with vfn number nr_vf:%d, rc:%d\n", 4995 nr_vfn, rc); 4996 } else 4997 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4998 "2807 Successful enable sriov on this device " 4999 "with vfn number nr_vf:%d\n", nr_vfn); 5000 return rc; 5001 } 5002 5003 /** 5004 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 5005 * @phba: pointer to lpfc hba data structure. 5006 * 5007 * This routine is invoked to set up the driver internal resources specific to 5008 * support the SLI-3 HBA device it attached to. 5009 * 5010 * Return codes 5011 * 0 - successful 5012 * other values - error 5013 **/ 5014 static int 5015 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 5016 { 5017 struct lpfc_sli *psli; 5018 int rc; 5019 5020 /* 5021 * Initialize timers used by driver 5022 */ 5023 5024 /* Heartbeat timer */ 5025 init_timer(&phba->hb_tmofunc); 5026 phba->hb_tmofunc.function = lpfc_hb_timeout; 5027 phba->hb_tmofunc.data = (unsigned long)phba; 5028 5029 psli = &phba->sli; 5030 /* MBOX heartbeat timer */ 5031 init_timer(&psli->mbox_tmo); 5032 psli->mbox_tmo.function = lpfc_mbox_timeout; 5033 psli->mbox_tmo.data = (unsigned long) phba; 5034 /* FCP polling mode timer */ 5035 init_timer(&phba->fcp_poll_timer); 5036 phba->fcp_poll_timer.function = lpfc_poll_timeout; 5037 phba->fcp_poll_timer.data = (unsigned long) phba; 5038 /* Fabric block timer */ 5039 init_timer(&phba->fabric_block_timer); 5040 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5041 phba->fabric_block_timer.data = (unsigned long) phba; 5042 /* EA polling mode timer */ 5043 init_timer(&phba->eratt_poll); 5044 phba->eratt_poll.function = lpfc_poll_eratt; 5045 phba->eratt_poll.data = (unsigned long) phba; 5046 5047 /* Host attention work mask setup */ 5048 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 5049 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 5050 5051 /* Get all the module params for configuring this host */ 5052 lpfc_get_cfgparam(phba); 5053 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 5054 phba->menlo_flag |= HBA_MENLO_SUPPORT; 5055 /* check for menlo minimum sg count */ 5056 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 5057 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 5058 } 5059 5060 if (!phba->sli.ring) 5061 phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING * 5062 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5063 if (!phba->sli.ring) 5064 return -ENOMEM; 5065 5066 /* 5067 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 5068 * used to create the sg_dma_buf_pool must be dynamically calculated. 5069 */ 5070 5071 /* Initialize the host templates the configured values. */ 5072 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5073 lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt; 5074 5075 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 5076 if (phba->cfg_enable_bg) { 5077 /* 5078 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5079 * the FCP rsp, and a BDE for each. Sice we have no control 5080 * over how many protection data segments the SCSI Layer 5081 * will hand us (ie: there could be one for every block 5082 * in the IO), we just allocate enough BDEs to accomidate 5083 * our max amount and we need to limit lpfc_sg_seg_cnt to 5084 * minimize the risk of running out. 5085 */ 5086 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5087 sizeof(struct fcp_rsp) + 5088 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); 5089 5090 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 5091 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 5092 5093 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 5094 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 5095 } else { 5096 /* 5097 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5098 * the FCP rsp, a BDE for each, and a BDE for up to 5099 * cfg_sg_seg_cnt data segments. 5100 */ 5101 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5102 sizeof(struct fcp_rsp) + 5103 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 5104 5105 /* Total BDEs in BPL for scsi_sg_list */ 5106 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5107 } 5108 5109 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5110 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 5111 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5112 phba->cfg_total_seg_cnt); 5113 5114 phba->max_vpi = LPFC_MAX_VPI; 5115 /* This will be set to correct value after config_port mbox */ 5116 phba->max_vports = 0; 5117 5118 /* 5119 * Initialize the SLI Layer to run with lpfc HBAs. 5120 */ 5121 lpfc_sli_setup(phba); 5122 lpfc_sli_queue_setup(phba); 5123 5124 /* Allocate device driver memory */ 5125 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 5126 return -ENOMEM; 5127 5128 /* 5129 * Enable sr-iov virtual functions if supported and configured 5130 * through the module parameter. 5131 */ 5132 if (phba->cfg_sriov_nr_virtfn > 0) { 5133 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5134 phba->cfg_sriov_nr_virtfn); 5135 if (rc) { 5136 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5137 "2808 Requested number of SR-IOV " 5138 "virtual functions (%d) is not " 5139 "supported\n", 5140 phba->cfg_sriov_nr_virtfn); 5141 phba->cfg_sriov_nr_virtfn = 0; 5142 } 5143 } 5144 5145 return 0; 5146 } 5147 5148 /** 5149 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 5150 * @phba: pointer to lpfc hba data structure. 5151 * 5152 * This routine is invoked to unset the driver internal resources set up 5153 * specific for supporting the SLI-3 HBA device it attached to. 5154 **/ 5155 static void 5156 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 5157 { 5158 /* Free device driver memory allocated */ 5159 lpfc_mem_free_all(phba); 5160 5161 return; 5162 } 5163 5164 /** 5165 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 5166 * @phba: pointer to lpfc hba data structure. 5167 * 5168 * This routine is invoked to set up the driver internal resources specific to 5169 * support the SLI-4 HBA device it attached to. 5170 * 5171 * Return codes 5172 * 0 - successful 5173 * other values - error 5174 **/ 5175 static int 5176 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 5177 { 5178 struct lpfc_vector_map_info *cpup; 5179 struct lpfc_sli *psli; 5180 LPFC_MBOXQ_t *mboxq; 5181 int rc, i, hbq_count, max_buf_size; 5182 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 5183 struct lpfc_mqe *mqe; 5184 int longs; 5185 int fof_vectors = 0; 5186 5187 /* Get all the module params for configuring this host */ 5188 lpfc_get_cfgparam(phba); 5189 5190 /* Before proceed, wait for POST done and device ready */ 5191 rc = lpfc_sli4_post_status_check(phba); 5192 if (rc) 5193 return -ENODEV; 5194 5195 /* 5196 * Initialize timers used by driver 5197 */ 5198 5199 /* Heartbeat timer */ 5200 init_timer(&phba->hb_tmofunc); 5201 phba->hb_tmofunc.function = lpfc_hb_timeout; 5202 phba->hb_tmofunc.data = (unsigned long)phba; 5203 init_timer(&phba->rrq_tmr); 5204 phba->rrq_tmr.function = lpfc_rrq_timeout; 5205 phba->rrq_tmr.data = (unsigned long)phba; 5206 5207 psli = &phba->sli; 5208 /* MBOX heartbeat timer */ 5209 init_timer(&psli->mbox_tmo); 5210 psli->mbox_tmo.function = lpfc_mbox_timeout; 5211 psli->mbox_tmo.data = (unsigned long) phba; 5212 /* Fabric block timer */ 5213 init_timer(&phba->fabric_block_timer); 5214 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5215 phba->fabric_block_timer.data = (unsigned long) phba; 5216 /* EA polling mode timer */ 5217 init_timer(&phba->eratt_poll); 5218 phba->eratt_poll.function = lpfc_poll_eratt; 5219 phba->eratt_poll.data = (unsigned long) phba; 5220 /* FCF rediscover timer */ 5221 init_timer(&phba->fcf.redisc_wait); 5222 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 5223 phba->fcf.redisc_wait.data = (unsigned long)phba; 5224 5225 /* 5226 * Control structure for handling external multi-buffer mailbox 5227 * command pass-through. 5228 */ 5229 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 5230 sizeof(struct lpfc_mbox_ext_buf_ctx)); 5231 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 5232 5233 phba->max_vpi = LPFC_MAX_VPI; 5234 5235 /* This will be set to correct value after the read_config mbox */ 5236 phba->max_vports = 0; 5237 5238 /* Program the default value of vlan_id and fc_map */ 5239 phba->valid_vlan = 0; 5240 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5241 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5242 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5243 5244 /* 5245 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 5246 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 5247 */ 5248 if (!phba->sli.ring) 5249 phba->sli.ring = kzalloc( 5250 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * 5251 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5252 if (!phba->sli.ring) 5253 return -ENOMEM; 5254 5255 /* 5256 * It doesn't matter what family our adapter is in, we are 5257 * limited to 2 Pages, 512 SGEs, for our SGL. 5258 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 5259 */ 5260 max_buf_size = (2 * SLI4_PAGE_SIZE); 5261 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) 5262 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; 5263 5264 /* 5265 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 5266 * used to create the sg_dma_buf_pool must be dynamically calculated. 5267 */ 5268 5269 if (phba->cfg_enable_bg) { 5270 /* 5271 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5272 * the FCP rsp, and a SGE for each. Sice we have no control 5273 * over how many protection data segments the SCSI Layer 5274 * will hand us (ie: there could be one for every block 5275 * in the IO), we just allocate enough SGEs to accomidate 5276 * our max amount and we need to limit lpfc_sg_seg_cnt to 5277 * minimize the risk of running out. 5278 */ 5279 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5280 sizeof(struct fcp_rsp) + max_buf_size; 5281 5282 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 5283 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 5284 5285 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) 5286 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; 5287 } else { 5288 /* 5289 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5290 * the FCP rsp, a SGE for each, and a SGE for up to 5291 * cfg_sg_seg_cnt data segments. 5292 */ 5293 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5294 sizeof(struct fcp_rsp) + 5295 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); 5296 5297 /* Total SGEs for scsi_sg_list */ 5298 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5299 /* 5300 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need 5301 * to post 1 page for the SGL. 5302 */ 5303 } 5304 5305 /* Initialize the host templates with the updated values. */ 5306 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5307 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5308 5309 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5310 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5311 else 5312 phba->cfg_sg_dma_buf_size = 5313 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 5314 5315 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5316 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", 5317 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5318 phba->cfg_total_seg_cnt); 5319 5320 /* Initialize buffer queue management fields */ 5321 hbq_count = lpfc_sli_hbq_count(); 5322 for (i = 0; i < hbq_count; ++i) 5323 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5324 INIT_LIST_HEAD(&phba->rb_pend_list); 5325 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 5326 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 5327 5328 /* 5329 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 5330 */ 5331 /* Initialize the Abort scsi buffer list used by driver */ 5332 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 5333 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 5334 /* This abort list used by worker thread */ 5335 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 5336 5337 /* 5338 * Initialize driver internal slow-path work queues 5339 */ 5340 5341 /* Driver internel slow-path CQ Event pool */ 5342 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 5343 /* Response IOCB work queue list */ 5344 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 5345 /* Asynchronous event CQ Event work queue list */ 5346 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 5347 /* Fast-path XRI aborted CQ Event work queue list */ 5348 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 5349 /* Slow-path XRI aborted CQ Event work queue list */ 5350 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 5351 /* Receive queue CQ Event work queue list */ 5352 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 5353 5354 /* Initialize extent block lists. */ 5355 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 5356 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 5357 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 5358 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 5359 5360 /* initialize optic_state to 0xFF */ 5361 phba->sli4_hba.lnk_info.optic_state = 0xff; 5362 5363 /* Initialize the driver internal SLI layer lists. */ 5364 lpfc_sli_setup(phba); 5365 lpfc_sli_queue_setup(phba); 5366 5367 /* Allocate device driver memory */ 5368 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 5369 if (rc) 5370 return -ENOMEM; 5371 5372 /* IF Type 2 ports get initialized now. */ 5373 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5374 LPFC_SLI_INTF_IF_TYPE_2) { 5375 rc = lpfc_pci_function_reset(phba); 5376 if (unlikely(rc)) 5377 return -ENODEV; 5378 phba->temp_sensor_support = 1; 5379 } 5380 5381 /* Create the bootstrap mailbox command */ 5382 rc = lpfc_create_bootstrap_mbox(phba); 5383 if (unlikely(rc)) 5384 goto out_free_mem; 5385 5386 /* Set up the host's endian order with the device. */ 5387 rc = lpfc_setup_endian_order(phba); 5388 if (unlikely(rc)) 5389 goto out_free_bsmbx; 5390 5391 /* Set up the hba's configuration parameters. */ 5392 rc = lpfc_sli4_read_config(phba); 5393 if (unlikely(rc)) 5394 goto out_free_bsmbx; 5395 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 5396 if (unlikely(rc)) 5397 goto out_free_bsmbx; 5398 5399 /* IF Type 0 ports get initialized now. */ 5400 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5401 LPFC_SLI_INTF_IF_TYPE_0) { 5402 rc = lpfc_pci_function_reset(phba); 5403 if (unlikely(rc)) 5404 goto out_free_bsmbx; 5405 } 5406 5407 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 5408 GFP_KERNEL); 5409 if (!mboxq) { 5410 rc = -ENOMEM; 5411 goto out_free_bsmbx; 5412 } 5413 5414 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 5415 lpfc_supported_pages(mboxq); 5416 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5417 if (!rc) { 5418 mqe = &mboxq->u.mqe; 5419 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 5420 LPFC_MAX_SUPPORTED_PAGES); 5421 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 5422 switch (pn_page[i]) { 5423 case LPFC_SLI4_PARAMETERS: 5424 phba->sli4_hba.pc_sli4_params.supported = 1; 5425 break; 5426 default: 5427 break; 5428 } 5429 } 5430 /* Read the port's SLI4 Parameters capabilities if supported. */ 5431 if (phba->sli4_hba.pc_sli4_params.supported) 5432 rc = lpfc_pc_sli4_params_get(phba, mboxq); 5433 if (rc) { 5434 mempool_free(mboxq, phba->mbox_mem_pool); 5435 rc = -EIO; 5436 goto out_free_bsmbx; 5437 } 5438 } 5439 5440 /* 5441 * Get sli4 parameters that override parameters from Port capabilities. 5442 * If this call fails, it isn't critical unless the SLI4 parameters come 5443 * back in conflict. 5444 */ 5445 rc = lpfc_get_sli4_parameters(phba, mboxq); 5446 if (rc) { 5447 if (phba->sli4_hba.extents_in_use && 5448 phba->sli4_hba.rpi_hdrs_in_use) { 5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5450 "2999 Unsupported SLI4 Parameters " 5451 "Extents and RPI headers enabled.\n"); 5452 goto out_free_bsmbx; 5453 } 5454 } 5455 mempool_free(mboxq, phba->mbox_mem_pool); 5456 5457 /* Verify OAS is supported */ 5458 lpfc_sli4_oas_verify(phba); 5459 if (phba->cfg_fof) 5460 fof_vectors = 1; 5461 5462 /* Verify all the SLI4 queues */ 5463 rc = lpfc_sli4_queue_verify(phba); 5464 if (rc) 5465 goto out_free_bsmbx; 5466 5467 /* Create driver internal CQE event pool */ 5468 rc = lpfc_sli4_cq_event_pool_create(phba); 5469 if (rc) 5470 goto out_free_bsmbx; 5471 5472 /* Initialize sgl lists per host */ 5473 lpfc_init_sgl_list(phba); 5474 5475 /* Allocate and initialize active sgl array */ 5476 rc = lpfc_init_active_sgl_array(phba); 5477 if (rc) { 5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5479 "1430 Failed to initialize sgl list.\n"); 5480 goto out_destroy_cq_event_pool; 5481 } 5482 rc = lpfc_sli4_init_rpi_hdrs(phba); 5483 if (rc) { 5484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5485 "1432 Failed to initialize rpi headers.\n"); 5486 goto out_free_active_sgl; 5487 } 5488 5489 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 5490 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 5491 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 5492 GFP_KERNEL); 5493 if (!phba->fcf.fcf_rr_bmask) { 5494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5495 "2759 Failed allocate memory for FCF round " 5496 "robin failover bmask\n"); 5497 rc = -ENOMEM; 5498 goto out_remove_rpi_hdrs; 5499 } 5500 5501 phba->sli4_hba.fcp_eq_hdl = 5502 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 5503 (fof_vectors + phba->cfg_fcp_io_channel)), 5504 GFP_KERNEL); 5505 if (!phba->sli4_hba.fcp_eq_hdl) { 5506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5507 "2572 Failed allocate memory for " 5508 "fast-path per-EQ handle array\n"); 5509 rc = -ENOMEM; 5510 goto out_free_fcf_rr_bmask; 5511 } 5512 5513 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 5514 (fof_vectors + 5515 phba->cfg_fcp_io_channel)), GFP_KERNEL); 5516 if (!phba->sli4_hba.msix_entries) { 5517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5518 "2573 Failed allocate memory for msi-x " 5519 "interrupt vector entries\n"); 5520 rc = -ENOMEM; 5521 goto out_free_fcp_eq_hdl; 5522 } 5523 5524 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * 5525 phba->sli4_hba.num_present_cpu), 5526 GFP_KERNEL); 5527 if (!phba->sli4_hba.cpu_map) { 5528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5529 "3327 Failed allocate memory for msi-x " 5530 "interrupt vector mapping\n"); 5531 rc = -ENOMEM; 5532 goto out_free_msix; 5533 } 5534 if (lpfc_used_cpu == NULL) { 5535 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), 5536 GFP_KERNEL); 5537 if (!lpfc_used_cpu) { 5538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5539 "3335 Failed allocate memory for msi-x " 5540 "interrupt vector mapping\n"); 5541 kfree(phba->sli4_hba.cpu_map); 5542 rc = -ENOMEM; 5543 goto out_free_msix; 5544 } 5545 for (i = 0; i < lpfc_present_cpu; i++) 5546 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; 5547 } 5548 5549 /* Initialize io channels for round robin */ 5550 cpup = phba->sli4_hba.cpu_map; 5551 rc = 0; 5552 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 5553 cpup->channel_id = rc; 5554 rc++; 5555 if (rc >= phba->cfg_fcp_io_channel) 5556 rc = 0; 5557 } 5558 5559 /* 5560 * Enable sr-iov virtual functions if supported and configured 5561 * through the module parameter. 5562 */ 5563 if (phba->cfg_sriov_nr_virtfn > 0) { 5564 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5565 phba->cfg_sriov_nr_virtfn); 5566 if (rc) { 5567 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5568 "3020 Requested number of SR-IOV " 5569 "virtual functions (%d) is not " 5570 "supported\n", 5571 phba->cfg_sriov_nr_virtfn); 5572 phba->cfg_sriov_nr_virtfn = 0; 5573 } 5574 } 5575 5576 return 0; 5577 5578 out_free_msix: 5579 kfree(phba->sli4_hba.msix_entries); 5580 out_free_fcp_eq_hdl: 5581 kfree(phba->sli4_hba.fcp_eq_hdl); 5582 out_free_fcf_rr_bmask: 5583 kfree(phba->fcf.fcf_rr_bmask); 5584 out_remove_rpi_hdrs: 5585 lpfc_sli4_remove_rpi_hdrs(phba); 5586 out_free_active_sgl: 5587 lpfc_free_active_sgl(phba); 5588 out_destroy_cq_event_pool: 5589 lpfc_sli4_cq_event_pool_destroy(phba); 5590 out_free_bsmbx: 5591 lpfc_destroy_bootstrap_mbox(phba); 5592 out_free_mem: 5593 lpfc_mem_free(phba); 5594 return rc; 5595 } 5596 5597 /** 5598 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 5599 * @phba: pointer to lpfc hba data structure. 5600 * 5601 * This routine is invoked to unset the driver internal resources set up 5602 * specific for supporting the SLI-4 HBA device it attached to. 5603 **/ 5604 static void 5605 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 5606 { 5607 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 5608 5609 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 5610 kfree(phba->sli4_hba.cpu_map); 5611 phba->sli4_hba.num_present_cpu = 0; 5612 phba->sli4_hba.num_online_cpu = 0; 5613 phba->sli4_hba.curr_disp_cpu = 0; 5614 5615 /* Free memory allocated for msi-x interrupt vector entries */ 5616 kfree(phba->sli4_hba.msix_entries); 5617 5618 /* Free memory allocated for fast-path work queue handles */ 5619 kfree(phba->sli4_hba.fcp_eq_hdl); 5620 5621 /* Free the allocated rpi headers. */ 5622 lpfc_sli4_remove_rpi_hdrs(phba); 5623 lpfc_sli4_remove_rpis(phba); 5624 5625 /* Free eligible FCF index bmask */ 5626 kfree(phba->fcf.fcf_rr_bmask); 5627 5628 /* Free the ELS sgl list */ 5629 lpfc_free_active_sgl(phba); 5630 lpfc_free_els_sgl_list(phba); 5631 5632 /* Free the completion queue EQ event pool */ 5633 lpfc_sli4_cq_event_release_all(phba); 5634 lpfc_sli4_cq_event_pool_destroy(phba); 5635 5636 /* Release resource identifiers. */ 5637 lpfc_sli4_dealloc_resource_identifiers(phba); 5638 5639 /* Free the bsmbx region. */ 5640 lpfc_destroy_bootstrap_mbox(phba); 5641 5642 /* Free the SLI Layer memory with SLI4 HBAs */ 5643 lpfc_mem_free_all(phba); 5644 5645 /* Free the current connect table */ 5646 list_for_each_entry_safe(conn_entry, next_conn_entry, 5647 &phba->fcf_conn_rec_list, list) { 5648 list_del_init(&conn_entry->list); 5649 kfree(conn_entry); 5650 } 5651 5652 return; 5653 } 5654 5655 /** 5656 * lpfc_init_api_table_setup - Set up init api function jump table 5657 * @phba: The hba struct for which this call is being executed. 5658 * @dev_grp: The HBA PCI-Device group number. 5659 * 5660 * This routine sets up the device INIT interface API function jump table 5661 * in @phba struct. 5662 * 5663 * Returns: 0 - success, -ENODEV - failure. 5664 **/ 5665 int 5666 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5667 { 5668 phba->lpfc_hba_init_link = lpfc_hba_init_link; 5669 phba->lpfc_hba_down_link = lpfc_hba_down_link; 5670 phba->lpfc_selective_reset = lpfc_selective_reset; 5671 switch (dev_grp) { 5672 case LPFC_PCI_DEV_LP: 5673 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 5674 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 5675 phba->lpfc_stop_port = lpfc_stop_port_s3; 5676 break; 5677 case LPFC_PCI_DEV_OC: 5678 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 5679 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 5680 phba->lpfc_stop_port = lpfc_stop_port_s4; 5681 break; 5682 default: 5683 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5684 "1431 Invalid HBA PCI-device group: 0x%x\n", 5685 dev_grp); 5686 return -ENODEV; 5687 break; 5688 } 5689 return 0; 5690 } 5691 5692 /** 5693 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 5694 * @phba: pointer to lpfc hba data structure. 5695 * 5696 * This routine is invoked to set up the driver internal resources before the 5697 * device specific resource setup to support the HBA device it attached to. 5698 * 5699 * Return codes 5700 * 0 - successful 5701 * other values - error 5702 **/ 5703 static int 5704 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 5705 { 5706 /* 5707 * Driver resources common to all SLI revisions 5708 */ 5709 atomic_set(&phba->fast_event_count, 0); 5710 spin_lock_init(&phba->hbalock); 5711 5712 /* Initialize ndlp management spinlock */ 5713 spin_lock_init(&phba->ndlp_lock); 5714 5715 INIT_LIST_HEAD(&phba->port_list); 5716 INIT_LIST_HEAD(&phba->work_list); 5717 init_waitqueue_head(&phba->wait_4_mlo_m_q); 5718 5719 /* Initialize the wait queue head for the kernel thread */ 5720 init_waitqueue_head(&phba->work_waitq); 5721 5722 /* Initialize the scsi buffer list used by driver for scsi IO */ 5723 spin_lock_init(&phba->scsi_buf_list_get_lock); 5724 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 5725 spin_lock_init(&phba->scsi_buf_list_put_lock); 5726 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 5727 5728 /* Initialize the fabric iocb list */ 5729 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5730 5731 /* Initialize list to save ELS buffers */ 5732 INIT_LIST_HEAD(&phba->elsbuf); 5733 5734 /* Initialize FCF connection rec list */ 5735 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 5736 5737 /* Initialize OAS configuration list */ 5738 spin_lock_init(&phba->devicelock); 5739 INIT_LIST_HEAD(&phba->luns); 5740 5741 return 0; 5742 } 5743 5744 /** 5745 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 5746 * @phba: pointer to lpfc hba data structure. 5747 * 5748 * This routine is invoked to set up the driver internal resources after the 5749 * device specific resource setup to support the HBA device it attached to. 5750 * 5751 * Return codes 5752 * 0 - successful 5753 * other values - error 5754 **/ 5755 static int 5756 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 5757 { 5758 int error; 5759 5760 /* Startup the kernel thread for this host adapter. */ 5761 phba->worker_thread = kthread_run(lpfc_do_work, phba, 5762 "lpfc_worker_%d", phba->brd_no); 5763 if (IS_ERR(phba->worker_thread)) { 5764 error = PTR_ERR(phba->worker_thread); 5765 return error; 5766 } 5767 5768 return 0; 5769 } 5770 5771 /** 5772 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 5773 * @phba: pointer to lpfc hba data structure. 5774 * 5775 * This routine is invoked to unset the driver internal resources set up after 5776 * the device specific resource setup for supporting the HBA device it 5777 * attached to. 5778 **/ 5779 static void 5780 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 5781 { 5782 /* Stop kernel worker thread */ 5783 kthread_stop(phba->worker_thread); 5784 } 5785 5786 /** 5787 * lpfc_free_iocb_list - Free iocb list. 5788 * @phba: pointer to lpfc hba data structure. 5789 * 5790 * This routine is invoked to free the driver's IOCB list and memory. 5791 **/ 5792 static void 5793 lpfc_free_iocb_list(struct lpfc_hba *phba) 5794 { 5795 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 5796 5797 spin_lock_irq(&phba->hbalock); 5798 list_for_each_entry_safe(iocbq_entry, iocbq_next, 5799 &phba->lpfc_iocb_list, list) { 5800 list_del(&iocbq_entry->list); 5801 kfree(iocbq_entry); 5802 phba->total_iocbq_bufs--; 5803 } 5804 spin_unlock_irq(&phba->hbalock); 5805 5806 return; 5807 } 5808 5809 /** 5810 * lpfc_init_iocb_list - Allocate and initialize iocb list. 5811 * @phba: pointer to lpfc hba data structure. 5812 * 5813 * This routine is invoked to allocate and initizlize the driver's IOCB 5814 * list and set up the IOCB tag array accordingly. 5815 * 5816 * Return codes 5817 * 0 - successful 5818 * other values - error 5819 **/ 5820 static int 5821 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 5822 { 5823 struct lpfc_iocbq *iocbq_entry = NULL; 5824 uint16_t iotag; 5825 int i; 5826 5827 /* Initialize and populate the iocb list per host. */ 5828 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 5829 for (i = 0; i < iocb_count; i++) { 5830 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 5831 if (iocbq_entry == NULL) { 5832 printk(KERN_ERR "%s: only allocated %d iocbs of " 5833 "expected %d count. Unloading driver.\n", 5834 __func__, i, LPFC_IOCB_LIST_CNT); 5835 goto out_free_iocbq; 5836 } 5837 5838 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 5839 if (iotag == 0) { 5840 kfree(iocbq_entry); 5841 printk(KERN_ERR "%s: failed to allocate IOTAG. " 5842 "Unloading driver.\n", __func__); 5843 goto out_free_iocbq; 5844 } 5845 iocbq_entry->sli4_lxritag = NO_XRI; 5846 iocbq_entry->sli4_xritag = NO_XRI; 5847 5848 spin_lock_irq(&phba->hbalock); 5849 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 5850 phba->total_iocbq_bufs++; 5851 spin_unlock_irq(&phba->hbalock); 5852 } 5853 5854 return 0; 5855 5856 out_free_iocbq: 5857 lpfc_free_iocb_list(phba); 5858 5859 return -ENOMEM; 5860 } 5861 5862 /** 5863 * lpfc_free_sgl_list - Free a given sgl list. 5864 * @phba: pointer to lpfc hba data structure. 5865 * @sglq_list: pointer to the head of sgl list. 5866 * 5867 * This routine is invoked to free a give sgl list and memory. 5868 **/ 5869 void 5870 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 5871 { 5872 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 5873 5874 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 5875 list_del(&sglq_entry->list); 5876 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 5877 kfree(sglq_entry); 5878 } 5879 } 5880 5881 /** 5882 * lpfc_free_els_sgl_list - Free els sgl list. 5883 * @phba: pointer to lpfc hba data structure. 5884 * 5885 * This routine is invoked to free the driver's els sgl list and memory. 5886 **/ 5887 static void 5888 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 5889 { 5890 LIST_HEAD(sglq_list); 5891 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 5892 5893 /* Retrieve all els sgls from driver list */ 5894 spin_lock_irq(&phba->hbalock); 5895 spin_lock(&pring->ring_lock); 5896 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 5897 spin_unlock(&pring->ring_lock); 5898 spin_unlock_irq(&phba->hbalock); 5899 5900 /* Now free the sgl list */ 5901 lpfc_free_sgl_list(phba, &sglq_list); 5902 } 5903 5904 /** 5905 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 5906 * @phba: pointer to lpfc hba data structure. 5907 * 5908 * This routine is invoked to allocate the driver's active sgl memory. 5909 * This array will hold the sglq_entry's for active IOs. 5910 **/ 5911 static int 5912 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 5913 { 5914 int size; 5915 size = sizeof(struct lpfc_sglq *); 5916 size *= phba->sli4_hba.max_cfg_param.max_xri; 5917 5918 phba->sli4_hba.lpfc_sglq_active_list = 5919 kzalloc(size, GFP_KERNEL); 5920 if (!phba->sli4_hba.lpfc_sglq_active_list) 5921 return -ENOMEM; 5922 return 0; 5923 } 5924 5925 /** 5926 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 5927 * @phba: pointer to lpfc hba data structure. 5928 * 5929 * This routine is invoked to walk through the array of active sglq entries 5930 * and free all of the resources. 5931 * This is just a place holder for now. 5932 **/ 5933 static void 5934 lpfc_free_active_sgl(struct lpfc_hba *phba) 5935 { 5936 kfree(phba->sli4_hba.lpfc_sglq_active_list); 5937 } 5938 5939 /** 5940 * lpfc_init_sgl_list - Allocate and initialize sgl list. 5941 * @phba: pointer to lpfc hba data structure. 5942 * 5943 * This routine is invoked to allocate and initizlize the driver's sgl 5944 * list and set up the sgl xritag tag array accordingly. 5945 * 5946 **/ 5947 static void 5948 lpfc_init_sgl_list(struct lpfc_hba *phba) 5949 { 5950 /* Initialize and populate the sglq list per host/VF. */ 5951 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 5952 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 5953 5954 /* els xri-sgl book keeping */ 5955 phba->sli4_hba.els_xri_cnt = 0; 5956 5957 /* scsi xri-buffer book keeping */ 5958 phba->sli4_hba.scsi_xri_cnt = 0; 5959 } 5960 5961 /** 5962 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 5963 * @phba: pointer to lpfc hba data structure. 5964 * 5965 * This routine is invoked to post rpi header templates to the 5966 * port for those SLI4 ports that do not support extents. This routine 5967 * posts a PAGE_SIZE memory region to the port to hold up to 5968 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 5969 * and should be called only when interrupts are disabled. 5970 * 5971 * Return codes 5972 * 0 - successful 5973 * -ERROR - otherwise. 5974 **/ 5975 int 5976 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5977 { 5978 int rc = 0; 5979 struct lpfc_rpi_hdr *rpi_hdr; 5980 5981 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 5982 if (!phba->sli4_hba.rpi_hdrs_in_use) 5983 return rc; 5984 if (phba->sli4_hba.extents_in_use) 5985 return -EIO; 5986 5987 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5988 if (!rpi_hdr) { 5989 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5990 "0391 Error during rpi post operation\n"); 5991 lpfc_sli4_remove_rpis(phba); 5992 rc = -ENODEV; 5993 } 5994 5995 return rc; 5996 } 5997 5998 /** 5999 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 6000 * @phba: pointer to lpfc hba data structure. 6001 * 6002 * This routine is invoked to allocate a single 4KB memory region to 6003 * support rpis and stores them in the phba. This single region 6004 * provides support for up to 64 rpis. The region is used globally 6005 * by the device. 6006 * 6007 * Returns: 6008 * A valid rpi hdr on success. 6009 * A NULL pointer on any failure. 6010 **/ 6011 struct lpfc_rpi_hdr * 6012 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 6013 { 6014 uint16_t rpi_limit, curr_rpi_range; 6015 struct lpfc_dmabuf *dmabuf; 6016 struct lpfc_rpi_hdr *rpi_hdr; 6017 uint32_t rpi_count; 6018 6019 /* 6020 * If the SLI4 port supports extents, posting the rpi header isn't 6021 * required. Set the expected maximum count and let the actual value 6022 * get set when extents are fully allocated. 6023 */ 6024 if (!phba->sli4_hba.rpi_hdrs_in_use) 6025 return NULL; 6026 if (phba->sli4_hba.extents_in_use) 6027 return NULL; 6028 6029 /* The limit on the logical index is just the max_rpi count. */ 6030 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 6031 phba->sli4_hba.max_cfg_param.max_rpi - 1; 6032 6033 spin_lock_irq(&phba->hbalock); 6034 /* 6035 * Establish the starting RPI in this header block. The starting 6036 * rpi is normalized to a zero base because the physical rpi is 6037 * port based. 6038 */ 6039 curr_rpi_range = phba->sli4_hba.next_rpi; 6040 spin_unlock_irq(&phba->hbalock); 6041 6042 /* 6043 * The port has a limited number of rpis. The increment here 6044 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 6045 * and to allow the full max_rpi range per port. 6046 */ 6047 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 6048 rpi_count = rpi_limit - curr_rpi_range; 6049 else 6050 rpi_count = LPFC_RPI_HDR_COUNT; 6051 6052 if (!rpi_count) 6053 return NULL; 6054 /* 6055 * First allocate the protocol header region for the port. The 6056 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 6057 */ 6058 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6059 if (!dmabuf) 6060 return NULL; 6061 6062 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6063 LPFC_HDR_TEMPLATE_SIZE, 6064 &dmabuf->phys, GFP_KERNEL); 6065 if (!dmabuf->virt) { 6066 rpi_hdr = NULL; 6067 goto err_free_dmabuf; 6068 } 6069 6070 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 6071 rpi_hdr = NULL; 6072 goto err_free_coherent; 6073 } 6074 6075 /* Save the rpi header data for cleanup later. */ 6076 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 6077 if (!rpi_hdr) 6078 goto err_free_coherent; 6079 6080 rpi_hdr->dmabuf = dmabuf; 6081 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 6082 rpi_hdr->page_count = 1; 6083 spin_lock_irq(&phba->hbalock); 6084 6085 /* The rpi_hdr stores the logical index only. */ 6086 rpi_hdr->start_rpi = curr_rpi_range; 6087 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 6088 6089 /* 6090 * The next_rpi stores the next logical module-64 rpi value used 6091 * to post physical rpis in subsequent rpi postings. 6092 */ 6093 phba->sli4_hba.next_rpi += rpi_count; 6094 spin_unlock_irq(&phba->hbalock); 6095 return rpi_hdr; 6096 6097 err_free_coherent: 6098 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 6099 dmabuf->virt, dmabuf->phys); 6100 err_free_dmabuf: 6101 kfree(dmabuf); 6102 return NULL; 6103 } 6104 6105 /** 6106 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 6107 * @phba: pointer to lpfc hba data structure. 6108 * 6109 * This routine is invoked to remove all memory resources allocated 6110 * to support rpis for SLI4 ports not supporting extents. This routine 6111 * presumes the caller has released all rpis consumed by fabric or port 6112 * logins and is prepared to have the header pages removed. 6113 **/ 6114 void 6115 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 6116 { 6117 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 6118 6119 if (!phba->sli4_hba.rpi_hdrs_in_use) 6120 goto exit; 6121 6122 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 6123 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 6124 list_del(&rpi_hdr->list); 6125 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 6126 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 6127 kfree(rpi_hdr->dmabuf); 6128 kfree(rpi_hdr); 6129 } 6130 exit: 6131 /* There are no rpis available to the port now. */ 6132 phba->sli4_hba.next_rpi = 0; 6133 } 6134 6135 /** 6136 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 6137 * @pdev: pointer to pci device data structure. 6138 * 6139 * This routine is invoked to allocate the driver hba data structure for an 6140 * HBA device. If the allocation is successful, the phba reference to the 6141 * PCI device data structure is set. 6142 * 6143 * Return codes 6144 * pointer to @phba - successful 6145 * NULL - error 6146 **/ 6147 static struct lpfc_hba * 6148 lpfc_hba_alloc(struct pci_dev *pdev) 6149 { 6150 struct lpfc_hba *phba; 6151 6152 /* Allocate memory for HBA structure */ 6153 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 6154 if (!phba) { 6155 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 6156 return NULL; 6157 } 6158 6159 /* Set reference to PCI device in HBA structure */ 6160 phba->pcidev = pdev; 6161 6162 /* Assign an unused board number */ 6163 phba->brd_no = lpfc_get_instance(); 6164 if (phba->brd_no < 0) { 6165 kfree(phba); 6166 return NULL; 6167 } 6168 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 6169 6170 spin_lock_init(&phba->ct_ev_lock); 6171 INIT_LIST_HEAD(&phba->ct_ev_waiters); 6172 6173 return phba; 6174 } 6175 6176 /** 6177 * lpfc_hba_free - Free driver hba data structure with a device. 6178 * @phba: pointer to lpfc hba data structure. 6179 * 6180 * This routine is invoked to free the driver hba data structure with an 6181 * HBA device. 6182 **/ 6183 static void 6184 lpfc_hba_free(struct lpfc_hba *phba) 6185 { 6186 /* Release the driver assigned board number */ 6187 idr_remove(&lpfc_hba_index, phba->brd_no); 6188 6189 /* Free memory allocated with sli rings */ 6190 kfree(phba->sli.ring); 6191 phba->sli.ring = NULL; 6192 6193 kfree(phba); 6194 return; 6195 } 6196 6197 /** 6198 * lpfc_create_shost - Create hba physical port with associated scsi host. 6199 * @phba: pointer to lpfc hba data structure. 6200 * 6201 * This routine is invoked to create HBA physical port and associate a SCSI 6202 * host with it. 6203 * 6204 * Return codes 6205 * 0 - successful 6206 * other values - error 6207 **/ 6208 static int 6209 lpfc_create_shost(struct lpfc_hba *phba) 6210 { 6211 struct lpfc_vport *vport; 6212 struct Scsi_Host *shost; 6213 6214 /* Initialize HBA FC structure */ 6215 phba->fc_edtov = FF_DEF_EDTOV; 6216 phba->fc_ratov = FF_DEF_RATOV; 6217 phba->fc_altov = FF_DEF_ALTOV; 6218 phba->fc_arbtov = FF_DEF_ARBTOV; 6219 6220 atomic_set(&phba->sdev_cnt, 0); 6221 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6222 if (!vport) 6223 return -ENODEV; 6224 6225 shost = lpfc_shost_from_vport(vport); 6226 phba->pport = vport; 6227 lpfc_debugfs_initialize(vport); 6228 /* Put reference to SCSI host to driver's device private data */ 6229 pci_set_drvdata(phba->pcidev, shost); 6230 6231 /* 6232 * At this point we are fully registered with PSA. In addition, 6233 * any initial discovery should be completed. 6234 */ 6235 vport->load_flag |= FC_ALLOW_FDMI; 6236 if (phba->cfg_enable_SmartSAN || 6237 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 6238 6239 /* Setup appropriate attribute masks */ 6240 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 6241 if (phba->cfg_enable_SmartSAN) 6242 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 6243 else 6244 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 6245 } 6246 return 0; 6247 } 6248 6249 /** 6250 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 6251 * @phba: pointer to lpfc hba data structure. 6252 * 6253 * This routine is invoked to destroy HBA physical port and the associated 6254 * SCSI host. 6255 **/ 6256 static void 6257 lpfc_destroy_shost(struct lpfc_hba *phba) 6258 { 6259 struct lpfc_vport *vport = phba->pport; 6260 6261 /* Destroy physical port that associated with the SCSI host */ 6262 destroy_port(vport); 6263 6264 return; 6265 } 6266 6267 /** 6268 * lpfc_setup_bg - Setup Block guard structures and debug areas. 6269 * @phba: pointer to lpfc hba data structure. 6270 * @shost: the shost to be used to detect Block guard settings. 6271 * 6272 * This routine sets up the local Block guard protocol settings for @shost. 6273 * This routine also allocates memory for debugging bg buffers. 6274 **/ 6275 static void 6276 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 6277 { 6278 uint32_t old_mask; 6279 uint32_t old_guard; 6280 6281 int pagecnt = 10; 6282 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 6283 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6284 "1478 Registering BlockGuard with the " 6285 "SCSI layer\n"); 6286 6287 old_mask = phba->cfg_prot_mask; 6288 old_guard = phba->cfg_prot_guard; 6289 6290 /* Only allow supported values */ 6291 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 6292 SHOST_DIX_TYPE0_PROTECTION | 6293 SHOST_DIX_TYPE1_PROTECTION); 6294 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 6295 SHOST_DIX_GUARD_CRC); 6296 6297 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 6298 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 6299 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 6300 6301 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 6302 if ((old_mask != phba->cfg_prot_mask) || 6303 (old_guard != phba->cfg_prot_guard)) 6304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6305 "1475 Registering BlockGuard with the " 6306 "SCSI layer: mask %d guard %d\n", 6307 phba->cfg_prot_mask, 6308 phba->cfg_prot_guard); 6309 6310 scsi_host_set_prot(shost, phba->cfg_prot_mask); 6311 scsi_host_set_guard(shost, phba->cfg_prot_guard); 6312 } else 6313 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6314 "1479 Not Registering BlockGuard with the SCSI " 6315 "layer, Bad protection parameters: %d %d\n", 6316 old_mask, old_guard); 6317 } 6318 6319 if (!_dump_buf_data) { 6320 while (pagecnt) { 6321 spin_lock_init(&_dump_buf_lock); 6322 _dump_buf_data = 6323 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6324 if (_dump_buf_data) { 6325 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6326 "9043 BLKGRD: allocated %d pages for " 6327 "_dump_buf_data at 0x%p\n", 6328 (1 << pagecnt), _dump_buf_data); 6329 _dump_buf_data_order = pagecnt; 6330 memset(_dump_buf_data, 0, 6331 ((1 << PAGE_SHIFT) << pagecnt)); 6332 break; 6333 } else 6334 --pagecnt; 6335 } 6336 if (!_dump_buf_data_order) 6337 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6338 "9044 BLKGRD: ERROR unable to allocate " 6339 "memory for hexdump\n"); 6340 } else 6341 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6342 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 6343 "\n", _dump_buf_data); 6344 if (!_dump_buf_dif) { 6345 while (pagecnt) { 6346 _dump_buf_dif = 6347 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6348 if (_dump_buf_dif) { 6349 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6350 "9046 BLKGRD: allocated %d pages for " 6351 "_dump_buf_dif at 0x%p\n", 6352 (1 << pagecnt), _dump_buf_dif); 6353 _dump_buf_dif_order = pagecnt; 6354 memset(_dump_buf_dif, 0, 6355 ((1 << PAGE_SHIFT) << pagecnt)); 6356 break; 6357 } else 6358 --pagecnt; 6359 } 6360 if (!_dump_buf_dif_order) 6361 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6362 "9047 BLKGRD: ERROR unable to allocate " 6363 "memory for hexdump\n"); 6364 } else 6365 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6366 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 6367 _dump_buf_dif); 6368 } 6369 6370 /** 6371 * lpfc_post_init_setup - Perform necessary device post initialization setup. 6372 * @phba: pointer to lpfc hba data structure. 6373 * 6374 * This routine is invoked to perform all the necessary post initialization 6375 * setup for the device. 6376 **/ 6377 static void 6378 lpfc_post_init_setup(struct lpfc_hba *phba) 6379 { 6380 struct Scsi_Host *shost; 6381 struct lpfc_adapter_event_header adapter_event; 6382 6383 /* Get the default values for Model Name and Description */ 6384 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 6385 6386 /* 6387 * hba setup may have changed the hba_queue_depth so we need to 6388 * adjust the value of can_queue. 6389 */ 6390 shost = pci_get_drvdata(phba->pcidev); 6391 shost->can_queue = phba->cfg_hba_queue_depth - 10; 6392 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 6393 lpfc_setup_bg(phba, shost); 6394 6395 lpfc_host_attrib_init(shost); 6396 6397 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 6398 spin_lock_irq(shost->host_lock); 6399 lpfc_poll_start_timer(phba); 6400 spin_unlock_irq(shost->host_lock); 6401 } 6402 6403 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6404 "0428 Perform SCSI scan\n"); 6405 /* Send board arrival event to upper layer */ 6406 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 6407 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 6408 fc_host_post_vendor_event(shost, fc_get_event_number(), 6409 sizeof(adapter_event), 6410 (char *) &adapter_event, 6411 LPFC_NL_VENDOR_ID); 6412 return; 6413 } 6414 6415 /** 6416 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 6417 * @phba: pointer to lpfc hba data structure. 6418 * 6419 * This routine is invoked to set up the PCI device memory space for device 6420 * with SLI-3 interface spec. 6421 * 6422 * Return codes 6423 * 0 - successful 6424 * other values - error 6425 **/ 6426 static int 6427 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 6428 { 6429 struct pci_dev *pdev; 6430 unsigned long bar0map_len, bar2map_len; 6431 int i, hbq_count; 6432 void *ptr; 6433 int error = -ENODEV; 6434 6435 /* Obtain PCI device reference */ 6436 if (!phba->pcidev) 6437 return error; 6438 else 6439 pdev = phba->pcidev; 6440 6441 /* Set the device DMA mask size */ 6442 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6443 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6444 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6445 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6446 return error; 6447 } 6448 } 6449 6450 /* Get the bus address of Bar0 and Bar2 and the number of bytes 6451 * required by each mapping. 6452 */ 6453 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6454 bar0map_len = pci_resource_len(pdev, 0); 6455 6456 phba->pci_bar2_map = pci_resource_start(pdev, 2); 6457 bar2map_len = pci_resource_len(pdev, 2); 6458 6459 /* Map HBA SLIM to a kernel virtual address. */ 6460 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6461 if (!phba->slim_memmap_p) { 6462 dev_printk(KERN_ERR, &pdev->dev, 6463 "ioremap failed for SLIM memory.\n"); 6464 goto out; 6465 } 6466 6467 /* Map HBA Control Registers to a kernel virtual address. */ 6468 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 6469 if (!phba->ctrl_regs_memmap_p) { 6470 dev_printk(KERN_ERR, &pdev->dev, 6471 "ioremap failed for HBA control registers.\n"); 6472 goto out_iounmap_slim; 6473 } 6474 6475 /* Allocate memory for SLI-2 structures */ 6476 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6477 &phba->slim2p.phys, GFP_KERNEL); 6478 if (!phba->slim2p.virt) 6479 goto out_iounmap; 6480 6481 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6482 phba->mbox_ext = (phba->slim2p.virt + 6483 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 6484 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 6485 phba->IOCBs = (phba->slim2p.virt + 6486 offsetof(struct lpfc_sli2_slim, IOCBs)); 6487 6488 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 6489 lpfc_sli_hbq_size(), 6490 &phba->hbqslimp.phys, 6491 GFP_KERNEL); 6492 if (!phba->hbqslimp.virt) 6493 goto out_free_slim; 6494 6495 hbq_count = lpfc_sli_hbq_count(); 6496 ptr = phba->hbqslimp.virt; 6497 for (i = 0; i < hbq_count; ++i) { 6498 phba->hbqs[i].hbq_virt = ptr; 6499 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6500 ptr += (lpfc_hbq_defs[i]->entry_count * 6501 sizeof(struct lpfc_hbq_entry)); 6502 } 6503 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6504 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6505 6506 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6507 6508 INIT_LIST_HEAD(&phba->rb_pend_list); 6509 6510 phba->MBslimaddr = phba->slim_memmap_p; 6511 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6512 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6513 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6514 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6515 6516 return 0; 6517 6518 out_free_slim: 6519 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6520 phba->slim2p.virt, phba->slim2p.phys); 6521 out_iounmap: 6522 iounmap(phba->ctrl_regs_memmap_p); 6523 out_iounmap_slim: 6524 iounmap(phba->slim_memmap_p); 6525 out: 6526 return error; 6527 } 6528 6529 /** 6530 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 6531 * @phba: pointer to lpfc hba data structure. 6532 * 6533 * This routine is invoked to unset the PCI device memory space for device 6534 * with SLI-3 interface spec. 6535 **/ 6536 static void 6537 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 6538 { 6539 struct pci_dev *pdev; 6540 6541 /* Obtain PCI device reference */ 6542 if (!phba->pcidev) 6543 return; 6544 else 6545 pdev = phba->pcidev; 6546 6547 /* Free coherent DMA memory allocated */ 6548 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6549 phba->hbqslimp.virt, phba->hbqslimp.phys); 6550 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6551 phba->slim2p.virt, phba->slim2p.phys); 6552 6553 /* I/O memory unmap */ 6554 iounmap(phba->ctrl_regs_memmap_p); 6555 iounmap(phba->slim_memmap_p); 6556 6557 return; 6558 } 6559 6560 /** 6561 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 6562 * @phba: pointer to lpfc hba data structure. 6563 * 6564 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 6565 * done and check status. 6566 * 6567 * Return 0 if successful, otherwise -ENODEV. 6568 **/ 6569 int 6570 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 6571 { 6572 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 6573 struct lpfc_register reg_data; 6574 int i, port_error = 0; 6575 uint32_t if_type; 6576 6577 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 6578 memset(®_data, 0, sizeof(reg_data)); 6579 if (!phba->sli4_hba.PSMPHRregaddr) 6580 return -ENODEV; 6581 6582 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 6583 for (i = 0; i < 3000; i++) { 6584 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 6585 &portsmphr_reg.word0) || 6586 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 6587 /* Port has a fatal POST error, break out */ 6588 port_error = -ENODEV; 6589 break; 6590 } 6591 if (LPFC_POST_STAGE_PORT_READY == 6592 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 6593 break; 6594 msleep(10); 6595 } 6596 6597 /* 6598 * If there was a port error during POST, then don't proceed with 6599 * other register reads as the data may not be valid. Just exit. 6600 */ 6601 if (port_error) { 6602 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6603 "1408 Port Failed POST - portsmphr=0x%x, " 6604 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 6605 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 6606 portsmphr_reg.word0, 6607 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 6608 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 6609 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 6610 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 6611 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 6612 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 6613 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 6614 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 6615 } else { 6616 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6617 "2534 Device Info: SLIFamily=0x%x, " 6618 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 6619 "SLIHint_2=0x%x, FT=0x%x\n", 6620 bf_get(lpfc_sli_intf_sli_family, 6621 &phba->sli4_hba.sli_intf), 6622 bf_get(lpfc_sli_intf_slirev, 6623 &phba->sli4_hba.sli_intf), 6624 bf_get(lpfc_sli_intf_if_type, 6625 &phba->sli4_hba.sli_intf), 6626 bf_get(lpfc_sli_intf_sli_hint1, 6627 &phba->sli4_hba.sli_intf), 6628 bf_get(lpfc_sli_intf_sli_hint2, 6629 &phba->sli4_hba.sli_intf), 6630 bf_get(lpfc_sli_intf_func_type, 6631 &phba->sli4_hba.sli_intf)); 6632 /* 6633 * Check for other Port errors during the initialization 6634 * process. Fail the load if the port did not come up 6635 * correctly. 6636 */ 6637 if_type = bf_get(lpfc_sli_intf_if_type, 6638 &phba->sli4_hba.sli_intf); 6639 switch (if_type) { 6640 case LPFC_SLI_INTF_IF_TYPE_0: 6641 phba->sli4_hba.ue_mask_lo = 6642 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 6643 phba->sli4_hba.ue_mask_hi = 6644 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 6645 uerrlo_reg.word0 = 6646 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 6647 uerrhi_reg.word0 = 6648 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 6649 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 6650 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 6651 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6652 "1422 Unrecoverable Error " 6653 "Detected during POST " 6654 "uerr_lo_reg=0x%x, " 6655 "uerr_hi_reg=0x%x, " 6656 "ue_mask_lo_reg=0x%x, " 6657 "ue_mask_hi_reg=0x%x\n", 6658 uerrlo_reg.word0, 6659 uerrhi_reg.word0, 6660 phba->sli4_hba.ue_mask_lo, 6661 phba->sli4_hba.ue_mask_hi); 6662 port_error = -ENODEV; 6663 } 6664 break; 6665 case LPFC_SLI_INTF_IF_TYPE_2: 6666 /* Final checks. The port status should be clean. */ 6667 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 6668 ®_data.word0) || 6669 (bf_get(lpfc_sliport_status_err, ®_data) && 6670 !bf_get(lpfc_sliport_status_rn, ®_data))) { 6671 phba->work_status[0] = 6672 readl(phba->sli4_hba.u.if_type2. 6673 ERR1regaddr); 6674 phba->work_status[1] = 6675 readl(phba->sli4_hba.u.if_type2. 6676 ERR2regaddr); 6677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6678 "2888 Unrecoverable port error " 6679 "following POST: port status reg " 6680 "0x%x, port_smphr reg 0x%x, " 6681 "error 1=0x%x, error 2=0x%x\n", 6682 reg_data.word0, 6683 portsmphr_reg.word0, 6684 phba->work_status[0], 6685 phba->work_status[1]); 6686 port_error = -ENODEV; 6687 } 6688 break; 6689 case LPFC_SLI_INTF_IF_TYPE_1: 6690 default: 6691 break; 6692 } 6693 } 6694 return port_error; 6695 } 6696 6697 /** 6698 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 6699 * @phba: pointer to lpfc hba data structure. 6700 * @if_type: The SLI4 interface type getting configured. 6701 * 6702 * This routine is invoked to set up SLI4 BAR0 PCI config space register 6703 * memory map. 6704 **/ 6705 static void 6706 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 6707 { 6708 switch (if_type) { 6709 case LPFC_SLI_INTF_IF_TYPE_0: 6710 phba->sli4_hba.u.if_type0.UERRLOregaddr = 6711 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 6712 phba->sli4_hba.u.if_type0.UERRHIregaddr = 6713 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 6714 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 6715 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 6716 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 6717 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 6718 phba->sli4_hba.SLIINTFregaddr = 6719 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 6720 break; 6721 case LPFC_SLI_INTF_IF_TYPE_2: 6722 phba->sli4_hba.u.if_type2.ERR1regaddr = 6723 phba->sli4_hba.conf_regs_memmap_p + 6724 LPFC_CTL_PORT_ER1_OFFSET; 6725 phba->sli4_hba.u.if_type2.ERR2regaddr = 6726 phba->sli4_hba.conf_regs_memmap_p + 6727 LPFC_CTL_PORT_ER2_OFFSET; 6728 phba->sli4_hba.u.if_type2.CTRLregaddr = 6729 phba->sli4_hba.conf_regs_memmap_p + 6730 LPFC_CTL_PORT_CTL_OFFSET; 6731 phba->sli4_hba.u.if_type2.STATUSregaddr = 6732 phba->sli4_hba.conf_regs_memmap_p + 6733 LPFC_CTL_PORT_STA_OFFSET; 6734 phba->sli4_hba.SLIINTFregaddr = 6735 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 6736 phba->sli4_hba.PSMPHRregaddr = 6737 phba->sli4_hba.conf_regs_memmap_p + 6738 LPFC_CTL_PORT_SEM_OFFSET; 6739 phba->sli4_hba.RQDBregaddr = 6740 phba->sli4_hba.conf_regs_memmap_p + 6741 LPFC_ULP0_RQ_DOORBELL; 6742 phba->sli4_hba.WQDBregaddr = 6743 phba->sli4_hba.conf_regs_memmap_p + 6744 LPFC_ULP0_WQ_DOORBELL; 6745 phba->sli4_hba.EQCQDBregaddr = 6746 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 6747 phba->sli4_hba.MQDBregaddr = 6748 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 6749 phba->sli4_hba.BMBXregaddr = 6750 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 6751 break; 6752 case LPFC_SLI_INTF_IF_TYPE_1: 6753 default: 6754 dev_printk(KERN_ERR, &phba->pcidev->dev, 6755 "FATAL - unsupported SLI4 interface type - %d\n", 6756 if_type); 6757 break; 6758 } 6759 } 6760 6761 /** 6762 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 6763 * @phba: pointer to lpfc hba data structure. 6764 * 6765 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 6766 * memory map. 6767 **/ 6768 static void 6769 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 6770 { 6771 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 6772 LPFC_SLIPORT_IF0_SMPHR; 6773 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 6774 LPFC_HST_ISR0; 6775 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 6776 LPFC_HST_IMR0; 6777 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 6778 LPFC_HST_ISCR0; 6779 } 6780 6781 /** 6782 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 6783 * @phba: pointer to lpfc hba data structure. 6784 * @vf: virtual function number 6785 * 6786 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 6787 * based on the given viftual function number, @vf. 6788 * 6789 * Return 0 if successful, otherwise -ENODEV. 6790 **/ 6791 static int 6792 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 6793 { 6794 if (vf > LPFC_VIR_FUNC_MAX) 6795 return -ENODEV; 6796 6797 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6798 vf * LPFC_VFR_PAGE_SIZE + 6799 LPFC_ULP0_RQ_DOORBELL); 6800 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6801 vf * LPFC_VFR_PAGE_SIZE + 6802 LPFC_ULP0_WQ_DOORBELL); 6803 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6804 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 6805 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6806 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 6807 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6808 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 6809 return 0; 6810 } 6811 6812 /** 6813 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 6814 * @phba: pointer to lpfc hba data structure. 6815 * 6816 * This routine is invoked to create the bootstrap mailbox 6817 * region consistent with the SLI-4 interface spec. This 6818 * routine allocates all memory necessary to communicate 6819 * mailbox commands to the port and sets up all alignment 6820 * needs. No locks are expected to be held when calling 6821 * this routine. 6822 * 6823 * Return codes 6824 * 0 - successful 6825 * -ENOMEM - could not allocated memory. 6826 **/ 6827 static int 6828 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 6829 { 6830 uint32_t bmbx_size; 6831 struct lpfc_dmabuf *dmabuf; 6832 struct dma_address *dma_address; 6833 uint32_t pa_addr; 6834 uint64_t phys_addr; 6835 6836 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6837 if (!dmabuf) 6838 return -ENOMEM; 6839 6840 /* 6841 * The bootstrap mailbox region is comprised of 2 parts 6842 * plus an alignment restriction of 16 bytes. 6843 */ 6844 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 6845 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, 6846 &dmabuf->phys, GFP_KERNEL); 6847 if (!dmabuf->virt) { 6848 kfree(dmabuf); 6849 return -ENOMEM; 6850 } 6851 6852 /* 6853 * Initialize the bootstrap mailbox pointers now so that the register 6854 * operations are simple later. The mailbox dma address is required 6855 * to be 16-byte aligned. Also align the virtual memory as each 6856 * maibox is copied into the bmbx mailbox region before issuing the 6857 * command to the port. 6858 */ 6859 phba->sli4_hba.bmbx.dmabuf = dmabuf; 6860 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 6861 6862 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 6863 LPFC_ALIGN_16_BYTE); 6864 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 6865 LPFC_ALIGN_16_BYTE); 6866 6867 /* 6868 * Set the high and low physical addresses now. The SLI4 alignment 6869 * requirement is 16 bytes and the mailbox is posted to the port 6870 * as two 30-bit addresses. The other data is a bit marking whether 6871 * the 30-bit address is the high or low address. 6872 * Upcast bmbx aphys to 64bits so shift instruction compiles 6873 * clean on 32 bit machines. 6874 */ 6875 dma_address = &phba->sli4_hba.bmbx.dma_address; 6876 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 6877 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 6878 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 6879 LPFC_BMBX_BIT1_ADDR_HI); 6880 6881 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 6882 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 6883 LPFC_BMBX_BIT1_ADDR_LO); 6884 return 0; 6885 } 6886 6887 /** 6888 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 6889 * @phba: pointer to lpfc hba data structure. 6890 * 6891 * This routine is invoked to teardown the bootstrap mailbox 6892 * region and release all host resources. This routine requires 6893 * the caller to ensure all mailbox commands recovered, no 6894 * additional mailbox comands are sent, and interrupts are disabled 6895 * before calling this routine. 6896 * 6897 **/ 6898 static void 6899 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 6900 { 6901 dma_free_coherent(&phba->pcidev->dev, 6902 phba->sli4_hba.bmbx.bmbx_size, 6903 phba->sli4_hba.bmbx.dmabuf->virt, 6904 phba->sli4_hba.bmbx.dmabuf->phys); 6905 6906 kfree(phba->sli4_hba.bmbx.dmabuf); 6907 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 6908 } 6909 6910 /** 6911 * lpfc_sli4_read_config - Get the config parameters. 6912 * @phba: pointer to lpfc hba data structure. 6913 * 6914 * This routine is invoked to read the configuration parameters from the HBA. 6915 * The configuration parameters are used to set the base and maximum values 6916 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 6917 * allocation for the port. 6918 * 6919 * Return codes 6920 * 0 - successful 6921 * -ENOMEM - No available memory 6922 * -EIO - The mailbox failed to complete successfully. 6923 **/ 6924 int 6925 lpfc_sli4_read_config(struct lpfc_hba *phba) 6926 { 6927 LPFC_MBOXQ_t *pmb; 6928 struct lpfc_mbx_read_config *rd_config; 6929 union lpfc_sli4_cfg_shdr *shdr; 6930 uint32_t shdr_status, shdr_add_status; 6931 struct lpfc_mbx_get_func_cfg *get_func_cfg; 6932 struct lpfc_rsrc_desc_fcfcoe *desc; 6933 char *pdesc_0; 6934 uint16_t forced_link_speed; 6935 uint32_t if_type; 6936 int length, i, rc = 0, rc2; 6937 6938 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6939 if (!pmb) { 6940 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6941 "2011 Unable to allocate memory for issuing " 6942 "SLI_CONFIG_SPECIAL mailbox command\n"); 6943 return -ENOMEM; 6944 } 6945 6946 lpfc_read_config(phba, pmb); 6947 6948 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6949 if (rc != MBX_SUCCESS) { 6950 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6951 "2012 Mailbox failed , mbxCmd x%x " 6952 "READ_CONFIG, mbxStatus x%x\n", 6953 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6954 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6955 rc = -EIO; 6956 } else { 6957 rd_config = &pmb->u.mqe.un.rd_config; 6958 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 6959 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 6960 phba->sli4_hba.lnk_info.lnk_tp = 6961 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 6962 phba->sli4_hba.lnk_info.lnk_no = 6963 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 6964 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6965 "3081 lnk_type:%d, lnk_numb:%d\n", 6966 phba->sli4_hba.lnk_info.lnk_tp, 6967 phba->sli4_hba.lnk_info.lnk_no); 6968 } else 6969 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6970 "3082 Mailbox (x%x) returned ldv:x0\n", 6971 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 6972 phba->sli4_hba.extents_in_use = 6973 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 6974 phba->sli4_hba.max_cfg_param.max_xri = 6975 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 6976 phba->sli4_hba.max_cfg_param.xri_base = 6977 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 6978 phba->sli4_hba.max_cfg_param.max_vpi = 6979 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 6980 phba->sli4_hba.max_cfg_param.vpi_base = 6981 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 6982 phba->sli4_hba.max_cfg_param.max_rpi = 6983 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 6984 phba->sli4_hba.max_cfg_param.rpi_base = 6985 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 6986 phba->sli4_hba.max_cfg_param.max_vfi = 6987 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 6988 phba->sli4_hba.max_cfg_param.vfi_base = 6989 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 6990 phba->sli4_hba.max_cfg_param.max_fcfi = 6991 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 6992 phba->sli4_hba.max_cfg_param.max_eq = 6993 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 6994 phba->sli4_hba.max_cfg_param.max_rq = 6995 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 6996 phba->sli4_hba.max_cfg_param.max_wq = 6997 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 6998 phba->sli4_hba.max_cfg_param.max_cq = 6999 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 7000 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 7001 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 7002 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 7003 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 7004 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 7005 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 7006 phba->max_vports = phba->max_vpi; 7007 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7008 "2003 cfg params Extents? %d " 7009 "XRI(B:%d M:%d), " 7010 "VPI(B:%d M:%d) " 7011 "VFI(B:%d M:%d) " 7012 "RPI(B:%d M:%d) " 7013 "FCFI(Count:%d)\n", 7014 phba->sli4_hba.extents_in_use, 7015 phba->sli4_hba.max_cfg_param.xri_base, 7016 phba->sli4_hba.max_cfg_param.max_xri, 7017 phba->sli4_hba.max_cfg_param.vpi_base, 7018 phba->sli4_hba.max_cfg_param.max_vpi, 7019 phba->sli4_hba.max_cfg_param.vfi_base, 7020 phba->sli4_hba.max_cfg_param.max_vfi, 7021 phba->sli4_hba.max_cfg_param.rpi_base, 7022 phba->sli4_hba.max_cfg_param.max_rpi, 7023 phba->sli4_hba.max_cfg_param.max_fcfi); 7024 } 7025 7026 if (rc) 7027 goto read_cfg_out; 7028 7029 /* Update link speed if forced link speed is supported */ 7030 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7031 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7032 forced_link_speed = 7033 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 7034 if (forced_link_speed) { 7035 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 7036 7037 switch (forced_link_speed) { 7038 case LINK_SPEED_1G: 7039 phba->cfg_link_speed = 7040 LPFC_USER_LINK_SPEED_1G; 7041 break; 7042 case LINK_SPEED_2G: 7043 phba->cfg_link_speed = 7044 LPFC_USER_LINK_SPEED_2G; 7045 break; 7046 case LINK_SPEED_4G: 7047 phba->cfg_link_speed = 7048 LPFC_USER_LINK_SPEED_4G; 7049 break; 7050 case LINK_SPEED_8G: 7051 phba->cfg_link_speed = 7052 LPFC_USER_LINK_SPEED_8G; 7053 break; 7054 case LINK_SPEED_10G: 7055 phba->cfg_link_speed = 7056 LPFC_USER_LINK_SPEED_10G; 7057 break; 7058 case LINK_SPEED_16G: 7059 phba->cfg_link_speed = 7060 LPFC_USER_LINK_SPEED_16G; 7061 break; 7062 case LINK_SPEED_32G: 7063 phba->cfg_link_speed = 7064 LPFC_USER_LINK_SPEED_32G; 7065 break; 7066 case 0xffff: 7067 phba->cfg_link_speed = 7068 LPFC_USER_LINK_SPEED_AUTO; 7069 break; 7070 default: 7071 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7072 "0047 Unrecognized link " 7073 "speed : %d\n", 7074 forced_link_speed); 7075 phba->cfg_link_speed = 7076 LPFC_USER_LINK_SPEED_AUTO; 7077 } 7078 } 7079 } 7080 7081 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 7082 length = phba->sli4_hba.max_cfg_param.max_xri - 7083 lpfc_sli4_get_els_iocb_cnt(phba); 7084 if (phba->cfg_hba_queue_depth > length) { 7085 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7086 "3361 HBA queue depth changed from %d to %d\n", 7087 phba->cfg_hba_queue_depth, length); 7088 phba->cfg_hba_queue_depth = length; 7089 } 7090 7091 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 7092 LPFC_SLI_INTF_IF_TYPE_2) 7093 goto read_cfg_out; 7094 7095 /* get the pf# and vf# for SLI4 if_type 2 port */ 7096 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 7097 sizeof(struct lpfc_sli4_cfg_mhdr)); 7098 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 7099 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 7100 length, LPFC_SLI4_MBX_EMBED); 7101 7102 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7103 shdr = (union lpfc_sli4_cfg_shdr *) 7104 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 7105 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7106 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7107 if (rc2 || shdr_status || shdr_add_status) { 7108 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7109 "3026 Mailbox failed , mbxCmd x%x " 7110 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 7111 bf_get(lpfc_mqe_command, &pmb->u.mqe), 7112 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 7113 goto read_cfg_out; 7114 } 7115 7116 /* search for fc_fcoe resrouce descriptor */ 7117 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 7118 7119 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 7120 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 7121 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 7122 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 7123 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 7124 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 7125 goto read_cfg_out; 7126 7127 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 7128 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 7129 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 7130 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 7131 phba->sli4_hba.iov.pf_number = 7132 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 7133 phba->sli4_hba.iov.vf_number = 7134 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 7135 break; 7136 } 7137 } 7138 7139 if (i < LPFC_RSRC_DESC_MAX_NUM) 7140 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7141 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 7142 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 7143 phba->sli4_hba.iov.vf_number); 7144 else 7145 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7146 "3028 GET_FUNCTION_CONFIG: failed to find " 7147 "Resrouce Descriptor:x%x\n", 7148 LPFC_RSRC_DESC_TYPE_FCFCOE); 7149 7150 read_cfg_out: 7151 mempool_free(pmb, phba->mbox_mem_pool); 7152 return rc; 7153 } 7154 7155 /** 7156 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 7157 * @phba: pointer to lpfc hba data structure. 7158 * 7159 * This routine is invoked to setup the port-side endian order when 7160 * the port if_type is 0. This routine has no function for other 7161 * if_types. 7162 * 7163 * Return codes 7164 * 0 - successful 7165 * -ENOMEM - No available memory 7166 * -EIO - The mailbox failed to complete successfully. 7167 **/ 7168 static int 7169 lpfc_setup_endian_order(struct lpfc_hba *phba) 7170 { 7171 LPFC_MBOXQ_t *mboxq; 7172 uint32_t if_type, rc = 0; 7173 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 7174 HOST_ENDIAN_HIGH_WORD1}; 7175 7176 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7177 switch (if_type) { 7178 case LPFC_SLI_INTF_IF_TYPE_0: 7179 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 7180 GFP_KERNEL); 7181 if (!mboxq) { 7182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7183 "0492 Unable to allocate memory for " 7184 "issuing SLI_CONFIG_SPECIAL mailbox " 7185 "command\n"); 7186 return -ENOMEM; 7187 } 7188 7189 /* 7190 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 7191 * two words to contain special data values and no other data. 7192 */ 7193 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 7194 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 7195 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7196 if (rc != MBX_SUCCESS) { 7197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7198 "0493 SLI_CONFIG_SPECIAL mailbox " 7199 "failed with status x%x\n", 7200 rc); 7201 rc = -EIO; 7202 } 7203 mempool_free(mboxq, phba->mbox_mem_pool); 7204 break; 7205 case LPFC_SLI_INTF_IF_TYPE_2: 7206 case LPFC_SLI_INTF_IF_TYPE_1: 7207 default: 7208 break; 7209 } 7210 return rc; 7211 } 7212 7213 /** 7214 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts 7215 * @phba: pointer to lpfc hba data structure. 7216 * 7217 * This routine is invoked to check the user settable queue counts for EQs and 7218 * CQs. after this routine is called the counts will be set to valid values that 7219 * adhere to the constraints of the system's interrupt vectors and the port's 7220 * queue resources. 7221 * 7222 * Return codes 7223 * 0 - successful 7224 * -ENOMEM - No available memory 7225 **/ 7226 static int 7227 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 7228 { 7229 int cfg_fcp_io_channel; 7230 uint32_t cpu; 7231 uint32_t i = 0; 7232 int fof_vectors = phba->cfg_fof ? 1 : 0; 7233 7234 /* 7235 * Sanity check for configured queue parameters against the run-time 7236 * device parameters 7237 */ 7238 7239 /* Sanity check on HBA EQ parameters */ 7240 cfg_fcp_io_channel = phba->cfg_fcp_io_channel; 7241 7242 /* It doesn't make sense to have more io channels then online CPUs */ 7243 for_each_present_cpu(cpu) { 7244 if (cpu_online(cpu)) 7245 i++; 7246 } 7247 phba->sli4_hba.num_online_cpu = i; 7248 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7249 phba->sli4_hba.curr_disp_cpu = 0; 7250 7251 if (i < cfg_fcp_io_channel) { 7252 lpfc_printf_log(phba, 7253 KERN_ERR, LOG_INIT, 7254 "3188 Reducing IO channels to match number of " 7255 "online CPUs: from %d to %d\n", 7256 cfg_fcp_io_channel, i); 7257 cfg_fcp_io_channel = i; 7258 } 7259 7260 if (cfg_fcp_io_channel + fof_vectors > 7261 phba->sli4_hba.max_cfg_param.max_eq) { 7262 if (phba->sli4_hba.max_cfg_param.max_eq < 7263 LPFC_FCP_IO_CHAN_MIN) { 7264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7265 "2574 Not enough EQs (%d) from the " 7266 "pci function for supporting FCP " 7267 "EQs (%d)\n", 7268 phba->sli4_hba.max_cfg_param.max_eq, 7269 phba->cfg_fcp_io_channel); 7270 goto out_error; 7271 } 7272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7273 "2575 Reducing IO channels to match number of " 7274 "available EQs: from %d to %d\n", 7275 cfg_fcp_io_channel, 7276 phba->sli4_hba.max_cfg_param.max_eq); 7277 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq - 7278 fof_vectors; 7279 } 7280 7281 /* The actual number of FCP event queues adopted */ 7282 phba->cfg_fcp_io_channel = cfg_fcp_io_channel; 7283 7284 /* Get EQ depth from module parameter, fake the default for now */ 7285 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 7286 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 7287 7288 /* Get CQ depth from module parameter, fake the default for now */ 7289 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 7290 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 7291 7292 return 0; 7293 out_error: 7294 return -ENOMEM; 7295 } 7296 7297 /** 7298 * lpfc_sli4_queue_create - Create all the SLI4 queues 7299 * @phba: pointer to lpfc hba data structure. 7300 * 7301 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 7302 * operation. For each SLI4 queue type, the parameters such as queue entry 7303 * count (queue depth) shall be taken from the module parameter. For now, 7304 * we just use some constant number as place holder. 7305 * 7306 * Return codes 7307 * 0 - successful 7308 * -ENOMEM - No availble memory 7309 * -EIO - The mailbox failed to complete successfully. 7310 **/ 7311 int 7312 lpfc_sli4_queue_create(struct lpfc_hba *phba) 7313 { 7314 struct lpfc_queue *qdesc; 7315 uint32_t wqesize; 7316 int idx; 7317 7318 /* 7319 * Create HBA Record arrays. 7320 */ 7321 if (!phba->cfg_fcp_io_channel) 7322 return -ERANGE; 7323 7324 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 7325 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 7326 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 7327 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 7328 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 7329 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 7330 7331 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * 7332 phba->cfg_fcp_io_channel), GFP_KERNEL); 7333 if (!phba->sli4_hba.hba_eq) { 7334 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7335 "2576 Failed allocate memory for " 7336 "fast-path EQ record array\n"); 7337 goto out_error; 7338 } 7339 7340 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 7341 phba->cfg_fcp_io_channel), GFP_KERNEL); 7342 if (!phba->sli4_hba.fcp_cq) { 7343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7344 "2577 Failed allocate memory for fast-path " 7345 "CQ record array\n"); 7346 goto out_error; 7347 } 7348 7349 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 7350 phba->cfg_fcp_io_channel), GFP_KERNEL); 7351 if (!phba->sli4_hba.fcp_wq) { 7352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7353 "2578 Failed allocate memory for fast-path " 7354 "WQ record array\n"); 7355 goto out_error; 7356 } 7357 7358 /* 7359 * Since the first EQ can have multiple CQs associated with it, 7360 * this array is used to quickly see if we have a FCP fast-path 7361 * CQ match. 7362 */ 7363 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * 7364 phba->cfg_fcp_io_channel), GFP_KERNEL); 7365 if (!phba->sli4_hba.fcp_cq_map) { 7366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7367 "2545 Failed allocate memory for fast-path " 7368 "CQ map\n"); 7369 goto out_error; 7370 } 7371 7372 /* 7373 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies 7374 * how many EQs to create. 7375 */ 7376 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7377 7378 /* Create EQs */ 7379 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 7380 phba->sli4_hba.eq_ecount); 7381 if (!qdesc) { 7382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7383 "0497 Failed allocate EQ (%d)\n", idx); 7384 goto out_error; 7385 } 7386 phba->sli4_hba.hba_eq[idx] = qdesc; 7387 7388 /* Create Fast Path FCP CQs */ 7389 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7390 phba->sli4_hba.cq_ecount); 7391 if (!qdesc) { 7392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7393 "0499 Failed allocate fast-path FCP " 7394 "CQ (%d)\n", idx); 7395 goto out_error; 7396 } 7397 phba->sli4_hba.fcp_cq[idx] = qdesc; 7398 7399 /* Create Fast Path FCP WQs */ 7400 wqesize = (phba->fcp_embed_io) ? 7401 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 7402 qdesc = lpfc_sli4_queue_alloc(phba, wqesize, 7403 phba->sli4_hba.wq_ecount); 7404 if (!qdesc) { 7405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7406 "0503 Failed allocate fast-path FCP " 7407 "WQ (%d)\n", idx); 7408 goto out_error; 7409 } 7410 phba->sli4_hba.fcp_wq[idx] = qdesc; 7411 } 7412 7413 7414 /* 7415 * Create Slow Path Completion Queues (CQs) 7416 */ 7417 7418 /* Create slow-path Mailbox Command Complete Queue */ 7419 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7420 phba->sli4_hba.cq_ecount); 7421 if (!qdesc) { 7422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7423 "0500 Failed allocate slow-path mailbox CQ\n"); 7424 goto out_error; 7425 } 7426 phba->sli4_hba.mbx_cq = qdesc; 7427 7428 /* Create slow-path ELS Complete Queue */ 7429 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7430 phba->sli4_hba.cq_ecount); 7431 if (!qdesc) { 7432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7433 "0501 Failed allocate slow-path ELS CQ\n"); 7434 goto out_error; 7435 } 7436 phba->sli4_hba.els_cq = qdesc; 7437 7438 7439 /* 7440 * Create Slow Path Work Queues (WQs) 7441 */ 7442 7443 /* Create Mailbox Command Queue */ 7444 7445 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 7446 phba->sli4_hba.mq_ecount); 7447 if (!qdesc) { 7448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7449 "0505 Failed allocate slow-path MQ\n"); 7450 goto out_error; 7451 } 7452 phba->sli4_hba.mbx_wq = qdesc; 7453 7454 /* 7455 * Create ELS Work Queues 7456 */ 7457 7458 /* Create slow-path ELS Work Queue */ 7459 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7460 phba->sli4_hba.wq_ecount); 7461 if (!qdesc) { 7462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7463 "0504 Failed allocate slow-path ELS WQ\n"); 7464 goto out_error; 7465 } 7466 phba->sli4_hba.els_wq = qdesc; 7467 7468 /* 7469 * Create Receive Queue (RQ) 7470 */ 7471 7472 /* Create Receive Queue for header */ 7473 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7474 phba->sli4_hba.rq_ecount); 7475 if (!qdesc) { 7476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7477 "0506 Failed allocate receive HRQ\n"); 7478 goto out_error; 7479 } 7480 phba->sli4_hba.hdr_rq = qdesc; 7481 7482 /* Create Receive Queue for data */ 7483 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7484 phba->sli4_hba.rq_ecount); 7485 if (!qdesc) { 7486 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7487 "0507 Failed allocate receive DRQ\n"); 7488 goto out_error; 7489 } 7490 phba->sli4_hba.dat_rq = qdesc; 7491 7492 /* Create the Queues needed for Flash Optimized Fabric operations */ 7493 if (phba->cfg_fof) 7494 lpfc_fof_queue_create(phba); 7495 return 0; 7496 7497 out_error: 7498 lpfc_sli4_queue_destroy(phba); 7499 return -ENOMEM; 7500 } 7501 7502 /** 7503 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 7504 * @phba: pointer to lpfc hba data structure. 7505 * 7506 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 7507 * operation. 7508 * 7509 * Return codes 7510 * 0 - successful 7511 * -ENOMEM - No available memory 7512 * -EIO - The mailbox failed to complete successfully. 7513 **/ 7514 void 7515 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 7516 { 7517 int idx; 7518 7519 if (phba->cfg_fof) 7520 lpfc_fof_queue_destroy(phba); 7521 7522 if (phba->sli4_hba.hba_eq != NULL) { 7523 /* Release HBA event queue */ 7524 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7525 if (phba->sli4_hba.hba_eq[idx] != NULL) { 7526 lpfc_sli4_queue_free( 7527 phba->sli4_hba.hba_eq[idx]); 7528 phba->sli4_hba.hba_eq[idx] = NULL; 7529 } 7530 } 7531 kfree(phba->sli4_hba.hba_eq); 7532 phba->sli4_hba.hba_eq = NULL; 7533 } 7534 7535 if (phba->sli4_hba.fcp_cq != NULL) { 7536 /* Release FCP completion queue */ 7537 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7538 if (phba->sli4_hba.fcp_cq[idx] != NULL) { 7539 lpfc_sli4_queue_free( 7540 phba->sli4_hba.fcp_cq[idx]); 7541 phba->sli4_hba.fcp_cq[idx] = NULL; 7542 } 7543 } 7544 kfree(phba->sli4_hba.fcp_cq); 7545 phba->sli4_hba.fcp_cq = NULL; 7546 } 7547 7548 if (phba->sli4_hba.fcp_wq != NULL) { 7549 /* Release FCP work queue */ 7550 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7551 if (phba->sli4_hba.fcp_wq[idx] != NULL) { 7552 lpfc_sli4_queue_free( 7553 phba->sli4_hba.fcp_wq[idx]); 7554 phba->sli4_hba.fcp_wq[idx] = NULL; 7555 } 7556 } 7557 kfree(phba->sli4_hba.fcp_wq); 7558 phba->sli4_hba.fcp_wq = NULL; 7559 } 7560 7561 /* Release FCP CQ mapping array */ 7562 if (phba->sli4_hba.fcp_cq_map != NULL) { 7563 kfree(phba->sli4_hba.fcp_cq_map); 7564 phba->sli4_hba.fcp_cq_map = NULL; 7565 } 7566 7567 /* Release mailbox command work queue */ 7568 if (phba->sli4_hba.mbx_wq != NULL) { 7569 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 7570 phba->sli4_hba.mbx_wq = NULL; 7571 } 7572 7573 /* Release ELS work queue */ 7574 if (phba->sli4_hba.els_wq != NULL) { 7575 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 7576 phba->sli4_hba.els_wq = NULL; 7577 } 7578 7579 /* Release unsolicited receive queue */ 7580 if (phba->sli4_hba.hdr_rq != NULL) { 7581 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 7582 phba->sli4_hba.hdr_rq = NULL; 7583 } 7584 if (phba->sli4_hba.dat_rq != NULL) { 7585 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 7586 phba->sli4_hba.dat_rq = NULL; 7587 } 7588 7589 /* Release ELS complete queue */ 7590 if (phba->sli4_hba.els_cq != NULL) { 7591 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 7592 phba->sli4_hba.els_cq = NULL; 7593 } 7594 7595 /* Release mailbox command complete queue */ 7596 if (phba->sli4_hba.mbx_cq != NULL) { 7597 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 7598 phba->sli4_hba.mbx_cq = NULL; 7599 } 7600 7601 return; 7602 } 7603 7604 /** 7605 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 7606 * @phba: pointer to lpfc hba data structure. 7607 * 7608 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 7609 * operation. 7610 * 7611 * Return codes 7612 * 0 - successful 7613 * -ENOMEM - No available memory 7614 * -EIO - The mailbox failed to complete successfully. 7615 **/ 7616 int 7617 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 7618 { 7619 struct lpfc_sli *psli = &phba->sli; 7620 struct lpfc_sli_ring *pring; 7621 int rc = -ENOMEM; 7622 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 7623 int fcp_cq_index = 0; 7624 uint32_t shdr_status, shdr_add_status; 7625 union lpfc_sli4_cfg_shdr *shdr; 7626 LPFC_MBOXQ_t *mboxq; 7627 uint32_t length; 7628 7629 /* Check for dual-ULP support */ 7630 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7631 if (!mboxq) { 7632 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7633 "3249 Unable to allocate memory for " 7634 "QUERY_FW_CFG mailbox command\n"); 7635 return -ENOMEM; 7636 } 7637 length = (sizeof(struct lpfc_mbx_query_fw_config) - 7638 sizeof(struct lpfc_sli4_cfg_mhdr)); 7639 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7640 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 7641 length, LPFC_SLI4_MBX_EMBED); 7642 7643 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7644 7645 shdr = (union lpfc_sli4_cfg_shdr *) 7646 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7647 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7648 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7649 if (shdr_status || shdr_add_status || rc) { 7650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7651 "3250 QUERY_FW_CFG mailbox failed with status " 7652 "x%x add_status x%x, mbx status x%x\n", 7653 shdr_status, shdr_add_status, rc); 7654 if (rc != MBX_TIMEOUT) 7655 mempool_free(mboxq, phba->mbox_mem_pool); 7656 rc = -ENXIO; 7657 goto out_error; 7658 } 7659 7660 phba->sli4_hba.fw_func_mode = 7661 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 7662 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 7663 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 7664 phba->sli4_hba.physical_port = 7665 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 7666 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7667 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 7668 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 7669 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 7670 7671 if (rc != MBX_TIMEOUT) 7672 mempool_free(mboxq, phba->mbox_mem_pool); 7673 7674 /* 7675 * Set up HBA Event Queues (EQs) 7676 */ 7677 7678 /* Set up HBA event queue */ 7679 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { 7680 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7681 "3147 Fast-path EQs not allocated\n"); 7682 rc = -ENOMEM; 7683 goto out_error; 7684 } 7685 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 7686 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { 7687 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7688 "0522 Fast-path EQ (%d) not " 7689 "allocated\n", fcp_eqidx); 7690 rc = -ENOMEM; 7691 goto out_destroy_hba_eq; 7692 } 7693 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], 7694 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); 7695 if (rc) { 7696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7697 "0523 Failed setup of fast-path EQ " 7698 "(%d), rc = 0x%x\n", fcp_eqidx, 7699 (uint32_t)rc); 7700 goto out_destroy_hba_eq; 7701 } 7702 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7703 "2584 HBA EQ setup: " 7704 "queue[%d]-id=%d\n", fcp_eqidx, 7705 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); 7706 } 7707 7708 /* Set up fast-path FCP Response Complete Queue */ 7709 if (!phba->sli4_hba.fcp_cq) { 7710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7711 "3148 Fast-path FCP CQ array not " 7712 "allocated\n"); 7713 rc = -ENOMEM; 7714 goto out_destroy_hba_eq; 7715 } 7716 7717 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { 7718 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 7719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7720 "0526 Fast-path FCP CQ (%d) not " 7721 "allocated\n", fcp_cqidx); 7722 rc = -ENOMEM; 7723 goto out_destroy_fcp_cq; 7724 } 7725 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 7726 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); 7727 if (rc) { 7728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7729 "0527 Failed setup of fast-path FCP " 7730 "CQ (%d), rc = 0x%x\n", fcp_cqidx, 7731 (uint32_t)rc); 7732 goto out_destroy_fcp_cq; 7733 } 7734 7735 /* Setup fcp_cq_map for fast lookup */ 7736 phba->sli4_hba.fcp_cq_map[fcp_cqidx] = 7737 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; 7738 7739 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7740 "2588 FCP CQ setup: cq[%d]-id=%d, " 7741 "parent seq[%d]-id=%d\n", 7742 fcp_cqidx, 7743 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 7744 fcp_cqidx, 7745 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); 7746 } 7747 7748 /* Set up fast-path FCP Work Queue */ 7749 if (!phba->sli4_hba.fcp_wq) { 7750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7751 "3149 Fast-path FCP WQ array not " 7752 "allocated\n"); 7753 rc = -ENOMEM; 7754 goto out_destroy_fcp_cq; 7755 } 7756 7757 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { 7758 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7759 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7760 "0534 Fast-path FCP WQ (%d) not " 7761 "allocated\n", fcp_wqidx); 7762 rc = -ENOMEM; 7763 goto out_destroy_fcp_wq; 7764 } 7765 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7766 phba->sli4_hba.fcp_cq[fcp_wqidx], 7767 LPFC_FCP); 7768 if (rc) { 7769 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7770 "0535 Failed setup of fast-path FCP " 7771 "WQ (%d), rc = 0x%x\n", fcp_wqidx, 7772 (uint32_t)rc); 7773 goto out_destroy_fcp_wq; 7774 } 7775 7776 /* Bind this WQ to the next FCP ring */ 7777 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; 7778 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7779 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; 7780 7781 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7782 "2591 FCP WQ setup: wq[%d]-id=%d, " 7783 "parent cq[%d]-id=%d\n", 7784 fcp_wqidx, 7785 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7786 fcp_cq_index, 7787 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); 7788 } 7789 /* 7790 * Set up Complete Queues (CQs) 7791 */ 7792 7793 /* Set up slow-path MBOX Complete Queue as the first CQ */ 7794 if (!phba->sli4_hba.mbx_cq) { 7795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7796 "0528 Mailbox CQ not allocated\n"); 7797 rc = -ENOMEM; 7798 goto out_destroy_fcp_wq; 7799 } 7800 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, 7801 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); 7802 if (rc) { 7803 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7804 "0529 Failed setup of slow-path mailbox CQ: " 7805 "rc = 0x%x\n", (uint32_t)rc); 7806 goto out_destroy_fcp_wq; 7807 } 7808 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7809 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7810 phba->sli4_hba.mbx_cq->queue_id, 7811 phba->sli4_hba.hba_eq[0]->queue_id); 7812 7813 /* Set up slow-path ELS Complete Queue */ 7814 if (!phba->sli4_hba.els_cq) { 7815 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7816 "0530 ELS CQ not allocated\n"); 7817 rc = -ENOMEM; 7818 goto out_destroy_mbx_cq; 7819 } 7820 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, 7821 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); 7822 if (rc) { 7823 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7824 "0531 Failed setup of slow-path ELS CQ: " 7825 "rc = 0x%x\n", (uint32_t)rc); 7826 goto out_destroy_mbx_cq; 7827 } 7828 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7829 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7830 phba->sli4_hba.els_cq->queue_id, 7831 phba->sli4_hba.hba_eq[0]->queue_id); 7832 7833 /* 7834 * Set up all the Work Queues (WQs) 7835 */ 7836 7837 /* Set up Mailbox Command Queue */ 7838 if (!phba->sli4_hba.mbx_wq) { 7839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7840 "0538 Slow-path MQ not allocated\n"); 7841 rc = -ENOMEM; 7842 goto out_destroy_els_cq; 7843 } 7844 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7845 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7846 if (rc) { 7847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7848 "0539 Failed setup of slow-path MQ: " 7849 "rc = 0x%x\n", rc); 7850 goto out_destroy_els_cq; 7851 } 7852 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7853 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7854 phba->sli4_hba.mbx_wq->queue_id, 7855 phba->sli4_hba.mbx_cq->queue_id); 7856 7857 /* Set up slow-path ELS Work Queue */ 7858 if (!phba->sli4_hba.els_wq) { 7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7860 "0536 Slow-path ELS WQ not allocated\n"); 7861 rc = -ENOMEM; 7862 goto out_destroy_mbx_wq; 7863 } 7864 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 7865 phba->sli4_hba.els_cq, LPFC_ELS); 7866 if (rc) { 7867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7868 "0537 Failed setup of slow-path ELS WQ: " 7869 "rc = 0x%x\n", (uint32_t)rc); 7870 goto out_destroy_mbx_wq; 7871 } 7872 7873 /* Bind this WQ to the ELS ring */ 7874 pring = &psli->ring[LPFC_ELS_RING]; 7875 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; 7876 phba->sli4_hba.els_cq->pring = pring; 7877 7878 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7879 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7880 phba->sli4_hba.els_wq->queue_id, 7881 phba->sli4_hba.els_cq->queue_id); 7882 7883 /* 7884 * Create Receive Queue (RQ) 7885 */ 7886 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 7887 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7888 "0540 Receive Queue not allocated\n"); 7889 rc = -ENOMEM; 7890 goto out_destroy_els_wq; 7891 } 7892 7893 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7894 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 7895 7896 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 7897 phba->sli4_hba.els_cq, LPFC_USOL); 7898 if (rc) { 7899 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7900 "0541 Failed setup of Receive Queue: " 7901 "rc = 0x%x\n", (uint32_t)rc); 7902 goto out_destroy_fcp_wq; 7903 } 7904 7905 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7906 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 7907 "parent cq-id=%d\n", 7908 phba->sli4_hba.hdr_rq->queue_id, 7909 phba->sli4_hba.dat_rq->queue_id, 7910 phba->sli4_hba.els_cq->queue_id); 7911 7912 if (phba->cfg_fof) { 7913 rc = lpfc_fof_queue_setup(phba); 7914 if (rc) { 7915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7916 "0549 Failed setup of FOF Queues: " 7917 "rc = 0x%x\n", rc); 7918 goto out_destroy_els_rq; 7919 } 7920 } 7921 7922 /* 7923 * Configure EQ delay multipier for interrupt coalescing using 7924 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time. 7925 */ 7926 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 7927 fcp_eqidx += LPFC_MAX_EQ_DELAY) 7928 lpfc_modify_fcp_eq_delay(phba, fcp_eqidx); 7929 return 0; 7930 7931 out_destroy_els_rq: 7932 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7933 out_destroy_els_wq: 7934 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7935 out_destroy_mbx_wq: 7936 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7937 out_destroy_els_cq: 7938 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7939 out_destroy_mbx_cq: 7940 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7941 out_destroy_fcp_wq: 7942 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7943 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7944 out_destroy_fcp_cq: 7945 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 7946 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 7947 out_destroy_hba_eq: 7948 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7949 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); 7950 out_error: 7951 return rc; 7952 } 7953 7954 /** 7955 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 7956 * @phba: pointer to lpfc hba data structure. 7957 * 7958 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 7959 * operation. 7960 * 7961 * Return codes 7962 * 0 - successful 7963 * -ENOMEM - No available memory 7964 * -EIO - The mailbox failed to complete successfully. 7965 **/ 7966 void 7967 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 7968 { 7969 int fcp_qidx; 7970 7971 /* Unset the queues created for Flash Optimized Fabric operations */ 7972 if (phba->cfg_fof) 7973 lpfc_fof_queue_destroy(phba); 7974 /* Unset mailbox command work queue */ 7975 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7976 /* Unset ELS work queue */ 7977 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7978 /* Unset unsolicited receive queue */ 7979 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7980 /* Unset FCP work queue */ 7981 if (phba->sli4_hba.fcp_wq) { 7982 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7983 fcp_qidx++) 7984 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7985 } 7986 /* Unset mailbox command complete queue */ 7987 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7988 /* Unset ELS complete queue */ 7989 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7990 /* Unset FCP response complete queue */ 7991 if (phba->sli4_hba.fcp_cq) { 7992 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7993 fcp_qidx++) 7994 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 7995 } 7996 /* Unset fast-path event queue */ 7997 if (phba->sli4_hba.hba_eq) { 7998 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7999 fcp_qidx++) 8000 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); 8001 } 8002 } 8003 8004 /** 8005 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 8006 * @phba: pointer to lpfc hba data structure. 8007 * 8008 * This routine is invoked to allocate and set up a pool of completion queue 8009 * events. The body of the completion queue event is a completion queue entry 8010 * CQE. For now, this pool is used for the interrupt service routine to queue 8011 * the following HBA completion queue events for the worker thread to process: 8012 * - Mailbox asynchronous events 8013 * - Receive queue completion unsolicited events 8014 * Later, this can be used for all the slow-path events. 8015 * 8016 * Return codes 8017 * 0 - successful 8018 * -ENOMEM - No available memory 8019 **/ 8020 static int 8021 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 8022 { 8023 struct lpfc_cq_event *cq_event; 8024 int i; 8025 8026 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 8027 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 8028 if (!cq_event) 8029 goto out_pool_create_fail; 8030 list_add_tail(&cq_event->list, 8031 &phba->sli4_hba.sp_cqe_event_pool); 8032 } 8033 return 0; 8034 8035 out_pool_create_fail: 8036 lpfc_sli4_cq_event_pool_destroy(phba); 8037 return -ENOMEM; 8038 } 8039 8040 /** 8041 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 8042 * @phba: pointer to lpfc hba data structure. 8043 * 8044 * This routine is invoked to free the pool of completion queue events at 8045 * driver unload time. Note that, it is the responsibility of the driver 8046 * cleanup routine to free all the outstanding completion-queue events 8047 * allocated from this pool back into the pool before invoking this routine 8048 * to destroy the pool. 8049 **/ 8050 static void 8051 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 8052 { 8053 struct lpfc_cq_event *cq_event, *next_cq_event; 8054 8055 list_for_each_entry_safe(cq_event, next_cq_event, 8056 &phba->sli4_hba.sp_cqe_event_pool, list) { 8057 list_del(&cq_event->list); 8058 kfree(cq_event); 8059 } 8060 } 8061 8062 /** 8063 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 8064 * @phba: pointer to lpfc hba data structure. 8065 * 8066 * This routine is the lock free version of the API invoked to allocate a 8067 * completion-queue event from the free pool. 8068 * 8069 * Return: Pointer to the newly allocated completion-queue event if successful 8070 * NULL otherwise. 8071 **/ 8072 struct lpfc_cq_event * 8073 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 8074 { 8075 struct lpfc_cq_event *cq_event = NULL; 8076 8077 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 8078 struct lpfc_cq_event, list); 8079 return cq_event; 8080 } 8081 8082 /** 8083 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 8084 * @phba: pointer to lpfc hba data structure. 8085 * 8086 * This routine is the lock version of the API invoked to allocate a 8087 * completion-queue event from the free pool. 8088 * 8089 * Return: Pointer to the newly allocated completion-queue event if successful 8090 * NULL otherwise. 8091 **/ 8092 struct lpfc_cq_event * 8093 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 8094 { 8095 struct lpfc_cq_event *cq_event; 8096 unsigned long iflags; 8097 8098 spin_lock_irqsave(&phba->hbalock, iflags); 8099 cq_event = __lpfc_sli4_cq_event_alloc(phba); 8100 spin_unlock_irqrestore(&phba->hbalock, iflags); 8101 return cq_event; 8102 } 8103 8104 /** 8105 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 8106 * @phba: pointer to lpfc hba data structure. 8107 * @cq_event: pointer to the completion queue event to be freed. 8108 * 8109 * This routine is the lock free version of the API invoked to release a 8110 * completion-queue event back into the free pool. 8111 **/ 8112 void 8113 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 8114 struct lpfc_cq_event *cq_event) 8115 { 8116 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 8117 } 8118 8119 /** 8120 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 8121 * @phba: pointer to lpfc hba data structure. 8122 * @cq_event: pointer to the completion queue event to be freed. 8123 * 8124 * This routine is the lock version of the API invoked to release a 8125 * completion-queue event back into the free pool. 8126 **/ 8127 void 8128 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 8129 struct lpfc_cq_event *cq_event) 8130 { 8131 unsigned long iflags; 8132 spin_lock_irqsave(&phba->hbalock, iflags); 8133 __lpfc_sli4_cq_event_release(phba, cq_event); 8134 spin_unlock_irqrestore(&phba->hbalock, iflags); 8135 } 8136 8137 /** 8138 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 8139 * @phba: pointer to lpfc hba data structure. 8140 * 8141 * This routine is to free all the pending completion-queue events to the 8142 * back into the free pool for device reset. 8143 **/ 8144 static void 8145 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 8146 { 8147 LIST_HEAD(cqelist); 8148 struct lpfc_cq_event *cqe; 8149 unsigned long iflags; 8150 8151 /* Retrieve all the pending WCQEs from pending WCQE lists */ 8152 spin_lock_irqsave(&phba->hbalock, iflags); 8153 /* Pending FCP XRI abort events */ 8154 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 8155 &cqelist); 8156 /* Pending ELS XRI abort events */ 8157 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 8158 &cqelist); 8159 /* Pending asynnc events */ 8160 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 8161 &cqelist); 8162 spin_unlock_irqrestore(&phba->hbalock, iflags); 8163 8164 while (!list_empty(&cqelist)) { 8165 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 8166 lpfc_sli4_cq_event_release(phba, cqe); 8167 } 8168 } 8169 8170 /** 8171 * lpfc_pci_function_reset - Reset pci function. 8172 * @phba: pointer to lpfc hba data structure. 8173 * 8174 * This routine is invoked to request a PCI function reset. It will destroys 8175 * all resources assigned to the PCI function which originates this request. 8176 * 8177 * Return codes 8178 * 0 - successful 8179 * -ENOMEM - No available memory 8180 * -EIO - The mailbox failed to complete successfully. 8181 **/ 8182 int 8183 lpfc_pci_function_reset(struct lpfc_hba *phba) 8184 { 8185 LPFC_MBOXQ_t *mboxq; 8186 uint32_t rc = 0, if_type; 8187 uint32_t shdr_status, shdr_add_status; 8188 uint32_t rdy_chk; 8189 uint32_t port_reset = 0; 8190 union lpfc_sli4_cfg_shdr *shdr; 8191 struct lpfc_register reg_data; 8192 uint16_t devid; 8193 8194 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8195 switch (if_type) { 8196 case LPFC_SLI_INTF_IF_TYPE_0: 8197 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8198 GFP_KERNEL); 8199 if (!mboxq) { 8200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8201 "0494 Unable to allocate memory for " 8202 "issuing SLI_FUNCTION_RESET mailbox " 8203 "command\n"); 8204 return -ENOMEM; 8205 } 8206 8207 /* Setup PCI function reset mailbox-ioctl command */ 8208 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8209 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 8210 LPFC_SLI4_MBX_EMBED); 8211 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8212 shdr = (union lpfc_sli4_cfg_shdr *) 8213 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 8214 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8215 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 8216 &shdr->response); 8217 if (rc != MBX_TIMEOUT) 8218 mempool_free(mboxq, phba->mbox_mem_pool); 8219 if (shdr_status || shdr_add_status || rc) { 8220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8221 "0495 SLI_FUNCTION_RESET mailbox " 8222 "failed with status x%x add_status x%x," 8223 " mbx status x%x\n", 8224 shdr_status, shdr_add_status, rc); 8225 rc = -ENXIO; 8226 } 8227 break; 8228 case LPFC_SLI_INTF_IF_TYPE_2: 8229 wait: 8230 /* 8231 * Poll the Port Status Register and wait for RDY for 8232 * up to 30 seconds. If the port doesn't respond, treat 8233 * it as an error. 8234 */ 8235 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 8236 if (lpfc_readl(phba->sli4_hba.u.if_type2. 8237 STATUSregaddr, ®_data.word0)) { 8238 rc = -ENODEV; 8239 goto out; 8240 } 8241 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 8242 break; 8243 msleep(20); 8244 } 8245 8246 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 8247 phba->work_status[0] = readl( 8248 phba->sli4_hba.u.if_type2.ERR1regaddr); 8249 phba->work_status[1] = readl( 8250 phba->sli4_hba.u.if_type2.ERR2regaddr); 8251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8252 "2890 Port not ready, port status reg " 8253 "0x%x error 1=0x%x, error 2=0x%x\n", 8254 reg_data.word0, 8255 phba->work_status[0], 8256 phba->work_status[1]); 8257 rc = -ENODEV; 8258 goto out; 8259 } 8260 8261 if (!port_reset) { 8262 /* 8263 * Reset the port now 8264 */ 8265 reg_data.word0 = 0; 8266 bf_set(lpfc_sliport_ctrl_end, ®_data, 8267 LPFC_SLIPORT_LITTLE_ENDIAN); 8268 bf_set(lpfc_sliport_ctrl_ip, ®_data, 8269 LPFC_SLIPORT_INIT_PORT); 8270 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 8271 CTRLregaddr); 8272 /* flush */ 8273 pci_read_config_word(phba->pcidev, 8274 PCI_DEVICE_ID, &devid); 8275 8276 port_reset = 1; 8277 msleep(20); 8278 goto wait; 8279 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 8280 rc = -ENODEV; 8281 goto out; 8282 } 8283 break; 8284 8285 case LPFC_SLI_INTF_IF_TYPE_1: 8286 default: 8287 break; 8288 } 8289 8290 out: 8291 /* Catch the not-ready port failure after a port reset. */ 8292 if (rc) { 8293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8294 "3317 HBA not functional: IP Reset Failed " 8295 "try: echo fw_reset > board_mode\n"); 8296 rc = -ENODEV; 8297 } 8298 8299 return rc; 8300 } 8301 8302 /** 8303 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 8304 * @phba: pointer to lpfc hba data structure. 8305 * 8306 * This routine is invoked to set up the PCI device memory space for device 8307 * with SLI-4 interface spec. 8308 * 8309 * Return codes 8310 * 0 - successful 8311 * other values - error 8312 **/ 8313 static int 8314 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 8315 { 8316 struct pci_dev *pdev; 8317 unsigned long bar0map_len, bar1map_len, bar2map_len; 8318 int error = -ENODEV; 8319 uint32_t if_type; 8320 8321 /* Obtain PCI device reference */ 8322 if (!phba->pcidev) 8323 return error; 8324 else 8325 pdev = phba->pcidev; 8326 8327 /* Set the device DMA mask size */ 8328 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 8329 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 8330 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 8331 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 8332 return error; 8333 } 8334 } 8335 8336 /* 8337 * The BARs and register set definitions and offset locations are 8338 * dependent on the if_type. 8339 */ 8340 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 8341 &phba->sli4_hba.sli_intf.word0)) { 8342 return error; 8343 } 8344 8345 /* There is no SLI3 failback for SLI4 devices. */ 8346 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 8347 LPFC_SLI_INTF_VALID) { 8348 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8349 "2894 SLI_INTF reg contents invalid " 8350 "sli_intf reg 0x%x\n", 8351 phba->sli4_hba.sli_intf.word0); 8352 return error; 8353 } 8354 8355 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8356 /* 8357 * Get the bus address of SLI4 device Bar regions and the 8358 * number of bytes required by each mapping. The mapping of the 8359 * particular PCI BARs regions is dependent on the type of 8360 * SLI4 device. 8361 */ 8362 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 8363 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 8364 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 8365 8366 /* 8367 * Map SLI4 PCI Config Space Register base to a kernel virtual 8368 * addr 8369 */ 8370 phba->sli4_hba.conf_regs_memmap_p = 8371 ioremap(phba->pci_bar0_map, bar0map_len); 8372 if (!phba->sli4_hba.conf_regs_memmap_p) { 8373 dev_printk(KERN_ERR, &pdev->dev, 8374 "ioremap failed for SLI4 PCI config " 8375 "registers.\n"); 8376 goto out; 8377 } 8378 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 8379 /* Set up BAR0 PCI config space register memory map */ 8380 lpfc_sli4_bar0_register_memmap(phba, if_type); 8381 } else { 8382 phba->pci_bar0_map = pci_resource_start(pdev, 1); 8383 bar0map_len = pci_resource_len(pdev, 1); 8384 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8385 dev_printk(KERN_ERR, &pdev->dev, 8386 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 8387 goto out; 8388 } 8389 phba->sli4_hba.conf_regs_memmap_p = 8390 ioremap(phba->pci_bar0_map, bar0map_len); 8391 if (!phba->sli4_hba.conf_regs_memmap_p) { 8392 dev_printk(KERN_ERR, &pdev->dev, 8393 "ioremap failed for SLI4 PCI config " 8394 "registers.\n"); 8395 goto out; 8396 } 8397 lpfc_sli4_bar0_register_memmap(phba, if_type); 8398 } 8399 8400 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8401 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 8402 /* 8403 * Map SLI4 if type 0 HBA Control Register base to a kernel 8404 * virtual address and setup the registers. 8405 */ 8406 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 8407 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 8408 phba->sli4_hba.ctrl_regs_memmap_p = 8409 ioremap(phba->pci_bar1_map, bar1map_len); 8410 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 8411 dev_printk(KERN_ERR, &pdev->dev, 8412 "ioremap failed for SLI4 HBA control registers.\n"); 8413 goto out_iounmap_conf; 8414 } 8415 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; 8416 lpfc_sli4_bar1_register_memmap(phba); 8417 } 8418 8419 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8420 (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 8421 /* 8422 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 8423 * virtual address and setup the registers. 8424 */ 8425 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 8426 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 8427 phba->sli4_hba.drbl_regs_memmap_p = 8428 ioremap(phba->pci_bar2_map, bar2map_len); 8429 if (!phba->sli4_hba.drbl_regs_memmap_p) { 8430 dev_printk(KERN_ERR, &pdev->dev, 8431 "ioremap failed for SLI4 HBA doorbell registers.\n"); 8432 goto out_iounmap_ctrl; 8433 } 8434 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 8435 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 8436 if (error) 8437 goto out_iounmap_all; 8438 } 8439 8440 return 0; 8441 8442 out_iounmap_all: 8443 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 8444 out_iounmap_ctrl: 8445 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 8446 out_iounmap_conf: 8447 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8448 out: 8449 return error; 8450 } 8451 8452 /** 8453 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 8454 * @phba: pointer to lpfc hba data structure. 8455 * 8456 * This routine is invoked to unset the PCI device memory space for device 8457 * with SLI-4 interface spec. 8458 **/ 8459 static void 8460 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 8461 { 8462 uint32_t if_type; 8463 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8464 8465 switch (if_type) { 8466 case LPFC_SLI_INTF_IF_TYPE_0: 8467 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 8468 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 8469 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8470 break; 8471 case LPFC_SLI_INTF_IF_TYPE_2: 8472 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8473 break; 8474 case LPFC_SLI_INTF_IF_TYPE_1: 8475 default: 8476 dev_printk(KERN_ERR, &phba->pcidev->dev, 8477 "FATAL - unsupported SLI4 interface type - %d\n", 8478 if_type); 8479 break; 8480 } 8481 } 8482 8483 /** 8484 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 8485 * @phba: pointer to lpfc hba data structure. 8486 * 8487 * This routine is invoked to enable the MSI-X interrupt vectors to device 8488 * with SLI-3 interface specs. The kernel function pci_enable_msix_exact() 8489 * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(), 8490 * once invoked, enables either all or nothing, depending on the current 8491 * availability of PCI vector resources. The device driver is responsible 8492 * for calling the individual request_irq() to register each MSI-X vector 8493 * with a interrupt handler, which is done in this function. Note that 8494 * later when device is unloading, the driver should always call free_irq() 8495 * on all MSI-X vectors it has done request_irq() on before calling 8496 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 8497 * will be left with MSI-X enabled and leaks its vectors. 8498 * 8499 * Return codes 8500 * 0 - successful 8501 * other values - error 8502 **/ 8503 static int 8504 lpfc_sli_enable_msix(struct lpfc_hba *phba) 8505 { 8506 int rc, i; 8507 LPFC_MBOXQ_t *pmb; 8508 8509 /* Set up MSI-X multi-message vectors */ 8510 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8511 phba->msix_entries[i].entry = i; 8512 8513 /* Configure MSI-X capability structure */ 8514 rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries, 8515 LPFC_MSIX_VECTORS); 8516 if (rc) { 8517 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8518 "0420 PCI enable MSI-X failed (%d)\n", rc); 8519 goto vec_fail_out; 8520 } 8521 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8523 "0477 MSI-X entry[%d]: vector=x%x " 8524 "message=%d\n", i, 8525 phba->msix_entries[i].vector, 8526 phba->msix_entries[i].entry); 8527 /* 8528 * Assign MSI-X vectors to interrupt handlers 8529 */ 8530 8531 /* vector-0 is associated to slow-path handler */ 8532 rc = request_irq(phba->msix_entries[0].vector, 8533 &lpfc_sli_sp_intr_handler, 0, 8534 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8535 if (rc) { 8536 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8537 "0421 MSI-X slow-path request_irq failed " 8538 "(%d)\n", rc); 8539 goto msi_fail_out; 8540 } 8541 8542 /* vector-1 is associated to fast-path handler */ 8543 rc = request_irq(phba->msix_entries[1].vector, 8544 &lpfc_sli_fp_intr_handler, 0, 8545 LPFC_FP_DRIVER_HANDLER_NAME, phba); 8546 8547 if (rc) { 8548 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8549 "0429 MSI-X fast-path request_irq failed " 8550 "(%d)\n", rc); 8551 goto irq_fail_out; 8552 } 8553 8554 /* 8555 * Configure HBA MSI-X attention conditions to messages 8556 */ 8557 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8558 8559 if (!pmb) { 8560 rc = -ENOMEM; 8561 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8562 "0474 Unable to allocate memory for issuing " 8563 "MBOX_CONFIG_MSI command\n"); 8564 goto mem_fail_out; 8565 } 8566 rc = lpfc_config_msi(phba, pmb); 8567 if (rc) 8568 goto mbx_fail_out; 8569 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8570 if (rc != MBX_SUCCESS) { 8571 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 8572 "0351 Config MSI mailbox command failed, " 8573 "mbxCmd x%x, mbxStatus x%x\n", 8574 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 8575 goto mbx_fail_out; 8576 } 8577 8578 /* Free memory allocated for mailbox command */ 8579 mempool_free(pmb, phba->mbox_mem_pool); 8580 return rc; 8581 8582 mbx_fail_out: 8583 /* Free memory allocated for mailbox command */ 8584 mempool_free(pmb, phba->mbox_mem_pool); 8585 8586 mem_fail_out: 8587 /* free the irq already requested */ 8588 free_irq(phba->msix_entries[1].vector, phba); 8589 8590 irq_fail_out: 8591 /* free the irq already requested */ 8592 free_irq(phba->msix_entries[0].vector, phba); 8593 8594 msi_fail_out: 8595 /* Unconfigure MSI-X capability structure */ 8596 pci_disable_msix(phba->pcidev); 8597 8598 vec_fail_out: 8599 return rc; 8600 } 8601 8602 /** 8603 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 8604 * @phba: pointer to lpfc hba data structure. 8605 * 8606 * This routine is invoked to release the MSI-X vectors and then disable the 8607 * MSI-X interrupt mode to device with SLI-3 interface spec. 8608 **/ 8609 static void 8610 lpfc_sli_disable_msix(struct lpfc_hba *phba) 8611 { 8612 int i; 8613 8614 /* Free up MSI-X multi-message vectors */ 8615 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8616 free_irq(phba->msix_entries[i].vector, phba); 8617 /* Disable MSI-X */ 8618 pci_disable_msix(phba->pcidev); 8619 8620 return; 8621 } 8622 8623 /** 8624 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 8625 * @phba: pointer to lpfc hba data structure. 8626 * 8627 * This routine is invoked to enable the MSI interrupt mode to device with 8628 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 8629 * enable the MSI vector. The device driver is responsible for calling the 8630 * request_irq() to register MSI vector with a interrupt the handler, which 8631 * is done in this function. 8632 * 8633 * Return codes 8634 * 0 - successful 8635 * other values - error 8636 */ 8637 static int 8638 lpfc_sli_enable_msi(struct lpfc_hba *phba) 8639 { 8640 int rc; 8641 8642 rc = pci_enable_msi(phba->pcidev); 8643 if (!rc) 8644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8645 "0462 PCI enable MSI mode success.\n"); 8646 else { 8647 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8648 "0471 PCI enable MSI mode failed (%d)\n", rc); 8649 return rc; 8650 } 8651 8652 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8653 0, LPFC_DRIVER_NAME, phba); 8654 if (rc) { 8655 pci_disable_msi(phba->pcidev); 8656 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8657 "0478 MSI request_irq failed (%d)\n", rc); 8658 } 8659 return rc; 8660 } 8661 8662 /** 8663 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 8664 * @phba: pointer to lpfc hba data structure. 8665 * 8666 * This routine is invoked to disable the MSI interrupt mode to device with 8667 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has 8668 * done request_irq() on before calling pci_disable_msi(). Failure to do so 8669 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 8670 * its vector. 8671 */ 8672 static void 8673 lpfc_sli_disable_msi(struct lpfc_hba *phba) 8674 { 8675 free_irq(phba->pcidev->irq, phba); 8676 pci_disable_msi(phba->pcidev); 8677 return; 8678 } 8679 8680 /** 8681 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 8682 * @phba: pointer to lpfc hba data structure. 8683 * 8684 * This routine is invoked to enable device interrupt and associate driver's 8685 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 8686 * spec. Depends on the interrupt mode configured to the driver, the driver 8687 * will try to fallback from the configured interrupt mode to an interrupt 8688 * mode which is supported by the platform, kernel, and device in the order 8689 * of: 8690 * MSI-X -> MSI -> IRQ. 8691 * 8692 * Return codes 8693 * 0 - successful 8694 * other values - error 8695 **/ 8696 static uint32_t 8697 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 8698 { 8699 uint32_t intr_mode = LPFC_INTR_ERROR; 8700 int retval; 8701 8702 if (cfg_mode == 2) { 8703 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 8704 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 8705 if (!retval) { 8706 /* Now, try to enable MSI-X interrupt mode */ 8707 retval = lpfc_sli_enable_msix(phba); 8708 if (!retval) { 8709 /* Indicate initialization to MSI-X mode */ 8710 phba->intr_type = MSIX; 8711 intr_mode = 2; 8712 } 8713 } 8714 } 8715 8716 /* Fallback to MSI if MSI-X initialization failed */ 8717 if (cfg_mode >= 1 && phba->intr_type == NONE) { 8718 retval = lpfc_sli_enable_msi(phba); 8719 if (!retval) { 8720 /* Indicate initialization to MSI mode */ 8721 phba->intr_type = MSI; 8722 intr_mode = 1; 8723 } 8724 } 8725 8726 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 8727 if (phba->intr_type == NONE) { 8728 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8729 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 8730 if (!retval) { 8731 /* Indicate initialization to INTx mode */ 8732 phba->intr_type = INTx; 8733 intr_mode = 0; 8734 } 8735 } 8736 return intr_mode; 8737 } 8738 8739 /** 8740 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 8741 * @phba: pointer to lpfc hba data structure. 8742 * 8743 * This routine is invoked to disable device interrupt and disassociate the 8744 * driver's interrupt handler(s) from interrupt vector(s) to device with 8745 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 8746 * release the interrupt vector(s) for the message signaled interrupt. 8747 **/ 8748 static void 8749 lpfc_sli_disable_intr(struct lpfc_hba *phba) 8750 { 8751 /* Disable the currently initialized interrupt mode */ 8752 if (phba->intr_type == MSIX) 8753 lpfc_sli_disable_msix(phba); 8754 else if (phba->intr_type == MSI) 8755 lpfc_sli_disable_msi(phba); 8756 else if (phba->intr_type == INTx) 8757 free_irq(phba->pcidev->irq, phba); 8758 8759 /* Reset interrupt management states */ 8760 phba->intr_type = NONE; 8761 phba->sli.slistat.sli_intr = 0; 8762 8763 return; 8764 } 8765 8766 /** 8767 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id 8768 * @phba: pointer to lpfc hba data structure. 8769 * 8770 * Find next available CPU to use for IRQ to CPU affinity. 8771 */ 8772 static int 8773 lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) 8774 { 8775 struct lpfc_vector_map_info *cpup; 8776 int cpu; 8777 8778 cpup = phba->sli4_hba.cpu_map; 8779 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8780 /* CPU must be online */ 8781 if (cpu_online(cpu)) { 8782 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8783 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && 8784 (cpup->phys_id == phys_id)) { 8785 return cpu; 8786 } 8787 } 8788 cpup++; 8789 } 8790 8791 /* 8792 * If we get here, we have used ALL CPUs for the specific 8793 * phys_id. Now we need to clear out lpfc_used_cpu and start 8794 * reusing CPUs. 8795 */ 8796 8797 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8798 if (lpfc_used_cpu[cpu] == phys_id) 8799 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; 8800 } 8801 8802 cpup = phba->sli4_hba.cpu_map; 8803 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8804 /* CPU must be online */ 8805 if (cpu_online(cpu)) { 8806 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8807 (cpup->phys_id == phys_id)) { 8808 return cpu; 8809 } 8810 } 8811 cpup++; 8812 } 8813 return LPFC_VECTOR_MAP_EMPTY; 8814 } 8815 8816 /** 8817 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors 8818 * @phba: pointer to lpfc hba data structure. 8819 * @vectors: number of HBA vectors 8820 * 8821 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector 8822 * affinization across multple physical CPUs (numa nodes). 8823 * In addition, this routine will assign an IO channel for each CPU 8824 * to use when issuing I/Os. 8825 */ 8826 static int 8827 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) 8828 { 8829 int i, idx, saved_chann, used_chann, cpu, phys_id; 8830 int max_phys_id, min_phys_id; 8831 int num_io_channel, first_cpu, chan; 8832 struct lpfc_vector_map_info *cpup; 8833 #ifdef CONFIG_X86 8834 struct cpuinfo_x86 *cpuinfo; 8835 #endif 8836 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; 8837 8838 /* If there is no mapping, just return */ 8839 if (!phba->cfg_fcp_cpu_map) 8840 return 1; 8841 8842 /* Init cpu_map array */ 8843 memset(phba->sli4_hba.cpu_map, 0xff, 8844 (sizeof(struct lpfc_vector_map_info) * 8845 phba->sli4_hba.num_present_cpu)); 8846 8847 max_phys_id = 0; 8848 min_phys_id = 0xff; 8849 phys_id = 0; 8850 num_io_channel = 0; 8851 first_cpu = LPFC_VECTOR_MAP_EMPTY; 8852 8853 /* Update CPU map with physical id and core id of each CPU */ 8854 cpup = phba->sli4_hba.cpu_map; 8855 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8856 #ifdef CONFIG_X86 8857 cpuinfo = &cpu_data(cpu); 8858 cpup->phys_id = cpuinfo->phys_proc_id; 8859 cpup->core_id = cpuinfo->cpu_core_id; 8860 #else 8861 /* No distinction between CPUs for other platforms */ 8862 cpup->phys_id = 0; 8863 cpup->core_id = 0; 8864 #endif 8865 8866 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8867 "3328 CPU physid %d coreid %d\n", 8868 cpup->phys_id, cpup->core_id); 8869 8870 if (cpup->phys_id > max_phys_id) 8871 max_phys_id = cpup->phys_id; 8872 if (cpup->phys_id < min_phys_id) 8873 min_phys_id = cpup->phys_id; 8874 cpup++; 8875 } 8876 8877 phys_id = min_phys_id; 8878 /* Now associate the HBA vectors with specific CPUs */ 8879 for (idx = 0; idx < vectors; idx++) { 8880 cpup = phba->sli4_hba.cpu_map; 8881 cpu = lpfc_find_next_cpu(phba, phys_id); 8882 if (cpu == LPFC_VECTOR_MAP_EMPTY) { 8883 8884 /* Try for all phys_id's */ 8885 for (i = 1; i < max_phys_id; i++) { 8886 phys_id++; 8887 if (phys_id > max_phys_id) 8888 phys_id = min_phys_id; 8889 cpu = lpfc_find_next_cpu(phba, phys_id); 8890 if (cpu == LPFC_VECTOR_MAP_EMPTY) 8891 continue; 8892 goto found; 8893 } 8894 8895 /* Use round robin for scheduling */ 8896 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; 8897 chan = 0; 8898 cpup = phba->sli4_hba.cpu_map; 8899 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 8900 cpup->channel_id = chan; 8901 cpup++; 8902 chan++; 8903 if (chan >= phba->cfg_fcp_io_channel) 8904 chan = 0; 8905 } 8906 8907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8908 "3329 Cannot set affinity:" 8909 "Error mapping vector %d (%d)\n", 8910 idx, vectors); 8911 return 0; 8912 } 8913 found: 8914 cpup += cpu; 8915 if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) 8916 lpfc_used_cpu[cpu] = phys_id; 8917 8918 /* Associate vector with selected CPU */ 8919 cpup->irq = phba->sli4_hba.msix_entries[idx].vector; 8920 8921 /* Associate IO channel with selected CPU */ 8922 cpup->channel_id = idx; 8923 num_io_channel++; 8924 8925 if (first_cpu == LPFC_VECTOR_MAP_EMPTY) 8926 first_cpu = cpu; 8927 8928 /* Now affinitize to the selected CPU */ 8929 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. 8930 vector, get_cpu_mask(cpu)); 8931 8932 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8933 "3330 Set Affinity: CPU %d channel %d " 8934 "irq %d (%x)\n", 8935 cpu, cpup->channel_id, 8936 phba->sli4_hba.msix_entries[idx].vector, i); 8937 8938 /* Spread vector mapping across multple physical CPU nodes */ 8939 phys_id++; 8940 if (phys_id > max_phys_id) 8941 phys_id = min_phys_id; 8942 } 8943 8944 /* 8945 * Finally fill in the IO channel for any remaining CPUs. 8946 * At this point, all IO channels have been assigned to a specific 8947 * MSIx vector, mapped to a specific CPU. 8948 * Base the remaining IO channel assigned, to IO channels already 8949 * assigned to other CPUs on the same phys_id. 8950 */ 8951 for (i = min_phys_id; i <= max_phys_id; i++) { 8952 /* 8953 * If there are no io channels already mapped to 8954 * this phys_id, just round robin thru the io_channels. 8955 * Setup chann[] for round robin. 8956 */ 8957 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) 8958 chann[idx] = idx; 8959 8960 saved_chann = 0; 8961 used_chann = 0; 8962 8963 /* 8964 * First build a list of IO channels already assigned 8965 * to this phys_id before reassigning the same IO 8966 * channels to the remaining CPUs. 8967 */ 8968 cpup = phba->sli4_hba.cpu_map; 8969 cpu = first_cpu; 8970 cpup += cpu; 8971 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; 8972 idx++) { 8973 if (cpup->phys_id == i) { 8974 /* 8975 * Save any IO channels that are 8976 * already mapped to this phys_id. 8977 */ 8978 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { 8979 if (saved_chann <= 8980 LPFC_FCP_IO_CHAN_MAX) { 8981 chann[saved_chann] = 8982 cpup->channel_id; 8983 saved_chann++; 8984 } 8985 goto out; 8986 } 8987 8988 /* See if we are using round-robin */ 8989 if (saved_chann == 0) 8990 saved_chann = 8991 phba->cfg_fcp_io_channel; 8992 8993 /* Associate next IO channel with CPU */ 8994 cpup->channel_id = chann[used_chann]; 8995 num_io_channel++; 8996 used_chann++; 8997 if (used_chann == saved_chann) 8998 used_chann = 0; 8999 9000 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9001 "3331 Set IO_CHANN " 9002 "CPU %d channel %d\n", 9003 idx, cpup->channel_id); 9004 } 9005 out: 9006 cpu++; 9007 if (cpu >= phba->sli4_hba.num_present_cpu) { 9008 cpup = phba->sli4_hba.cpu_map; 9009 cpu = 0; 9010 } else { 9011 cpup++; 9012 } 9013 } 9014 } 9015 9016 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { 9017 cpup = phba->sli4_hba.cpu_map; 9018 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { 9019 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { 9020 cpup->channel_id = 0; 9021 num_io_channel++; 9022 9023 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9024 "3332 Assign IO_CHANN " 9025 "CPU %d channel %d\n", 9026 idx, cpup->channel_id); 9027 } 9028 cpup++; 9029 } 9030 } 9031 9032 /* Sanity check */ 9033 if (num_io_channel != phba->sli4_hba.num_present_cpu) 9034 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9035 "3333 Set affinity mismatch:" 9036 "%d chann != %d cpus: %d vectors\n", 9037 num_io_channel, phba->sli4_hba.num_present_cpu, 9038 vectors); 9039 9040 /* Enable using cpu affinity for scheduling */ 9041 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; 9042 return 1; 9043 } 9044 9045 9046 /** 9047 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 9048 * @phba: pointer to lpfc hba data structure. 9049 * 9050 * This routine is invoked to enable the MSI-X interrupt vectors to device 9051 * with SLI-4 interface spec. The kernel function pci_enable_msix_range() 9052 * is called to enable the MSI-X vectors. The device driver is responsible 9053 * for calling the individual request_irq() to register each MSI-X vector 9054 * with a interrupt handler, which is done in this function. Note that 9055 * later when device is unloading, the driver should always call free_irq() 9056 * on all MSI-X vectors it has done request_irq() on before calling 9057 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 9058 * will be left with MSI-X enabled and leaks its vectors. 9059 * 9060 * Return codes 9061 * 0 - successful 9062 * other values - error 9063 **/ 9064 static int 9065 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 9066 { 9067 int vectors, rc, index; 9068 9069 /* Set up MSI-X multi-message vectors */ 9070 for (index = 0; index < phba->cfg_fcp_io_channel; index++) 9071 phba->sli4_hba.msix_entries[index].entry = index; 9072 9073 /* Configure MSI-X capability structure */ 9074 vectors = phba->cfg_fcp_io_channel; 9075 if (phba->cfg_fof) { 9076 phba->sli4_hba.msix_entries[index].entry = index; 9077 vectors++; 9078 } 9079 rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries, 9080 2, vectors); 9081 if (rc < 0) { 9082 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9083 "0484 PCI enable MSI-X failed (%d)\n", rc); 9084 goto vec_fail_out; 9085 } 9086 vectors = rc; 9087 9088 /* Log MSI-X vector assignment */ 9089 for (index = 0; index < vectors; index++) 9090 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9091 "0489 MSI-X entry[%d]: vector=x%x " 9092 "message=%d\n", index, 9093 phba->sli4_hba.msix_entries[index].vector, 9094 phba->sli4_hba.msix_entries[index].entry); 9095 9096 /* Assign MSI-X vectors to interrupt handlers */ 9097 for (index = 0; index < vectors; index++) { 9098 memset(&phba->sli4_hba.handler_name[index], 0, 16); 9099 snprintf((char *)&phba->sli4_hba.handler_name[index], 9100 LPFC_SLI4_HANDLER_NAME_SZ, 9101 LPFC_DRIVER_HANDLER_NAME"%d", index); 9102 9103 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9104 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9105 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); 9106 if (phba->cfg_fof && (index == (vectors - 1))) 9107 rc = request_irq( 9108 phba->sli4_hba.msix_entries[index].vector, 9109 &lpfc_sli4_fof_intr_handler, 0, 9110 (char *)&phba->sli4_hba.handler_name[index], 9111 &phba->sli4_hba.fcp_eq_hdl[index]); 9112 else 9113 rc = request_irq( 9114 phba->sli4_hba.msix_entries[index].vector, 9115 &lpfc_sli4_hba_intr_handler, 0, 9116 (char *)&phba->sli4_hba.handler_name[index], 9117 &phba->sli4_hba.fcp_eq_hdl[index]); 9118 if (rc) { 9119 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9120 "0486 MSI-X fast-path (%d) " 9121 "request_irq failed (%d)\n", index, rc); 9122 goto cfg_fail_out; 9123 } 9124 } 9125 9126 if (phba->cfg_fof) 9127 vectors--; 9128 9129 if (vectors != phba->cfg_fcp_io_channel) { 9130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9131 "3238 Reducing IO channels to match number of " 9132 "MSI-X vectors, requested %d got %d\n", 9133 phba->cfg_fcp_io_channel, vectors); 9134 phba->cfg_fcp_io_channel = vectors; 9135 } 9136 9137 if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport))) 9138 lpfc_sli4_set_affinity(phba, vectors); 9139 return rc; 9140 9141 cfg_fail_out: 9142 /* free the irq already requested */ 9143 for (--index; index >= 0; index--) { 9144 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 9145 vector, NULL); 9146 free_irq(phba->sli4_hba.msix_entries[index].vector, 9147 &phba->sli4_hba.fcp_eq_hdl[index]); 9148 } 9149 9150 /* Unconfigure MSI-X capability structure */ 9151 pci_disable_msix(phba->pcidev); 9152 9153 vec_fail_out: 9154 return rc; 9155 } 9156 9157 /** 9158 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 9159 * @phba: pointer to lpfc hba data structure. 9160 * 9161 * This routine is invoked to release the MSI-X vectors and then disable the 9162 * MSI-X interrupt mode to device with SLI-4 interface spec. 9163 **/ 9164 static void 9165 lpfc_sli4_disable_msix(struct lpfc_hba *phba) 9166 { 9167 int index; 9168 9169 /* Free up MSI-X multi-message vectors */ 9170 for (index = 0; index < phba->cfg_fcp_io_channel; index++) { 9171 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 9172 vector, NULL); 9173 free_irq(phba->sli4_hba.msix_entries[index].vector, 9174 &phba->sli4_hba.fcp_eq_hdl[index]); 9175 } 9176 if (phba->cfg_fof) { 9177 free_irq(phba->sli4_hba.msix_entries[index].vector, 9178 &phba->sli4_hba.fcp_eq_hdl[index]); 9179 } 9180 /* Disable MSI-X */ 9181 pci_disable_msix(phba->pcidev); 9182 9183 return; 9184 } 9185 9186 /** 9187 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 9188 * @phba: pointer to lpfc hba data structure. 9189 * 9190 * This routine is invoked to enable the MSI interrupt mode to device with 9191 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 9192 * to enable the MSI vector. The device driver is responsible for calling 9193 * the request_irq() to register MSI vector with a interrupt the handler, 9194 * which is done in this function. 9195 * 9196 * Return codes 9197 * 0 - successful 9198 * other values - error 9199 **/ 9200 static int 9201 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 9202 { 9203 int rc, index; 9204 9205 rc = pci_enable_msi(phba->pcidev); 9206 if (!rc) 9207 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9208 "0487 PCI enable MSI mode success.\n"); 9209 else { 9210 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9211 "0488 PCI enable MSI mode failed (%d)\n", rc); 9212 return rc; 9213 } 9214 9215 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9216 0, LPFC_DRIVER_NAME, phba); 9217 if (rc) { 9218 pci_disable_msi(phba->pcidev); 9219 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9220 "0490 MSI request_irq failed (%d)\n", rc); 9221 return rc; 9222 } 9223 9224 for (index = 0; index < phba->cfg_fcp_io_channel; index++) { 9225 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9226 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9227 } 9228 9229 if (phba->cfg_fof) { 9230 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9231 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9232 } 9233 return 0; 9234 } 9235 9236 /** 9237 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 9238 * @phba: pointer to lpfc hba data structure. 9239 * 9240 * This routine is invoked to disable the MSI interrupt mode to device with 9241 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has 9242 * done request_irq() on before calling pci_disable_msi(). Failure to do so 9243 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 9244 * its vector. 9245 **/ 9246 static void 9247 lpfc_sli4_disable_msi(struct lpfc_hba *phba) 9248 { 9249 free_irq(phba->pcidev->irq, phba); 9250 pci_disable_msi(phba->pcidev); 9251 return; 9252 } 9253 9254 /** 9255 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 9256 * @phba: pointer to lpfc hba data structure. 9257 * 9258 * This routine is invoked to enable device interrupt and associate driver's 9259 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 9260 * interface spec. Depends on the interrupt mode configured to the driver, 9261 * the driver will try to fallback from the configured interrupt mode to an 9262 * interrupt mode which is supported by the platform, kernel, and device in 9263 * the order of: 9264 * MSI-X -> MSI -> IRQ. 9265 * 9266 * Return codes 9267 * 0 - successful 9268 * other values - error 9269 **/ 9270 static uint32_t 9271 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 9272 { 9273 uint32_t intr_mode = LPFC_INTR_ERROR; 9274 int retval, index; 9275 9276 if (cfg_mode == 2) { 9277 /* Preparation before conf_msi mbox cmd */ 9278 retval = 0; 9279 if (!retval) { 9280 /* Now, try to enable MSI-X interrupt mode */ 9281 retval = lpfc_sli4_enable_msix(phba); 9282 if (!retval) { 9283 /* Indicate initialization to MSI-X mode */ 9284 phba->intr_type = MSIX; 9285 intr_mode = 2; 9286 } 9287 } 9288 } 9289 9290 /* Fallback to MSI if MSI-X initialization failed */ 9291 if (cfg_mode >= 1 && phba->intr_type == NONE) { 9292 retval = lpfc_sli4_enable_msi(phba); 9293 if (!retval) { 9294 /* Indicate initialization to MSI mode */ 9295 phba->intr_type = MSI; 9296 intr_mode = 1; 9297 } 9298 } 9299 9300 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 9301 if (phba->intr_type == NONE) { 9302 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9303 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 9304 if (!retval) { 9305 /* Indicate initialization to INTx mode */ 9306 phba->intr_type = INTx; 9307 intr_mode = 0; 9308 for (index = 0; index < phba->cfg_fcp_io_channel; 9309 index++) { 9310 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9311 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9312 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 9313 fcp_eq_in_use, 1); 9314 } 9315 if (phba->cfg_fof) { 9316 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9317 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9318 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 9319 fcp_eq_in_use, 1); 9320 } 9321 } 9322 } 9323 return intr_mode; 9324 } 9325 9326 /** 9327 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 9328 * @phba: pointer to lpfc hba data structure. 9329 * 9330 * This routine is invoked to disable device interrupt and disassociate 9331 * the driver's interrupt handler(s) from interrupt vector(s) to device 9332 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 9333 * will release the interrupt vector(s) for the message signaled interrupt. 9334 **/ 9335 static void 9336 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 9337 { 9338 /* Disable the currently initialized interrupt mode */ 9339 if (phba->intr_type == MSIX) 9340 lpfc_sli4_disable_msix(phba); 9341 else if (phba->intr_type == MSI) 9342 lpfc_sli4_disable_msi(phba); 9343 else if (phba->intr_type == INTx) 9344 free_irq(phba->pcidev->irq, phba); 9345 9346 /* Reset interrupt management states */ 9347 phba->intr_type = NONE; 9348 phba->sli.slistat.sli_intr = 0; 9349 9350 return; 9351 } 9352 9353 /** 9354 * lpfc_unset_hba - Unset SLI3 hba device initialization 9355 * @phba: pointer to lpfc hba data structure. 9356 * 9357 * This routine is invoked to unset the HBA device initialization steps to 9358 * a device with SLI-3 interface spec. 9359 **/ 9360 static void 9361 lpfc_unset_hba(struct lpfc_hba *phba) 9362 { 9363 struct lpfc_vport *vport = phba->pport; 9364 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9365 9366 spin_lock_irq(shost->host_lock); 9367 vport->load_flag |= FC_UNLOADING; 9368 spin_unlock_irq(shost->host_lock); 9369 9370 kfree(phba->vpi_bmask); 9371 kfree(phba->vpi_ids); 9372 9373 lpfc_stop_hba_timers(phba); 9374 9375 phba->pport->work_port_events = 0; 9376 9377 lpfc_sli_hba_down(phba); 9378 9379 lpfc_sli_brdrestart(phba); 9380 9381 lpfc_sli_disable_intr(phba); 9382 9383 return; 9384 } 9385 9386 /** 9387 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 9388 * @phba: Pointer to HBA context object. 9389 * 9390 * This function is called in the SLI4 code path to wait for completion 9391 * of device's XRIs exchange busy. It will check the XRI exchange busy 9392 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 9393 * that, it will check the XRI exchange busy on outstanding FCP and ELS 9394 * I/Os every 30 seconds, log error message, and wait forever. Only when 9395 * all XRI exchange busy complete, the driver unload shall proceed with 9396 * invoking the function reset ioctl mailbox command to the CNA and the 9397 * the rest of the driver unload resource release. 9398 **/ 9399 static void 9400 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 9401 { 9402 int wait_time = 0; 9403 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 9404 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 9405 9406 while (!fcp_xri_cmpl || !els_xri_cmpl) { 9407 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 9408 if (!fcp_xri_cmpl) 9409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9410 "2877 FCP XRI exchange busy " 9411 "wait time: %d seconds.\n", 9412 wait_time/1000); 9413 if (!els_xri_cmpl) 9414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9415 "2878 ELS XRI exchange busy " 9416 "wait time: %d seconds.\n", 9417 wait_time/1000); 9418 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 9419 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 9420 } else { 9421 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 9422 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 9423 } 9424 fcp_xri_cmpl = 9425 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 9426 els_xri_cmpl = 9427 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 9428 } 9429 } 9430 9431 /** 9432 * lpfc_sli4_hba_unset - Unset the fcoe hba 9433 * @phba: Pointer to HBA context object. 9434 * 9435 * This function is called in the SLI4 code path to reset the HBA's FCoE 9436 * function. The caller is not required to hold any lock. This routine 9437 * issues PCI function reset mailbox command to reset the FCoE function. 9438 * At the end of the function, it calls lpfc_hba_down_post function to 9439 * free any pending commands. 9440 **/ 9441 static void 9442 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 9443 { 9444 int wait_cnt = 0; 9445 LPFC_MBOXQ_t *mboxq; 9446 struct pci_dev *pdev = phba->pcidev; 9447 9448 lpfc_stop_hba_timers(phba); 9449 phba->sli4_hba.intr_enable = 0; 9450 9451 /* 9452 * Gracefully wait out the potential current outstanding asynchronous 9453 * mailbox command. 9454 */ 9455 9456 /* First, block any pending async mailbox command from posted */ 9457 spin_lock_irq(&phba->hbalock); 9458 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9459 spin_unlock_irq(&phba->hbalock); 9460 /* Now, trying to wait it out if we can */ 9461 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 9462 msleep(10); 9463 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 9464 break; 9465 } 9466 /* Forcefully release the outstanding mailbox command if timed out */ 9467 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 9468 spin_lock_irq(&phba->hbalock); 9469 mboxq = phba->sli.mbox_active; 9470 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 9471 __lpfc_mbox_cmpl_put(phba, mboxq); 9472 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9473 phba->sli.mbox_active = NULL; 9474 spin_unlock_irq(&phba->hbalock); 9475 } 9476 9477 /* Abort all iocbs associated with the hba */ 9478 lpfc_sli_hba_iocb_abort(phba); 9479 9480 /* Wait for completion of device XRI exchange busy */ 9481 lpfc_sli4_xri_exchange_busy_wait(phba); 9482 9483 /* Disable PCI subsystem interrupt */ 9484 lpfc_sli4_disable_intr(phba); 9485 9486 /* Disable SR-IOV if enabled */ 9487 if (phba->cfg_sriov_nr_virtfn) 9488 pci_disable_sriov(pdev); 9489 9490 /* Stop kthread signal shall trigger work_done one more time */ 9491 kthread_stop(phba->worker_thread); 9492 9493 /* Reset SLI4 HBA FCoE function */ 9494 lpfc_pci_function_reset(phba); 9495 lpfc_sli4_queue_destroy(phba); 9496 9497 /* Stop the SLI4 device port */ 9498 phba->pport->work_port_events = 0; 9499 } 9500 9501 /** 9502 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 9503 * @phba: Pointer to HBA context object. 9504 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 9505 * 9506 * This function is called in the SLI4 code path to read the port's 9507 * sli4 capabilities. 9508 * 9509 * This function may be be called from any context that can block-wait 9510 * for the completion. The expectation is that this routine is called 9511 * typically from probe_one or from the online routine. 9512 **/ 9513 int 9514 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 9515 { 9516 int rc; 9517 struct lpfc_mqe *mqe; 9518 struct lpfc_pc_sli4_params *sli4_params; 9519 uint32_t mbox_tmo; 9520 9521 rc = 0; 9522 mqe = &mboxq->u.mqe; 9523 9524 /* Read the port's SLI4 Parameters port capabilities */ 9525 lpfc_pc_sli4_params(mboxq); 9526 if (!phba->sli4_hba.intr_enable) 9527 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9528 else { 9529 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 9530 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 9531 } 9532 9533 if (unlikely(rc)) 9534 return 1; 9535 9536 sli4_params = &phba->sli4_hba.pc_sli4_params; 9537 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 9538 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 9539 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 9540 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 9541 &mqe->un.sli4_params); 9542 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 9543 &mqe->un.sli4_params); 9544 sli4_params->proto_types = mqe->un.sli4_params.word3; 9545 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 9546 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 9547 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 9548 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 9549 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 9550 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 9551 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 9552 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 9553 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 9554 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 9555 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 9556 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 9557 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 9558 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 9559 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 9560 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 9561 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 9562 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 9563 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 9564 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 9565 9566 /* Make sure that sge_supp_len can be handled by the driver */ 9567 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 9568 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 9569 9570 return rc; 9571 } 9572 9573 /** 9574 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 9575 * @phba: Pointer to HBA context object. 9576 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 9577 * 9578 * This function is called in the SLI4 code path to read the port's 9579 * sli4 capabilities. 9580 * 9581 * This function may be be called from any context that can block-wait 9582 * for the completion. The expectation is that this routine is called 9583 * typically from probe_one or from the online routine. 9584 **/ 9585 int 9586 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 9587 { 9588 int rc; 9589 struct lpfc_mqe *mqe = &mboxq->u.mqe; 9590 struct lpfc_pc_sli4_params *sli4_params; 9591 uint32_t mbox_tmo; 9592 int length; 9593 struct lpfc_sli4_parameters *mbx_sli4_parameters; 9594 9595 /* 9596 * By default, the driver assumes the SLI4 port requires RPI 9597 * header postings. The SLI4_PARAM response will correct this 9598 * assumption. 9599 */ 9600 phba->sli4_hba.rpi_hdrs_in_use = 1; 9601 9602 /* Read the port's SLI4 Config Parameters */ 9603 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 9604 sizeof(struct lpfc_sli4_cfg_mhdr)); 9605 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9606 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 9607 length, LPFC_SLI4_MBX_EMBED); 9608 if (!phba->sli4_hba.intr_enable) 9609 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9610 else { 9611 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 9612 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 9613 } 9614 if (unlikely(rc)) 9615 return rc; 9616 sli4_params = &phba->sli4_hba.pc_sli4_params; 9617 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 9618 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 9619 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 9620 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 9621 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 9622 mbx_sli4_parameters); 9623 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 9624 mbx_sli4_parameters); 9625 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 9626 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 9627 else 9628 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 9629 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 9630 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 9631 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 9632 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 9633 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 9634 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 9635 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 9636 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 9637 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 9638 mbx_sli4_parameters); 9639 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 9640 mbx_sli4_parameters); 9641 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 9642 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 9643 9644 /* Make sure that sge_supp_len can be handled by the driver */ 9645 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 9646 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 9647 9648 /* 9649 * Issue IOs with CDB embedded in WQE to minimized the number 9650 * of DMAs the firmware has to do. Setting this to 1 also forces 9651 * the driver to use 128 bytes WQEs for FCP IOs. 9652 */ 9653 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 9654 phba->fcp_embed_io = 1; 9655 else 9656 phba->fcp_embed_io = 0; 9657 9658 /* 9659 * Check if the SLI port supports MDS Diagnostics 9660 */ 9661 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 9662 phba->mds_diags_support = 1; 9663 else 9664 phba->mds_diags_support = 0; 9665 return 0; 9666 } 9667 9668 /** 9669 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 9670 * @pdev: pointer to PCI device 9671 * @pid: pointer to PCI device identifier 9672 * 9673 * This routine is to be called to attach a device with SLI-3 interface spec 9674 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 9675 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 9676 * information of the device and driver to see if the driver state that it can 9677 * support this kind of device. If the match is successful, the driver core 9678 * invokes this routine. If this routine determines it can claim the HBA, it 9679 * does all the initialization that it needs to do to handle the HBA properly. 9680 * 9681 * Return code 9682 * 0 - driver can claim the device 9683 * negative value - driver can not claim the device 9684 **/ 9685 static int 9686 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 9687 { 9688 struct lpfc_hba *phba; 9689 struct lpfc_vport *vport = NULL; 9690 struct Scsi_Host *shost = NULL; 9691 int error; 9692 uint32_t cfg_mode, intr_mode; 9693 9694 /* Allocate memory for HBA structure */ 9695 phba = lpfc_hba_alloc(pdev); 9696 if (!phba) 9697 return -ENOMEM; 9698 9699 /* Perform generic PCI device enabling operation */ 9700 error = lpfc_enable_pci_dev(phba); 9701 if (error) 9702 goto out_free_phba; 9703 9704 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 9705 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 9706 if (error) 9707 goto out_disable_pci_dev; 9708 9709 /* Set up SLI-3 specific device PCI memory space */ 9710 error = lpfc_sli_pci_mem_setup(phba); 9711 if (error) { 9712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9713 "1402 Failed to set up pci memory space.\n"); 9714 goto out_disable_pci_dev; 9715 } 9716 9717 /* Set up phase-1 common device driver resources */ 9718 error = lpfc_setup_driver_resource_phase1(phba); 9719 if (error) { 9720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9721 "1403 Failed to set up driver resource.\n"); 9722 goto out_unset_pci_mem_s3; 9723 } 9724 9725 /* Set up SLI-3 specific device driver resources */ 9726 error = lpfc_sli_driver_resource_setup(phba); 9727 if (error) { 9728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9729 "1404 Failed to set up driver resource.\n"); 9730 goto out_unset_pci_mem_s3; 9731 } 9732 9733 /* Initialize and populate the iocb list per host */ 9734 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 9735 if (error) { 9736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9737 "1405 Failed to initialize iocb list.\n"); 9738 goto out_unset_driver_resource_s3; 9739 } 9740 9741 /* Set up common device driver resources */ 9742 error = lpfc_setup_driver_resource_phase2(phba); 9743 if (error) { 9744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9745 "1406 Failed to set up driver resource.\n"); 9746 goto out_free_iocb_list; 9747 } 9748 9749 /* Get the default values for Model Name and Description */ 9750 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9751 9752 /* Create SCSI host to the physical port */ 9753 error = lpfc_create_shost(phba); 9754 if (error) { 9755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9756 "1407 Failed to create scsi host.\n"); 9757 goto out_unset_driver_resource; 9758 } 9759 9760 /* Configure sysfs attributes */ 9761 vport = phba->pport; 9762 error = lpfc_alloc_sysfs_attr(vport); 9763 if (error) { 9764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9765 "1476 Failed to allocate sysfs attr\n"); 9766 goto out_destroy_shost; 9767 } 9768 9769 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9770 /* Now, trying to enable interrupt and bring up the device */ 9771 cfg_mode = phba->cfg_use_msi; 9772 while (true) { 9773 /* Put device to a known state before enabling interrupt */ 9774 lpfc_stop_port(phba); 9775 /* Configure and enable interrupt */ 9776 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 9777 if (intr_mode == LPFC_INTR_ERROR) { 9778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9779 "0431 Failed to enable interrupt.\n"); 9780 error = -ENODEV; 9781 goto out_free_sysfs_attr; 9782 } 9783 /* SLI-3 HBA setup */ 9784 if (lpfc_sli_hba_setup(phba)) { 9785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9786 "1477 Failed to set up hba\n"); 9787 error = -ENODEV; 9788 goto out_remove_device; 9789 } 9790 9791 /* Wait 50ms for the interrupts of previous mailbox commands */ 9792 msleep(50); 9793 /* Check active interrupts on message signaled interrupts */ 9794 if (intr_mode == 0 || 9795 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 9796 /* Log the current active interrupt mode */ 9797 phba->intr_mode = intr_mode; 9798 lpfc_log_intr_mode(phba, intr_mode); 9799 break; 9800 } else { 9801 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9802 "0447 Configure interrupt mode (%d) " 9803 "failed active interrupt test.\n", 9804 intr_mode); 9805 /* Disable the current interrupt mode */ 9806 lpfc_sli_disable_intr(phba); 9807 /* Try next level of interrupt mode */ 9808 cfg_mode = --intr_mode; 9809 } 9810 } 9811 9812 /* Perform post initialization setup */ 9813 lpfc_post_init_setup(phba); 9814 9815 /* Check if there are static vports to be created. */ 9816 lpfc_create_static_vport(phba); 9817 9818 return 0; 9819 9820 out_remove_device: 9821 lpfc_unset_hba(phba); 9822 out_free_sysfs_attr: 9823 lpfc_free_sysfs_attr(vport); 9824 out_destroy_shost: 9825 lpfc_destroy_shost(phba); 9826 out_unset_driver_resource: 9827 lpfc_unset_driver_resource_phase2(phba); 9828 out_free_iocb_list: 9829 lpfc_free_iocb_list(phba); 9830 out_unset_driver_resource_s3: 9831 lpfc_sli_driver_resource_unset(phba); 9832 out_unset_pci_mem_s3: 9833 lpfc_sli_pci_mem_unset(phba); 9834 out_disable_pci_dev: 9835 lpfc_disable_pci_dev(phba); 9836 if (shost) 9837 scsi_host_put(shost); 9838 out_free_phba: 9839 lpfc_hba_free(phba); 9840 return error; 9841 } 9842 9843 /** 9844 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 9845 * @pdev: pointer to PCI device 9846 * 9847 * This routine is to be called to disattach a device with SLI-3 interface 9848 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 9849 * removed from PCI bus, it performs all the necessary cleanup for the HBA 9850 * device to be removed from the PCI subsystem properly. 9851 **/ 9852 static void 9853 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 9854 { 9855 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9856 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 9857 struct lpfc_vport **vports; 9858 struct lpfc_hba *phba = vport->phba; 9859 int i; 9860 9861 spin_lock_irq(&phba->hbalock); 9862 vport->load_flag |= FC_UNLOADING; 9863 spin_unlock_irq(&phba->hbalock); 9864 9865 lpfc_free_sysfs_attr(vport); 9866 9867 /* Release all the vports against this physical port */ 9868 vports = lpfc_create_vport_work_array(phba); 9869 if (vports != NULL) 9870 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 9871 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 9872 continue; 9873 fc_vport_terminate(vports[i]->fc_vport); 9874 } 9875 lpfc_destroy_vport_work_array(phba, vports); 9876 9877 /* Remove FC host and then SCSI host with the physical port */ 9878 fc_remove_host(shost); 9879 scsi_remove_host(shost); 9880 lpfc_cleanup(vport); 9881 9882 /* 9883 * Bring down the SLI Layer. This step disable all interrupts, 9884 * clears the rings, discards all mailbox commands, and resets 9885 * the HBA. 9886 */ 9887 9888 /* HBA interrupt will be disabled after this call */ 9889 lpfc_sli_hba_down(phba); 9890 /* Stop kthread signal shall trigger work_done one more time */ 9891 kthread_stop(phba->worker_thread); 9892 /* Final cleanup of txcmplq and reset the HBA */ 9893 lpfc_sli_brdrestart(phba); 9894 9895 kfree(phba->vpi_bmask); 9896 kfree(phba->vpi_ids); 9897 9898 lpfc_stop_hba_timers(phba); 9899 spin_lock_irq(&phba->hbalock); 9900 list_del_init(&vport->listentry); 9901 spin_unlock_irq(&phba->hbalock); 9902 9903 lpfc_debugfs_terminate(vport); 9904 9905 /* Disable SR-IOV if enabled */ 9906 if (phba->cfg_sriov_nr_virtfn) 9907 pci_disable_sriov(pdev); 9908 9909 /* Disable interrupt */ 9910 lpfc_sli_disable_intr(phba); 9911 9912 scsi_host_put(shost); 9913 9914 /* 9915 * Call scsi_free before mem_free since scsi bufs are released to their 9916 * corresponding pools here. 9917 */ 9918 lpfc_scsi_free(phba); 9919 lpfc_mem_free_all(phba); 9920 9921 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 9922 phba->hbqslimp.virt, phba->hbqslimp.phys); 9923 9924 /* Free resources associated with SLI2 interface */ 9925 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9926 phba->slim2p.virt, phba->slim2p.phys); 9927 9928 /* unmap adapter SLIM and Control Registers */ 9929 iounmap(phba->ctrl_regs_memmap_p); 9930 iounmap(phba->slim_memmap_p); 9931 9932 lpfc_hba_free(phba); 9933 9934 pci_release_mem_regions(pdev); 9935 pci_disable_device(pdev); 9936 } 9937 9938 /** 9939 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 9940 * @pdev: pointer to PCI device 9941 * @msg: power management message 9942 * 9943 * This routine is to be called from the kernel's PCI subsystem to support 9944 * system Power Management (PM) to device with SLI-3 interface spec. When 9945 * PM invokes this method, it quiesces the device by stopping the driver's 9946 * worker thread for the device, turning off device's interrupt and DMA, 9947 * and bring the device offline. Note that as the driver implements the 9948 * minimum PM requirements to a power-aware driver's PM support for the 9949 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 9950 * to the suspend() method call will be treated as SUSPEND and the driver will 9951 * fully reinitialize its device during resume() method call, the driver will 9952 * set device to PCI_D3hot state in PCI config space instead of setting it 9953 * according to the @msg provided by the PM. 9954 * 9955 * Return code 9956 * 0 - driver suspended the device 9957 * Error otherwise 9958 **/ 9959 static int 9960 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 9961 { 9962 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9963 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9964 9965 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9966 "0473 PCI device Power Management suspend.\n"); 9967 9968 /* Bring down the device */ 9969 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 9970 lpfc_offline(phba); 9971 kthread_stop(phba->worker_thread); 9972 9973 /* Disable interrupt from device */ 9974 lpfc_sli_disable_intr(phba); 9975 9976 /* Save device state to PCI config space */ 9977 pci_save_state(pdev); 9978 pci_set_power_state(pdev, PCI_D3hot); 9979 9980 return 0; 9981 } 9982 9983 /** 9984 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 9985 * @pdev: pointer to PCI device 9986 * 9987 * This routine is to be called from the kernel's PCI subsystem to support 9988 * system Power Management (PM) to device with SLI-3 interface spec. When PM 9989 * invokes this method, it restores the device's PCI config space state and 9990 * fully reinitializes the device and brings it online. Note that as the 9991 * driver implements the minimum PM requirements to a power-aware driver's 9992 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 9993 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 9994 * driver will fully reinitialize its device during resume() method call, 9995 * the device will be set to PCI_D0 directly in PCI config space before 9996 * restoring the state. 9997 * 9998 * Return code 9999 * 0 - driver suspended the device 10000 * Error otherwise 10001 **/ 10002 static int 10003 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 10004 { 10005 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10006 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10007 uint32_t intr_mode; 10008 int error; 10009 10010 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10011 "0452 PCI device Power Management resume.\n"); 10012 10013 /* Restore device state from PCI config space */ 10014 pci_set_power_state(pdev, PCI_D0); 10015 pci_restore_state(pdev); 10016 10017 /* 10018 * As the new kernel behavior of pci_restore_state() API call clears 10019 * device saved_state flag, need to save the restored state again. 10020 */ 10021 pci_save_state(pdev); 10022 10023 if (pdev->is_busmaster) 10024 pci_set_master(pdev); 10025 10026 /* Startup the kernel thread for this host adapter. */ 10027 phba->worker_thread = kthread_run(lpfc_do_work, phba, 10028 "lpfc_worker_%d", phba->brd_no); 10029 if (IS_ERR(phba->worker_thread)) { 10030 error = PTR_ERR(phba->worker_thread); 10031 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10032 "0434 PM resume failed to start worker " 10033 "thread: error=x%x.\n", error); 10034 return error; 10035 } 10036 10037 /* Configure and enable interrupt */ 10038 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10039 if (intr_mode == LPFC_INTR_ERROR) { 10040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10041 "0430 PM resume Failed to enable interrupt\n"); 10042 return -EIO; 10043 } else 10044 phba->intr_mode = intr_mode; 10045 10046 /* Restart HBA and bring it online */ 10047 lpfc_sli_brdrestart(phba); 10048 lpfc_online(phba); 10049 10050 /* Log the current active interrupt mode */ 10051 lpfc_log_intr_mode(phba, phba->intr_mode); 10052 10053 return 0; 10054 } 10055 10056 /** 10057 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 10058 * @phba: pointer to lpfc hba data structure. 10059 * 10060 * This routine is called to prepare the SLI3 device for PCI slot recover. It 10061 * aborts all the outstanding SCSI I/Os to the pci device. 10062 **/ 10063 static void 10064 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 10065 { 10066 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10067 "2723 PCI channel I/O abort preparing for recovery\n"); 10068 10069 /* 10070 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10071 * and let the SCSI mid-layer to retry them to recover. 10072 */ 10073 lpfc_sli_abort_fcp_rings(phba); 10074 } 10075 10076 /** 10077 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 10078 * @phba: pointer to lpfc hba data structure. 10079 * 10080 * This routine is called to prepare the SLI3 device for PCI slot reset. It 10081 * disables the device interrupt and pci device, and aborts the internal FCP 10082 * pending I/Os. 10083 **/ 10084 static void 10085 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 10086 { 10087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10088 "2710 PCI channel disable preparing for reset\n"); 10089 10090 /* Block any management I/Os to the device */ 10091 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 10092 10093 /* Block all SCSI devices' I/Os on the host */ 10094 lpfc_scsi_dev_block(phba); 10095 10096 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 10097 lpfc_sli_flush_fcp_rings(phba); 10098 10099 /* stop all timers */ 10100 lpfc_stop_hba_timers(phba); 10101 10102 /* Disable interrupt and pci device */ 10103 lpfc_sli_disable_intr(phba); 10104 pci_disable_device(phba->pcidev); 10105 } 10106 10107 /** 10108 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 10109 * @phba: pointer to lpfc hba data structure. 10110 * 10111 * This routine is called to prepare the SLI3 device for PCI slot permanently 10112 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 10113 * pending I/Os. 10114 **/ 10115 static void 10116 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 10117 { 10118 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10119 "2711 PCI channel permanent disable for failure\n"); 10120 /* Block all SCSI devices' I/Os on the host */ 10121 lpfc_scsi_dev_block(phba); 10122 10123 /* stop all timers */ 10124 lpfc_stop_hba_timers(phba); 10125 10126 /* Clean up all driver's outstanding SCSI I/Os */ 10127 lpfc_sli_flush_fcp_rings(phba); 10128 } 10129 10130 /** 10131 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 10132 * @pdev: pointer to PCI device. 10133 * @state: the current PCI connection state. 10134 * 10135 * This routine is called from the PCI subsystem for I/O error handling to 10136 * device with SLI-3 interface spec. This function is called by the PCI 10137 * subsystem after a PCI bus error affecting this device has been detected. 10138 * When this function is invoked, it will need to stop all the I/Os and 10139 * interrupt(s) to the device. Once that is done, it will return 10140 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 10141 * as desired. 10142 * 10143 * Return codes 10144 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 10145 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 10146 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10147 **/ 10148 static pci_ers_result_t 10149 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 10150 { 10151 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10152 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10153 10154 switch (state) { 10155 case pci_channel_io_normal: 10156 /* Non-fatal error, prepare for recovery */ 10157 lpfc_sli_prep_dev_for_recover(phba); 10158 return PCI_ERS_RESULT_CAN_RECOVER; 10159 case pci_channel_io_frozen: 10160 /* Fatal error, prepare for slot reset */ 10161 lpfc_sli_prep_dev_for_reset(phba); 10162 return PCI_ERS_RESULT_NEED_RESET; 10163 case pci_channel_io_perm_failure: 10164 /* Permanent failure, prepare for device down */ 10165 lpfc_sli_prep_dev_for_perm_failure(phba); 10166 return PCI_ERS_RESULT_DISCONNECT; 10167 default: 10168 /* Unknown state, prepare and request slot reset */ 10169 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10170 "0472 Unknown PCI error state: x%x\n", state); 10171 lpfc_sli_prep_dev_for_reset(phba); 10172 return PCI_ERS_RESULT_NEED_RESET; 10173 } 10174 } 10175 10176 /** 10177 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 10178 * @pdev: pointer to PCI device. 10179 * 10180 * This routine is called from the PCI subsystem for error handling to 10181 * device with SLI-3 interface spec. This is called after PCI bus has been 10182 * reset to restart the PCI card from scratch, as if from a cold-boot. 10183 * During the PCI subsystem error recovery, after driver returns 10184 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 10185 * recovery and then call this routine before calling the .resume method 10186 * to recover the device. This function will initialize the HBA device, 10187 * enable the interrupt, but it will just put the HBA to offline state 10188 * without passing any I/O traffic. 10189 * 10190 * Return codes 10191 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 10192 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10193 */ 10194 static pci_ers_result_t 10195 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 10196 { 10197 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10198 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10199 struct lpfc_sli *psli = &phba->sli; 10200 uint32_t intr_mode; 10201 10202 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 10203 if (pci_enable_device_mem(pdev)) { 10204 printk(KERN_ERR "lpfc: Cannot re-enable " 10205 "PCI device after reset.\n"); 10206 return PCI_ERS_RESULT_DISCONNECT; 10207 } 10208 10209 pci_restore_state(pdev); 10210 10211 /* 10212 * As the new kernel behavior of pci_restore_state() API call clears 10213 * device saved_state flag, need to save the restored state again. 10214 */ 10215 pci_save_state(pdev); 10216 10217 if (pdev->is_busmaster) 10218 pci_set_master(pdev); 10219 10220 spin_lock_irq(&phba->hbalock); 10221 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 10222 spin_unlock_irq(&phba->hbalock); 10223 10224 /* Configure and enable interrupt */ 10225 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10226 if (intr_mode == LPFC_INTR_ERROR) { 10227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10228 "0427 Cannot re-enable interrupt after " 10229 "slot reset.\n"); 10230 return PCI_ERS_RESULT_DISCONNECT; 10231 } else 10232 phba->intr_mode = intr_mode; 10233 10234 /* Take device offline, it will perform cleanup */ 10235 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10236 lpfc_offline(phba); 10237 lpfc_sli_brdrestart(phba); 10238 10239 /* Log the current active interrupt mode */ 10240 lpfc_log_intr_mode(phba, phba->intr_mode); 10241 10242 return PCI_ERS_RESULT_RECOVERED; 10243 } 10244 10245 /** 10246 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 10247 * @pdev: pointer to PCI device 10248 * 10249 * This routine is called from the PCI subsystem for error handling to device 10250 * with SLI-3 interface spec. It is called when kernel error recovery tells 10251 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 10252 * error recovery. After this call, traffic can start to flow from this device 10253 * again. 10254 */ 10255 static void 10256 lpfc_io_resume_s3(struct pci_dev *pdev) 10257 { 10258 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10259 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10260 10261 /* Bring device online, it will be no-op for non-fatal error resume */ 10262 lpfc_online(phba); 10263 10264 /* Clean up Advanced Error Reporting (AER) if needed */ 10265 if (phba->hba_flag & HBA_AER_ENABLED) 10266 pci_cleanup_aer_uncorrect_error_status(pdev); 10267 } 10268 10269 /** 10270 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 10271 * @phba: pointer to lpfc hba data structure. 10272 * 10273 * returns the number of ELS/CT IOCBs to reserve 10274 **/ 10275 int 10276 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 10277 { 10278 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 10279 10280 if (phba->sli_rev == LPFC_SLI_REV4) { 10281 if (max_xri <= 100) 10282 return 10; 10283 else if (max_xri <= 256) 10284 return 25; 10285 else if (max_xri <= 512) 10286 return 50; 10287 else if (max_xri <= 1024) 10288 return 100; 10289 else if (max_xri <= 1536) 10290 return 150; 10291 else if (max_xri <= 2048) 10292 return 200; 10293 else 10294 return 250; 10295 } else 10296 return 0; 10297 } 10298 10299 /** 10300 * lpfc_write_firmware - attempt to write a firmware image to the port 10301 * @fw: pointer to firmware image returned from request_firmware. 10302 * @phba: pointer to lpfc hba data structure. 10303 * 10304 **/ 10305 static void 10306 lpfc_write_firmware(const struct firmware *fw, void *context) 10307 { 10308 struct lpfc_hba *phba = (struct lpfc_hba *)context; 10309 char fwrev[FW_REV_STR_SIZE]; 10310 struct lpfc_grp_hdr *image; 10311 struct list_head dma_buffer_list; 10312 int i, rc = 0; 10313 struct lpfc_dmabuf *dmabuf, *next; 10314 uint32_t offset = 0, temp_offset = 0; 10315 uint32_t magic_number, ftype, fid, fsize; 10316 10317 /* It can be null in no-wait mode, sanity check */ 10318 if (!fw) { 10319 rc = -ENXIO; 10320 goto out; 10321 } 10322 image = (struct lpfc_grp_hdr *)fw->data; 10323 10324 magic_number = be32_to_cpu(image->magic_number); 10325 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 10326 fid = bf_get_be32(lpfc_grp_hdr_id, image), 10327 fsize = be32_to_cpu(image->size); 10328 10329 INIT_LIST_HEAD(&dma_buffer_list); 10330 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 && 10331 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) || 10332 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) { 10333 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10334 "3022 Invalid FW image found. " 10335 "Magic:%x Type:%x ID:%x Size %d %zd\n", 10336 magic_number, ftype, fid, fsize, fw->size); 10337 rc = -EINVAL; 10338 goto release_out; 10339 } 10340 lpfc_decode_firmware_rev(phba, fwrev, 1); 10341 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 10342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10343 "3023 Updating Firmware, Current Version:%s " 10344 "New Version:%s\n", 10345 fwrev, image->revision); 10346 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 10347 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 10348 GFP_KERNEL); 10349 if (!dmabuf) { 10350 rc = -ENOMEM; 10351 goto release_out; 10352 } 10353 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 10354 SLI4_PAGE_SIZE, 10355 &dmabuf->phys, 10356 GFP_KERNEL); 10357 if (!dmabuf->virt) { 10358 kfree(dmabuf); 10359 rc = -ENOMEM; 10360 goto release_out; 10361 } 10362 list_add_tail(&dmabuf->list, &dma_buffer_list); 10363 } 10364 while (offset < fw->size) { 10365 temp_offset = offset; 10366 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 10367 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 10368 memcpy(dmabuf->virt, 10369 fw->data + temp_offset, 10370 fw->size - temp_offset); 10371 temp_offset = fw->size; 10372 break; 10373 } 10374 memcpy(dmabuf->virt, fw->data + temp_offset, 10375 SLI4_PAGE_SIZE); 10376 temp_offset += SLI4_PAGE_SIZE; 10377 } 10378 rc = lpfc_wr_object(phba, &dma_buffer_list, 10379 (fw->size - offset), &offset); 10380 if (rc) 10381 goto release_out; 10382 } 10383 rc = offset; 10384 } 10385 10386 release_out: 10387 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 10388 list_del(&dmabuf->list); 10389 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 10390 dmabuf->virt, dmabuf->phys); 10391 kfree(dmabuf); 10392 } 10393 release_firmware(fw); 10394 out: 10395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10396 "3024 Firmware update done: %d.\n", rc); 10397 return; 10398 } 10399 10400 /** 10401 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 10402 * @phba: pointer to lpfc hba data structure. 10403 * 10404 * This routine is called to perform Linux generic firmware upgrade on device 10405 * that supports such feature. 10406 **/ 10407 int 10408 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 10409 { 10410 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 10411 int ret; 10412 const struct firmware *fw; 10413 10414 /* Only supported on SLI4 interface type 2 for now */ 10415 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 10416 LPFC_SLI_INTF_IF_TYPE_2) 10417 return -EPERM; 10418 10419 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 10420 10421 if (fw_upgrade == INT_FW_UPGRADE) { 10422 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 10423 file_name, &phba->pcidev->dev, 10424 GFP_KERNEL, (void *)phba, 10425 lpfc_write_firmware); 10426 } else if (fw_upgrade == RUN_FW_UPGRADE) { 10427 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 10428 if (!ret) 10429 lpfc_write_firmware(fw, (void *)phba); 10430 } else { 10431 ret = -EINVAL; 10432 } 10433 10434 return ret; 10435 } 10436 10437 /** 10438 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 10439 * @pdev: pointer to PCI device 10440 * @pid: pointer to PCI device identifier 10441 * 10442 * This routine is called from the kernel's PCI subsystem to device with 10443 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 10444 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 10445 * information of the device and driver to see if the driver state that it 10446 * can support this kind of device. If the match is successful, the driver 10447 * core invokes this routine. If this routine determines it can claim the HBA, 10448 * it does all the initialization that it needs to do to handle the HBA 10449 * properly. 10450 * 10451 * Return code 10452 * 0 - driver can claim the device 10453 * negative value - driver can not claim the device 10454 **/ 10455 static int 10456 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 10457 { 10458 struct lpfc_hba *phba; 10459 struct lpfc_vport *vport = NULL; 10460 struct Scsi_Host *shost = NULL; 10461 int error; 10462 uint32_t cfg_mode, intr_mode; 10463 int adjusted_fcp_io_channel; 10464 10465 /* Allocate memory for HBA structure */ 10466 phba = lpfc_hba_alloc(pdev); 10467 if (!phba) 10468 return -ENOMEM; 10469 10470 /* Perform generic PCI device enabling operation */ 10471 error = lpfc_enable_pci_dev(phba); 10472 if (error) 10473 goto out_free_phba; 10474 10475 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 10476 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 10477 if (error) 10478 goto out_disable_pci_dev; 10479 10480 /* Set up SLI-4 specific device PCI memory space */ 10481 error = lpfc_sli4_pci_mem_setup(phba); 10482 if (error) { 10483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10484 "1410 Failed to set up pci memory space.\n"); 10485 goto out_disable_pci_dev; 10486 } 10487 10488 /* Set up phase-1 common device driver resources */ 10489 error = lpfc_setup_driver_resource_phase1(phba); 10490 if (error) { 10491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10492 "1411 Failed to set up driver resource.\n"); 10493 goto out_unset_pci_mem_s4; 10494 } 10495 10496 /* Set up SLI-4 Specific device driver resources */ 10497 error = lpfc_sli4_driver_resource_setup(phba); 10498 if (error) { 10499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10500 "1412 Failed to set up driver resource.\n"); 10501 goto out_unset_pci_mem_s4; 10502 } 10503 10504 /* Initialize and populate the iocb list per host */ 10505 10506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10507 "2821 initialize iocb list %d.\n", 10508 phba->cfg_iocb_cnt*1024); 10509 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 10510 10511 if (error) { 10512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10513 "1413 Failed to initialize iocb list.\n"); 10514 goto out_unset_driver_resource_s4; 10515 } 10516 10517 INIT_LIST_HEAD(&phba->active_rrq_list); 10518 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 10519 10520 /* Set up common device driver resources */ 10521 error = lpfc_setup_driver_resource_phase2(phba); 10522 if (error) { 10523 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10524 "1414 Failed to set up driver resource.\n"); 10525 goto out_free_iocb_list; 10526 } 10527 10528 /* Get the default values for Model Name and Description */ 10529 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10530 10531 /* Create SCSI host to the physical port */ 10532 error = lpfc_create_shost(phba); 10533 if (error) { 10534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10535 "1415 Failed to create scsi host.\n"); 10536 goto out_unset_driver_resource; 10537 } 10538 10539 /* Configure sysfs attributes */ 10540 vport = phba->pport; 10541 error = lpfc_alloc_sysfs_attr(vport); 10542 if (error) { 10543 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10544 "1416 Failed to allocate sysfs attr\n"); 10545 goto out_destroy_shost; 10546 } 10547 10548 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10549 /* Now, trying to enable interrupt and bring up the device */ 10550 cfg_mode = phba->cfg_use_msi; 10551 10552 /* Put device to a known state before enabling interrupt */ 10553 lpfc_stop_port(phba); 10554 /* Configure and enable interrupt */ 10555 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 10556 if (intr_mode == LPFC_INTR_ERROR) { 10557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10558 "0426 Failed to enable interrupt.\n"); 10559 error = -ENODEV; 10560 goto out_free_sysfs_attr; 10561 } 10562 /* Default to single EQ for non-MSI-X */ 10563 if (phba->intr_type != MSIX) 10564 adjusted_fcp_io_channel = 1; 10565 else 10566 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 10567 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 10568 /* Set up SLI-4 HBA */ 10569 if (lpfc_sli4_hba_setup(phba)) { 10570 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10571 "1421 Failed to set up hba\n"); 10572 error = -ENODEV; 10573 goto out_disable_intr; 10574 } 10575 10576 /* Log the current active interrupt mode */ 10577 phba->intr_mode = intr_mode; 10578 lpfc_log_intr_mode(phba, intr_mode); 10579 10580 /* Perform post initialization setup */ 10581 lpfc_post_init_setup(phba); 10582 10583 /* check for firmware upgrade or downgrade */ 10584 if (phba->cfg_request_firmware_upgrade) 10585 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 10586 10587 /* Check if there are static vports to be created. */ 10588 lpfc_create_static_vport(phba); 10589 return 0; 10590 10591 out_disable_intr: 10592 lpfc_sli4_disable_intr(phba); 10593 out_free_sysfs_attr: 10594 lpfc_free_sysfs_attr(vport); 10595 out_destroy_shost: 10596 lpfc_destroy_shost(phba); 10597 out_unset_driver_resource: 10598 lpfc_unset_driver_resource_phase2(phba); 10599 out_free_iocb_list: 10600 lpfc_free_iocb_list(phba); 10601 out_unset_driver_resource_s4: 10602 lpfc_sli4_driver_resource_unset(phba); 10603 out_unset_pci_mem_s4: 10604 lpfc_sli4_pci_mem_unset(phba); 10605 out_disable_pci_dev: 10606 lpfc_disable_pci_dev(phba); 10607 if (shost) 10608 scsi_host_put(shost); 10609 out_free_phba: 10610 lpfc_hba_free(phba); 10611 return error; 10612 } 10613 10614 /** 10615 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 10616 * @pdev: pointer to PCI device 10617 * 10618 * This routine is called from the kernel's PCI subsystem to device with 10619 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 10620 * removed from PCI bus, it performs all the necessary cleanup for the HBA 10621 * device to be removed from the PCI subsystem properly. 10622 **/ 10623 static void 10624 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 10625 { 10626 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10627 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 10628 struct lpfc_vport **vports; 10629 struct lpfc_hba *phba = vport->phba; 10630 int i; 10631 10632 /* Mark the device unloading flag */ 10633 spin_lock_irq(&phba->hbalock); 10634 vport->load_flag |= FC_UNLOADING; 10635 spin_unlock_irq(&phba->hbalock); 10636 10637 /* Free the HBA sysfs attributes */ 10638 lpfc_free_sysfs_attr(vport); 10639 10640 /* Release all the vports against this physical port */ 10641 vports = lpfc_create_vport_work_array(phba); 10642 if (vports != NULL) 10643 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10644 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 10645 continue; 10646 fc_vport_terminate(vports[i]->fc_vport); 10647 } 10648 lpfc_destroy_vport_work_array(phba, vports); 10649 10650 /* Remove FC host and then SCSI host with the physical port */ 10651 fc_remove_host(shost); 10652 scsi_remove_host(shost); 10653 10654 /* Perform cleanup on the physical port */ 10655 lpfc_cleanup(vport); 10656 10657 /* 10658 * Bring down the SLI Layer. This step disables all interrupts, 10659 * clears the rings, discards all mailbox commands, and resets 10660 * the HBA FCoE function. 10661 */ 10662 lpfc_debugfs_terminate(vport); 10663 lpfc_sli4_hba_unset(phba); 10664 10665 spin_lock_irq(&phba->hbalock); 10666 list_del_init(&vport->listentry); 10667 spin_unlock_irq(&phba->hbalock); 10668 10669 /* Perform scsi free before driver resource_unset since scsi 10670 * buffers are released to their corresponding pools here. 10671 */ 10672 lpfc_scsi_free(phba); 10673 10674 lpfc_sli4_driver_resource_unset(phba); 10675 10676 /* Unmap adapter Control and Doorbell registers */ 10677 lpfc_sli4_pci_mem_unset(phba); 10678 10679 /* Release PCI resources and disable device's PCI function */ 10680 scsi_host_put(shost); 10681 lpfc_disable_pci_dev(phba); 10682 10683 /* Finally, free the driver's device data structure */ 10684 lpfc_hba_free(phba); 10685 10686 return; 10687 } 10688 10689 /** 10690 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 10691 * @pdev: pointer to PCI device 10692 * @msg: power management message 10693 * 10694 * This routine is called from the kernel's PCI subsystem to support system 10695 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 10696 * this method, it quiesces the device by stopping the driver's worker 10697 * thread for the device, turning off device's interrupt and DMA, and bring 10698 * the device offline. Note that as the driver implements the minimum PM 10699 * requirements to a power-aware driver's PM support for suspend/resume -- all 10700 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 10701 * method call will be treated as SUSPEND and the driver will fully 10702 * reinitialize its device during resume() method call, the driver will set 10703 * device to PCI_D3hot state in PCI config space instead of setting it 10704 * according to the @msg provided by the PM. 10705 * 10706 * Return code 10707 * 0 - driver suspended the device 10708 * Error otherwise 10709 **/ 10710 static int 10711 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 10712 { 10713 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10714 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10715 10716 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10717 "2843 PCI device Power Management suspend.\n"); 10718 10719 /* Bring down the device */ 10720 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10721 lpfc_offline(phba); 10722 kthread_stop(phba->worker_thread); 10723 10724 /* Disable interrupt from device */ 10725 lpfc_sli4_disable_intr(phba); 10726 lpfc_sli4_queue_destroy(phba); 10727 10728 /* Save device state to PCI config space */ 10729 pci_save_state(pdev); 10730 pci_set_power_state(pdev, PCI_D3hot); 10731 10732 return 0; 10733 } 10734 10735 /** 10736 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 10737 * @pdev: pointer to PCI device 10738 * 10739 * This routine is called from the kernel's PCI subsystem to support system 10740 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 10741 * this method, it restores the device's PCI config space state and fully 10742 * reinitializes the device and brings it online. Note that as the driver 10743 * implements the minimum PM requirements to a power-aware driver's PM for 10744 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 10745 * to the suspend() method call will be treated as SUSPEND and the driver 10746 * will fully reinitialize its device during resume() method call, the device 10747 * will be set to PCI_D0 directly in PCI config space before restoring the 10748 * state. 10749 * 10750 * Return code 10751 * 0 - driver suspended the device 10752 * Error otherwise 10753 **/ 10754 static int 10755 lpfc_pci_resume_one_s4(struct pci_dev *pdev) 10756 { 10757 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10758 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10759 uint32_t intr_mode; 10760 int error; 10761 10762 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10763 "0292 PCI device Power Management resume.\n"); 10764 10765 /* Restore device state from PCI config space */ 10766 pci_set_power_state(pdev, PCI_D0); 10767 pci_restore_state(pdev); 10768 10769 /* 10770 * As the new kernel behavior of pci_restore_state() API call clears 10771 * device saved_state flag, need to save the restored state again. 10772 */ 10773 pci_save_state(pdev); 10774 10775 if (pdev->is_busmaster) 10776 pci_set_master(pdev); 10777 10778 /* Startup the kernel thread for this host adapter. */ 10779 phba->worker_thread = kthread_run(lpfc_do_work, phba, 10780 "lpfc_worker_%d", phba->brd_no); 10781 if (IS_ERR(phba->worker_thread)) { 10782 error = PTR_ERR(phba->worker_thread); 10783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10784 "0293 PM resume failed to start worker " 10785 "thread: error=x%x.\n", error); 10786 return error; 10787 } 10788 10789 /* Configure and enable interrupt */ 10790 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 10791 if (intr_mode == LPFC_INTR_ERROR) { 10792 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10793 "0294 PM resume Failed to enable interrupt\n"); 10794 return -EIO; 10795 } else 10796 phba->intr_mode = intr_mode; 10797 10798 /* Restart HBA and bring it online */ 10799 lpfc_sli_brdrestart(phba); 10800 lpfc_online(phba); 10801 10802 /* Log the current active interrupt mode */ 10803 lpfc_log_intr_mode(phba, phba->intr_mode); 10804 10805 return 0; 10806 } 10807 10808 /** 10809 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 10810 * @phba: pointer to lpfc hba data structure. 10811 * 10812 * This routine is called to prepare the SLI4 device for PCI slot recover. It 10813 * aborts all the outstanding SCSI I/Os to the pci device. 10814 **/ 10815 static void 10816 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 10817 { 10818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10819 "2828 PCI channel I/O abort preparing for recovery\n"); 10820 /* 10821 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10822 * and let the SCSI mid-layer to retry them to recover. 10823 */ 10824 lpfc_sli_abort_fcp_rings(phba); 10825 } 10826 10827 /** 10828 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 10829 * @phba: pointer to lpfc hba data structure. 10830 * 10831 * This routine is called to prepare the SLI4 device for PCI slot reset. It 10832 * disables the device interrupt and pci device, and aborts the internal FCP 10833 * pending I/Os. 10834 **/ 10835 static void 10836 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 10837 { 10838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10839 "2826 PCI channel disable preparing for reset\n"); 10840 10841 /* Block any management I/Os to the device */ 10842 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 10843 10844 /* Block all SCSI devices' I/Os on the host */ 10845 lpfc_scsi_dev_block(phba); 10846 10847 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 10848 lpfc_sli_flush_fcp_rings(phba); 10849 10850 /* stop all timers */ 10851 lpfc_stop_hba_timers(phba); 10852 10853 /* Disable interrupt and pci device */ 10854 lpfc_sli4_disable_intr(phba); 10855 lpfc_sli4_queue_destroy(phba); 10856 pci_disable_device(phba->pcidev); 10857 } 10858 10859 /** 10860 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 10861 * @phba: pointer to lpfc hba data structure. 10862 * 10863 * This routine is called to prepare the SLI4 device for PCI slot permanently 10864 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 10865 * pending I/Os. 10866 **/ 10867 static void 10868 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 10869 { 10870 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10871 "2827 PCI channel permanent disable for failure\n"); 10872 10873 /* Block all SCSI devices' I/Os on the host */ 10874 lpfc_scsi_dev_block(phba); 10875 10876 /* stop all timers */ 10877 lpfc_stop_hba_timers(phba); 10878 10879 /* Clean up all driver's outstanding SCSI I/Os */ 10880 lpfc_sli_flush_fcp_rings(phba); 10881 } 10882 10883 /** 10884 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 10885 * @pdev: pointer to PCI device. 10886 * @state: the current PCI connection state. 10887 * 10888 * This routine is called from the PCI subsystem for error handling to device 10889 * with SLI-4 interface spec. This function is called by the PCI subsystem 10890 * after a PCI bus error affecting this device has been detected. When this 10891 * function is invoked, it will need to stop all the I/Os and interrupt(s) 10892 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 10893 * for the PCI subsystem to perform proper recovery as desired. 10894 * 10895 * Return codes 10896 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 10897 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10898 **/ 10899 static pci_ers_result_t 10900 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 10901 { 10902 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10903 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10904 10905 switch (state) { 10906 case pci_channel_io_normal: 10907 /* Non-fatal error, prepare for recovery */ 10908 lpfc_sli4_prep_dev_for_recover(phba); 10909 return PCI_ERS_RESULT_CAN_RECOVER; 10910 case pci_channel_io_frozen: 10911 /* Fatal error, prepare for slot reset */ 10912 lpfc_sli4_prep_dev_for_reset(phba); 10913 return PCI_ERS_RESULT_NEED_RESET; 10914 case pci_channel_io_perm_failure: 10915 /* Permanent failure, prepare for device down */ 10916 lpfc_sli4_prep_dev_for_perm_failure(phba); 10917 return PCI_ERS_RESULT_DISCONNECT; 10918 default: 10919 /* Unknown state, prepare and request slot reset */ 10920 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10921 "2825 Unknown PCI error state: x%x\n", state); 10922 lpfc_sli4_prep_dev_for_reset(phba); 10923 return PCI_ERS_RESULT_NEED_RESET; 10924 } 10925 } 10926 10927 /** 10928 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 10929 * @pdev: pointer to PCI device. 10930 * 10931 * This routine is called from the PCI subsystem for error handling to device 10932 * with SLI-4 interface spec. It is called after PCI bus has been reset to 10933 * restart the PCI card from scratch, as if from a cold-boot. During the 10934 * PCI subsystem error recovery, after the driver returns 10935 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 10936 * recovery and then call this routine before calling the .resume method to 10937 * recover the device. This function will initialize the HBA device, enable 10938 * the interrupt, but it will just put the HBA to offline state without 10939 * passing any I/O traffic. 10940 * 10941 * Return codes 10942 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 10943 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10944 */ 10945 static pci_ers_result_t 10946 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 10947 { 10948 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10949 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10950 struct lpfc_sli *psli = &phba->sli; 10951 uint32_t intr_mode; 10952 10953 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 10954 if (pci_enable_device_mem(pdev)) { 10955 printk(KERN_ERR "lpfc: Cannot re-enable " 10956 "PCI device after reset.\n"); 10957 return PCI_ERS_RESULT_DISCONNECT; 10958 } 10959 10960 pci_restore_state(pdev); 10961 10962 /* 10963 * As the new kernel behavior of pci_restore_state() API call clears 10964 * device saved_state flag, need to save the restored state again. 10965 */ 10966 pci_save_state(pdev); 10967 10968 if (pdev->is_busmaster) 10969 pci_set_master(pdev); 10970 10971 spin_lock_irq(&phba->hbalock); 10972 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 10973 spin_unlock_irq(&phba->hbalock); 10974 10975 /* Configure and enable interrupt */ 10976 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 10977 if (intr_mode == LPFC_INTR_ERROR) { 10978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10979 "2824 Cannot re-enable interrupt after " 10980 "slot reset.\n"); 10981 return PCI_ERS_RESULT_DISCONNECT; 10982 } else 10983 phba->intr_mode = intr_mode; 10984 10985 /* Log the current active interrupt mode */ 10986 lpfc_log_intr_mode(phba, phba->intr_mode); 10987 10988 return PCI_ERS_RESULT_RECOVERED; 10989 } 10990 10991 /** 10992 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 10993 * @pdev: pointer to PCI device 10994 * 10995 * This routine is called from the PCI subsystem for error handling to device 10996 * with SLI-4 interface spec. It is called when kernel error recovery tells 10997 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 10998 * error recovery. After this call, traffic can start to flow from this device 10999 * again. 11000 **/ 11001 static void 11002 lpfc_io_resume_s4(struct pci_dev *pdev) 11003 { 11004 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11005 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11006 11007 /* 11008 * In case of slot reset, as function reset is performed through 11009 * mailbox command which needs DMA to be enabled, this operation 11010 * has to be moved to the io resume phase. Taking device offline 11011 * will perform the necessary cleanup. 11012 */ 11013 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 11014 /* Perform device reset */ 11015 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 11016 lpfc_offline(phba); 11017 lpfc_sli_brdrestart(phba); 11018 /* Bring the device back online */ 11019 lpfc_online(phba); 11020 } 11021 11022 /* Clean up Advanced Error Reporting (AER) if needed */ 11023 if (phba->hba_flag & HBA_AER_ENABLED) 11024 pci_cleanup_aer_uncorrect_error_status(pdev); 11025 } 11026 11027 /** 11028 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 11029 * @pdev: pointer to PCI device 11030 * @pid: pointer to PCI device identifier 11031 * 11032 * This routine is to be registered to the kernel's PCI subsystem. When an 11033 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 11034 * at PCI device-specific information of the device and driver to see if the 11035 * driver state that it can support this kind of device. If the match is 11036 * successful, the driver core invokes this routine. This routine dispatches 11037 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 11038 * do all the initialization that it needs to do to handle the HBA device 11039 * properly. 11040 * 11041 * Return code 11042 * 0 - driver can claim the device 11043 * negative value - driver can not claim the device 11044 **/ 11045 static int 11046 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 11047 { 11048 int rc; 11049 struct lpfc_sli_intf intf; 11050 11051 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 11052 return -ENODEV; 11053 11054 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 11055 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 11056 rc = lpfc_pci_probe_one_s4(pdev, pid); 11057 else 11058 rc = lpfc_pci_probe_one_s3(pdev, pid); 11059 11060 return rc; 11061 } 11062 11063 /** 11064 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 11065 * @pdev: pointer to PCI device 11066 * 11067 * This routine is to be registered to the kernel's PCI subsystem. When an 11068 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 11069 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 11070 * remove routine, which will perform all the necessary cleanup for the 11071 * device to be removed from the PCI subsystem properly. 11072 **/ 11073 static void 11074 lpfc_pci_remove_one(struct pci_dev *pdev) 11075 { 11076 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11077 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11078 11079 switch (phba->pci_dev_grp) { 11080 case LPFC_PCI_DEV_LP: 11081 lpfc_pci_remove_one_s3(pdev); 11082 break; 11083 case LPFC_PCI_DEV_OC: 11084 lpfc_pci_remove_one_s4(pdev); 11085 break; 11086 default: 11087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11088 "1424 Invalid PCI device group: 0x%x\n", 11089 phba->pci_dev_grp); 11090 break; 11091 } 11092 return; 11093 } 11094 11095 /** 11096 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 11097 * @pdev: pointer to PCI device 11098 * @msg: power management message 11099 * 11100 * This routine is to be registered to the kernel's PCI subsystem to support 11101 * system Power Management (PM). When PM invokes this method, it dispatches 11102 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 11103 * suspend the device. 11104 * 11105 * Return code 11106 * 0 - driver suspended the device 11107 * Error otherwise 11108 **/ 11109 static int 11110 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 11111 { 11112 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11113 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11114 int rc = -ENODEV; 11115 11116 switch (phba->pci_dev_grp) { 11117 case LPFC_PCI_DEV_LP: 11118 rc = lpfc_pci_suspend_one_s3(pdev, msg); 11119 break; 11120 case LPFC_PCI_DEV_OC: 11121 rc = lpfc_pci_suspend_one_s4(pdev, msg); 11122 break; 11123 default: 11124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11125 "1425 Invalid PCI device group: 0x%x\n", 11126 phba->pci_dev_grp); 11127 break; 11128 } 11129 return rc; 11130 } 11131 11132 /** 11133 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 11134 * @pdev: pointer to PCI device 11135 * 11136 * This routine is to be registered to the kernel's PCI subsystem to support 11137 * system Power Management (PM). When PM invokes this method, it dispatches 11138 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 11139 * resume the device. 11140 * 11141 * Return code 11142 * 0 - driver suspended the device 11143 * Error otherwise 11144 **/ 11145 static int 11146 lpfc_pci_resume_one(struct pci_dev *pdev) 11147 { 11148 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11149 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11150 int rc = -ENODEV; 11151 11152 switch (phba->pci_dev_grp) { 11153 case LPFC_PCI_DEV_LP: 11154 rc = lpfc_pci_resume_one_s3(pdev); 11155 break; 11156 case LPFC_PCI_DEV_OC: 11157 rc = lpfc_pci_resume_one_s4(pdev); 11158 break; 11159 default: 11160 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11161 "1426 Invalid PCI device group: 0x%x\n", 11162 phba->pci_dev_grp); 11163 break; 11164 } 11165 return rc; 11166 } 11167 11168 /** 11169 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 11170 * @pdev: pointer to PCI device. 11171 * @state: the current PCI connection state. 11172 * 11173 * This routine is registered to the PCI subsystem for error handling. This 11174 * function is called by the PCI subsystem after a PCI bus error affecting 11175 * this device has been detected. When this routine is invoked, it dispatches 11176 * the action to the proper SLI-3 or SLI-4 device error detected handling 11177 * routine, which will perform the proper error detected operation. 11178 * 11179 * Return codes 11180 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 11181 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11182 **/ 11183 static pci_ers_result_t 11184 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 11185 { 11186 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11187 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11188 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 11189 11190 switch (phba->pci_dev_grp) { 11191 case LPFC_PCI_DEV_LP: 11192 rc = lpfc_io_error_detected_s3(pdev, state); 11193 break; 11194 case LPFC_PCI_DEV_OC: 11195 rc = lpfc_io_error_detected_s4(pdev, state); 11196 break; 11197 default: 11198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11199 "1427 Invalid PCI device group: 0x%x\n", 11200 phba->pci_dev_grp); 11201 break; 11202 } 11203 return rc; 11204 } 11205 11206 /** 11207 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 11208 * @pdev: pointer to PCI device. 11209 * 11210 * This routine is registered to the PCI subsystem for error handling. This 11211 * function is called after PCI bus has been reset to restart the PCI card 11212 * from scratch, as if from a cold-boot. When this routine is invoked, it 11213 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 11214 * routine, which will perform the proper device reset. 11215 * 11216 * Return codes 11217 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11218 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11219 **/ 11220 static pci_ers_result_t 11221 lpfc_io_slot_reset(struct pci_dev *pdev) 11222 { 11223 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11224 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11225 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 11226 11227 switch (phba->pci_dev_grp) { 11228 case LPFC_PCI_DEV_LP: 11229 rc = lpfc_io_slot_reset_s3(pdev); 11230 break; 11231 case LPFC_PCI_DEV_OC: 11232 rc = lpfc_io_slot_reset_s4(pdev); 11233 break; 11234 default: 11235 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11236 "1428 Invalid PCI device group: 0x%x\n", 11237 phba->pci_dev_grp); 11238 break; 11239 } 11240 return rc; 11241 } 11242 11243 /** 11244 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 11245 * @pdev: pointer to PCI device 11246 * 11247 * This routine is registered to the PCI subsystem for error handling. It 11248 * is called when kernel error recovery tells the lpfc driver that it is 11249 * OK to resume normal PCI operation after PCI bus error recovery. When 11250 * this routine is invoked, it dispatches the action to the proper SLI-3 11251 * or SLI-4 device io_resume routine, which will resume the device operation. 11252 **/ 11253 static void 11254 lpfc_io_resume(struct pci_dev *pdev) 11255 { 11256 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11257 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11258 11259 switch (phba->pci_dev_grp) { 11260 case LPFC_PCI_DEV_LP: 11261 lpfc_io_resume_s3(pdev); 11262 break; 11263 case LPFC_PCI_DEV_OC: 11264 lpfc_io_resume_s4(pdev); 11265 break; 11266 default: 11267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11268 "1429 Invalid PCI device group: 0x%x\n", 11269 phba->pci_dev_grp); 11270 break; 11271 } 11272 return; 11273 } 11274 11275 /** 11276 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 11277 * @phba: pointer to lpfc hba data structure. 11278 * 11279 * This routine checks to see if OAS is supported for this adapter. If 11280 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 11281 * the enable oas flag is cleared and the pool created for OAS device data 11282 * is destroyed. 11283 * 11284 **/ 11285 void 11286 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 11287 { 11288 11289 if (!phba->cfg_EnableXLane) 11290 return; 11291 11292 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 11293 phba->cfg_fof = 1; 11294 } else { 11295 phba->cfg_fof = 0; 11296 if (phba->device_data_mem_pool) 11297 mempool_destroy(phba->device_data_mem_pool); 11298 phba->device_data_mem_pool = NULL; 11299 } 11300 11301 return; 11302 } 11303 11304 /** 11305 * lpfc_fof_queue_setup - Set up all the fof queues 11306 * @phba: pointer to lpfc hba data structure. 11307 * 11308 * This routine is invoked to set up all the fof queues for the FC HBA 11309 * operation. 11310 * 11311 * Return codes 11312 * 0 - successful 11313 * -ENOMEM - No available memory 11314 **/ 11315 int 11316 lpfc_fof_queue_setup(struct lpfc_hba *phba) 11317 { 11318 struct lpfc_sli *psli = &phba->sli; 11319 int rc; 11320 11321 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); 11322 if (rc) 11323 return -ENOMEM; 11324 11325 if (phba->cfg_fof) { 11326 11327 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, 11328 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); 11329 if (rc) 11330 goto out_oas_cq; 11331 11332 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, 11333 phba->sli4_hba.oas_cq, LPFC_FCP); 11334 if (rc) 11335 goto out_oas_wq; 11336 11337 phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING]; 11338 phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING]; 11339 } 11340 11341 return 0; 11342 11343 out_oas_wq: 11344 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); 11345 out_oas_cq: 11346 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); 11347 return rc; 11348 11349 } 11350 11351 /** 11352 * lpfc_fof_queue_create - Create all the fof queues 11353 * @phba: pointer to lpfc hba data structure. 11354 * 11355 * This routine is invoked to allocate all the fof queues for the FC HBA 11356 * operation. For each SLI4 queue type, the parameters such as queue entry 11357 * count (queue depth) shall be taken from the module parameter. For now, 11358 * we just use some constant number as place holder. 11359 * 11360 * Return codes 11361 * 0 - successful 11362 * -ENOMEM - No availble memory 11363 * -EIO - The mailbox failed to complete successfully. 11364 **/ 11365 int 11366 lpfc_fof_queue_create(struct lpfc_hba *phba) 11367 { 11368 struct lpfc_queue *qdesc; 11369 11370 /* Create FOF EQ */ 11371 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 11372 phba->sli4_hba.eq_ecount); 11373 if (!qdesc) 11374 goto out_error; 11375 11376 phba->sli4_hba.fof_eq = qdesc; 11377 11378 if (phba->cfg_fof) { 11379 11380 /* Create OAS CQ */ 11381 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 11382 phba->sli4_hba.cq_ecount); 11383 if (!qdesc) 11384 goto out_error; 11385 11386 phba->sli4_hba.oas_cq = qdesc; 11387 11388 /* Create OAS WQ */ 11389 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 11390 phba->sli4_hba.wq_ecount); 11391 if (!qdesc) 11392 goto out_error; 11393 11394 phba->sli4_hba.oas_wq = qdesc; 11395 11396 } 11397 return 0; 11398 11399 out_error: 11400 lpfc_fof_queue_destroy(phba); 11401 return -ENOMEM; 11402 } 11403 11404 /** 11405 * lpfc_fof_queue_destroy - Destroy all the fof queues 11406 * @phba: pointer to lpfc hba data structure. 11407 * 11408 * This routine is invoked to release all the SLI4 queues with the FC HBA 11409 * operation. 11410 * 11411 * Return codes 11412 * 0 - successful 11413 **/ 11414 int 11415 lpfc_fof_queue_destroy(struct lpfc_hba *phba) 11416 { 11417 /* Release FOF Event queue */ 11418 if (phba->sli4_hba.fof_eq != NULL) { 11419 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); 11420 phba->sli4_hba.fof_eq = NULL; 11421 } 11422 11423 /* Release OAS Completion queue */ 11424 if (phba->sli4_hba.oas_cq != NULL) { 11425 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); 11426 phba->sli4_hba.oas_cq = NULL; 11427 } 11428 11429 /* Release OAS Work queue */ 11430 if (phba->sli4_hba.oas_wq != NULL) { 11431 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); 11432 phba->sli4_hba.oas_wq = NULL; 11433 } 11434 return 0; 11435 } 11436 11437 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 11438 11439 static const struct pci_error_handlers lpfc_err_handler = { 11440 .error_detected = lpfc_io_error_detected, 11441 .slot_reset = lpfc_io_slot_reset, 11442 .resume = lpfc_io_resume, 11443 }; 11444 11445 static struct pci_driver lpfc_driver = { 11446 .name = LPFC_DRIVER_NAME, 11447 .id_table = lpfc_id_table, 11448 .probe = lpfc_pci_probe_one, 11449 .remove = lpfc_pci_remove_one, 11450 .suspend = lpfc_pci_suspend_one, 11451 .resume = lpfc_pci_resume_one, 11452 .err_handler = &lpfc_err_handler, 11453 }; 11454 11455 static const struct file_operations lpfc_mgmt_fop = { 11456 .owner = THIS_MODULE, 11457 }; 11458 11459 static struct miscdevice lpfc_mgmt_dev = { 11460 .minor = MISC_DYNAMIC_MINOR, 11461 .name = "lpfcmgmt", 11462 .fops = &lpfc_mgmt_fop, 11463 }; 11464 11465 /** 11466 * lpfc_init - lpfc module initialization routine 11467 * 11468 * This routine is to be invoked when the lpfc module is loaded into the 11469 * kernel. The special kernel macro module_init() is used to indicate the 11470 * role of this routine to the kernel as lpfc module entry point. 11471 * 11472 * Return codes 11473 * 0 - successful 11474 * -ENOMEM - FC attach transport failed 11475 * all others - failed 11476 */ 11477 static int __init 11478 lpfc_init(void) 11479 { 11480 int cpu; 11481 int error = 0; 11482 11483 printk(LPFC_MODULE_DESC "\n"); 11484 printk(LPFC_COPYRIGHT "\n"); 11485 11486 error = misc_register(&lpfc_mgmt_dev); 11487 if (error) 11488 printk(KERN_ERR "Could not register lpfcmgmt device, " 11489 "misc_register returned with status %d", error); 11490 11491 lpfc_transport_functions.vport_create = lpfc_vport_create; 11492 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 11493 lpfc_transport_template = 11494 fc_attach_transport(&lpfc_transport_functions); 11495 if (lpfc_transport_template == NULL) 11496 return -ENOMEM; 11497 lpfc_vport_transport_template = 11498 fc_attach_transport(&lpfc_vport_transport_functions); 11499 if (lpfc_vport_transport_template == NULL) { 11500 fc_release_transport(lpfc_transport_template); 11501 return -ENOMEM; 11502 } 11503 11504 /* Initialize in case vector mapping is needed */ 11505 lpfc_used_cpu = NULL; 11506 lpfc_present_cpu = 0; 11507 for_each_present_cpu(cpu) 11508 lpfc_present_cpu++; 11509 11510 error = pci_register_driver(&lpfc_driver); 11511 if (error) { 11512 fc_release_transport(lpfc_transport_template); 11513 fc_release_transport(lpfc_vport_transport_template); 11514 } 11515 11516 return error; 11517 } 11518 11519 /** 11520 * lpfc_exit - lpfc module removal routine 11521 * 11522 * This routine is invoked when the lpfc module is removed from the kernel. 11523 * The special kernel macro module_exit() is used to indicate the role of 11524 * this routine to the kernel as lpfc module exit point. 11525 */ 11526 static void __exit 11527 lpfc_exit(void) 11528 { 11529 misc_deregister(&lpfc_mgmt_dev); 11530 pci_unregister_driver(&lpfc_driver); 11531 fc_release_transport(lpfc_transport_template); 11532 fc_release_transport(lpfc_vport_transport_template); 11533 if (_dump_buf_data) { 11534 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 11535 "_dump_buf_data at 0x%p\n", 11536 (1L << _dump_buf_data_order), _dump_buf_data); 11537 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 11538 } 11539 11540 if (_dump_buf_dif) { 11541 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 11542 "_dump_buf_dif at 0x%p\n", 11543 (1L << _dump_buf_dif_order), _dump_buf_dif); 11544 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 11545 } 11546 kfree(lpfc_used_cpu); 11547 idr_destroy(&lpfc_hba_index); 11548 } 11549 11550 module_init(lpfc_init); 11551 module_exit(lpfc_exit); 11552 MODULE_LICENSE("GPL"); 11553 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 11554 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 11555 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 11556