/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
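/*
 * lpfc_hba_index provides the unique instance number assigned to each
 * HBA as it is discovered; the rest of the driver uses it to number boards.
 */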
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

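	/*
	 * The VPD image is pulled through the DUMP mailbox command one
	 * chunk at a time; keep going until the adapter reports no more
	 * data or the DMP_VPD_SIZE buffer is full.
	 */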
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

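	/* Overlay struct prog_id on the raw word to decode its bit fields */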
	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

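	/* Administratively configured soft WWNs override the adapter's own */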
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

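	/*
	 * Validate the user-requested link speed against the speeds this
	 * board supports (the lmt bits returned by READ_CONFIG).
	 */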
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
		&& !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

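	/*
	 * Either keep the link down because it was administratively
	 * disabled, or initialize it now unless lpfc_suppress_link_up
	 * defers that to a later lpfc_hba_init_link() call.
	 */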
	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

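	/* Ask the firmware for asynchronous event notification (for example,
	 * temperature sensor events) on the ELS ring.
	 */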
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
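	/* In polled mode no completion handler runs, so free the mailbox here */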
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
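	/* Mark every aborted ELS SGL free before splicing the whole list
	 * back onto lpfc_sgl_list.
	 */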
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
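	/* Post the RRQ event and wake the worker thread to run the handler */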
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

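	/* The heartbeat was answered; clear the outstanding flag under hbalock */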
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler invoked by the worker
 * thread whenever the HBA timer fires and the HBA-timeout event is posted.
 * It performs any periodic operations needed for the device. If such a
 * periodic event has already been attended to, either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the
 * HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
 * resets the timer for the next timeout period. If the lpfc heart-beat
 * mailbox command is configured and no heart-beat mailbox command is
 * outstanding, a heart-beat mailbox is issued and the timer is set
 * properly. Otherwise, if a heart-beat mailbox command has been
 * outstanding, the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

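	/* If the ELS buffer count has not moved since the last pass, the
	 * queued buffers are stale; free them.
	 */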
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing: last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

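	/* HS_FFER6 (link re-establishment) and HS_FFER8 (device zeroization)
	 * are recoverable: restart the HBA and try to bring it back online.
	 */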
	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, just treat it as an adapter hardware failure
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

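	/* The recovery action depends on the SLI interface type advertised
	 * in the sli_intf register.
	 */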
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
			/*
			 * TODO: Attempt port recovery via a port reset.
			 * When fully implemented, the driver should
			 * attempt to recover the port here and return.
			 * For now, log an error and take the port offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

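	/* Count the link event and issue READ_TOPOLOGY; the completion
	 * handler does the actual link-state processing.
	 */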
	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
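	/* Walk the VPD descriptors: skip the 0x82/0x91 tags, parse the
	 * read-only keywords under tag 0x90, and stop at the 0x78 end tag.
	 */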
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') &&
				    (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] =
							vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] =
							vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				} else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

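	/* Derive the highest supported speed from the link-mode type bits */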
	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
Adapter"}; 1845 break; 1846 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1847 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1848 GE = 1; 1849 break; 1850 case PCI_DEVICE_ID_ZMID: 1851 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1852 break; 1853 case PCI_DEVICE_ID_ZSMB: 1854 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1855 break; 1856 case PCI_DEVICE_ID_LP101: 1857 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1858 break; 1859 case PCI_DEVICE_ID_LP10000S: 1860 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1861 break; 1862 case PCI_DEVICE_ID_LP11000S: 1863 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1864 break; 1865 case PCI_DEVICE_ID_LPE11000S: 1866 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1867 break; 1868 case PCI_DEVICE_ID_SAT: 1869 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1870 break; 1871 case PCI_DEVICE_ID_SAT_MID: 1872 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1873 break; 1874 case PCI_DEVICE_ID_SAT_SMB: 1875 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1876 break; 1877 case PCI_DEVICE_ID_SAT_DCSP: 1878 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1879 break; 1880 case PCI_DEVICE_ID_SAT_SCSP: 1881 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1882 break; 1883 case PCI_DEVICE_ID_SAT_S: 1884 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1885 break; 1886 case PCI_DEVICE_ID_HORNET: 1887 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1888 GE = 1; 1889 break; 1890 case PCI_DEVICE_ID_PROTEUS_VF: 1891 m = (typeof(m)){"LPev12000", "PCIe IOV", 1892 "Fibre Channel Adapter"}; 1893 break; 1894 case PCI_DEVICE_ID_PROTEUS_PF: 1895 m = (typeof(m)){"LPev12000", "PCIe IOV", 1896 "Fibre Channel Adapter"}; 1897 break; 1898 case PCI_DEVICE_ID_PROTEUS_S: 1899 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1900 "Fibre Channel Adapter"}; 1901 break; 1902 case PCI_DEVICE_ID_TIGERSHARK: 1903 oneConnect = 1; 1904 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1905 break; 1906 case PCI_DEVICE_ID_TOMCAT: 1907 oneConnect = 1; 1908 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1909 break; 1910 case PCI_DEVICE_ID_FALCON: 1911 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1912 "EmulexSecure Fibre"}; 1913 break; 1914 case PCI_DEVICE_ID_BALIUS: 1915 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1916 "Fibre Channel Adapter"}; 1917 break; 1918 case PCI_DEVICE_ID_LANCER_FC: 1919 oneConnect = 1; 1920 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"}; 1921 break; 1922 case PCI_DEVICE_ID_LANCER_FCOE: 1923 oneConnect = 1; 1924 m = (typeof(m)){"Undefined", "PCIe", "FCoE"}; 1925 break; 1926 default: 1927 m = (typeof(m)){"Unknown", "", ""}; 1928 break; 1929 } 1930 1931 if (mdp && mdp[0] == '\0') 1932 snprintf(mdp, 79,"%s", m.name); 1933 /* oneConnect hba requires special processing, they are all initiators 1934 * and we put the port number on the end 1935 */ 1936 if (descp && descp[0] == '\0') { 1937 if (oneConnect) 1938 snprintf(descp, 255, 1939 "Emulex OneConnect %s, %s Initiator, Port %s", 1940 m.name, m.function, 1941 phba->Port); 1942 else 1943 snprintf(descp, 255, 1944 "Emulex %s %d%s %s %s", 1945 m.name, max_speed, (GE) ? "GE" : "Gb", 1946 m.bus, m.function); 1947 } 1948 } 1949 1950 /** 1951 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1952 * @phba: pointer to lpfc hba data structure. 1953 * @pring: pointer to a IOCB ring. 
1954 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1955 * 1956 * This routine posts a given number of IOCBs with the associated DMA buffer 1957 * descriptors specified by the cnt argument to the given IOCB ring. 1958 * 1959 * Return codes 1960 * The number of IOCBs NOT able to be posted to the IOCB ring. 1961 **/ 1962 int 1963 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1964 { 1965 IOCB_t *icmd; 1966 struct lpfc_iocbq *iocb; 1967 struct lpfc_dmabuf *mp1, *mp2; 1968 1969 cnt += pring->missbufcnt; 1970 1971 /* While there are buffers to post */ 1972 while (cnt > 0) { 1973 /* Allocate buffer for command iocb */ 1974 iocb = lpfc_sli_get_iocbq(phba); 1975 if (iocb == NULL) { 1976 pring->missbufcnt = cnt; 1977 return cnt; 1978 } 1979 icmd = &iocb->iocb; 1980 1981 /* 2 buffers can be posted per command */ 1982 /* Allocate buffer to post */ 1983 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1984 if (mp1) 1985 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1986 if (!mp1 || !mp1->virt) { 1987 kfree(mp1); 1988 lpfc_sli_release_iocbq(phba, iocb); 1989 pring->missbufcnt = cnt; 1990 return cnt; 1991 } 1992 1993 INIT_LIST_HEAD(&mp1->list); 1994 /* Allocate buffer to post */ 1995 if (cnt > 1) { 1996 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1997 if (mp2) 1998 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1999 &mp2->phys); 2000 if (!mp2 || !mp2->virt) { 2001 kfree(mp2); 2002 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2003 kfree(mp1); 2004 lpfc_sli_release_iocbq(phba, iocb); 2005 pring->missbufcnt = cnt; 2006 return cnt; 2007 } 2008 2009 INIT_LIST_HEAD(&mp2->list); 2010 } else { 2011 mp2 = NULL; 2012 } 2013 2014 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2015 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2016 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2017 icmd->ulpBdeCount = 1; 2018 cnt--; 2019 if (mp2) { 2020 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2021 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2022 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2023 cnt--; 2024 icmd->ulpBdeCount = 2; 2025 } 2026 2027 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2028 icmd->ulpLe = 1; 2029 2030 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2031 IOCB_ERROR) { 2032 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2033 kfree(mp1); 2034 cnt++; 2035 if (mp2) { 2036 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2037 kfree(mp2); 2038 cnt++; 2039 } 2040 lpfc_sli_release_iocbq(phba, iocb); 2041 pring->missbufcnt = cnt; 2042 return cnt; 2043 } 2044 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2045 if (mp2) 2046 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2047 } 2048 pring->missbufcnt = 0; 2049 return 0; 2050 } 2051 2052 /** 2053 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2054 * @phba: pointer to lpfc hba data structure. 2055 * 2056 * This routine posts initial receive IOCB buffers to the ELS ring. The 2057 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2058 * set to 64 IOCBs. 
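 * Buffers that cannot be posted are tracked in pring->missbufcnt by
 * lpfc_post_buffer() above and retried on its next invocation; e.g. the
 * retry accumulation at the top of that routine:
 *
 *   cnt += pring->missbufcnt;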
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table and returned through the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
2170 * @hbainit: pointer to an array of unsigned 32-bit integers. 2171 * 2172 * This routine performs the special handling for LC HBA initialization. 2173 **/ 2174 void 2175 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2176 { 2177 int t; 2178 uint32_t *HashWorking; 2179 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2180 2181 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2182 if (!HashWorking) 2183 return; 2184 2185 HashWorking[0] = HashWorking[78] = *pwwnn++; 2186 HashWorking[1] = HashWorking[79] = *pwwnn; 2187 2188 for (t = 0; t < 7; t++) 2189 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2190 2191 lpfc_sha_init(hbainit); 2192 lpfc_sha_iterate(hbainit, HashWorking); 2193 kfree(HashWorking); 2194 } 2195 2196 /** 2197 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2198 * @vport: pointer to a virtual N_Port data structure. 2199 * 2200 * This routine performs the necessary cleanups before deleting the @vport. 2201 * It invokes the discovery state machine to perform necessary state 2202 * transitions and to release the ndlps associated with the @vport. Note, 2203 * the physical port is treated as @vport 0. 2204 **/ 2205 void 2206 lpfc_cleanup(struct lpfc_vport *vport) 2207 { 2208 struct lpfc_hba *phba = vport->phba; 2209 struct lpfc_nodelist *ndlp, *next_ndlp; 2210 int i = 0; 2211 2212 if (phba->link_state > LPFC_LINK_DOWN) 2213 lpfc_port_link_failure(vport); 2214 2215 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2216 if (!NLP_CHK_NODE_ACT(ndlp)) { 2217 ndlp = lpfc_enable_node(vport, ndlp, 2218 NLP_STE_UNUSED_NODE); 2219 if (!ndlp) 2220 continue; 2221 spin_lock_irq(&phba->ndlp_lock); 2222 NLP_SET_FREE_REQ(ndlp); 2223 spin_unlock_irq(&phba->ndlp_lock); 2224 /* Trigger the release of the ndlp memory */ 2225 lpfc_nlp_put(ndlp); 2226 continue; 2227 } 2228 spin_lock_irq(&phba->ndlp_lock); 2229 if (NLP_CHK_FREE_REQ(ndlp)) { 2230 /* The ndlp should not be in memory free mode already */ 2231 spin_unlock_irq(&phba->ndlp_lock); 2232 continue; 2233 } else 2234 /* Indicate request for freeing ndlp memory */ 2235 NLP_SET_FREE_REQ(ndlp); 2236 spin_unlock_irq(&phba->ndlp_lock); 2237 2238 if (vport->port_type != LPFC_PHYSICAL_PORT && 2239 ndlp->nlp_DID == Fabric_DID) { 2240 /* Just free up ndlp with Fabric_DID for vports */ 2241 lpfc_nlp_put(ndlp); 2242 continue; 2243 } 2244 2245 if (ndlp->nlp_type & NLP_FABRIC) 2246 lpfc_disc_state_machine(vport, ndlp, NULL, 2247 NLP_EVT_DEVICE_RECOVERY); 2248 2249 lpfc_disc_state_machine(vport, ndlp, NULL, 2250 NLP_EVT_DEVICE_RM); 2251 2252 } 2253 2254 /* At this point, ALL ndlp's should be gone 2255 * because of the previous NLP_EVT_DEVICE_RM. 2256 * Lets wait for this to happen, if needed. 2257 */ 2258 while (!list_empty(&vport->fc_nodes)) { 2259 if (i++ > 3000) { 2260 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2261 "0233 Nodelist not empty\n"); 2262 list_for_each_entry_safe(ndlp, next_ndlp, 2263 &vport->fc_nodes, nlp_listp) { 2264 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2265 LOG_NODE, 2266 "0282 did:x%x ndlp:x%p " 2267 "usgmap:x%x refcnt:%d\n", 2268 ndlp->nlp_DID, (void *)ndlp, 2269 ndlp->nlp_usg_map, 2270 atomic_read( 2271 &ndlp->kref.refcount)); 2272 } 2273 break; 2274 } 2275 2276 /* Wait for any activity on ndlps to settle */ 2277 msleep(10); 2278 } 2279 lpfc_cleanup_vports_rrqs(vport, NULL); 2280 } 2281 2282 /** 2283 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2284 * @vport: pointer to a virtual N_Port data structure. 
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs interface or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;


	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	if (phba->sli.mbox_active)
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
				   jiffies;
	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	} else {
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}

/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface as not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked.
Once the 2494 * HBA's management interface is marked as not blocked, all the user space 2495 * access to the HBA, whether they are from sysfs interface or libdfc 2496 * interface will be allowed. The HBA is set to block the management interface 2497 * when the driver prepares the HBA interface for online or offline and then 2498 * set to unblock the management interface afterwards. 2499 **/ 2500 void 2501 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2502 { 2503 unsigned long iflag; 2504 2505 spin_lock_irqsave(&phba->hbalock, iflag); 2506 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2507 spin_unlock_irqrestore(&phba->hbalock, iflag); 2508 } 2509 2510 /** 2511 * lpfc_offline_prep - Prepare a HBA to be brought offline 2512 * @phba: pointer to lpfc hba data structure. 2513 * 2514 * This routine is invoked to prepare a HBA to be brought offline. It performs 2515 * unregistration login to all the nodes on all vports and flushes the mailbox 2516 * queue to make it ready to be brought offline. 2517 **/ 2518 void 2519 lpfc_offline_prep(struct lpfc_hba * phba) 2520 { 2521 struct lpfc_vport *vport = phba->pport; 2522 struct lpfc_nodelist *ndlp, *next_ndlp; 2523 struct lpfc_vport **vports; 2524 struct Scsi_Host *shost; 2525 int i; 2526 2527 if (vport->fc_flag & FC_OFFLINE_MODE) 2528 return; 2529 2530 lpfc_block_mgmt_io(phba); 2531 2532 lpfc_linkdown(phba); 2533 2534 /* Issue an unreg_login to all nodes on all vports */ 2535 vports = lpfc_create_vport_work_array(phba); 2536 if (vports != NULL) { 2537 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2538 if (vports[i]->load_flag & FC_UNLOADING) 2539 continue; 2540 shost = lpfc_shost_from_vport(vports[i]); 2541 spin_lock_irq(shost->host_lock); 2542 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2543 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2544 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2545 spin_unlock_irq(shost->host_lock); 2546 2547 shost = lpfc_shost_from_vport(vports[i]); 2548 list_for_each_entry_safe(ndlp, next_ndlp, 2549 &vports[i]->fc_nodes, 2550 nlp_listp) { 2551 if (!NLP_CHK_NODE_ACT(ndlp)) 2552 continue; 2553 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2554 continue; 2555 if (ndlp->nlp_type & NLP_FABRIC) { 2556 lpfc_disc_state_machine(vports[i], ndlp, 2557 NULL, NLP_EVT_DEVICE_RECOVERY); 2558 lpfc_disc_state_machine(vports[i], ndlp, 2559 NULL, NLP_EVT_DEVICE_RM); 2560 } 2561 spin_lock_irq(shost->host_lock); 2562 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2563 spin_unlock_irq(shost->host_lock); 2564 lpfc_unreg_rpi(vports[i], ndlp); 2565 } 2566 } 2567 } 2568 lpfc_destroy_vport_work_array(phba, vports); 2569 2570 lpfc_sli_mbox_sys_shutdown(phba); 2571 } 2572 2573 /** 2574 * lpfc_offline - Bring a HBA offline 2575 * @phba: pointer to lpfc hba data structure. 2576 * 2577 * This routine actually brings a HBA offline. It stops all the timers 2578 * associated with the HBA, brings down the SLI layer, and eventually 2579 * marks the HBA as in offline state for the upper layer protocol. 
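 *
 * A hedged sketch of a full offline/online cycle (the exact caller sequence
 * is an assumption here; lpfc_sli_brdrestart() lives in the SLI layer):
 *
 *   lpfc_offline_prep(phba);
 *   lpfc_offline(phba);
 *   lpfc_sli_brdrestart(phba);
 *   lpfc_online(phba);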
2580 **/ 2581 void 2582 lpfc_offline(struct lpfc_hba *phba) 2583 { 2584 struct Scsi_Host *shost; 2585 struct lpfc_vport **vports; 2586 int i; 2587 2588 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2589 return; 2590 2591 /* stop port and all timers associated with this hba */ 2592 lpfc_stop_port(phba); 2593 vports = lpfc_create_vport_work_array(phba); 2594 if (vports != NULL) 2595 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2596 lpfc_stop_vport_timers(vports[i]); 2597 lpfc_destroy_vport_work_array(phba, vports); 2598 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2599 "0460 Bring Adapter offline\n"); 2600 /* Bring down the SLI Layer and cleanup. The HBA is offline 2601 now. */ 2602 lpfc_sli_hba_down(phba); 2603 spin_lock_irq(&phba->hbalock); 2604 phba->work_ha = 0; 2605 spin_unlock_irq(&phba->hbalock); 2606 vports = lpfc_create_vport_work_array(phba); 2607 if (vports != NULL) 2608 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2609 shost = lpfc_shost_from_vport(vports[i]); 2610 spin_lock_irq(shost->host_lock); 2611 vports[i]->work_port_events = 0; 2612 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2613 spin_unlock_irq(shost->host_lock); 2614 } 2615 lpfc_destroy_vport_work_array(phba, vports); 2616 } 2617 2618 /** 2619 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2620 * @phba: pointer to lpfc hba data structure. 2621 * 2622 * This routine is to free all the SCSI buffers and IOCBs from the driver 2623 * list back to kernel. It is called from lpfc_pci_remove_one to free 2624 * the internal resources before the device is removed from the system. 2625 * 2626 * Return codes 2627 * 0 - successful (for now, it always returns 0) 2628 **/ 2629 static int 2630 lpfc_scsi_free(struct lpfc_hba *phba) 2631 { 2632 struct lpfc_scsi_buf *sb, *sb_next; 2633 struct lpfc_iocbq *io, *io_next; 2634 2635 spin_lock_irq(&phba->hbalock); 2636 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2637 spin_lock(&phba->scsi_buf_list_lock); 2638 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2639 list_del(&sb->list); 2640 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2641 sb->dma_handle); 2642 kfree(sb); 2643 phba->total_scsi_bufs--; 2644 } 2645 spin_unlock(&phba->scsi_buf_list_lock); 2646 2647 /* Release all the lpfc_iocbq entries maintained by this host. */ 2648 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2649 list_del(&io->list); 2650 kfree(io); 2651 phba->total_iocbq_bufs--; 2652 } 2653 spin_unlock_irq(&phba->hbalock); 2654 return 0; 2655 } 2656 2657 /** 2658 * lpfc_create_port - Create an FC port 2659 * @phba: pointer to lpfc hba data structure. 2660 * @instance: a unique integer ID to this FC port. 2661 * @dev: pointer to the device data structure. 2662 * 2663 * This routine creates a FC port for the upper layer protocol. The FC port 2664 * can be created on top of either a physical port or a virtual port provided 2665 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2666 * and associates the FC port created before adding the shost into the SCSI 2667 * layer. 2668 * 2669 * Return codes 2670 * @vport - pointer to the virtual N_Port data structure. 2671 * NULL - port create failed. 
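 *
 * Illustrative call for the physical port (a sketch; "instance" stands in
 * for the caller's unique ID, e.g. obtained from lpfc_get_instance(), and
 * out_error is a hypothetical unwind label):
 *
 *   vport = lpfc_create_port(phba, instance, &phba->pcidev->dev);
 *   if (!vport)
 *           goto out_error;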
2672 **/ 2673 struct lpfc_vport * 2674 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2675 { 2676 struct lpfc_vport *vport; 2677 struct Scsi_Host *shost; 2678 int error = 0; 2679 2680 if (dev != &phba->pcidev->dev) 2681 shost = scsi_host_alloc(&lpfc_vport_template, 2682 sizeof(struct lpfc_vport)); 2683 else 2684 shost = scsi_host_alloc(&lpfc_template, 2685 sizeof(struct lpfc_vport)); 2686 if (!shost) 2687 goto out; 2688 2689 vport = (struct lpfc_vport *) shost->hostdata; 2690 vport->phba = phba; 2691 vport->load_flag |= FC_LOADING; 2692 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2693 vport->fc_rscn_flush = 0; 2694 2695 lpfc_get_vport_cfgparam(vport); 2696 shost->unique_id = instance; 2697 shost->max_id = LPFC_MAX_TARGET; 2698 shost->max_lun = vport->cfg_max_luns; 2699 shost->this_id = -1; 2700 shost->max_cmd_len = 16; 2701 if (phba->sli_rev == LPFC_SLI_REV4) { 2702 shost->dma_boundary = 2703 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2704 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2705 } 2706 2707 /* 2708 * Set initial can_queue value since 0 is no longer supported and 2709 * scsi_add_host will fail. This will be adjusted later based on the 2710 * max xri value determined in hba setup. 2711 */ 2712 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2713 if (dev != &phba->pcidev->dev) { 2714 shost->transportt = lpfc_vport_transport_template; 2715 vport->port_type = LPFC_NPIV_PORT; 2716 } else { 2717 shost->transportt = lpfc_transport_template; 2718 vport->port_type = LPFC_PHYSICAL_PORT; 2719 } 2720 2721 /* Initialize all internally managed lists. */ 2722 INIT_LIST_HEAD(&vport->fc_nodes); 2723 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2724 spin_lock_init(&vport->work_port_lock); 2725 2726 init_timer(&vport->fc_disctmo); 2727 vport->fc_disctmo.function = lpfc_disc_timeout; 2728 vport->fc_disctmo.data = (unsigned long)vport; 2729 2730 init_timer(&vport->fc_fdmitmo); 2731 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2732 vport->fc_fdmitmo.data = (unsigned long)vport; 2733 2734 init_timer(&vport->els_tmofunc); 2735 vport->els_tmofunc.function = lpfc_els_timeout; 2736 vport->els_tmofunc.data = (unsigned long)vport; 2737 2738 init_timer(&vport->delayed_disc_tmo); 2739 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 2740 vport->delayed_disc_tmo.data = (unsigned long)vport; 2741 2742 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2743 if (error) 2744 goto out_put_shost; 2745 2746 spin_lock_irq(&phba->hbalock); 2747 list_add_tail(&vport->listentry, &phba->port_list); 2748 spin_unlock_irq(&phba->hbalock); 2749 return vport; 2750 2751 out_put_shost: 2752 scsi_host_put(shost); 2753 out: 2754 return NULL; 2755 } 2756 2757 /** 2758 * destroy_port - destroy an FC port 2759 * @vport: pointer to an lpfc virtual N_Port data structure. 2760 * 2761 * This routine destroys a FC port from the upper layer protocol. All the 2762 * resources associated with the port are released. 2763 **/ 2764 void 2765 destroy_port(struct lpfc_vport *vport) 2766 { 2767 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2768 struct lpfc_hba *phba = vport->phba; 2769 2770 lpfc_debugfs_terminate(vport); 2771 fc_remove_host(shost); 2772 scsi_remove_host(shost); 2773 2774 spin_lock_irq(&phba->hbalock); 2775 list_del_init(&vport->listentry); 2776 spin_unlock_irq(&phba->hbalock); 2777 2778 lpfc_cleanup(vport); 2779 return; 2780 } 2781 2782 /** 2783 * lpfc_get_instance - Get a unique integer ID 2784 * 2785 * This routine allocates a unique integer ID from lpfc_hba_index pool. 
It uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the host scan is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host's attributes on an FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2876 */ 2877 2878 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2879 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2880 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2881 2882 memset(fc_host_supported_fc4s(shost), 0, 2883 sizeof(fc_host_supported_fc4s(shost))); 2884 fc_host_supported_fc4s(shost)[2] = 1; 2885 fc_host_supported_fc4s(shost)[7] = 1; 2886 2887 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2888 sizeof fc_host_symbolic_name(shost)); 2889 2890 fc_host_supported_speeds(shost) = 0; 2891 if (phba->lmt & LMT_10Gb) 2892 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2893 if (phba->lmt & LMT_8Gb) 2894 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2895 if (phba->lmt & LMT_4Gb) 2896 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2897 if (phba->lmt & LMT_2Gb) 2898 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2899 if (phba->lmt & LMT_1Gb) 2900 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2901 2902 fc_host_maxframe_size(shost) = 2903 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2904 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2905 2906 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 2907 2908 /* This value is also unchanging */ 2909 memset(fc_host_active_fc4s(shost), 0, 2910 sizeof(fc_host_active_fc4s(shost))); 2911 fc_host_active_fc4s(shost)[2] = 1; 2912 fc_host_active_fc4s(shost)[7] = 1; 2913 2914 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2915 spin_lock_irq(shost->host_lock); 2916 vport->load_flag &= ~FC_LOADING; 2917 spin_unlock_irq(shost->host_lock); 2918 } 2919 2920 /** 2921 * lpfc_stop_port_s3 - Stop SLI3 device port 2922 * @phba: pointer to lpfc hba data structure. 2923 * 2924 * This routine is invoked to stop an SLI3 device port, it stops the device 2925 * from generating interrupts and stops the device driver's timers for the 2926 * device. 2927 **/ 2928 static void 2929 lpfc_stop_port_s3(struct lpfc_hba *phba) 2930 { 2931 /* Clear all interrupt enable conditions */ 2932 writel(0, phba->HCregaddr); 2933 readl(phba->HCregaddr); /* flush */ 2934 /* Clear all pending interrupts */ 2935 writel(0xffffffff, phba->HAregaddr); 2936 readl(phba->HAregaddr); /* flush */ 2937 2938 /* Reset some HBA SLI setup states */ 2939 lpfc_stop_hba_timers(phba); 2940 phba->pport->work_port_events = 0; 2941 } 2942 2943 /** 2944 * lpfc_stop_port_s4 - Stop SLI4 device port 2945 * @phba: pointer to lpfc hba data structure. 2946 * 2947 * This routine is invoked to stop an SLI4 device port, it stops the device 2948 * from generating interrupts and stops the device driver's timers for the 2949 * device. 2950 **/ 2951 static void 2952 lpfc_stop_port_s4(struct lpfc_hba *phba) 2953 { 2954 /* Reset some HBA SLI4 setup states */ 2955 lpfc_stop_hba_timers(phba); 2956 phba->pport->work_port_events = 0; 2957 phba->sli4_hba.intr_enable = 0; 2958 } 2959 2960 /** 2961 * lpfc_stop_port - Wrapper function for stopping hba port 2962 * @phba: Pointer to HBA context object. 2963 * 2964 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 2965 * the API jump table function pointer from the lpfc_hba struct. 2966 **/ 2967 void 2968 lpfc_stop_port(struct lpfc_hba *phba) 2969 { 2970 phba->lpfc_stop_port(phba); 2971 } 2972 2973 /** 2974 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2975 * @phba: Pointer to hba for which this call is being executed. 2976 * 2977 * This routine starts the timer waiting for the FCF rediscovery to complete. 
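 *
 * The wait period is LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds; on expiry,
 * lpfc_sli4_fcf_redisc_wait_tmo() below turns the pending flag into an
 * FCF_REDISC_EVT flag and wakes the worker thread.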
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine is invoked when the wait for FCF table rediscovery times out.
 * If new FCF records have been discovered during the wait period, a new FCF
 * event shall be added to the FCOE async event list, and then the worker
 * thread shall be woken up for processing from the worker thread context.
 **/
void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
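 *
 * Mapping implemented by the switch below (summary):
 *
 *   LPFC_ASYNC_LINK_STATUS_DOWN,
 *   LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN -> LPFC_ATT_LINK_DOWN
 *   LPFC_ASYNC_LINK_STATUS_LOGICAL_UP   -> LPFC_ATT_LINK_UP
 *   LPFC_ASYNC_LINK_STATUS_UP (physical only) and any unknown status
 *                                       -> LPFC_ATT_RESERVED (ignored)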
3068 **/ 3069 static uint8_t 3070 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3071 struct lpfc_acqe_link *acqe_link) 3072 { 3073 uint8_t att_type; 3074 3075 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3076 case LPFC_ASYNC_LINK_STATUS_DOWN: 3077 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3078 att_type = LPFC_ATT_LINK_DOWN; 3079 break; 3080 case LPFC_ASYNC_LINK_STATUS_UP: 3081 /* Ignore physical link up events - wait for logical link up */ 3082 att_type = LPFC_ATT_RESERVED; 3083 break; 3084 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3085 att_type = LPFC_ATT_LINK_UP; 3086 break; 3087 default: 3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3089 "0399 Invalid link attention type: x%x\n", 3090 bf_get(lpfc_acqe_link_status, acqe_link)); 3091 att_type = LPFC_ATT_RESERVED; 3092 break; 3093 } 3094 return att_type; 3095 } 3096 3097 /** 3098 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3099 * @phba: pointer to lpfc hba data structure. 3100 * @acqe_link: pointer to the async link completion queue entry. 3101 * 3102 * This routine is to parse the SLI4 link-attention link speed and translate 3103 * it into the base driver's link-attention link speed coding. 3104 * 3105 * Return: Link-attention link speed in terms of base driver's coding. 3106 **/ 3107 static uint8_t 3108 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3109 struct lpfc_acqe_link *acqe_link) 3110 { 3111 uint8_t link_speed; 3112 3113 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3114 case LPFC_ASYNC_LINK_SPEED_ZERO: 3115 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3116 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3117 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3118 break; 3119 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3120 link_speed = LPFC_LINK_SPEED_1GHZ; 3121 break; 3122 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3123 link_speed = LPFC_LINK_SPEED_10GHZ; 3124 break; 3125 default: 3126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3127 "0483 Invalid link-attention link speed: x%x\n", 3128 bf_get(lpfc_acqe_link_speed, acqe_link)); 3129 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3130 break; 3131 } 3132 return link_speed; 3133 } 3134 3135 /** 3136 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3137 * @phba: pointer to lpfc hba data structure. 3138 * @acqe_link: pointer to the async link completion queue entry. 3139 * 3140 * This routine is to handle the SLI4 asynchronous FCoE link event. 
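 *
 * This handler is reached from the async event dispatch loop; a sketch of
 * the dispatching case (see lpfc_sli4_async_event_proc() below):
 *
 *   case LPFC_TRAILER_CODE_LINK:
 *           lpfc_sli4_async_link_evt(phba, &cq_event->cqe.acqe_link);
 *           break;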
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have finished processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_link_speed, acqe_link);
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_logical_link_speed, acqe_link);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FCoE Link event - Speed:%dGBit duplex:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical "
			"speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply
 * log that the event was received and then issue a read_topology mailbox
 * command so that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected (%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have finished processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			bf_get(lpfc_trailer_type, acqe_sli));
	return;
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
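 *
 * Caller sketch (as used by the CVL branch of lpfc_sli4_async_fip_evt()
 * below):
 *
 *   ndlp = lpfc_sli4_perform_vport_cvl(vport);
 *   if (!ndlp)
 *           break;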
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
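 *
 * Event types handled by the switch below (summary):
 *
 *   LPFC_FIP_EVENT_TYPE_NEW_FCF/FCF_PARAM_MOD - read/scan the FCF table
 *   LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL        - log only
 *   LPFC_FIP_EVENT_TYPE_FCF_DEAD              - start fast FCF failover
 *   LPFC_FIP_EVENT_TYPE_CVL                   - clear virtual link handling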
3457 **/ 3458 static void 3459 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 3460 struct lpfc_acqe_fip *acqe_fip) 3461 { 3462 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 3463 int rc; 3464 struct lpfc_vport *vport; 3465 struct lpfc_nodelist *ndlp; 3466 struct Scsi_Host *shost; 3467 int active_vlink_present; 3468 struct lpfc_vport **vports; 3469 int i; 3470 3471 phba->fc_eventTag = acqe_fip->event_tag; 3472 phba->fcoe_eventtag = acqe_fip->event_tag; 3473 switch (event_type) { 3474 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 3475 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 3476 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 3477 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3478 LOG_DISCOVERY, 3479 "2546 New FCF event, evt_tag:x%x, " 3480 "index:x%x\n", 3481 acqe_fip->event_tag, 3482 acqe_fip->index); 3483 else 3484 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3485 LOG_DISCOVERY, 3486 "2788 FCF param modified event, " 3487 "evt_tag:x%x, index:x%x\n", 3488 acqe_fip->event_tag, 3489 acqe_fip->index); 3490 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3491 /* 3492 * During period of FCF discovery, read the FCF 3493 * table record indexed by the event to update 3494 * FCF roundrobin failover eligible FCF bmask. 3495 */ 3496 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3497 LOG_DISCOVERY, 3498 "2779 Read FCF (x%x) for updating " 3499 "roundrobin FCF failover bmask\n", 3500 acqe_fip->index); 3501 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 3502 } 3503 3504 /* If the FCF discovery is in progress, do nothing. */ 3505 spin_lock_irq(&phba->hbalock); 3506 if (phba->hba_flag & FCF_TS_INPROG) { 3507 spin_unlock_irq(&phba->hbalock); 3508 break; 3509 } 3510 /* If fast FCF failover rescan event is pending, do nothing */ 3511 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3512 spin_unlock_irq(&phba->hbalock); 3513 break; 3514 } 3515 3516 /* If the FCF has been in discovered state, do nothing. */ 3517 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3518 spin_unlock_irq(&phba->hbalock); 3519 break; 3520 } 3521 spin_unlock_irq(&phba->hbalock); 3522 3523 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3524 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3525 "2770 Start FCF table scan per async FCF " 3526 "event, evt_tag:x%x, index:x%x\n", 3527 acqe_fip->event_tag, acqe_fip->index); 3528 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3529 LPFC_FCOE_FCF_GET_FIRST); 3530 if (rc) 3531 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3532 "2547 Issue FCF scan read FCF mailbox " 3533 "command failed (x%x)\n", rc); 3534 break; 3535 3536 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 3537 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3538 "2548 FCF Table full count 0x%x tag 0x%x\n", 3539 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 3540 acqe_fip->event_tag); 3541 break; 3542 3543 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 3544 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3545 "2549 FCF (x%x) disconnected from network, " 3546 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 3547 /* 3548 * If we are in the middle of FCF failover process, clear 3549 * the corresponding FCF bit in the roundrobin bitmap. 
3550 */ 3551 spin_lock_irq(&phba->hbalock); 3552 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3553 spin_unlock_irq(&phba->hbalock); 3554 /* Update FLOGI FCF failover eligible FCF bmask */ 3555 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 3556 break; 3557 } 3558 spin_unlock_irq(&phba->hbalock); 3559 3560 /* If the event is not for currently used fcf do nothing */ 3561 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 3562 break; 3563 3564 /* 3565 * Otherwise, request the port to rediscover the entire FCF 3566 * table for a fast recovery from case that the current FCF 3567 * is no longer valid as we are not in the middle of FCF 3568 * failover process already. 3569 */ 3570 spin_lock_irq(&phba->hbalock); 3571 /* Mark the fast failover process in progress */ 3572 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 3573 spin_unlock_irq(&phba->hbalock); 3574 3575 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3576 "2771 Start FCF fast failover process due to " 3577 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3578 "\n", acqe_fip->event_tag, acqe_fip->index); 3579 rc = lpfc_sli4_redisc_fcf_table(phba); 3580 if (rc) { 3581 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3582 LOG_DISCOVERY, 3583 "2772 Issue FCF rediscover mabilbox " 3584 "command failed, fail through to FCF " 3585 "dead event\n"); 3586 spin_lock_irq(&phba->hbalock); 3587 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 3588 spin_unlock_irq(&phba->hbalock); 3589 /* 3590 * Last resort will fail over by treating this 3591 * as a link down to FCF registration. 3592 */ 3593 lpfc_sli4_fcf_dead_failthrough(phba); 3594 } else { 3595 /* Reset FCF roundrobin bmask for new discovery */ 3596 memset(phba->fcf.fcf_rr_bmask, 0, 3597 sizeof(*phba->fcf.fcf_rr_bmask)); 3598 /* 3599 * Handling fast FCF failover to a DEAD FCF event is 3600 * considered equalivant to receiving CVL to all vports. 3601 */ 3602 lpfc_sli4_perform_all_vport_cvl(phba); 3603 } 3604 break; 3605 case LPFC_FIP_EVENT_TYPE_CVL: 3606 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3607 "2718 Clear Virtual Link Received for VPI 0x%x" 3608 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3609 vport = lpfc_find_vport_by_vpid(phba, 3610 acqe_fip->index - phba->vpi_base); 3611 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3612 if (!ndlp) 3613 break; 3614 active_vlink_present = 0; 3615 3616 vports = lpfc_create_vport_work_array(phba); 3617 if (vports) { 3618 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 3619 i++) { 3620 if ((!(vports[i]->fc_flag & 3621 FC_VPORT_CVL_RCVD)) && 3622 (vports[i]->port_state > LPFC_FDISC)) { 3623 active_vlink_present = 1; 3624 break; 3625 } 3626 } 3627 lpfc_destroy_vport_work_array(phba, vports); 3628 } 3629 3630 if (active_vlink_present) { 3631 /* 3632 * If there are other active VLinks present, 3633 * re-instantiate the Vlink using FDISC. 3634 */ 3635 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3636 shost = lpfc_shost_from_vport(vport); 3637 spin_lock_irq(shost->host_lock); 3638 ndlp->nlp_flag |= NLP_DELAY_TMO; 3639 spin_unlock_irq(shost->host_lock); 3640 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 3641 vport->port_state = LPFC_FDISC; 3642 } else { 3643 /* 3644 * Otherwise, we request port to rediscover 3645 * the entire FCF table for a fast recovery 3646 * from possible case that the current FCF 3647 * is no longer valid if we are not already 3648 * in the FCF failover process. 
3649 */
3650 spin_lock_irq(&phba->hbalock);
3651 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3652 spin_unlock_irq(&phba->hbalock);
3653 break;
3654 }
3655 /* Mark the fast failover process in progress */
3656 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3657 spin_unlock_irq(&phba->hbalock);
3658 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3659 LOG_DISCOVERY,
3660 "2773 Start FCF failover per CVL, "
3661 "evt_tag:x%x\n", acqe_fip->event_tag);
3662 rc = lpfc_sli4_redisc_fcf_table(phba);
3663 if (rc) {
3664 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3665 LOG_DISCOVERY,
3666 "2774 Issue FCF rediscover "
3667 "mailbox command failed, "
3668 "through to CVL event\n");
3669 spin_lock_irq(&phba->hbalock);
3670 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3671 spin_unlock_irq(&phba->hbalock);
3672 /*
3673 * Last resort will be to retry on the
3674 * current registered FCF entry.
3675 */
3676 lpfc_retry_pport_discovery(phba);
3677 } else
3678 /*
3679 * Reset FCF roundrobin bmask for new
3680 * discovery.
3681 */
3682 memset(phba->fcf.fcf_rr_bmask, 0,
3683 sizeof(*phba->fcf.fcf_rr_bmask));
3684 }
3685 break;
3686 default:
3687 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3688 "0288 Unknown FCoE event type 0x%x event tag "
3689 "0x%x\n", event_type, acqe_fip->event_tag);
3690 break;
3691 }
3692 }
3693
3694 /**
3695 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3696 * @phba: pointer to lpfc hba data structure.
3697 * @acqe_link: pointer to the async dcbx completion queue entry.
3698 *
3699 * This routine is to handle the SLI4 asynchronous dcbx event.
3700 **/
3701 static void
3702 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3703 struct lpfc_acqe_dcbx *acqe_dcbx)
3704 {
3705 phba->fc_eventTag = acqe_dcbx->event_tag;
3706 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3707 "0290 The SLI4 DCBX asynchronous event is not "
3708 "handled yet\n");
3709 }
3710
3711 /**
3712 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3713 * @phba: pointer to lpfc hba data structure.
3714 * @acqe_link: pointer to the async grp5 completion queue entry.
3715 *
3716 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3717 * is an asynchronous notification of a logical link speed change. The Port
3718 * reports the logical link speed in units of 10Mbps.
3719 **/
3720 static void
3721 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3722 struct lpfc_acqe_grp5 *acqe_grp5)
3723 {
3724 uint16_t prev_ll_spd;
3725
3726 phba->fc_eventTag = acqe_grp5->event_tag;
3727 phba->fcoe_eventtag = acqe_grp5->event_tag;
3728 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3729 phba->sli4_hba.link_state.logical_speed =
3730 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3731 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3732 "2789 GRP5 Async Event: Updating logical link speed "
3733 "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3734 (phba->sli4_hba.link_state.logical_speed*10));
3735 }
3736
3737 /**
3738 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3739 * @phba: pointer to lpfc hba data structure.
3740 *
3741 * This routine is invoked by the worker thread to process all the pending
3742 * SLI4 asynchronous events.
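 *
 * Illustrative flow (sketch; the exact caller shape is an assumption, not
 * shown in this section): the worker thread reacts to the ASYNC_EVENT flag
 * set by the interrupt path and drains the queue from process context:
 *
 *	if (phba->hba_flag & ASYNC_EVENT)
 *		lpfc_sli4_async_event_proc(phba);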
3743 **/
3744 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3745 {
3746 struct lpfc_cq_event *cq_event;
3747
3748 /* First, declare the async event has been handled */
3749 spin_lock_irq(&phba->hbalock);
3750 phba->hba_flag &= ~ASYNC_EVENT;
3751 spin_unlock_irq(&phba->hbalock);
3752 /* Now, handle all the async events */
3753 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3754 /* Get the first event from the head of the event queue */
3755 spin_lock_irq(&phba->hbalock);
3756 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3757 cq_event, struct lpfc_cq_event, list);
3758 spin_unlock_irq(&phba->hbalock);
3759 /* Process the asynchronous event */
3760 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3761 case LPFC_TRAILER_CODE_LINK:
3762 lpfc_sli4_async_link_evt(phba,
3763 &cq_event->cqe.acqe_link);
3764 break;
3765 case LPFC_TRAILER_CODE_FCOE:
3766 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3767 break;
3768 case LPFC_TRAILER_CODE_DCBX:
3769 lpfc_sli4_async_dcbx_evt(phba,
3770 &cq_event->cqe.acqe_dcbx);
3771 break;
3772 case LPFC_TRAILER_CODE_GRP5:
3773 lpfc_sli4_async_grp5_evt(phba,
3774 &cq_event->cqe.acqe_grp5);
3775 break;
3776 case LPFC_TRAILER_CODE_FC:
3777 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3778 break;
3779 case LPFC_TRAILER_CODE_SLI:
3780 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3781 break;
3782 default:
3783 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3784 "1804 Invalid asynchronous event code: "
3785 "x%x\n", bf_get(lpfc_trailer_code,
3786 &cq_event->cqe.mcqe_cmpl));
3787 break;
3788 }
3789 /* Free the completion event processed to the free pool */
3790 lpfc_sli4_cq_event_release(phba, cq_event);
3791 }
3792 }
3793
3794 /**
3795 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3796 * @phba: pointer to lpfc hba data structure.
3797 *
3798 * This routine is invoked by the worker thread to process FCF table
3799 * rediscovery pending completion event.
3800 **/
3801 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3802 {
3803 int rc;
3804
3805 spin_lock_irq(&phba->hbalock);
3806 /* Clear FCF rediscovery timeout event */
3807 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3808 /* Clear driver fast failover FCF record flag */
3809 phba->fcf.failover_rec.flag = 0;
3810 /* Set state for FCF fast failover */
3811 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3812 spin_unlock_irq(&phba->hbalock);
3813
3814 /* Scan FCF table from the first entry to re-discover SAN */
3815 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3816 "2777 Start post-quiescent FCF table scan\n");
3817 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3818 if (rc)
3819 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3820 "2747 Issue FCF scan read FCF mailbox "
3821 "command failed 0x%x\n", rc);
3822 }
3823
3824 /**
3825 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3826 * @phba: pointer to lpfc hba data structure.
3827 * @dev_grp: The HBA PCI-Device group number.
3828 *
3829 * This routine is invoked to set up the per HBA PCI-Device group function
3830 * API jump table entries.
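 *
 * For example (sketch, not a verbatim call site from this file): an SLI4
 * function would be bound to PCI device group LPFC_PCI_DEV_OC as:
 *
 *	rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	if (rc)
 *		return -ENODEV;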
3831 *
3832 * Return: 0 if success, otherwise -ENODEV
3833 **/
3834 int
3835 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3836 {
3837 int rc;
3838
3839 /* Set up lpfc PCI-device group */
3840 phba->pci_dev_grp = dev_grp;
3841
3842 /* The LPFC_PCI_DEV_OC uses SLI4 */
3843 if (dev_grp == LPFC_PCI_DEV_OC)
3844 phba->sli_rev = LPFC_SLI_REV4;
3845
3846 /* Set up device INIT API function jump table */
3847 rc = lpfc_init_api_table_setup(phba, dev_grp);
3848 if (rc)
3849 return -ENODEV;
3850 /* Set up SCSI API function jump table */
3851 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3852 if (rc)
3853 return -ENODEV;
3854 /* Set up SLI API function jump table */
3855 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3856 if (rc)
3857 return -ENODEV;
3858 /* Set up MBOX API function jump table */
3859 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3860 if (rc)
3861 return -ENODEV;
3862
3863 return 0;
3864 }
3865
3866 /**
3867 * lpfc_log_intr_mode - Log the active interrupt mode
3868 * @phba: pointer to lpfc hba data structure.
3869 * @intr_mode: active interrupt mode adopted.
3870 *
3871 * This routine is invoked to log the currently used active interrupt mode
3872 * to the device.
3873 **/
3874 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3875 {
3876 switch (intr_mode) {
3877 case 0:
3878 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3879 "0470 Enable INTx interrupt mode.\n");
3880 break;
3881 case 1:
3882 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3883 "0481 Enabled MSI interrupt mode.\n");
3884 break;
3885 case 2:
3886 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3887 "0480 Enabled MSI-X interrupt mode.\n");
3888 break;
3889 default:
3890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3891 "0482 Illegal interrupt mode.\n");
3892 break;
3893 }
3894 return;
3895 }
3896
3897 /**
3898 * lpfc_enable_pci_dev - Enable a generic PCI device.
3899 * @phba: pointer to lpfc hba data structure.
3900 *
3901 * This routine is invoked to enable the PCI device that is common to all
3902 * PCI devices.
3903 *
3904 * Return codes
3905 * 0 - successful
3906 * other values - error
3907 **/
3908 static int
3909 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3910 {
3911 struct pci_dev *pdev;
3912 int bars;
3913
3914 /* Obtain PCI device reference */
3915 if (!phba->pcidev)
3916 goto out_error;
3917 else
3918 pdev = phba->pcidev;
3919 /* Select PCI BARs */
3920 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3921 /* Enable PCI device */
3922 if (pci_enable_device_mem(pdev))
3923 goto out_error;
3924 /* Request PCI resource for the device */
3925 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3926 goto out_disable_device;
3927 /* Set up device as PCI master and save state for EEH */
3928 pci_set_master(pdev);
3929 pci_try_set_mwi(pdev);
3930 pci_save_state(pdev);
3931
3932 return 0;
3933
3934 out_disable_device:
3935 pci_disable_device(pdev);
3936 out_error:
3937 return -ENODEV;
3938 }
3939
3940 /**
3941 * lpfc_disable_pci_dev - Disable a generic PCI device.
3942 * @phba: pointer to lpfc hba data structure.
3943 *
3944 * This routine is invoked to disable the PCI device that is common to all
3945 * PCI devices.
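 *
 * Illustrative pairing (sketch, error-path shape assumed): this routine
 * undoes lpfc_enable_pci_dev() on teardown or on a failed attach:
 *
 *	if (lpfc_enable_pci_dev(phba))
 *		return -ENODEV;
 *	...
 *	lpfc_disable_pci_dev(phba);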
3946 **/
3947 static void
3948 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3949 {
3950 struct pci_dev *pdev;
3951 int bars;
3952
3953 /* Obtain PCI device reference */
3954 if (!phba->pcidev)
3955 return;
3956 else
3957 pdev = phba->pcidev;
3958 /* Select PCI BARs */
3959 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3960 /* Release PCI resource and disable PCI device */
3961 pci_release_selected_regions(pdev, bars);
3962 pci_disable_device(pdev);
3963 /* Null out PCI private reference to driver */
3964 pci_set_drvdata(pdev, NULL);
3965
3966 return;
3967 }
3968
3969 /**
3970 * lpfc_reset_hba - Reset a hba
3971 * @phba: pointer to lpfc hba data structure.
3972 *
3973 * This routine is invoked to reset a hba device. It brings the HBA
3974 * offline, performs a board restart, and then brings the board back
3975 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3976 * outstanding mailbox commands.
3977 **/
3978 void
3979 lpfc_reset_hba(struct lpfc_hba *phba)
3980 {
3981 /* If resets are disabled then set error state and return. */
3982 if (!phba->cfg_enable_hba_reset) {
3983 phba->link_state = LPFC_HBA_ERROR;
3984 return;
3985 }
3986 lpfc_offline_prep(phba);
3987 lpfc_offline(phba);
3988 lpfc_sli_brdrestart(phba);
3989 lpfc_online(phba);
3990 lpfc_unblock_mgmt_io(phba);
3991 }
3992
3993 /**
3994 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3995 * @phba: pointer to lpfc hba data structure.
3996 *
3997 * This routine is invoked to set up the driver internal resources specific to
3998 * support the SLI-3 HBA device it is attached to.
3999 *
4000 * Return codes
4001 * 0 - successful
4002 * other values - error
4003 **/
4004 static int
4005 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4006 {
4007 struct lpfc_sli *psli;
4008
4009 /*
4010 * Initialize timers used by driver
4011 */
4012
4013 /* Heartbeat timer */
4014 init_timer(&phba->hb_tmofunc);
4015 phba->hb_tmofunc.function = lpfc_hb_timeout;
4016 phba->hb_tmofunc.data = (unsigned long)phba;
4017
4018 psli = &phba->sli;
4019 /* MBOX heartbeat timer */
4020 init_timer(&psli->mbox_tmo);
4021 psli->mbox_tmo.function = lpfc_mbox_timeout;
4022 psli->mbox_tmo.data = (unsigned long) phba;
4023 /* FCP polling mode timer */
4024 init_timer(&phba->fcp_poll_timer);
4025 phba->fcp_poll_timer.function = lpfc_poll_timeout;
4026 phba->fcp_poll_timer.data = (unsigned long) phba;
4027 /* Fabric block timer */
4028 init_timer(&phba->fabric_block_timer);
4029 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4030 phba->fabric_block_timer.data = (unsigned long) phba;
4031 /* EA polling mode timer */
4032 init_timer(&phba->eratt_poll);
4033 phba->eratt_poll.function = lpfc_poll_eratt;
4034 phba->eratt_poll.data = (unsigned long) phba;
4035
4036 /* Host attention work mask setup */
4037 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4038 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4039
4040 /* Get all the module params for configuring this host */
4041 lpfc_get_cfgparam(phba);
4042 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4043 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4044 /* check for menlo minimum sg count */
4045 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4046 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4047 }
4048
4049 /*
4050 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4051 * used to create the sg_dma_buf_pool must be dynamically calculated.
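	 * (Worked sketch, segment count assumed for illustration: with
	 * cfg_sg_seg_cnt = 64 the buffer below is sizeof(struct fcp_cmnd) +
	 * sizeof(struct fcp_rsp) + (64 + 2) * sizeof(struct ulp_bde64).)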
4052 * 2 segments are added since the IOCB needs a command and response bde.
4053 */
4054 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4055 sizeof(struct fcp_rsp) +
4056 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4057
4058 if (phba->cfg_enable_bg) {
4059 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4060 phba->cfg_sg_dma_buf_size +=
4061 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4062 }
4063
4064 /* Also reinitialize the host templates with new values. */
4065 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4066 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4067
4068 phba->max_vpi = LPFC_MAX_VPI;
4069 /* This will be set to correct value after config_port mbox */
4070 phba->max_vports = 0;
4071
4072 /*
4073 * Initialize the SLI Layer to run with lpfc HBAs.
4074 */
4075 lpfc_sli_setup(phba);
4076 lpfc_sli_queue_setup(phba);
4077
4078 /* Allocate device driver memory */
4079 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4080 return -ENOMEM;
4081
4082 return 0;
4083 }
4084
4085 /**
4086 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4087 * @phba: pointer to lpfc hba data structure.
4088 *
4089 * This routine is invoked to unset the driver internal resources set up
4090 * specifically for supporting the SLI-3 HBA device it is attached to.
4091 **/
4092 static void
4093 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4094 {
4095 /* Free device driver memory allocated */
4096 lpfc_mem_free_all(phba);
4097
4098 return;
4099 }
4100
4101 /**
4102 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4103 * @phba: pointer to lpfc hba data structure.
4104 *
4105 * This routine is invoked to set up the driver internal resources specific to
4106 * support the SLI-4 HBA device it is attached to.
4107 *
4108 * Return codes
4109 * 0 - successful
4110 * other values - error
4111 **/
4112 static int
4113 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4114 {
4115 struct lpfc_sli *psli;
4116 LPFC_MBOXQ_t *mboxq;
4117 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4118 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4119 struct lpfc_mqe *mqe;
4120 int longs, sli_family;
4121
4122 /* Before proceeding, wait for POST done and device ready */
4123 rc = lpfc_sli4_post_status_check(phba);
4124 if (rc)
4125 return -ENODEV;
4126
4127 /*
4128 * Initialize timers used by driver
4129 */
4130
4131 /* Heartbeat timer */
4132 init_timer(&phba->hb_tmofunc);
4133 phba->hb_tmofunc.function = lpfc_hb_timeout;
4134 phba->hb_tmofunc.data = (unsigned long)phba;
4135 init_timer(&phba->rrq_tmr);
4136 phba->rrq_tmr.function = lpfc_rrq_timeout;
4137 phba->rrq_tmr.data = (unsigned long)phba;
4138
4139 psli = &phba->sli;
4140 /* MBOX heartbeat timer */
4141 init_timer(&psli->mbox_tmo);
4142 psli->mbox_tmo.function = lpfc_mbox_timeout;
4143 psli->mbox_tmo.data = (unsigned long) phba;
4144 /* Fabric block timer */
4145 init_timer(&phba->fabric_block_timer);
4146 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4147 phba->fabric_block_timer.data = (unsigned long) phba;
4148 /* EA polling mode timer */
4149 init_timer(&phba->eratt_poll);
4150 phba->eratt_poll.function = lpfc_poll_eratt;
4151 phba->eratt_poll.data = (unsigned long) phba;
4152 /* FCF rediscover timer */
4153 init_timer(&phba->fcf.redisc_wait);
4154 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4155 phba->fcf.redisc_wait.data = (unsigned long)phba;
4156
4157 /*
4158 * We need to do a READ_CONFIG mailbox command here before
4159 * calling lpfc_get_cfgparam. For VFs this will report the
4160 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4161 * All of the resources allocated
4162 * for this Port are tied to these values.
4163 */
4164 /* Get all the module params for configuring this host */
4165 lpfc_get_cfgparam(phba);
4166 phba->max_vpi = LPFC_MAX_VPI;
4167 /* This will be set to correct value after the read_config mbox */
4168 phba->max_vports = 0;
4169
4170 /* Program the default value of vlan_id and fc_map */
4171 phba->valid_vlan = 0;
4172 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4173 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4174 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4175
4176 /*
4177 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4178 * used to create the sg_dma_buf_pool must be dynamically calculated.
4179 * 2 segments are added since the IOCB needs a command and response bde.
4180 * To ensure that the scsi sgl does not cross a 4k page boundary, the
4181 * sgl size must be a power of 2.
4182 */
4183 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4184 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4185
4186 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4187 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4188 switch (sli_family) {
4189 case LPFC_SLI_INTF_FAMILY_BE2:
4190 case LPFC_SLI_INTF_FAMILY_BE3:
4191 /* There is a single hint for BE - 2 pages per BPL.
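	 * (Worked sizing sketch, numbers assumed: if buf_size comes to
	 * roughly 3 KiB, the doubling loop below walks dma_buf_size up from
	 * LPFC_SLI4_MIN_BUF_SIZE to the first power of two that covers it,
	 * keeping each sgl inside a single page.)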
*/
4192 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4193 LPFC_SLI_INTF_SLI_HINT1_1)
4194 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4195 break;
4196 case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4197 case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4198 default:
4199 break;
4200 }
4201 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4202 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4203 dma_buf_size = dma_buf_size << 1)
4204 ;
4205 if (dma_buf_size == max_buf_size)
4206 phba->cfg_sg_seg_cnt = (dma_buf_size -
4207 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4208 (2 * sizeof(struct sli4_sge))) /
4209 sizeof(struct sli4_sge);
4210 phba->cfg_sg_dma_buf_size = dma_buf_size;
4211
4212 /* Initialize buffer queue management fields */
4213 hbq_count = lpfc_sli_hbq_count();
4214 for (i = 0; i < hbq_count; ++i)
4215 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4216 INIT_LIST_HEAD(&phba->rb_pend_list);
4217 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4218 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4219
4220 /*
4221 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4222 */
4223 /* Initialize the Abort scsi buffer list used by driver */
4224 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4225 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4226 /* This abort list is used by the worker thread */
4227 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4228
4229 /*
4230 * Initialize driver internal slow-path work queues
4231 */
4232
4233 /* Driver internal slow-path CQ Event pool */
4234 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4235 /* Response IOCB work queue list */
4236 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4237 /* Asynchronous event CQ Event work queue list */
4238 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4239 /* Fast-path XRI aborted CQ Event work queue list */
4240 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4241 /* Slow-path XRI aborted CQ Event work queue list */
4242 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4243 /* Receive queue CQ Event work queue list */
4244 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4245
4246 /* Initialize the driver internal SLI layer lists. */
4247 lpfc_sli_setup(phba);
4248 lpfc_sli_queue_setup(phba);
4249
4250 /* Allocate device driver memory */
4251 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4252 if (rc)
4253 return -ENOMEM;
4254
4255 /* IF Type 2 ports get initialized now. */
4256 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4257 LPFC_SLI_INTF_IF_TYPE_2) {
4258 rc = lpfc_pci_function_reset(phba);
4259 if (unlikely(rc))
4260 return -ENODEV;
4261 }
4262
4263 /* Create the bootstrap mailbox command */
4264 rc = lpfc_create_bootstrap_mbox(phba);
4265 if (unlikely(rc))
4266 goto out_free_mem;
4267
4268 /* Set up the host's endian order with the device. */
4269 rc = lpfc_setup_endian_order(phba);
4270 if (unlikely(rc))
4271 goto out_free_bsmbx;
4272
4273 /* Set up the hba's configuration parameters. */
4274 rc = lpfc_sli4_read_config(phba);
4275 if (unlikely(rc))
4276 goto out_free_bsmbx;
4277
4278 /* IF Type 0 ports get initialized now.
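	 * (Ordering note: IF type 2 functions were reset earlier, before the
	 * bootstrap mailbox existed; type 0 functions are reset only after
	 * the bootstrap mailbox and READ_CONFIG are in place.)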
*/ 4279 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 4280 LPFC_SLI_INTF_IF_TYPE_0) { 4281 rc = lpfc_pci_function_reset(phba); 4282 if (unlikely(rc)) 4283 goto out_free_bsmbx; 4284 } 4285 4286 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4287 GFP_KERNEL); 4288 if (!mboxq) { 4289 rc = -ENOMEM; 4290 goto out_free_bsmbx; 4291 } 4292 4293 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 4294 lpfc_supported_pages(mboxq); 4295 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4296 if (!rc) { 4297 mqe = &mboxq->u.mqe; 4298 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 4299 LPFC_MAX_SUPPORTED_PAGES); 4300 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 4301 switch (pn_page[i]) { 4302 case LPFC_SLI4_PARAMETERS: 4303 phba->sli4_hba.pc_sli4_params.supported = 1; 4304 break; 4305 default: 4306 break; 4307 } 4308 } 4309 /* Read the port's SLI4 Parameters capabilities if supported. */ 4310 if (phba->sli4_hba.pc_sli4_params.supported) 4311 rc = lpfc_pc_sli4_params_get(phba, mboxq); 4312 if (rc) { 4313 mempool_free(mboxq, phba->mbox_mem_pool); 4314 rc = -EIO; 4315 goto out_free_bsmbx; 4316 } 4317 } 4318 /* 4319 * Get sli4 parameters that override parameters from Port capabilities. 4320 * If this call fails it is not a critical error so continue loading. 4321 */ 4322 lpfc_get_sli4_parameters(phba, mboxq); 4323 mempool_free(mboxq, phba->mbox_mem_pool); 4324 /* Create all the SLI4 queues */ 4325 rc = lpfc_sli4_queue_create(phba); 4326 if (rc) 4327 goto out_free_bsmbx; 4328 4329 /* Create driver internal CQE event pool */ 4330 rc = lpfc_sli4_cq_event_pool_create(phba); 4331 if (rc) 4332 goto out_destroy_queue; 4333 4334 /* Initialize and populate the iocb list per host */ 4335 rc = lpfc_init_sgl_list(phba); 4336 if (rc) { 4337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4338 "1400 Failed to initialize sgl list.\n"); 4339 goto out_destroy_cq_event_pool; 4340 } 4341 rc = lpfc_init_active_sgl_array(phba); 4342 if (rc) { 4343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4344 "1430 Failed to initialize sgl list.\n"); 4345 goto out_free_sgl_list; 4346 } 4347 4348 rc = lpfc_sli4_init_rpi_hdrs(phba); 4349 if (rc) { 4350 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4351 "1432 Failed to initialize rpi headers.\n"); 4352 goto out_free_active_sgl; 4353 } 4354 4355 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 4356 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4357 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4358 GFP_KERNEL); 4359 if (!phba->fcf.fcf_rr_bmask) { 4360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4361 "2759 Failed allocate memory for FCF round " 4362 "robin failover bmask\n"); 4363 goto out_remove_rpi_hdrs; 4364 } 4365 4366 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4367 phba->cfg_fcp_eq_count), GFP_KERNEL); 4368 if (!phba->sli4_hba.fcp_eq_hdl) { 4369 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4370 "2572 Failed allocate memory for fast-path " 4371 "per-EQ handle array\n"); 4372 goto out_free_fcf_rr_bmask; 4373 } 4374 4375 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4376 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4377 if (!phba->sli4_hba.msix_entries) { 4378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4379 "2573 Failed allocate memory for msi-x " 4380 "interrupt vector entries\n"); 4381 goto out_free_fcp_eq_hdl; 4382 } 4383 4384 return rc; 4385 4386 out_free_fcp_eq_hdl: 4387 kfree(phba->sli4_hba.fcp_eq_hdl); 4388 out_free_fcf_rr_bmask: 4389 
kfree(phba->fcf.fcf_rr_bmask);
4390 out_remove_rpi_hdrs:
4391 lpfc_sli4_remove_rpi_hdrs(phba);
4392 out_free_active_sgl:
4393 lpfc_free_active_sgl(phba);
4394 out_free_sgl_list:
4395 lpfc_free_sgl_list(phba);
4396 out_destroy_cq_event_pool:
4397 lpfc_sli4_cq_event_pool_destroy(phba);
4398 out_destroy_queue:
4399 lpfc_sli4_queue_destroy(phba);
4400 out_free_bsmbx:
4401 lpfc_destroy_bootstrap_mbox(phba);
4402 out_free_mem:
4403 lpfc_mem_free(phba);
4404 return rc;
4405 }
4406
4407 /**
4408 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4409 * @phba: pointer to lpfc hba data structure.
4410 *
4411 * This routine is invoked to unset the driver internal resources set up
4412 * specifically for supporting the SLI-4 HBA device it is attached to.
4413 **/
4414 static void
4415 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4416 {
4417 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4418
4419 /* Free memory allocated for msi-x interrupt vector entries */
4420 kfree(phba->sli4_hba.msix_entries);
4421
4422 /* Free memory allocated for fast-path work queue handles */
4423 kfree(phba->sli4_hba.fcp_eq_hdl);
4424
4425 /* Free the allocated rpi headers. */
4426 lpfc_sli4_remove_rpi_hdrs(phba);
4427 lpfc_sli4_remove_rpis(phba);
4428
4429 /* Free eligible FCF index bmask */
4430 kfree(phba->fcf.fcf_rr_bmask);
4431
4432 /* Free the ELS sgl list */
4433 lpfc_free_active_sgl(phba);
4434 lpfc_free_sgl_list(phba);
4435
4436 /* Free the SCSI sgl management array */
4437 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4438
4439 /* Free the SLI4 queues */
4440 lpfc_sli4_queue_destroy(phba);
4441
4442 /* Free the completion queue EQ event pool */
4443 lpfc_sli4_cq_event_release_all(phba);
4444 lpfc_sli4_cq_event_pool_destroy(phba);
4445
4446 /* Free the bsmbx region. */
4447 lpfc_destroy_bootstrap_mbox(phba);
4448
4449 /* Free the SLI Layer memory with SLI4 HBAs */
4450 lpfc_mem_free_all(phba);
4451
4452 /* Free the current connect table */
4453 list_for_each_entry_safe(conn_entry, next_conn_entry,
4454 &phba->fcf_conn_rec_list, list) {
4455 list_del_init(&conn_entry->list);
4456 kfree(conn_entry);
4457 }
4458
4459 return;
4460 }
4461
4462 /**
4463 * lpfc_init_api_table_setup - Set up init api function jump table
4464 * @phba: The hba struct for which this call is being executed.
4465 * @dev_grp: The HBA PCI-Device group number.
4466 *
4467 * This routine sets up the device INIT interface API function jump table
4468 * in @phba struct.
4469 *
4470 * Returns: 0 - success, -ENODEV - failure.
4471 **/
4472 int
4473 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4474 {
4475 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4476 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4477 switch (dev_grp) {
4478 case LPFC_PCI_DEV_LP:
4479 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4480 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4481 phba->lpfc_stop_port = lpfc_stop_port_s3;
4482 break;
4483 case LPFC_PCI_DEV_OC:
4484 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4485 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4486 phba->lpfc_stop_port = lpfc_stop_port_s4;
4487 break;
4488 default:
4489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4490 "1431 Invalid HBA PCI-device group: 0x%x\n",
4491 dev_grp);
4492 return -ENODEV;
4493 break;
4494 }
4495 return 0;
4496 }
4497
4498 /**
4499 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4500 * @phba: pointer to lpfc hba data structure.
4501 *
4502 * This routine is invoked to set up the driver internal resources before the
4503 * device specific resource setup to support the HBA device it is attached to.
4504 *
4505 * Return codes
4506 * 0 - successful
4507 * other values - error
4508 **/
4509 static int
4510 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4511 {
4512 /*
4513 * Driver resources common to all SLI revisions
4514 */
4515 atomic_set(&phba->fast_event_count, 0);
4516 spin_lock_init(&phba->hbalock);
4517
4518 /* Initialize ndlp management spinlock */
4519 spin_lock_init(&phba->ndlp_lock);
4520
4521 INIT_LIST_HEAD(&phba->port_list);
4522 INIT_LIST_HEAD(&phba->work_list);
4523 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4524
4525 /* Initialize the wait queue head for the kernel thread */
4526 init_waitqueue_head(&phba->work_waitq);
4527
4528 /* Initialize the scsi buffer list used by driver for scsi IO */
4529 spin_lock_init(&phba->scsi_buf_list_lock);
4530 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4531
4532 /* Initialize the fabric iocb list */
4533 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4534
4535 /* Initialize list to save ELS buffers */
4536 INIT_LIST_HEAD(&phba->elsbuf);
4537
4538 /* Initialize FCF connection rec list */
4539 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4540
4541 return 0;
4542 }
4543
4544 /**
4545 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4546 * @phba: pointer to lpfc hba data structure.
4547 *
4548 * This routine is invoked to set up the driver internal resources after the
4549 * device specific resource setup to support the HBA device it is attached to.
4550 *
4551 * Return codes
4552 * 0 - successful
4553 * other values - error
4554 **/
4555 static int
4556 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4557 {
4558 int error;
4559
4560 /* Startup the kernel thread for this host adapter. */
4561 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4562 "lpfc_worker_%d", phba->brd_no);
4563 if (IS_ERR(phba->worker_thread)) {
4564 error = PTR_ERR(phba->worker_thread);
4565 return error;
4566 }
4567
4568 return 0;
4569 }
4570
4571 /**
4572 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4573 * @phba: pointer to lpfc hba data structure.
4574 *
4575 * This routine is invoked to unset the driver internal resources set up after
4576 * the device specific resource setup for supporting the HBA device it is
4577 * attached to.
4578 **/
4579 static void
4580 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4581 {
4582 /* Stop kernel worker thread */
4583 kthread_stop(phba->worker_thread);
4584 }
4585
4586 /**
4587 * lpfc_free_iocb_list - Free iocb list.
4588 * @phba: pointer to lpfc hba data structure.
4589 *
4590 * This routine is invoked to free the driver's IOCB list and memory.
4591 **/
4592 static void
4593 lpfc_free_iocb_list(struct lpfc_hba *phba)
4594 {
4595 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4596
4597 spin_lock_irq(&phba->hbalock);
4598 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4599 &phba->lpfc_iocb_list, list) {
4600 list_del(&iocbq_entry->list);
4601 kfree(iocbq_entry);
4602 phba->total_iocbq_bufs--;
4603 }
4604 spin_unlock_irq(&phba->hbalock);
4605
4606 return;
4607 }
4608
4609 /**
4610 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4611 * @phba: pointer to lpfc hba data structure.
4612 *
4613 * This routine is invoked to allocate and initialize the driver's IOCB
4614 * list and set up the IOCB tag array accordingly.
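 *
 * Illustrative call (sketch; the count is an assumption for this example):
 * SLI-3 setup would seed the pool with the default LPFC_IOCB_LIST_CNT:
 *
 *	rc = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
 *	if (rc)
 *		return rc;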
4615 *
4616 * Return codes
4617 * 0 - successful
4618 * other values - error
4619 **/
4620 static int
4621 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4622 {
4623 struct lpfc_iocbq *iocbq_entry = NULL;
4624 uint16_t iotag;
4625 int i;
4626
4627 /* Initialize and populate the iocb list per host. */
4628 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4629 for (i = 0; i < iocb_count; i++) {
4630 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4631 if (iocbq_entry == NULL) {
4632 printk(KERN_ERR "%s: only allocated %d iocbs of "
4633 "expected %d count. Unloading driver.\n",
4634 __func__, i, LPFC_IOCB_LIST_CNT);
4635 goto out_free_iocbq;
4636 }
4637
4638 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4639 if (iotag == 0) {
4640 kfree(iocbq_entry);
4641 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4642 "Unloading driver.\n", __func__);
4643 goto out_free_iocbq;
4644 }
4645 iocbq_entry->sli4_xritag = NO_XRI;
4646
4647 spin_lock_irq(&phba->hbalock);
4648 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4649 phba->total_iocbq_bufs++;
4650 spin_unlock_irq(&phba->hbalock);
4651 }
4652
4653 return 0;
4654
4655 out_free_iocbq:
4656 lpfc_free_iocb_list(phba);
4657
4658 return -ENOMEM;
4659 }
4660
4661 /**
4662 * lpfc_free_sgl_list - Free sgl list.
4663 * @phba: pointer to lpfc hba data structure.
4664 *
4665 * This routine is invoked to free the driver's sgl list and memory.
4666 **/
4667 static void
4668 lpfc_free_sgl_list(struct lpfc_hba *phba)
4669 {
4670 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4671 LIST_HEAD(sglq_list);
4672
4673 spin_lock_irq(&phba->hbalock);
4674 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4675 spin_unlock_irq(&phba->hbalock);
4676
4677 list_for_each_entry_safe(sglq_entry, sglq_next,
4678 &sglq_list, list) {
4679 list_del(&sglq_entry->list);
4680 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4681 kfree(sglq_entry);
4682 phba->sli4_hba.total_sglq_bufs--;
4683 }
4684 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4685 }
4686
4687 /**
4688 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4689 * @phba: pointer to lpfc hba data structure.
4690 *
4691 * This routine is invoked to allocate the driver's active sgl memory.
4692 * This array will hold the sglq_entry's for active IOs.
4693 **/
4694 static int
4695 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4696 {
4697 int size;
4698 size = sizeof(struct lpfc_sglq *);
4699 size *= phba->sli4_hba.max_cfg_param.max_xri;
4700
4701 phba->sli4_hba.lpfc_sglq_active_list =
4702 kzalloc(size, GFP_KERNEL);
4703 if (!phba->sli4_hba.lpfc_sglq_active_list)
4704 return -ENOMEM;
4705 return 0;
4706 }
4707
4708 /**
4709 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4710 * @phba: pointer to lpfc hba data structure.
4711 *
4712 * This routine is invoked to walk through the array of active sglq entries
4713 * and free all of the resources.
4714 * This is just a placeholder for now.
4715 **/
4716 static void
4717 lpfc_free_active_sgl(struct lpfc_hba *phba)
4718 {
4719 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4720 }
4721
4722 /**
4723 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4724 * @phba: pointer to lpfc hba data structure.
4725 *
4726 * This routine is invoked to allocate and initialize the driver's sgl
4727 * list and set up the sgl xritag tag array accordingly.
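 *
 * (XRI budget sketch, inferred from the code below: the first els_xri_cnt
 * XRIs back the ELS sgl list, and the remaining max_xri - els_xri_cnt
 * XRIs are reserved for SCSI buffers as scsi_xri_max.)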
4728 *
4729 * Return codes
4730 * 0 - successful
4731 * other values - error
4732 **/
4733 static int
4734 lpfc_init_sgl_list(struct lpfc_hba *phba)
4735 {
4736 struct lpfc_sglq *sglq_entry = NULL;
4737 int i;
4738 int els_xri_cnt;
4739
4740 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4741 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4742 "2400 lpfc_init_sgl_list els %d.\n",
4743 els_xri_cnt);
4744 /* Initialize and populate the sglq list per host/VF. */
4745 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4746 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4747
4748 /* Sanity check on XRI management */
4749 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "2562 No room left for SCSI XRI allocation: "
4752 "max_xri=%d, els_xri=%d\n",
4753 phba->sli4_hba.max_cfg_param.max_xri,
4754 els_xri_cnt);
4755 return -ENOMEM;
4756 }
4757
4758 /* Allocate memory for the ELS XRI management array */
4759 phba->sli4_hba.lpfc_els_sgl_array =
4760 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4761 GFP_KERNEL);
4762
4763 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4764 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4765 "2401 Failed to allocate memory for ELS "
4766 "XRI management array of size %d.\n",
4767 els_xri_cnt);
4768 return -ENOMEM;
4769 }
4770
4771 /* Keep the SCSI XRI in the XRI management array */
4772 phba->sli4_hba.scsi_xri_max =
4773 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4774 phba->sli4_hba.scsi_xri_cnt = 0;
4775
4776 phba->sli4_hba.lpfc_scsi_psb_array =
4777 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4778 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4779
4780 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4781 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4782 "2563 Failed to allocate memory for SCSI "
4783 "XRI management array of size %d.\n",
4784 phba->sli4_hba.scsi_xri_max);
4785 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4786 return -ENOMEM;
4787 }
4788
4789 for (i = 0; i < els_xri_cnt; i++) {
4790 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4791 if (sglq_entry == NULL) {
4792 printk(KERN_ERR "%s: only allocated %d sgls of "
4793 "expected %d count. Unloading driver.\n",
4794 __func__, i, els_xri_cnt);
4795 goto out_free_mem;
4796 }
4797
4798 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4799 if (sglq_entry->sli4_xritag == NO_XRI) {
4800 kfree(sglq_entry);
4801 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4802 "Unloading driver.\n", __func__);
4803 goto out_free_mem;
4804 }
4805 sglq_entry->buff_type = GEN_BUFF_TYPE;
4806 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4807 if (sglq_entry->virt == NULL) {
4808 kfree(sglq_entry);
4809 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4810 "Unloading driver.\n", __func__);
4811 goto out_free_mem;
4812 }
4813 sglq_entry->sgl = sglq_entry->virt;
4814 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4815
4816 /* The list order is used by later block SGL registration */
4817 spin_lock_irq(&phba->hbalock);
4818 sglq_entry->state = SGL_FREED;
4819 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4820 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4821 phba->sli4_hba.total_sglq_bufs++;
4822 spin_unlock_irq(&phba->hbalock);
4823 }
4824 return 0;
4825
4826 out_free_mem:
4827 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4828 lpfc_free_sgl_list(phba);
4829 return -ENOMEM;
4830 }
4831
4832 /**
4833 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4834 * @phba: pointer to lpfc hba data structure.
4835 *
4836 * This routine is invoked to post rpi header templates to the
4837 * HBA consistent with the SLI-4 interface spec. This routine
4838 * posts a PAGE_SIZE memory region to the port to hold up to
4839 * PAGE_SIZE / 64 rpi context headers.
4840 * No locks are held here because this is an initialization routine
4841 * called only from probe or lpfc_online when interrupts are not
4842 * enabled and the driver is reinitializing the device.
4843 *
4844 * Return codes
4845 * 0 - successful
4846 * -ENOMEM - No available memory
4847 * -EIO - The mailbox failed to complete successfully.
4848 **/
4849 int
4850 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4851 {
4852 int rc = 0;
4853 int longs;
4854 uint16_t rpi_count;
4855 struct lpfc_rpi_hdr *rpi_hdr;
4856
4857 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4858
4859 /*
4860 * Provision an rpi bitmask range for discovery. The total count
4861 * is the difference between max and base + 1.
4862 */
4863 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4864 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4865
4866 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4867 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4868 GFP_KERNEL);
4869 if (!phba->sli4_hba.rpi_bmask)
4870 return -ENOMEM;
4871
4872 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4873 if (!rpi_hdr) {
4874 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4875 "0391 Error during rpi post operation\n");
4876 lpfc_sli4_remove_rpis(phba);
4877 rc = -ENODEV;
4878 }
4879
4880 return rc;
4881 }
4882
4883 /**
4884 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4885 * @phba: pointer to lpfc hba data structure.
4886 *
4887 * This routine is invoked to allocate a single 4KB memory region to
4888 * support rpis and stores them in the phba. This single region
4889 * provides support for up to 64 rpis. The region is used globally
4890 * by the device.
4891 *
4892 * Returns:
4893 * A valid rpi hdr on success.
4894 * A NULL pointer on any failure.
4895 **/
4896 struct lpfc_rpi_hdr *
4897 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4898 {
4899 uint16_t rpi_limit, curr_rpi_range;
4900 struct lpfc_dmabuf *dmabuf;
4901 struct lpfc_rpi_hdr *rpi_hdr;
4902
4903 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4904 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4905
4906 spin_lock_irq(&phba->hbalock);
4907 curr_rpi_range = phba->sli4_hba.next_rpi;
4908 spin_unlock_irq(&phba->hbalock);
4909
4910 /*
4911 * The port has a limited number of rpis. The increment here
4912 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4913 * and to allow the full max_rpi range per port.
4914 */
4915 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4916 return NULL;
4917
4918 /*
4919 * First allocate the protocol header region for the port. The
4920 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4921 */
4922 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4923 if (!dmabuf)
4924 return NULL;
4925
4926 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4927 LPFC_HDR_TEMPLATE_SIZE,
4928 &dmabuf->phys,
4929 GFP_KERNEL);
4930 if (!dmabuf->virt) {
4931 rpi_hdr = NULL;
4932 goto err_free_dmabuf;
4933 }
4934
4935 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4936 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4937 rpi_hdr = NULL;
4938 goto err_free_coherent;
4939 }
4940
4941 /* Save the rpi header data for cleanup later.
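	 * (The header is linked on lpfc_rpi_hdr_list so that
	 * lpfc_sli4_remove_rpi_hdrs() can later free both the tracking
	 * structure and its 4KB DMA region.)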
*/
4942 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4943 if (!rpi_hdr)
4944 goto err_free_coherent;
4945
4946 rpi_hdr->dmabuf = dmabuf;
4947 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4948 rpi_hdr->page_count = 1;
4949 spin_lock_irq(&phba->hbalock);
4950 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4951 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4952
4953 /*
4954 * The next_rpi stores the next modulo-64 rpi value to post
4955 * in any subsequent rpi memory region postings.
4956 */
4957 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4958 spin_unlock_irq(&phba->hbalock);
4959 return rpi_hdr;
4960
4961 err_free_coherent:
4962 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4963 dmabuf->virt, dmabuf->phys);
4964 err_free_dmabuf:
4965 kfree(dmabuf);
4966 return NULL;
4967 }
4968
4969 /**
4970 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4971 * @phba: pointer to lpfc hba data structure.
4972 *
4973 * This routine is invoked to remove all memory resources allocated
4974 * to support rpis. This routine presumes the caller has released all
4975 * rpis consumed by fabric or port logins and is prepared to have
4976 * the header pages removed.
4977 **/
4978 void
4979 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4980 {
4981 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4982
4983 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4984 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4985 list_del(&rpi_hdr->list);
4986 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4987 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4988 kfree(rpi_hdr->dmabuf);
4989 kfree(rpi_hdr);
4990 }
4991
4992 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4993 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4994 }
4995
4996 /**
4997 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4998 * @pdev: pointer to pci device data structure.
4999 *
5000 * This routine is invoked to allocate the driver hba data structure for an
5001 * HBA device. If the allocation is successful, the phba reference to the
5002 * PCI device data structure is set.
5003 *
5004 * Return codes
5005 * pointer to @phba - successful
5006 * NULL - error
5007 **/
5008 static struct lpfc_hba *
5009 lpfc_hba_alloc(struct pci_dev *pdev)
5010 {
5011 struct lpfc_hba *phba;
5012
5013 /* Allocate memory for HBA structure */
5014 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5015 if (!phba) {
5016 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5017 return NULL;
5018 }
5019
5020 /* Set reference to PCI device in HBA structure */
5021 phba->pcidev = pdev;
5022
5023 /* Assign an unused board number */
5024 phba->brd_no = lpfc_get_instance();
5025 if (phba->brd_no < 0) {
5026 kfree(phba);
5027 return NULL;
5028 }
5029
5030 spin_lock_init(&phba->ct_ev_lock);
5031 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5032
5033 return phba;
5034 }
5035
5036 /**
5037 * lpfc_hba_free - Free driver hba data structure with a device.
5038 * @phba: pointer to lpfc hba data structure.
5039 *
5040 * This routine is invoked to free the driver hba data structure with an
5041 * HBA device.
5042 **/
5043 static void
5044 lpfc_hba_free(struct lpfc_hba *phba)
5045 {
5046 /* Release the driver assigned board number */
5047 idr_remove(&lpfc_hba_index, phba->brd_no);
5048
5049 kfree(phba);
5050 return;
5051 }
5052
5053 /**
5054 * lpfc_create_shost - Create hba physical port with associated scsi host.
5055 * @phba: pointer to lpfc hba data structure.
5056 * 5057 * This routine is invoked to create HBA physical port and associate a SCSI 5058 * host with it. 5059 * 5060 * Return codes 5061 * 0 - successful 5062 * other values - error 5063 **/ 5064 static int 5065 lpfc_create_shost(struct lpfc_hba *phba) 5066 { 5067 struct lpfc_vport *vport; 5068 struct Scsi_Host *shost; 5069 5070 /* Initialize HBA FC structure */ 5071 phba->fc_edtov = FF_DEF_EDTOV; 5072 phba->fc_ratov = FF_DEF_RATOV; 5073 phba->fc_altov = FF_DEF_ALTOV; 5074 phba->fc_arbtov = FF_DEF_ARBTOV; 5075 5076 atomic_set(&phba->sdev_cnt, 0); 5077 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 5078 if (!vport) 5079 return -ENODEV; 5080 5081 shost = lpfc_shost_from_vport(vport); 5082 phba->pport = vport; 5083 lpfc_debugfs_initialize(vport); 5084 /* Put reference to SCSI host to driver's device private data */ 5085 pci_set_drvdata(phba->pcidev, shost); 5086 5087 return 0; 5088 } 5089 5090 /** 5091 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 5092 * @phba: pointer to lpfc hba data structure. 5093 * 5094 * This routine is invoked to destroy HBA physical port and the associated 5095 * SCSI host. 5096 **/ 5097 static void 5098 lpfc_destroy_shost(struct lpfc_hba *phba) 5099 { 5100 struct lpfc_vport *vport = phba->pport; 5101 5102 /* Destroy physical port that associated with the SCSI host */ 5103 destroy_port(vport); 5104 5105 return; 5106 } 5107 5108 /** 5109 * lpfc_setup_bg - Setup Block guard structures and debug areas. 5110 * @phba: pointer to lpfc hba data structure. 5111 * @shost: the shost to be used to detect Block guard settings. 5112 * 5113 * This routine sets up the local Block guard protocol settings for @shost. 5114 * This routine also allocates memory for debugging bg buffers. 5115 **/ 5116 static void 5117 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5118 { 5119 int pagecnt = 10; 5120 if (lpfc_prot_mask && lpfc_prot_guard) { 5121 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5122 "1478 Registering BlockGuard with the " 5123 "SCSI layer\n"); 5124 scsi_host_set_prot(shost, lpfc_prot_mask); 5125 scsi_host_set_guard(shost, lpfc_prot_guard); 5126 } 5127 if (!_dump_buf_data) { 5128 while (pagecnt) { 5129 spin_lock_init(&_dump_buf_lock); 5130 _dump_buf_data = 5131 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5132 if (_dump_buf_data) { 5133 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5134 "9043 BLKGRD: allocated %d pages for " 5135 "_dump_buf_data at 0x%p\n", 5136 (1 << pagecnt), _dump_buf_data); 5137 _dump_buf_data_order = pagecnt; 5138 memset(_dump_buf_data, 0, 5139 ((1 << PAGE_SHIFT) << pagecnt)); 5140 break; 5141 } else 5142 --pagecnt; 5143 } 5144 if (!_dump_buf_data_order) 5145 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5146 "9044 BLKGRD: ERROR unable to allocate " 5147 "memory for hexdump\n"); 5148 } else 5149 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5150 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 5151 "\n", _dump_buf_data); 5152 if (!_dump_buf_dif) { 5153 while (pagecnt) { 5154 _dump_buf_dif = 5155 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5156 if (_dump_buf_dif) { 5157 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5158 "9046 BLKGRD: allocated %d pages for " 5159 "_dump_buf_dif at 0x%p\n", 5160 (1 << pagecnt), _dump_buf_dif); 5161 _dump_buf_dif_order = pagecnt; 5162 memset(_dump_buf_dif, 0, 5163 ((1 << PAGE_SHIFT) << pagecnt)); 5164 break; 5165 } else 5166 --pagecnt; 5167 } 5168 if (!_dump_buf_dif_order) 5169 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5170 "9047 BLKGRD: ERROR unable to allocate " 5171 "memory 
for hexdump\n"); 5172 } else 5173 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5174 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 5175 _dump_buf_dif); 5176 } 5177 5178 /** 5179 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5180 * @phba: pointer to lpfc hba data structure. 5181 * 5182 * This routine is invoked to perform all the necessary post initialization 5183 * setup for the device. 5184 **/ 5185 static void 5186 lpfc_post_init_setup(struct lpfc_hba *phba) 5187 { 5188 struct Scsi_Host *shost; 5189 struct lpfc_adapter_event_header adapter_event; 5190 5191 /* Get the default values for Model Name and Description */ 5192 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5193 5194 /* 5195 * hba setup may have changed the hba_queue_depth so we need to 5196 * adjust the value of can_queue. 5197 */ 5198 shost = pci_get_drvdata(phba->pcidev); 5199 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5200 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5201 lpfc_setup_bg(phba, shost); 5202 5203 lpfc_host_attrib_init(shost); 5204 5205 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5206 spin_lock_irq(shost->host_lock); 5207 lpfc_poll_start_timer(phba); 5208 spin_unlock_irq(shost->host_lock); 5209 } 5210 5211 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5212 "0428 Perform SCSI scan\n"); 5213 /* Send board arrival event to upper layer */ 5214 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5215 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5216 fc_host_post_vendor_event(shost, fc_get_event_number(), 5217 sizeof(adapter_event), 5218 (char *) &adapter_event, 5219 LPFC_NL_VENDOR_ID); 5220 return; 5221 } 5222 5223 /** 5224 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 5225 * @phba: pointer to lpfc hba data structure. 5226 * 5227 * This routine is invoked to set up the PCI device memory space for device 5228 * with SLI-3 interface spec. 5229 * 5230 * Return codes 5231 * 0 - successful 5232 * other values - error 5233 **/ 5234 static int 5235 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5236 { 5237 struct pci_dev *pdev; 5238 unsigned long bar0map_len, bar2map_len; 5239 int i, hbq_count; 5240 void *ptr; 5241 int error = -ENODEV; 5242 5243 /* Obtain PCI device reference */ 5244 if (!phba->pcidev) 5245 return error; 5246 else 5247 pdev = phba->pcidev; 5248 5249 /* Set the device DMA mask size */ 5250 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5251 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5252 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5253 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5254 return error; 5255 } 5256 } 5257 5258 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5259 * required by each mapping. 5260 */ 5261 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5262 bar0map_len = pci_resource_len(pdev, 0); 5263 5264 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5265 bar2map_len = pci_resource_len(pdev, 2); 5266 5267 /* Map HBA SLIM to a kernel virtual address. */ 5268 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5269 if (!phba->slim_memmap_p) { 5270 dev_printk(KERN_ERR, &pdev->dev, 5271 "ioremap failed for SLIM memory.\n"); 5272 goto out; 5273 } 5274 5275 /* Map HBA Control Registers to a kernel virtual address. 
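	 * (BAR layout sketch, taken from the assignments further below:
	 * BAR0 carries SLIM and BAR2 the control registers; the HA/CA/HS/HC
	 * register addresses are ctrl_regs_memmap_p plus their REG_OFFSETs.)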
*/ 5276 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5277 if (!phba->ctrl_regs_memmap_p) { 5278 dev_printk(KERN_ERR, &pdev->dev, 5279 "ioremap failed for HBA control registers.\n"); 5280 goto out_iounmap_slim; 5281 } 5282 5283 /* Allocate memory for SLI-2 structures */ 5284 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5285 SLI2_SLIM_SIZE, 5286 &phba->slim2p.phys, 5287 GFP_KERNEL); 5288 if (!phba->slim2p.virt) 5289 goto out_iounmap; 5290 5291 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5292 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5293 phba->mbox_ext = (phba->slim2p.virt + 5294 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5295 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5296 phba->IOCBs = (phba->slim2p.virt + 5297 offsetof(struct lpfc_sli2_slim, IOCBs)); 5298 5299 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5300 lpfc_sli_hbq_size(), 5301 &phba->hbqslimp.phys, 5302 GFP_KERNEL); 5303 if (!phba->hbqslimp.virt) 5304 goto out_free_slim; 5305 5306 hbq_count = lpfc_sli_hbq_count(); 5307 ptr = phba->hbqslimp.virt; 5308 for (i = 0; i < hbq_count; ++i) { 5309 phba->hbqs[i].hbq_virt = ptr; 5310 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5311 ptr += (lpfc_hbq_defs[i]->entry_count * 5312 sizeof(struct lpfc_hbq_entry)); 5313 } 5314 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5315 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5316 5317 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5318 5319 INIT_LIST_HEAD(&phba->rb_pend_list); 5320 5321 phba->MBslimaddr = phba->slim_memmap_p; 5322 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5323 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5324 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5325 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5326 5327 return 0; 5328 5329 out_free_slim: 5330 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5331 phba->slim2p.virt, phba->slim2p.phys); 5332 out_iounmap: 5333 iounmap(phba->ctrl_regs_memmap_p); 5334 out_iounmap_slim: 5335 iounmap(phba->slim_memmap_p); 5336 out: 5337 return error; 5338 } 5339 5340 /** 5341 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5342 * @phba: pointer to lpfc hba data structure. 5343 * 5344 * This routine is invoked to unset the PCI device memory space for device 5345 * with SLI-3 interface spec. 5346 **/ 5347 static void 5348 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5349 { 5350 struct pci_dev *pdev; 5351 5352 /* Obtain PCI device reference */ 5353 if (!phba->pcidev) 5354 return; 5355 else 5356 pdev = phba->pcidev; 5357 5358 /* Free coherent DMA memory allocated */ 5359 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5360 phba->hbqslimp.virt, phba->hbqslimp.phys); 5361 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5362 phba->slim2p.virt, phba->slim2p.phys); 5363 5364 /* I/O memory unmap */ 5365 iounmap(phba->ctrl_regs_memmap_p); 5366 iounmap(phba->slim_memmap_p); 5367 5368 return; 5369 } 5370 5371 /** 5372 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5373 * @phba: pointer to lpfc hba data structure. 5374 * 5375 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5376 * done and check status. 5377 * 5378 * Return 0 if successful, otherwise -ENODEV. 
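 *
 * For example, SLI4 resource setup gates on this check before touching the
 * device (see lpfc_sli4_driver_resource_setup() above):
 *
 *	rc = lpfc_sli4_post_status_check(phba);
 *	if (rc)
 *		return -ENODEV;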
5379 **/ 5380 int 5381 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5382 { 5383 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 5384 struct lpfc_register reg_data; 5385 int i, port_error = 0; 5386 uint32_t if_type; 5387 5388 if (!phba->sli4_hba.PSMPHRregaddr) 5389 return -ENODEV; 5390 5391 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5392 for (i = 0; i < 3000; i++) { 5393 portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); 5394 if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { 5395 /* Port has a fatal POST error, break out */ 5396 port_error = -ENODEV; 5397 break; 5398 } 5399 if (LPFC_POST_STAGE_PORT_READY == 5400 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 5401 break; 5402 msleep(10); 5403 } 5404 5405 /* 5406 * If there was a port error during POST, then don't proceed with 5407 * other register reads as the data may not be valid. Just exit. 5408 */ 5409 if (port_error) { 5410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5411 "1408 Port Failed POST - portsmphr=0x%x, " 5412 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 5413 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 5414 portsmphr_reg.word0, 5415 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 5416 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 5417 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 5418 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 5419 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 5420 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 5421 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 5422 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 5423 } else { 5424 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5425 "2534 Device Info: SLIFamily=0x%x, " 5426 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 5427 "SLIHint_2=0x%x, FT=0x%x\n", 5428 bf_get(lpfc_sli_intf_sli_family, 5429 &phba->sli4_hba.sli_intf), 5430 bf_get(lpfc_sli_intf_slirev, 5431 &phba->sli4_hba.sli_intf), 5432 bf_get(lpfc_sli_intf_if_type, 5433 &phba->sli4_hba.sli_intf), 5434 bf_get(lpfc_sli_intf_sli_hint1, 5435 &phba->sli4_hba.sli_intf), 5436 bf_get(lpfc_sli_intf_sli_hint2, 5437 &phba->sli4_hba.sli_intf), 5438 bf_get(lpfc_sli_intf_func_type, 5439 &phba->sli4_hba.sli_intf)); 5440 /* 5441 * Check for other Port errors during the initialization 5442 * process. Fail the load if the port did not come up 5443 * correctly. 5444 */ 5445 if_type = bf_get(lpfc_sli_intf_if_type, 5446 &phba->sli4_hba.sli_intf); 5447 switch (if_type) { 5448 case LPFC_SLI_INTF_IF_TYPE_0: 5449 phba->sli4_hba.ue_mask_lo = 5450 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 5451 phba->sli4_hba.ue_mask_hi = 5452 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 5453 uerrlo_reg.word0 = 5454 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 5455 uerrhi_reg.word0 = 5456 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 5457 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5458 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5460 "1422 Unrecoverable Error " 5461 "Detected during POST " 5462 "uerr_lo_reg=0x%x, " 5463 "uerr_hi_reg=0x%x, " 5464 "ue_mask_lo_reg=0x%x, " 5465 "ue_mask_hi_reg=0x%x\n", 5466 uerrlo_reg.word0, 5467 uerrhi_reg.word0, 5468 phba->sli4_hba.ue_mask_lo, 5469 phba->sli4_hba.ue_mask_hi); 5470 port_error = -ENODEV; 5471 } 5472 break; 5473 case LPFC_SLI_INTF_IF_TYPE_2: 5474 /* Final checks. The port status should be clean. 
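		 * (Semantics assumed: an error bit in the STATUS register
		 * latches detail into ERR1/ERR2, which are read into
		 * work_status[] and logged below.)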
*/ 5475 reg_data.word0 = 5476 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 5477 if (bf_get(lpfc_sliport_status_err, ®_data)) { 5478 phba->work_status[0] = 5479 readl(phba->sli4_hba.u.if_type2. 5480 ERR1regaddr); 5481 phba->work_status[1] = 5482 readl(phba->sli4_hba.u.if_type2. 5483 ERR2regaddr); 5484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5485 "2888 Port Error Detected " 5486 "during POST: " 5487 "port status reg 0x%x, " 5488 "port_smphr reg 0x%x, " 5489 "error 1=0x%x, error 2=0x%x\n", 5490 reg_data.word0, 5491 portsmphr_reg.word0, 5492 phba->work_status[0], 5493 phba->work_status[1]); 5494 port_error = -ENODEV; 5495 } 5496 break; 5497 case LPFC_SLI_INTF_IF_TYPE_1: 5498 default: 5499 break; 5500 } 5501 } 5502 return port_error; 5503 } 5504 5505 /** 5506 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5507 * @phba: pointer to lpfc hba data structure. 5508 * @if_type: The SLI4 interface type getting configured. 5509 * 5510 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5511 * memory map. 5512 **/ 5513 static void 5514 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 5515 { 5516 switch (if_type) { 5517 case LPFC_SLI_INTF_IF_TYPE_0: 5518 phba->sli4_hba.u.if_type0.UERRLOregaddr = 5519 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 5520 phba->sli4_hba.u.if_type0.UERRHIregaddr = 5521 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 5522 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 5523 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 5524 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 5525 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 5526 phba->sli4_hba.SLIINTFregaddr = 5527 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5528 break; 5529 case LPFC_SLI_INTF_IF_TYPE_2: 5530 phba->sli4_hba.u.if_type2.ERR1regaddr = 5531 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1; 5532 phba->sli4_hba.u.if_type2.ERR2regaddr = 5533 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2; 5534 phba->sli4_hba.u.if_type2.CTRLregaddr = 5535 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL; 5536 phba->sli4_hba.u.if_type2.STATUSregaddr = 5537 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS; 5538 phba->sli4_hba.SLIINTFregaddr = 5539 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5540 phba->sli4_hba.PSMPHRregaddr = 5541 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR; 5542 phba->sli4_hba.RQDBregaddr = 5543 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5544 phba->sli4_hba.WQDBregaddr = 5545 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; 5546 phba->sli4_hba.EQCQDBregaddr = 5547 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 5548 phba->sli4_hba.MQDBregaddr = 5549 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 5550 phba->sli4_hba.BMBXregaddr = 5551 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 5552 break; 5553 case LPFC_SLI_INTF_IF_TYPE_1: 5554 default: 5555 dev_printk(KERN_ERR, &phba->pcidev->dev, 5556 "FATAL - unsupported SLI4 interface type - %d\n", 5557 if_type); 5558 break; 5559 } 5560 } 5561 5562 /** 5563 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5564 * @phba: pointer to lpfc hba data structure. 5565 * 5566 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5567 * memory map. 
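/*
 * Editor's note: the two interface types lay out their registers quite
 * differently.  On if_type 0 parts, BAR0 only carries the unrecoverable
 * error/mask words and the SLI_INTF word; the semaphore, interrupt and
 * doorbell registers come from BAR1 and BAR2 (set up by the two helpers
 * below).  On if_type 2 parts, everything (status, control, semaphore and
 * all five doorbells) is reached through the single BAR0 mapping, which
 * is why the BAR1/BAR2 helpers are only meaningful for if_type 0 devices.
 */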
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_SLIPORT_IF0_SMPHR;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISCR0;
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
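/*
 * Editor's note (illustrative): each virtual function gets its own page
 * of doorbells at a fixed stride.  For a hypothetical LPFC_VFR_PAGE_SIZE
 * of 0x1000 and vf = 2, the WQ doorbell would land at
 * drbl_regs_memmap_p + 2 * 0x1000 + LPFC_WQ_DOORBELL.  A minimal sketch
 * of the same computation, with hypothetical names:
 */
#if 0
static void __iomem *vf_doorbell(void __iomem *drbl_base, uint32_t vf,
				 unsigned long db_offset)
{
	/* one doorbell page per virtual function, fixed stride */
	return drbl_base + vf * LPFC_VFR_PAGE_SIZE + db_offset;
}
#endif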
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
	 * cleanly on 32-bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
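/*
 * Editor's note (illustrative, not driver code): a sketch of how the
 * aligned 64-bit bootstrap mailbox address is carved into the two
 * doorbell words computed above.  Bits 63:34 travel in addr_hi and bits
 * 33:4 in addr_lo, and the low bits of each posted word mark which half
 * is being written (LPFC_BMBX_BIT1_ADDR_HI/_LO).
 */
#if 0
static void bmbx_split_example(uint64_t aphys, uint32_t *hi, uint32_t *lo)
{
	/* aphys must already be 16-byte aligned */
	*hi = (uint32_t)(((aphys >> 34) & 0x3fffffff) << 2) |
	      LPFC_BMBX_BIT1_ADDR_HI;
	*lo = (uint32_t)(((aphys >> 4) & 0x3fffffff) << 2) |
	      LPFC_BMBX_BIT1_ADDR_LO;
}
#endif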
/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure all mailbox commands have been recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs, and FCFIs.  These values also affect the
 * resource allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	uint32_t rc = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.fcfi_base =
			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(B:%d M:%d)\n",
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.fcfi_base,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth >
	    (phba->sli4_hba.max_cfg_param.max_xri -
	     lpfc_sli4_get_els_iocb_cnt(phba)))
		phba->cfg_hba_queue_depth =
			phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	return rc;
}
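/*
 * Editor's note (worked example with made-up numbers): if READ_CONFIG
 * reported max_xri = 1024 and lpfc_sli4_get_els_iocb_cnt() returned 40,
 * a configured hba_queue_depth of 2048 would be clamped to 984 above, so
 * the driver can never post more I/Os than it has XRIs to back them.
 */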
/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the port-side endian order when
 * the port if_type is 0.  This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation.  For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter.  For now,
 * we just use some constant number as placeholder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int cfg_fcp_wq_count;
	int cfg_fcp_eq_count;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on FCP fast-path WQ parameters */
	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
	if (cfg_fcp_wq_count >
	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
				   LPFC_SP_WQN_DEF;
		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2581 Not enough WQs (%d) from "
					"the pci function for supporting "
					"FCP WQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->cfg_fcp_wq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2582 Not enough WQs (%d) from the pci "
				"function for supporting the requested "
				"FCP WQs (%d), the actual FCP WQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
	}
	/* The actual number of FCP work queues adopted */
	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;

	/* Sanity check on FCP fast-path EQ parameters */
	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
	if (cfg_fcp_eq_count >
	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
				   LPFC_SP_EQN_DEF;
		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_eq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2575 Not enough EQs (%d) from the pci "
				"function for supporting the requested "
				"FCP EQs (%d), the actual FCP EQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
	}
	/* It does not make sense to have more EQs than WQs */
	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2593 The FCP EQ count(%d) cannot be greater "
				"than the FCP WQ count(%d), limiting the "
				"FCP EQ count to %d\n", cfg_fcp_eq_count,
				phba->cfg_fcp_wq_count,
				phba->cfg_fcp_wq_count);
		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
	}
	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
	/* The overall number of event queues used */
	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;

	/*
	 * Create Event Queues (EQs)
	 */

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Create slow path event queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0496 Failed allocate slow-path EQ\n");
		goto out_error;
	}
	phba->sli4_hba.sp_eq = qdesc;

	/* Create fast-path FCP Event Queue(s) */
	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2576 Failed allocate memory for fast-path "
				"EQ record array\n");
		goto out_free_sp_eq;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate fast-path EQ\n");
			goto out_free_fp_eq;
		}
		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
	}

	/*
	 * Create Completion Queues (CQs)
	 */

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_free_fp_eq;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_free_mbx_cq;
	}
	phba->sli4_hba.els_cq = qdesc;

	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_free_els_cq;
	}
	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", fcp_cqidx);
			goto out_free_fcp_cq;
		}
		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
	}

	/* Create Mailbox Command Queue */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_free_fcp_cq;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create all the Work Queues (WQs)
	 */
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_free_mbx_wq;
	}
	phba->sli4_hba.els_wq = qdesc;

	/* Create fast-path FCP Work Queue(s) */
	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_wq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_free_els_wq;
	}
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", fcp_wqidx);
			goto out_free_fcp_wq;
		}
		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_free_fcp_wq;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_free_hdr_rq;
	}
	phba->sli4_hba.dat_rq = qdesc;

	return 0;

out_free_hdr_rq:
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
out_free_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_wq);
out_free_els_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;
out_free_mbx_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;
out_free_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_cq);
out_free_els_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;
out_free_mbx_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;
out_free_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
	}
	kfree(phba->sli4_hba.fp_eq);
out_free_sp_eq:
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
out_error:
	return -ENOMEM;
}
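/*
 * Editor's note: after a successful lpfc_sli4_queue_create() the driver
 * owns one slow-path EQ plus cfg_fcp_eq_count fast-path EQs, a mailbox
 * CQ, an ELS CQ and one FCP CQ per fast-path EQ, the mailbox MQ, the ELS
 * WQ, cfg_fcp_wq_count FCP WQs, and the header/data RQ pair.  Everything
 * is only allocated in host memory at this point; the queues are not
 * posted to the port until lpfc_sli4_queue_setup() runs.
 */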
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues allocated for the
 * FCoE HBA operation.
 **/
static void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Release mailbox command work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;

	/* Release ELS work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;

	/* Release FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;

	/* Release unsolicited receive queue */
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
	phba->sli4_hba.dat_rq = NULL;

	/* Release ELS complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;

	/* Release mailbox command complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;

	/* Release FCP response complete queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;

	/* Release fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;

	/* Release slow-path event queue */
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;

	/*
	 * Set up Event Queues (EQs)
	 */

	/* Set up slow-path event queue */
	if (!phba->sli4_hba.sp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0520 Slow-path EQ not allocated\n");
		goto out_error;
	}
	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
			    LPFC_SP_DEF_IMAX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0521 Failed setup of slow-path EQ: "
				"rc = 0x%x\n", rc);
		goto out_error;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2583 Slow-path EQ setup: queue-id=%d\n",
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path event queue */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			goto out_destroy_fp_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_fp_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 Fast-path EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
	}

	/*
	 * Set up Completion Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		goto out_destroy_fp_eq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
			    LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fp_eq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
			    LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path FCP Response Complete Queue */
	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0526 Fast-path FCP CQ (%d) not "
					"allocated\n", fcp_cqidx);
			goto out_destroy_fcp_cq;
		}
		rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
				    phba->sli4_hba.fp_eq[fcp_cqidx],
				    LPFC_WCQ, LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0527 Failed setup of fast-path FCP "
					"CQ (%d), rc = 0x%x\n",
					fcp_cqidx, rc);
			goto out_destroy_fcp_cq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2588 FCP CQ setup: cq[%d]-id=%d, "
				"parent eq[%d]-id=%d\n",
				fcp_cqidx,
				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
				fcp_cqidx,
				phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
	}

	/*
	 * Set up all the Work Queues (WQs)
	 */

	/* Set up Mailbox Command Queue */
	if (!phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0538 Slow-path MQ not allocated\n");
		goto out_destroy_fcp_cq;
	}
	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);

	/* Set up slow-path ELS Work Queue */
	if (!phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0536 Slow-path ELS WQ not allocated\n");
		goto out_destroy_mbx_wq;
	}
	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0537 Failed setup of slow-path ELS WQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	/* Set up fast-path FCP Work Queue */
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0534 Fast-path FCP WQ (%d) not "
					"allocated\n", fcp_wqidx);
			goto out_destroy_fcp_wq;
		}
		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
				    phba->sli4_hba.fcp_cq[fcp_cq_index],
				    LPFC_FCP);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed setup of fast-path FCP "
					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
			goto out_destroy_fcp_wq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2591 FCP WQ setup: wq[%d]-id=%d, "
				"parent cq[%d]-id=%d\n",
				fcp_wqidx,
				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
				fcp_cq_index,
				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
		/* Round robin FCP Work Queue's Completion Queue assignment */
		fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		goto out_destroy_fcp_wq;
	}
	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fcp_wq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);
	return 0;

out_destroy_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
out_error:
	return rc;
}
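/*
 * Editor's note (worked example): the WQ-to-CQ round robin above simply
 * wraps fcp_cq_index modulo cfg_fcp_eq_count.  With 8 FCP WQs spread
 * over 4 FCP CQs, the WQs bind to CQs 0,1,2,3,0,1,2,3 in that order, so
 * completion work stays evenly distributed across the EQ/CQ pairs.
 */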
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * operation.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset FCP response complete queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	/* Unset fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
	/* Unset slow-path event queue */
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events.  The body of the completion queue event is a completion queue entry
 * (CQE).  For now, this pool is used for the interrupt service routine to
 * queue the following HBA completion queue events for the worker thread to
 * process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time.  Note that it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the pending completion-queue events back into
 * the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}
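/*
 * Editor's note (illustrative): the __lpfc_sli4_cq_event_* variants above
 * assume the caller already holds phba->hbalock; the unprefixed wrappers
 * take the lock themselves.  A hypothetical caller batching work under a
 * single lock round trip might look like:
 */
#if 0
	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);	/* lock already held */
	if (cq_event)
		list_add_tail(&cq_event->list, &local_pending_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
#endif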
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset.  It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		for (num_resets = 0;
		     num_resets < MAX_IF_TYPE_2_RESETS;
		     num_resets++) {
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);

			/*
			 * Poll the Port Status Register and wait for RDY for
			 * up to 10 seconds.  If the port doesn't respond,
			 * treat it as an error.  If the port responds with
			 * RN, start the loop again.
			 */
			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
				reg_data.word0 =
					readl(phba->sli4_hba.u.if_type2.
					      STATUSregaddr);
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
				if (bf_get(lpfc_sliport_status_rn,
					   &reg_data)) {
					reset_again++;
					break;
				}
				msleep(10);
			}

			/*
			 * If the port responds to the init request with
			 * reset needed, delay for a bit and restart the loop.
			 */
			if (reset_again) {
				msleep(10);
				reset_again = 0;
				continue;
			}

			/* Detect any port errors. */
			reg_data.word0 = readl(phba->sli4_hba.u.if_type2.
					       STATUSregaddr);
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
			    (rdy_chk >= 1000)) {
				phba->work_status[0] = readl(
					phba->sli4_hba.u.if_type2.ERR1regaddr);
				phba->work_status[1] = readl(
					phba->sli4_hba.u.if_type2.ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port Error Detected "
					"during Port Reset: "
					"port status reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
				rc = -ENODEV;
			}

			/*
			 * Terminate the outer loop provided the Port
			 * indicated ready within 10 seconds.
			 */
			if (rdy_chk < 1000)
				break;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

	/* Catch the not-ready port failure after a port reset. */
	if (num_resets >= MAX_IF_TYPE_2_RESETS)
		rc = -ENODEV;

	return rc;
}
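/*
 * Editor's note (worked timing): each IF_TYPE_2 reset attempt above polls
 * the status register up to 1000 times with a 10ms sleep, i.e. roughly
 * 10 seconds per attempt, and up to MAX_IF_TYPE_2_RESETS attempts are
 * made before the port is declared dead with -ENODEV.
 */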
/**
 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
 * @phba: pointer to lpfc hba data structure.
 * @cnt: number of nop mailbox commands to send.
 *
 * This routine is invoked to send a number @cnt of NOP mailbox commands and
 * wait for each command to complete.
 *
 * Return: the number of NOP mailbox commands completed.
 **/
static int
lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
{
	LPFC_MBOXQ_t *mboxq;
	int length, cmdsent;
	uint32_t mbox_tmo;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (cnt == 0) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2518 Requested to send 0 NOP mailbox cmd\n");
		return cnt;
	}

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2519 Unable to allocate memory for issuing "
				"NOP mailbox command\n");
		return 0;
	}

	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
	length = (sizeof(struct lpfc_mbx_nop) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);

	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
		if (!phba->sli4_hba.intr_enable)
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		else
			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
		if (rc == MBX_TIMEOUT)
			break;
		/* Check return status */
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2520 NOP mailbox command failed "
					"status x%x add_status x%x mbx "
					"status x%x\n", shdr_status,
					shdr_add_status, rc);
			break;
		}
	}

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	return cmdsent;
}

/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping.  The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, 0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, 0);
		bar0map_len = pci_resource_len(pdev, 0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * addr
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			goto out;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if (pci_resource_start(pdev, 2)) {
		/*
		 * Map SLI4 if type 0 HBA Control Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, 2);
		bar1map_len = pci_resource_len(pdev, 2);
		phba->sli4_hba.ctrl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 HBA control registers.\n");
			goto out_iounmap_conf;
		}
		lpfc_sli4_bar1_register_memmap(phba);
	}

	if (pci_resource_start(pdev, 4)) {
		/*
		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, 4);
		bar2map_len = pci_resource_len(pdev, 4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */

	/* Unmap I/O memory space */
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
	iounmap(phba->sli4_hba.conf_regs_memmap_p);

	return;
}
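/*
 * Editor's note (illustrative): the interrupt helpers that follow rely on
 * strict pairing - every vector that request_irq() succeeded on must see
 * a free_irq() before pci_disable_msix() is called, or the kernel trips a
 * BUG_ON().  A minimal sketch of that teardown ordering, assuming nvec
 * vectors were set up (names hypothetical):
 */
#if 0
	for (i = 0; i < nvec; i++)
		free_irq(msix_entries[i].vector, phba);	/* release handlers */
	pci_disable_msix(pcidev);			/* then the capability */
#endif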
7083 * 7084 * Return codes 7085 * 0 - successful 7086 * other values - error 7087 **/ 7088 static int 7089 lpfc_sli_enable_msix(struct lpfc_hba *phba) 7090 { 7091 int rc, i; 7092 LPFC_MBOXQ_t *pmb; 7093 7094 /* Set up MSI-X multi-message vectors */ 7095 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7096 phba->msix_entries[i].entry = i; 7097 7098 /* Configure MSI-X capability structure */ 7099 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 7100 ARRAY_SIZE(phba->msix_entries)); 7101 if (rc) { 7102 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7103 "0420 PCI enable MSI-X failed (%d)\n", rc); 7104 goto msi_fail_out; 7105 } 7106 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7107 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7108 "0477 MSI-X entry[%d]: vector=x%x " 7109 "message=%d\n", i, 7110 phba->msix_entries[i].vector, 7111 phba->msix_entries[i].entry); 7112 /* 7113 * Assign MSI-X vectors to interrupt handlers 7114 */ 7115 7116 /* vector-0 is associated to slow-path handler */ 7117 rc = request_irq(phba->msix_entries[0].vector, 7118 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 7119 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7120 if (rc) { 7121 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7122 "0421 MSI-X slow-path request_irq failed " 7123 "(%d)\n", rc); 7124 goto msi_fail_out; 7125 } 7126 7127 /* vector-1 is associated to fast-path handler */ 7128 rc = request_irq(phba->msix_entries[1].vector, 7129 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 7130 LPFC_FP_DRIVER_HANDLER_NAME, phba); 7131 7132 if (rc) { 7133 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7134 "0429 MSI-X fast-path request_irq failed " 7135 "(%d)\n", rc); 7136 goto irq_fail_out; 7137 } 7138 7139 /* 7140 * Configure HBA MSI-X attention conditions to messages 7141 */ 7142 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7143 7144 if (!pmb) { 7145 rc = -ENOMEM; 7146 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7147 "0474 Unable to allocate memory for issuing " 7148 "MBOX_CONFIG_MSI command\n"); 7149 goto mem_fail_out; 7150 } 7151 rc = lpfc_config_msi(phba, pmb); 7152 if (rc) 7153 goto mbx_fail_out; 7154 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7155 if (rc != MBX_SUCCESS) { 7156 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 7157 "0351 Config MSI mailbox command failed, " 7158 "mbxCmd x%x, mbxStatus x%x\n", 7159 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 7160 goto mbx_fail_out; 7161 } 7162 7163 /* Free memory allocated for mailbox command */ 7164 mempool_free(pmb, phba->mbox_mem_pool); 7165 return rc; 7166 7167 mbx_fail_out: 7168 /* Free memory allocated for mailbox command */ 7169 mempool_free(pmb, phba->mbox_mem_pool); 7170 7171 mem_fail_out: 7172 /* free the irq already requested */ 7173 free_irq(phba->msix_entries[1].vector, phba); 7174 7175 irq_fail_out: 7176 /* free the irq already requested */ 7177 free_irq(phba->msix_entries[0].vector, phba); 7178 7179 msi_fail_out: 7180 /* Unconfigure MSI-X capability structure */ 7181 pci_disable_msix(phba->pcidev); 7182 return rc; 7183 } 7184 7185 /** 7186 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 7187 * @phba: pointer to lpfc hba data structure. 7188 * 7189 * This routine is invoked to release the MSI-X vectors and then disable the 7190 * MSI-X interrupt mode to device with SLI-3 interface spec. 
/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI enabled
 * and leak its vector.
 */
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode requested (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the
 * driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *	intr_mode (2, 1 or 0) - the interrupt mode actually enabled
 *	LPFC_INTR_ERROR - no interrupt mode could be enabled
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

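/*
 * Illustrative sketch (not part of the driver): the fallback ladder
 * implemented by lpfc_sli_enable_intr() above, reduced to its control
 * flow. cfg_mode 2 asks for MSI-X, 1 for MSI, 0 for INTx; each level is
 * attempted only while no earlier level has succeeded. The try_* callbacks
 * are hypothetical and return 0 on success.
 */
static int
example_enable_intr_ladder(int cfg_mode, int (*try_msix)(void),
			   int (*try_msi)(void), int (*try_intx)(void))
{
	int mode = -1;			/* the LPFC_INTR_ERROR analogue */

	if (cfg_mode == 2 && !try_msix())
		mode = 2;		/* running in MSI-X mode */
	if (mode < 0 && cfg_mode >= 1 && !try_msi())
		mode = 1;		/* fell back to MSI */
	if (mode < 0 && !try_intx())
		mode = 0;		/* fell back to INTx */
	return mode;
}
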
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
 * enables either all or nothing, depending on the current availability of
 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later, when the device
 * is unloading, the driver should always call free_irq() on all MSI-X
 * vectors it has done request_irq() on before calling pci_disable_msix().
 * Failure to do so results in a BUG_ON() and the device will be left with
 * MSI-X enabled and leak its vectors.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;

	/* Set up MSI-X multi-message vectors */
	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
		phba->sli4_hba.msix_entries[index].entry = index;

	/* Configure MSI-X capability structure */
	vectors = phba->sli4_hba.cfg_eqn;
enable_msix_vectors:
	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
			     vectors);
	if (rc > 1) {
		/* Retry with the vector count the platform can support */
		vectors = rc;
		goto enable_msix_vectors;
	} else if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}

	/* Log MSI-X vector assignment */
	for (index = 0; index < vectors; index++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0489 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", index,
				phba->sli4_hba.msix_entries[index].vector,
				phba->sli4_hba.msix_entries[index].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* The first vector must be associated to the slow-path handler for MQ */
	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0485 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* The rest of the vector(s) are associated to fast-path handler(s) */
	for (index = 1; index < vectors; index++) {
		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
				 LPFC_FP_DRIVER_HANDLER_NAME,
				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}
	phba->sli4_hba.msix_vec_nr = vectors;

	return rc;

cfg_fail_out:
	/*
	 * Free the irqs already requested. Note: fast-path vector index was
	 * registered with dev_id &fcp_eq_hdl[index - 1], so the same pairing
	 * must be used here when freeing.
	 */
	for (--index; index >= 1; index--)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);

	/* free the irq already requested */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

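/*
 * Illustrative sketch (not part of the driver): the retry idiom used by
 * lpfc_sli4_enable_msix() above. In this kernel generation,
 * pci_enable_msix() returns 0 on success, a negative errno on failure, or
 * a positive number of vectors that could have been allocated, in which
 * case the caller may retry with that smaller count. The returned count is
 * always less than the requested one, so the loop terminates.
 */
static int
example_enable_msix_best_effort(struct pci_dev *pdev,
				struct msix_entry *entries, int want)
{
	int rc;

	do {
		rc = pci_enable_msix(pdev, entries, want);
		if (rc > 0)
			want = rc;	/* retry with what is available */
	} while (rc > 0);

	return rc ? rc : want;		/* negative errno, or vectors in use */
}
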
/**
 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);

	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);

	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	return 0;
}

/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI enabled
 * and leak its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode requested (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the
 * driver, the driver will try to fall back from the configured interrupt
 * mode to an interrupt mode which is supported by the platform, kernel,
 * and device in the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *	intr_mode (2, 1 or 0) - the interrupt mode actually enabled
 *	LPFC_INTR_ERROR - no interrupt mode could be enabled
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			for (index = 0; index < phba->cfg_fcp_eq_count;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps on
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps on
 * a device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	phba->pport->work_port_events = 0;

	/* Stop the SLI4 device port */
	lpfc_stop_port(phba);

	lpfc_sli4_disable_intr(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion of
 * the device's exchange-busy XRIs. It checks the XRI exchange busy state
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it checks every 30 seconds, logging an error message each round,
 * and waits forever. Only when all exchange-busy XRIs have completed shall
 * the driver unload proceed with invoking the function reset ioctl mailbox
 * command to the CNA and the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	while (!fcp_xri_cmpl || !els_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}

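/*
 * Illustrative sketch (not part of the driver): the two-phase polling
 * pattern of lpfc_sli4_xri_exchange_busy_wait() above. The condition is
 * polled at a fast interval up to a threshold, after which the routine
 * keeps waiting at a slow interval and logs a message on every round.
 * The intervals and the condition callback are hypothetical.
 */
static void
example_two_phase_wait(int (*condition_met)(void *), void *arg)
{
	unsigned int waited_ms = 0;

	while (!condition_met(arg)) {
		if (waited_ms > 10000) {
			/* slow phase: complain, then wait a long interval */
			pr_err("example: still waiting after %u ms\n",
			       waited_ms);
			msleep(30000);
			waited_ms += 30000;
		} else {
			/* fast phase: short poll interval */
			msleep(10);
			waited_ms += 10;
		}
	}
}
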
/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues the PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls the lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

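/*
 * Illustrative sketch (not part of the driver): the block/drain/force
 * sequence used by lpfc_sli4_hba_unset() above, reduced to a hypothetical
 * single-slot command channel guarded by a spinlock. New submissions are
 * blocked first, the active command is given a bounded grace period, and
 * only then is it forcefully completed.
 */
static void
example_drain_channel(spinlock_t *lock, int *blocked, int *active,
		      void (*force_complete)(void))
{
	int wait_cnt = 0;

	/* 1) Refuse any new submissions */
	spin_lock_irq(lock);
	*blocked = 1;
	spin_unlock_irq(lock);

	/* 2) Bounded graceful wait for the in-flight command */
	while (*active && ++wait_cnt <= 100)
		msleep(10);

	/* 3) Forcefully complete it if the grace period expired */
	if (*active) {
		spin_lock_irq(lock);
		force_complete();
		*active = 0;
		spin_unlock_irq(lock);
	}
}
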
/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
	return rc;
}

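/*
 * Illustrative sketch (not part of the driver): the submit-mode choice
 * made by lpfc_pc_sli4_params_get() above. Before the interrupt path is
 * enabled, a mailbox command must be polled to completion; afterwards the
 * caller can block-wait on the interrupt-driven completion with a timeout.
 * The callbacks are hypothetical.
 */
static int
example_issue_mbox(int intr_enabled, int (*issue_poll)(void),
		   int (*issue_wait)(uint32_t), uint32_t timeout)
{
	if (!intr_enabled)
		return issue_poll();	/* no interrupts yet: poll */
	return issue_wait(timeout);	/* interrupt driven, bounded wait */
}
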
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
			lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	return 0;
}

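/*
 * Illustrative sketch (not part of the driver): what a bf_get()-style
 * accessor, as used above, reduces to. A named field is described by a
 * shift and a mask within one word of the command structure, and
 * extraction is a shift-and-mask on the CPU-endian word. The field layout
 * below is hypothetical.
 */
#define EXAMPLE_FIELD_SHIFT	8
#define EXAMPLE_FIELD_MASK	0x000000ff

static inline uint32_t
example_get_field(uint32_t word)
{
	return (word >> EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK;
}
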
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver states that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1401 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

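/*
 * Illustrative sketch (not part of the driver): the descending bring-up
 * loop used by lpfc_pci_probe_one_s3() above to prove that the selected
 * interrupt mode really delivers interrupts, stepping MSI-X -> MSI -> INTx
 * whenever the active-interrupt test fails. The callbacks and the
 * interrupt counter are hypothetical; enable() returns the mode enabled
 * (2, 1 or 0) or a negative value on failure.
 */
static int
example_verify_intr_mode(int cfg_mode, int (*enable)(int),
			 void (*disable)(void),
			 unsigned long (*intr_count)(void))
{
	int mode;

	for (;;) {
		mode = enable(cfg_mode);
		if (mode < 0)
			return -ENODEV;	/* even INTx could not be enabled */
		msleep(50);		/* give pending interrupts a chance */
		if (mode == 0 || intr_count() > 0)
			return mode;	/* INTx, or MSI/MSI-X proven live */
		disable();		/* no interrupts were seen */
		cfg_mode = mode - 1;	/* try the next level down */
	}
}
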
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bringing the device offline. Note that, because the driver implements
 * only the minimum PM requirements for a power-aware driver's suspend/resume
 * support -- all of the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * passed to the suspend() method are treated as SUSPEND and the driver
 * fully reinitializes its device during the resume() method call -- the
 * driver will set the device to the PCI_D3hot state in PCI config space
 * instead of setting it according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that, because
 * the driver implements only the minimum PM requirements for a power-aware
 * driver's suspend/resume support -- all of the possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated
 * as SUSPEND and the driver fully reinitializes its device during the
 * resume() method call -- the device will be set to PCI_D0 directly in PCI
 * config space before restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

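/*
 * Illustrative sketch (not part of the driver): the power-state sequence
 * shared by the suspend/resume methods above. Resume must re-save the
 * config space because pci_restore_state() clears the device's
 * saved_state flag.
 */
static int
example_pm_suspend(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int
example_pm_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);	/* re-arm saved_state for the next cycle */
	if (pdev->is_busmaster)
		pci_set_master(pdev);
	return 0;
}
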
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for permanent PCI slot
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

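/*
 * Illustrative sketch (not part of the driver): the channel-state to
 * recovery-result mapping implemented by lpfc_io_error_detected_s3()
 * above. io_normal is recoverable in place, io_frozen requires a slot
 * reset, and io_perm_failure means the device cannot be recovered.
 */
static pci_ers_result_t
example_map_channel_state(pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
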
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device and
 * enable its interrupt, but it will just put the HBA into an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else
			return 150;
	} else
		return 0;
}

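/*
 * Worked example (informational only): behavior of the XRI tiers above at
 * their boundaries.
 *
 *	max_xri =  100 -> 10 reserved ELS/CT IOCBs
 *	max_xri =  101 -> 25
 *	max_xri =  512 -> 50
 *	max_xri = 1024 -> 100
 *	max_xri = 1025 -> 150
 */
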
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to attach a device
 * with SLI-4 interface spec to the PCI subsystem. When an Emulex HBA with
 * SLI-4 interface spec is presented on PCI bus, the kernel PCI subsystem
 * looks at PCI device-specific information of the device and driver to see
 * if the driver states that it can support this kind of device. If the
 * match is successful, the driver core invokes this routine. If this
 * routine determines it can claim the HBA, it does all the initialization
 * that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	int mcnt;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1409 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2821 initialize iocb list %d.\n",
			phba->cfg_iocb_cnt*1024);
	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0426 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* Default to single FCP EQ for non-MSI-X */
		if (phba->intr_type != MSIX)
			phba->cfg_fcp_eq_count = 1;
		else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
			phba->cfg_fcp_eq_count =
				phba->sli4_hba.msix_vec_nr - 1;
		/* Set up SLI-4 HBA */
		if (lpfc_sli4_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1421 Failed to set up hba\n");
			error = -ENODEV;
			goto out_disable_intr;
		}

		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
		if (intr_mode != 0)
			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
							    LPFC_ACT_INTR_CNT);

		/* Check active interrupts received only for MSI/MSI-X */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0451 Configure interrupt mode (%d) "
				"failed active interrupt test.\n",
				intr_mode);
		/*
		 * Unset the previous SLI-4 HBA setup.
		 * TODO: Is this operation compatible with IF TYPE 2
		 * devices? All port state is deleted and cleared.
		 */
		lpfc_sli4_unset_hba(phba);
		/* Try next level of interrupt mode */
		cfg_mode = --intr_mode;
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to detach a device
 * with SLI-4 interface spec from the PCI subsystem. When an Emulex HBA with
 * SLI-4 interface spec is removed from PCI bus, it performs all the necessary
 * cleanup for the HBA device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}


/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that the driver
 * implements only the minimum PM requirements for a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
 * method are treated as SUSPEND, and the driver fully reinitializes its
 * device during the resume() method call. Consequently, the device is set
 * to PCI_D0 directly in PCI config space before restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
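
/*
 * The three prep routines above line up one-to-one with the channel states
 * delivered to the error_detected() callback and with the results returned
 * to the PCI core, as implemented by lpfc_io_error_detected_s4() below:
 *
 *	pci_channel_io_normal       -> prep_dev_for_recover()      -> CAN_RECOVER
 *	pci_channel_io_frozen       -> prep_dev_for_reset()        -> NEED_RESET
 *	pci_channel_io_perm_failure -> prep_dev_for_perm_failure() -> DISCONNECT
 */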

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot. During
 * the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable its interrupt, but it will leave the HBA in an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
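
/*
 * From the PCI core's point of view, the SLI-4 error handlers above run in
 * this order during a successful recovery (a simplified sketch of the EEH
 * sequence, not a verbatim trace):
 *
 *	error_detected()  returns PCI_ERS_RESULT_NEED_RESET
 *	    ... the PCI core resets the slot ...
 *	slot_reset()      returns PCI_ERS_RESULT_RECOVERED
 *	resume()          normal I/O may flow again
 */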

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * examines the PCI device-specific information to see if the driver states
 * that it can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to
 * the proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
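
/*
 * The probe dispatch above is a single config-space read plus a bitfield
 * decode. A generic sketch of the same technique, with hypothetical
 * MY_REG/MY_REV_* names (the bf_get() accessors used above perform the
 * equivalent shift-and-mask on the LPFC_SLI_INTF word):
 *
 *	u32 word;
 *
 *	if (pci_read_config_dword(pdev, MY_REG, &word))
 *		return -ENODEV;		// config read failed
 *	if ((word & MY_REV_MASK) == MY_REV_SLI4)
 *		// take the SLI-4 probe path
 *	else
 *		// fall back to the SLI-3 probe path
 */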

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
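
/*
 * Each PCI entry point in this group (remove, suspend, resume, and the
 * error handlers below) uses the same fan-out: phba->pci_dev_grp, recorded
 * at probe time, selects the SLI-3 (_s3) or SLI-4 (_s4) flavor of the
 * operation, and an unsupported group is logged and rejected.
 */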

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
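
/*
 * MODULE_DEVICE_TABLE() above exports the id list as module alias data so
 * that userspace (udev/modprobe) can autoload lpfc when a matching
 * vendor/device pair appears on the bus. Each open-coded entry could
 * equivalently be written with the PCI_DEVICE() convenience macro, e.g.:
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER) },
 *
 * which expands to the same vendor/device/PCI_ANY_ID/PCI_ANY_ID tuple.
 */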

static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
 * 	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);