/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
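		/*
		 * The ASCII license key below is handed to READ_NVPARM as
		 * big-endian 32-bit words, so convert it in place once on
		 * the first pass; cpu_to_be32() makes this a no-op on
		 * big-endian hosts.
		 */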
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it will set the internal async event
 * support flag to 1; otherwise, it will set the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];
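
	/*
	 * Illustrative example of the decoding below (hypothetical values,
	 * not taken from any particular firmware image): a word decoding
	 * to ver = 5, rev = 1, lev = 2, dist = 1 and num = 3 is rendered
	 * as "5.12a3"; with dist = 3 and num = 0 the dist/num pair is
	 * omitted, giving "5.12".
	 */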
	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	lpfc_read_sparam(phba, pmb, 0);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
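	/*
	 * Each of the 6 IEEE bytes of the WWNN yields two characters:
	 * nibbles 0x0-0x9 map to '0'-'9' and 0xa-0xf map to 'a'-'f'.
	 * For example, an IEEE address of 00:00:c9:12:ab:34 produces
	 * the serial number "0000c912ab34".
	 */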
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
					lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
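
	/*
	 * The ELS ring timeout below is derived from the resource
	 * allocation timeout: an fc_ratov of 10 seconds, for example,
	 * arms the timer for 20 seconds (fc_ratov * 2).
	 */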
	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0454 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x.\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x.\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsl_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
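
/*
 * Heart-beat overview: lpfc_hb_timeout() below runs in timer context and
 * only posts WORKER_HB_TMO; the worker thread then invokes
 * lpfc_hb_timeout_handler(), which issues the heart-beat mailbox command,
 * completed by lpfc_hb_mbox_cmpl(). A completion re-arms the timer for
 * LPFC_HB_MBOX_INTERVAL; a timeout that fires with hb_outstanding still
 * set means the mailbox never completed, and the port is taken offline.
 */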

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer
 * expires with the heart-beat outstanding state set, the driver will put
 * the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If the lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if a
 * heart-beat mailbox command has been outstanding, the HBA shall be taken
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it has triggered an error attention. This
	 * could cause I/Os to be dropped by the firmware. Error out the
	 * iocbs (I/Os) on the txcmplq and let the SCSI layer retry them
	 * after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
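
/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This helper posts a board-level internal error (LPFC_EVENT_PORTINTERR)
 * as a vendor-unique FC transport event so that a management application
 * listening for LPFC_NL_VENDOR_ID events can be notified.
 **/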
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered an error attention
		 * with HS_FFER6. This could cause I/Os to be dropped by
		 * the firmware. Error out the iocbs (I/Os) on the txcmplq
		 * and let the SCSI layer retry them after re-establishing
		 * link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine, looked up through the API jump table function pointer in the
 * lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
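
	/*
	 * The VPD image is a sequence of tagged resources: 0x82/0x91
	 * identifier strings and a 0x90 read-only area, each followed by a
	 * little-endian 16-bit length, terminated by a 0x78 end tag. The
	 * 0x90 area holds keyword fields ("SN" serial number, "V1"-"V4"
	 * vendor fields), each a 2-byte keyword plus a 1-byte length.
	 */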
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') &&
				    (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] =
							vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') &&
					 (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') &&
					 (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] =
							vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') &&
					 (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') &&
					 (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				}
				else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		int max_speed;
		char *bus;
	} m = {"<Unknown>", 0, ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}
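
	/*
	 * Example of the strings built below: a ZEPHYR (LPe11000) HBA with
	 * LMT_4Gb as its highest supported link speed would report the
	 * model "LPe11000" and the description
	 * "Emulex LPe11000 4Gb PCIe Fibre Channel Adapter".
	 */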
	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* oneConnect HBAs require special processing: they are all
	 * initiators and the port number is appended to the end of
	 * the description.
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, FCoE Initiator, Port %s",
				m.name,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, m.max_speed,
				(GE) ? "GE" : "Gb",
				m.bus,
				(GE) ? "FCoE Adapter" :
					"Fibre Channel Adapter");
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}
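
		/*
		 * Each QUE_RING_BUF64 IOCB carries one or two 64-bit buffer
		 * descriptors (BDEs): mp1 is posted in the first BDE and,
		 * when more buffers remain, mp2 in the second.
		 */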
		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
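
/*
 * S(N, V) rotates the 32-bit value V left by N bits; for example,
 * S(1, 0x80000001) yields 0x00000003. The constants seeded below and the
 * round structure in lpfc_sha_iterate() follow the standard SHA-1
 * algorithm (FIPS 180-1).
 */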
(typeof(m)){"LP101", max_speed, "PCI-X"}; 1603 break; 1604 case PCI_DEVICE_ID_LP10000S: 1605 m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; 1606 break; 1607 case PCI_DEVICE_ID_LP11000S: 1608 m = (typeof(m)){"LP11000-S", max_speed, 1609 "PCI-X2"}; 1610 break; 1611 case PCI_DEVICE_ID_LPE11000S: 1612 m = (typeof(m)){"LPe11000-S", max_speed, 1613 "PCIe"}; 1614 break; 1615 case PCI_DEVICE_ID_SAT: 1616 m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; 1617 break; 1618 case PCI_DEVICE_ID_SAT_MID: 1619 m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; 1620 break; 1621 case PCI_DEVICE_ID_SAT_SMB: 1622 m = (typeof(m)){"LPe121", max_speed, "PCIe"}; 1623 break; 1624 case PCI_DEVICE_ID_SAT_DCSP: 1625 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; 1626 break; 1627 case PCI_DEVICE_ID_SAT_SCSP: 1628 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; 1629 break; 1630 case PCI_DEVICE_ID_SAT_S: 1631 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1632 break; 1633 case PCI_DEVICE_ID_HORNET: 1634 m = (typeof(m)){"LP21000", max_speed, "PCIe"}; 1635 GE = 1; 1636 break; 1637 case PCI_DEVICE_ID_PROTEUS_VF: 1638 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1639 break; 1640 case PCI_DEVICE_ID_PROTEUS_PF: 1641 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1642 break; 1643 case PCI_DEVICE_ID_PROTEUS_S: 1644 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1645 break; 1646 case PCI_DEVICE_ID_TIGERSHARK: 1647 oneConnect = 1; 1648 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; 1649 break; 1650 default: 1651 m = (typeof(m)){ NULL }; 1652 break; 1653 } 1654 1655 if (mdp && mdp[0] == '\0') 1656 snprintf(mdp, 79,"%s", m.name); 1657 /* oneConnect hba requires special processing, they are all initiators 1658 * and we put the port number on the end 1659 */ 1660 if (descp && descp[0] == '\0') { 1661 if (oneConnect) 1662 snprintf(descp, 255, 1663 "Emulex OneConnect %s, FCoE Initiator, Port %s", 1664 m.name, 1665 phba->Port); 1666 else 1667 snprintf(descp, 255, 1668 "Emulex %s %d%s %s %s", 1669 m.name, m.max_speed, 1670 (GE) ? "GE" : "Gb", 1671 m.bus, 1672 (GE) ? "FCoE Adapter" : 1673 "Fibre Channel Adapter"); 1674 } 1675 } 1676 1677 /** 1678 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1679 * @phba: pointer to lpfc hba data structure. 1680 * @pring: pointer to a IOCB ring. 1681 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1682 * 1683 * This routine posts a given number of IOCBs with the associated DMA buffer 1684 * descriptors specified by the cnt argument to the given IOCB ring. 1685 * 1686 * Return codes 1687 * The number of IOCBs NOT able to be posted to the IOCB ring. 

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array entry referred to by
 * @HashWorking from the challenge random number associated with the host,
 * referred to by @RandomChallenge. The result is put into the entry of the
 * working hash array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;
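
	/*
	 * The 80-word working array is keyed from the WWNN: words 0/1 and
	 * 78/79 hold the two WWNN words, the rest start as zero from
	 * kcalloc, and the first 7 words are then XORed with the adapter's
	 * RandomData challenge before the SHA-1 rounds run.
	 */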
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
        HashResultPointer[0] = 0x67452301;
        HashResultPointer[1] = 0xEFCDAB89;
        HashResultPointer[2] = 0x98BADCFE;
        HashResultPointer[3] = 0x10325476;
        HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table and returned through @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
        int t;
        uint32_t TEMP;
        uint32_t A, B, C, D, E;
        t = 16;
        do {
                HashWorkingPointer[t] =
                    S(1,
                      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
                      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
        } while (++t <= 79);
        t = 0;
        A = HashResultPointer[0];
        B = HashResultPointer[1];
        C = HashResultPointer[2];
        D = HashResultPointer[3];
        E = HashResultPointer[4];

        do {
                if (t < 20) {
                        TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
                } else if (t < 40) {
                        TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
                } else if (t < 60) {
                        TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
                } else {
                        TEMP = (B ^ C ^ D) + 0xCA62C1D6;
                }
                TEMP += S(5, A) + E + HashWorkingPointer[t];
                E = D;
                D = C;
                C = S(30, B);
                B = A;
                A = TEMP;
        } while (++t <= 79);

        HashResultPointer[0] += A;
        HashResultPointer[1] += B;
        HashResultPointer[2] += C;
        HashResultPointer[3] += D;
        HashResultPointer[4] += E;
}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred to by @HashWorking
 * from the challenge random numbers associated with the host, referred to by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
        *HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
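 *
 * A sketch of the intended use (illustrative; the local hbainit array is
 * hypothetical, sized for the five 32-bit words this routine writes):
 *
 *      uint32_t hbainit[5];
 *
 *      lpfc_hba_init(phba, hbainit);
 *
 * On return, hbainit[] holds the response derived from the HBA's RandomData
 * challenge and the adapter WWNN.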
1900 **/ 1901 void 1902 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1903 { 1904 int t; 1905 uint32_t *HashWorking; 1906 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 1907 1908 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 1909 if (!HashWorking) 1910 return; 1911 1912 HashWorking[0] = HashWorking[78] = *pwwnn++; 1913 HashWorking[1] = HashWorking[79] = *pwwnn; 1914 1915 for (t = 0; t < 7; t++) 1916 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 1917 1918 lpfc_sha_init(hbainit); 1919 lpfc_sha_iterate(hbainit, HashWorking); 1920 kfree(HashWorking); 1921 } 1922 1923 /** 1924 * lpfc_cleanup - Performs vport cleanups before deleting a vport 1925 * @vport: pointer to a virtual N_Port data structure. 1926 * 1927 * This routine performs the necessary cleanups before deleting the @vport. 1928 * It invokes the discovery state machine to perform necessary state 1929 * transitions and to release the ndlps associated with the @vport. Note, 1930 * the physical port is treated as @vport 0. 1931 **/ 1932 void 1933 lpfc_cleanup(struct lpfc_vport *vport) 1934 { 1935 struct lpfc_hba *phba = vport->phba; 1936 struct lpfc_nodelist *ndlp, *next_ndlp; 1937 int i = 0; 1938 1939 if (phba->link_state > LPFC_LINK_DOWN) 1940 lpfc_port_link_failure(vport); 1941 1942 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1943 if (!NLP_CHK_NODE_ACT(ndlp)) { 1944 ndlp = lpfc_enable_node(vport, ndlp, 1945 NLP_STE_UNUSED_NODE); 1946 if (!ndlp) 1947 continue; 1948 spin_lock_irq(&phba->ndlp_lock); 1949 NLP_SET_FREE_REQ(ndlp); 1950 spin_unlock_irq(&phba->ndlp_lock); 1951 /* Trigger the release of the ndlp memory */ 1952 lpfc_nlp_put(ndlp); 1953 continue; 1954 } 1955 spin_lock_irq(&phba->ndlp_lock); 1956 if (NLP_CHK_FREE_REQ(ndlp)) { 1957 /* The ndlp should not be in memory free mode already */ 1958 spin_unlock_irq(&phba->ndlp_lock); 1959 continue; 1960 } else 1961 /* Indicate request for freeing ndlp memory */ 1962 NLP_SET_FREE_REQ(ndlp); 1963 spin_unlock_irq(&phba->ndlp_lock); 1964 1965 if (vport->port_type != LPFC_PHYSICAL_PORT && 1966 ndlp->nlp_DID == Fabric_DID) { 1967 /* Just free up ndlp with Fabric_DID for vports */ 1968 lpfc_nlp_put(ndlp); 1969 continue; 1970 } 1971 1972 if (ndlp->nlp_type & NLP_FABRIC) 1973 lpfc_disc_state_machine(vport, ndlp, NULL, 1974 NLP_EVT_DEVICE_RECOVERY); 1975 1976 lpfc_disc_state_machine(vport, ndlp, NULL, 1977 NLP_EVT_DEVICE_RM); 1978 1979 } 1980 1981 /* At this point, ALL ndlp's should be gone 1982 * because of the previous NLP_EVT_DEVICE_RM. 1983 * Lets wait for this to happen, if needed. 1984 */ 1985 while (!list_empty(&vport->fc_nodes)) { 1986 if (i++ > 3000) { 1987 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1988 "0233 Nodelist not empty\n"); 1989 list_for_each_entry_safe(ndlp, next_ndlp, 1990 &vport->fc_nodes, nlp_listp) { 1991 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 1992 LOG_NODE, 1993 "0282 did:x%x ndlp:x%p " 1994 "usgmap:x%x refcnt:%d\n", 1995 ndlp->nlp_DID, (void *)ndlp, 1996 ndlp->nlp_usg_map, 1997 atomic_read( 1998 &ndlp->kref.refcount)); 1999 } 2000 break; 2001 } 2002 2003 /* Wait for any activity on ndlps to settle */ 2004 msleep(10); 2005 } 2006 } 2007 2008 /** 2009 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2010 * @vport: pointer to a virtual N_Port data structure. 2011 * 2012 * This routine stops all the timers associated with a @vport. This function 2013 * is invoked before disabling or deleting a @vport. Note that the physical 2014 * port is treated as @vport 0. 
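 *
 * For example, lpfc_offline() later in this file walks the vport work array
 * and calls this routine for every active vport before taking the SLI layer
 * down.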
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
        del_timer_sync(&vport->els_tmofunc);
        del_timer_sync(&vport->fc_fdmitmo);
        lpfc_can_disctmo(vport);
        return;
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
        lpfc_stop_vport_timers(phba->pport);
        del_timer_sync(&phba->sli.mbox_tmo);
        del_timer_sync(&phba->fabric_block_timer);
        del_timer_sync(&phba->eratt_poll);
        del_timer_sync(&phba->hb_tmofunc);
        phba->hb_outstanding = 0;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                /* Stop any LightPulse device specific driver timers */
                del_timer_sync(&phba->fcp_poll_timer);
                break;
        case LPFC_PCI_DEV_OC:
                /* Stop any OneConnect device specific driver timers */
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0297 Invalid device group (x%x)\n",
                                phba->pci_dev_grp);
                break;
        }
        return;
}

/**
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks an HBA's management interface as blocked. Once the
 * HBA's management interface is marked as blocked, all user space access to
 * the HBA, whether from the sysfs interface or the libdfc interface, will
 * be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
        unsigned long iflag;

        spin_lock_irqsave(&phba->hbalock, iflag);
        phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings it online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA from interfering with the driver initialization.
2086 * 2087 * Return codes 2088 * 0 - successful 2089 * 1 - failed 2090 **/ 2091 int 2092 lpfc_online(struct lpfc_hba *phba) 2093 { 2094 struct lpfc_vport *vport; 2095 struct lpfc_vport **vports; 2096 int i; 2097 2098 if (!phba) 2099 return 0; 2100 vport = phba->pport; 2101 2102 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2103 return 0; 2104 2105 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2106 "0458 Bring Adapter online\n"); 2107 2108 lpfc_block_mgmt_io(phba); 2109 2110 if (!lpfc_sli_queue_setup(phba)) { 2111 lpfc_unblock_mgmt_io(phba); 2112 return 1; 2113 } 2114 2115 if (phba->sli_rev == LPFC_SLI_REV4) { 2116 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2117 lpfc_unblock_mgmt_io(phba); 2118 return 1; 2119 } 2120 } else { 2121 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2122 lpfc_unblock_mgmt_io(phba); 2123 return 1; 2124 } 2125 } 2126 2127 vports = lpfc_create_vport_work_array(phba); 2128 if (vports != NULL) 2129 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2130 struct Scsi_Host *shost; 2131 shost = lpfc_shost_from_vport(vports[i]); 2132 spin_lock_irq(shost->host_lock); 2133 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2134 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2135 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2136 spin_unlock_irq(shost->host_lock); 2137 } 2138 lpfc_destroy_vport_work_array(phba, vports); 2139 2140 lpfc_unblock_mgmt_io(phba); 2141 return 0; 2142 } 2143 2144 /** 2145 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2146 * @phba: pointer to lpfc hba data structure. 2147 * 2148 * This routine marks a HBA's management interface as not blocked. Once the 2149 * HBA's management interface is marked as not blocked, all the user space 2150 * access to the HBA, whether they are from sysfs interface or libdfc 2151 * interface will be allowed. The HBA is set to block the management interface 2152 * when the driver prepares the HBA interface for online or offline and then 2153 * set to unblock the management interface afterwards. 2154 **/ 2155 void 2156 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2157 { 2158 unsigned long iflag; 2159 2160 spin_lock_irqsave(&phba->hbalock, iflag); 2161 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2162 spin_unlock_irqrestore(&phba->hbalock, iflag); 2163 } 2164 2165 /** 2166 * lpfc_offline_prep - Prepare a HBA to be brought offline 2167 * @phba: pointer to lpfc hba data structure. 2168 * 2169 * This routine is invoked to prepare a HBA to be brought offline. It performs 2170 * unregistration login to all the nodes on all vports and flushes the mailbox 2171 * queue to make it ready to be brought offline. 
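 *
 * The typical offline/online sequence, as used by lpfc_reset_hba() later in
 * this file, is:
 *
 *      lpfc_offline_prep(phba);
 *      lpfc_offline(phba);
 *      lpfc_sli_brdrestart(phba);
 *      lpfc_online(phba);
 *      lpfc_unblock_mgmt_io(phba);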
2172 **/ 2173 void 2174 lpfc_offline_prep(struct lpfc_hba * phba) 2175 { 2176 struct lpfc_vport *vport = phba->pport; 2177 struct lpfc_nodelist *ndlp, *next_ndlp; 2178 struct lpfc_vport **vports; 2179 int i; 2180 2181 if (vport->fc_flag & FC_OFFLINE_MODE) 2182 return; 2183 2184 lpfc_block_mgmt_io(phba); 2185 2186 lpfc_linkdown(phba); 2187 2188 /* Issue an unreg_login to all nodes on all vports */ 2189 vports = lpfc_create_vport_work_array(phba); 2190 if (vports != NULL) { 2191 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2192 struct Scsi_Host *shost; 2193 2194 if (vports[i]->load_flag & FC_UNLOADING) 2195 continue; 2196 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 2197 shost = lpfc_shost_from_vport(vports[i]); 2198 list_for_each_entry_safe(ndlp, next_ndlp, 2199 &vports[i]->fc_nodes, 2200 nlp_listp) { 2201 if (!NLP_CHK_NODE_ACT(ndlp)) 2202 continue; 2203 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2204 continue; 2205 if (ndlp->nlp_type & NLP_FABRIC) { 2206 lpfc_disc_state_machine(vports[i], ndlp, 2207 NULL, NLP_EVT_DEVICE_RECOVERY); 2208 lpfc_disc_state_machine(vports[i], ndlp, 2209 NULL, NLP_EVT_DEVICE_RM); 2210 } 2211 spin_lock_irq(shost->host_lock); 2212 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2213 spin_unlock_irq(shost->host_lock); 2214 lpfc_unreg_rpi(vports[i], ndlp); 2215 } 2216 } 2217 } 2218 lpfc_destroy_vport_work_array(phba, vports); 2219 2220 lpfc_sli_mbox_sys_shutdown(phba); 2221 } 2222 2223 /** 2224 * lpfc_offline - Bring a HBA offline 2225 * @phba: pointer to lpfc hba data structure. 2226 * 2227 * This routine actually brings a HBA offline. It stops all the timers 2228 * associated with the HBA, brings down the SLI layer, and eventually 2229 * marks the HBA as in offline state for the upper layer protocol. 2230 **/ 2231 void 2232 lpfc_offline(struct lpfc_hba *phba) 2233 { 2234 struct Scsi_Host *shost; 2235 struct lpfc_vport **vports; 2236 int i; 2237 2238 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2239 return; 2240 2241 /* stop port and all timers associated with this hba */ 2242 lpfc_stop_port(phba); 2243 vports = lpfc_create_vport_work_array(phba); 2244 if (vports != NULL) 2245 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2246 lpfc_stop_vport_timers(vports[i]); 2247 lpfc_destroy_vport_work_array(phba, vports); 2248 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2249 "0460 Bring Adapter offline\n"); 2250 /* Bring down the SLI Layer and cleanup. The HBA is offline 2251 now. */ 2252 lpfc_sli_hba_down(phba); 2253 spin_lock_irq(&phba->hbalock); 2254 phba->work_ha = 0; 2255 spin_unlock_irq(&phba->hbalock); 2256 vports = lpfc_create_vport_work_array(phba); 2257 if (vports != NULL) 2258 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2259 shost = lpfc_shost_from_vport(vports[i]); 2260 spin_lock_irq(shost->host_lock); 2261 vports[i]->work_port_events = 0; 2262 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2263 spin_unlock_irq(shost->host_lock); 2264 } 2265 lpfc_destroy_vport_work_array(phba, vports); 2266 } 2267 2268 /** 2269 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2270 * @phba: pointer to lpfc hba data structure. 2271 * 2272 * This routine is to free all the SCSI buffers and IOCBs from the driver 2273 * list back to kernel. It is called from lpfc_pci_remove_one to free 2274 * the internal resources before the device is removed from the system. 
2275 * 2276 * Return codes 2277 * 0 - successful (for now, it always returns 0) 2278 **/ 2279 static int 2280 lpfc_scsi_free(struct lpfc_hba *phba) 2281 { 2282 struct lpfc_scsi_buf *sb, *sb_next; 2283 struct lpfc_iocbq *io, *io_next; 2284 2285 spin_lock_irq(&phba->hbalock); 2286 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2287 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2288 list_del(&sb->list); 2289 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2290 sb->dma_handle); 2291 kfree(sb); 2292 phba->total_scsi_bufs--; 2293 } 2294 2295 /* Release all the lpfc_iocbq entries maintained by this host. */ 2296 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2297 list_del(&io->list); 2298 kfree(io); 2299 phba->total_iocbq_bufs--; 2300 } 2301 2302 spin_unlock_irq(&phba->hbalock); 2303 2304 return 0; 2305 } 2306 2307 /** 2308 * lpfc_create_port - Create an FC port 2309 * @phba: pointer to lpfc hba data structure. 2310 * @instance: a unique integer ID to this FC port. 2311 * @dev: pointer to the device data structure. 2312 * 2313 * This routine creates a FC port for the upper layer protocol. The FC port 2314 * can be created on top of either a physical port or a virtual port provided 2315 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2316 * and associates the FC port created before adding the shost into the SCSI 2317 * layer. 2318 * 2319 * Return codes 2320 * @vport - pointer to the virtual N_Port data structure. 2321 * NULL - port create failed. 2322 **/ 2323 struct lpfc_vport * 2324 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2325 { 2326 struct lpfc_vport *vport; 2327 struct Scsi_Host *shost; 2328 int error = 0; 2329 2330 if (dev != &phba->pcidev->dev) 2331 shost = scsi_host_alloc(&lpfc_vport_template, 2332 sizeof(struct lpfc_vport)); 2333 else 2334 shost = scsi_host_alloc(&lpfc_template, 2335 sizeof(struct lpfc_vport)); 2336 if (!shost) 2337 goto out; 2338 2339 vport = (struct lpfc_vport *) shost->hostdata; 2340 vport->phba = phba; 2341 vport->load_flag |= FC_LOADING; 2342 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2343 vport->fc_rscn_flush = 0; 2344 2345 lpfc_get_vport_cfgparam(vport); 2346 shost->unique_id = instance; 2347 shost->max_id = LPFC_MAX_TARGET; 2348 shost->max_lun = vport->cfg_max_luns; 2349 shost->this_id = -1; 2350 shost->max_cmd_len = 16; 2351 if (phba->sli_rev == LPFC_SLI_REV4) { 2352 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; 2353 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2354 } 2355 2356 /* 2357 * Set initial can_queue value since 0 is no longer supported and 2358 * scsi_add_host will fail. This will be adjusted later based on the 2359 * max xri value determined in hba setup. 2360 */ 2361 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2362 if (dev != &phba->pcidev->dev) { 2363 shost->transportt = lpfc_vport_transport_template; 2364 vport->port_type = LPFC_NPIV_PORT; 2365 } else { 2366 shost->transportt = lpfc_transport_template; 2367 vport->port_type = LPFC_PHYSICAL_PORT; 2368 } 2369 2370 /* Initialize all internally managed lists. 
*/ 2371 INIT_LIST_HEAD(&vport->fc_nodes); 2372 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2373 spin_lock_init(&vport->work_port_lock); 2374 2375 init_timer(&vport->fc_disctmo); 2376 vport->fc_disctmo.function = lpfc_disc_timeout; 2377 vport->fc_disctmo.data = (unsigned long)vport; 2378 2379 init_timer(&vport->fc_fdmitmo); 2380 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2381 vport->fc_fdmitmo.data = (unsigned long)vport; 2382 2383 init_timer(&vport->els_tmofunc); 2384 vport->els_tmofunc.function = lpfc_els_timeout; 2385 vport->els_tmofunc.data = (unsigned long)vport; 2386 2387 error = scsi_add_host(shost, dev); 2388 if (error) 2389 goto out_put_shost; 2390 2391 spin_lock_irq(&phba->hbalock); 2392 list_add_tail(&vport->listentry, &phba->port_list); 2393 spin_unlock_irq(&phba->hbalock); 2394 return vport; 2395 2396 out_put_shost: 2397 scsi_host_put(shost); 2398 out: 2399 return NULL; 2400 } 2401 2402 /** 2403 * destroy_port - destroy an FC port 2404 * @vport: pointer to an lpfc virtual N_Port data structure. 2405 * 2406 * This routine destroys a FC port from the upper layer protocol. All the 2407 * resources associated with the port are released. 2408 **/ 2409 void 2410 destroy_port(struct lpfc_vport *vport) 2411 { 2412 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2413 struct lpfc_hba *phba = vport->phba; 2414 2415 lpfc_debugfs_terminate(vport); 2416 fc_remove_host(shost); 2417 scsi_remove_host(shost); 2418 2419 spin_lock_irq(&phba->hbalock); 2420 list_del_init(&vport->listentry); 2421 spin_unlock_irq(&phba->hbalock); 2422 2423 lpfc_cleanup(vport); 2424 return; 2425 } 2426 2427 /** 2428 * lpfc_get_instance - Get a unique integer ID 2429 * 2430 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2431 * uses the kernel idr facility to perform the task. 2432 * 2433 * Return codes: 2434 * instance - a unique integer ID allocated as the new instance. 2435 * -1 - lpfc get instance failed. 2436 **/ 2437 int 2438 lpfc_get_instance(void) 2439 { 2440 int instance = 0; 2441 2442 /* Assign an unused number */ 2443 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2444 return -1; 2445 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2446 return -1; 2447 return instance; 2448 } 2449 2450 /** 2451 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2452 * @shost: pointer to SCSI host data structure. 2453 * @time: elapsed time of the scan in jiffies. 2454 * 2455 * This routine is called by the SCSI layer with a SCSI host to determine 2456 * whether the scan host is finished. 2457 * 2458 * Note: there is no scan_start function as adapter initialization will have 2459 * asynchronously kicked off the link initialization. 2460 * 2461 * Return codes 2462 * 0 - SCSI host scan is not over yet. 2463 * 1 - SCSI host scan is over. 2464 **/ 2465 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2466 { 2467 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2468 struct lpfc_hba *phba = vport->phba; 2469 int stat = 0; 2470 2471 spin_lock_irq(shost->host_lock); 2472 2473 if (vport->load_flag & FC_UNLOADING) { 2474 stat = 1; 2475 goto finished; 2476 } 2477 if (time >= 30 * HZ) { 2478 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2479 "0461 Scanning longer than 30 " 2480 "seconds. Continuing initialization\n"); 2481 stat = 1; 2482 goto finished; 2483 } 2484 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2485 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2486 "0465 Link down longer than 15 " 2487 "seconds. 
Continuing initialization\n"); 2488 stat = 1; 2489 goto finished; 2490 } 2491 2492 if (vport->port_state != LPFC_VPORT_READY) 2493 goto finished; 2494 if (vport->num_disc_nodes || vport->fc_prli_sent) 2495 goto finished; 2496 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2497 goto finished; 2498 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2499 goto finished; 2500 2501 stat = 1; 2502 2503 finished: 2504 spin_unlock_irq(shost->host_lock); 2505 return stat; 2506 } 2507 2508 /** 2509 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2510 * @shost: pointer to SCSI host data structure. 2511 * 2512 * This routine initializes a given SCSI host attributes on a FC port. The 2513 * SCSI host can be either on top of a physical port or a virtual port. 2514 **/ 2515 void lpfc_host_attrib_init(struct Scsi_Host *shost) 2516 { 2517 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2518 struct lpfc_hba *phba = vport->phba; 2519 /* 2520 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 2521 */ 2522 2523 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2524 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2525 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2526 2527 memset(fc_host_supported_fc4s(shost), 0, 2528 sizeof(fc_host_supported_fc4s(shost))); 2529 fc_host_supported_fc4s(shost)[2] = 1; 2530 fc_host_supported_fc4s(shost)[7] = 1; 2531 2532 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2533 sizeof fc_host_symbolic_name(shost)); 2534 2535 fc_host_supported_speeds(shost) = 0; 2536 if (phba->lmt & LMT_10Gb) 2537 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2538 if (phba->lmt & LMT_8Gb) 2539 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2540 if (phba->lmt & LMT_4Gb) 2541 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2542 if (phba->lmt & LMT_2Gb) 2543 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2544 if (phba->lmt & LMT_1Gb) 2545 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2546 2547 fc_host_maxframe_size(shost) = 2548 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2549 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2550 2551 /* This value is also unchanging */ 2552 memset(fc_host_active_fc4s(shost), 0, 2553 sizeof(fc_host_active_fc4s(shost))); 2554 fc_host_active_fc4s(shost)[2] = 1; 2555 fc_host_active_fc4s(shost)[7] = 1; 2556 2557 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2558 spin_lock_irq(shost->host_lock); 2559 vport->load_flag &= ~FC_LOADING; 2560 spin_unlock_irq(shost->host_lock); 2561 } 2562 2563 /** 2564 * lpfc_stop_port_s3 - Stop SLI3 device port 2565 * @phba: pointer to lpfc hba data structure. 2566 * 2567 * This routine is invoked to stop an SLI3 device port, it stops the device 2568 * from generating interrupts and stops the device driver's timers for the 2569 * device. 2570 **/ 2571 static void 2572 lpfc_stop_port_s3(struct lpfc_hba *phba) 2573 { 2574 /* Clear all interrupt enable conditions */ 2575 writel(0, phba->HCregaddr); 2576 readl(phba->HCregaddr); /* flush */ 2577 /* Clear all pending interrupts */ 2578 writel(0xffffffff, phba->HAregaddr); 2579 readl(phba->HAregaddr); /* flush */ 2580 2581 /* Reset some HBA SLI setup states */ 2582 lpfc_stop_hba_timers(phba); 2583 phba->pport->work_port_events = 0; 2584 } 2585 2586 /** 2587 * lpfc_stop_port_s4 - Stop SLI4 device port 2588 * @phba: pointer to lpfc hba data structure. 
 *
 * This routine is invoked to stop an SLI4 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
        /* Reset some HBA SLI4 setup states */
        lpfc_stop_hba_timers(phba);
        phba->pport->work_port_events = 0;
        phba->sli4_hba.intr_enable = 0;
        /* Hard clear it for now, shall have more graceful way to wait later */
        phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
        phba->lpfc_stop_port(phba);
}

/**
 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the driver default fcf record from
 * the port. This routine currently acts on FCF Index 0.
 *
 **/
void
lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
{
        int rc = 0;
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
        uint32_t mbox_tmo, req_len;
        uint32_t shdr_status, shdr_add_status;

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2020 Failed to allocate mbox for DEL_FCF cmd\n");
                return;
        }

        req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
                  sizeof(struct lpfc_sli4_cfg_mhdr);
        rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
                              LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
                              req_len, LPFC_SLI4_MBX_EMBED);
        /*
         * In phase 1, there is a single FCF index, 0. In phase 2, the driver
         * supports multiple FCF indices.
         */
        del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
        bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
        bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
               phba->fcf.fcf_indx);

        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        else {
                mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
                rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
        }
        /* The IOCTL status is embedded in the mailbox subheader. */
        shdr_status = bf_get(lpfc_mbox_hdr_status,
                             &del_fcf_record->header.cfg_shdr.response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
                                 &del_fcf_record->header.cfg_shdr.response);
        if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "2516 DEL FCF of default FCF Index failed "
                                "mbx status x%x, status x%x add_status x%x\n",
                                rc, shdr_status, shdr_add_status);
        }
        if (rc != MBX_TIMEOUT)
                mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
2685 * 2686 * Return: Link-attention status in terms of base driver's coding. 2687 **/ 2688 static uint16_t 2689 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 2690 struct lpfc_acqe_link *acqe_link) 2691 { 2692 uint16_t latt_fault; 2693 2694 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 2695 case LPFC_ASYNC_LINK_FAULT_NONE: 2696 case LPFC_ASYNC_LINK_FAULT_LOCAL: 2697 case LPFC_ASYNC_LINK_FAULT_REMOTE: 2698 latt_fault = 0; 2699 break; 2700 default: 2701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2702 "0398 Invalid link fault code: x%x\n", 2703 bf_get(lpfc_acqe_link_fault, acqe_link)); 2704 latt_fault = MBXERR_ERROR; 2705 break; 2706 } 2707 return latt_fault; 2708 } 2709 2710 /** 2711 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 2712 * @phba: pointer to lpfc hba data structure. 2713 * @acqe_link: pointer to the async link completion queue entry. 2714 * 2715 * This routine is to parse the SLI4 link attention type and translate it 2716 * into the base driver's link attention type coding. 2717 * 2718 * Return: Link attention type in terms of base driver's coding. 2719 **/ 2720 static uint8_t 2721 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 2722 struct lpfc_acqe_link *acqe_link) 2723 { 2724 uint8_t att_type; 2725 2726 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 2727 case LPFC_ASYNC_LINK_STATUS_DOWN: 2728 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 2729 att_type = AT_LINK_DOWN; 2730 break; 2731 case LPFC_ASYNC_LINK_STATUS_UP: 2732 /* Ignore physical link up events - wait for logical link up */ 2733 att_type = AT_RESERVED; 2734 break; 2735 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 2736 att_type = AT_LINK_UP; 2737 break; 2738 default: 2739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2740 "0399 Invalid link attention type: x%x\n", 2741 bf_get(lpfc_acqe_link_status, acqe_link)); 2742 att_type = AT_RESERVED; 2743 break; 2744 } 2745 return att_type; 2746 } 2747 2748 /** 2749 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 2750 * @phba: pointer to lpfc hba data structure. 2751 * @acqe_link: pointer to the async link completion queue entry. 2752 * 2753 * This routine is to parse the SLI4 link-attention link speed and translate 2754 * it into the base driver's link-attention link speed coding. 2755 * 2756 * Return: Link-attention link speed in terms of base driver's coding. 2757 **/ 2758 static uint8_t 2759 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 2760 struct lpfc_acqe_link *acqe_link) 2761 { 2762 uint8_t link_speed; 2763 2764 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 2765 case LPFC_ASYNC_LINK_SPEED_ZERO: 2766 link_speed = LA_UNKNW_LINK; 2767 break; 2768 case LPFC_ASYNC_LINK_SPEED_10MBPS: 2769 link_speed = LA_UNKNW_LINK; 2770 break; 2771 case LPFC_ASYNC_LINK_SPEED_100MBPS: 2772 link_speed = LA_UNKNW_LINK; 2773 break; 2774 case LPFC_ASYNC_LINK_SPEED_1GBPS: 2775 link_speed = LA_1GHZ_LINK; 2776 break; 2777 case LPFC_ASYNC_LINK_SPEED_10GBPS: 2778 link_speed = LA_10GHZ_LINK; 2779 break; 2780 default: 2781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2782 "0483 Invalid link-attention link speed: x%x\n", 2783 bf_get(lpfc_acqe_link_speed, acqe_link)); 2784 link_speed = LA_UNKNW_LINK; 2785 break; 2786 } 2787 return link_speed; 2788 } 2789 2790 /** 2791 * lpfc_sli4_async_link_evt - Process the asynchronous link event 2792 * @phba: pointer to lpfc hba data structure. 2793 * @acqe_link: pointer to the async link completion queue entry. 2794 * 2795 * This routine is to handle the SLI4 asynchronous link event. 
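 *
 * The ACQE is translated into a pseudo READ_LA mailbox completion so that
 * the common lpfc_mbx_cmpl_read_la() handler can process SLI4 link events
 * through the same path as SLI3 link attention interrupts.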
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
                         struct lpfc_acqe_link *acqe_link)
{
        struct lpfc_dmabuf *mp;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        READ_LA_VAR *la;
        uint8_t att_type;

        att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
        if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
                return;
        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0395 The mboxq allocation failed\n");
                return;
        }
        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (!mp) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0396 The lpfc_dmabuf allocation failed\n");
                goto out_free_pmb;
        }
        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
        if (!mp->virt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0397 The mbuf allocation failed\n");
                goto out_free_dmabuf;
        }

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);

        /* Block ELS IOCBs until we are done processing the link event */
        phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

        /* Update link event statistics */
        phba->sli.slistat.link_event++;

        /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
        lpfc_read_la(phba, pmb, mp);
        pmb->vport = phba->pport;

        /* Parse and translate status field */
        mb = &pmb->u.mb;
        mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

        /* Parse and translate link attention fields */
        la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
        la->eventTag = acqe_link->event_tag;
        la->attType = att_type;
        la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);

        /* Fake the following irrelevant fields */
        la->topology = TOPOLOGY_PT_PT;
        la->granted_AL_PA = 0;
        la->il = 0;
        la->pb = 0;
        la->fa = 0;
        la->mm = 0;

        /* Keep the link status for extra SLI4 state machine reference */
        phba->sli4_hba.link_state.speed =
                                bf_get(lpfc_acqe_link_speed, acqe_link);
        phba->sli4_hba.link_state.duplex =
                                bf_get(lpfc_acqe_link_duplex, acqe_link);
        phba->sli4_hba.link_state.status =
                                bf_get(lpfc_acqe_link_status, acqe_link);
        phba->sli4_hba.link_state.physical =
                                bf_get(lpfc_acqe_link_physical, acqe_link);
        phba->sli4_hba.link_state.fault =
                                bf_get(lpfc_acqe_link_fault, acqe_link);

        /* Invoke the lpfc_handle_latt mailbox command callback function */
        lpfc_mbx_cmpl_read_la(phba, pmb);

        return;

out_free_dmabuf:
        kfree(mp);
out_free_pmb:
        mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
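 *
 * New-FCF events trigger a read of the FCF table to re-discover the SAN;
 * an FCF-dead event for the FCF currently in use is treated as a link
 * down, since only a single FCF is supported at this point.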
 **/
static void
lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
                         struct lpfc_acqe_fcoe *acqe_fcoe)
{
        uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
        int rc;

        switch (event_type) {
        case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                        "2546 New FCF found index 0x%x tag 0x%x\n",
                        acqe_fcoe->fcf_index,
                        acqe_fcoe->event_tag);
                /*
                 * If the current FCF is in discovered state,
                 * do nothing.
                 */
                spin_lock_irq(&phba->hbalock);
                if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
                        spin_unlock_irq(&phba->hbalock);
                        break;
                }
                spin_unlock_irq(&phba->hbalock);

                /* Read the FCF table and re-discover SAN. */
                rc = lpfc_sli4_read_fcf_record(phba,
                                               LPFC_FCOE_FCF_GET_FIRST);
                if (rc)
                        lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                                        "2547 Read FCF record failed 0x%x\n",
                                        rc);
                break;

        case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "2548 FCF Table full count 0x%x tag 0x%x\n",
                        bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
                        acqe_fcoe->event_tag);
                break;

        case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
                        "2549 FCF disconnected from network index 0x%x"
                        " tag 0x%x\n", acqe_fcoe->fcf_index,
                        acqe_fcoe->event_tag);
                /* If the event is not for the currently used fcf do nothing */
                if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
                        break;
                /*
                 * Currently, the driver supports only one FCF - so treat
                 * this as a link down.
                 */
                lpfc_linkdown(phba);
                /* Unregister FCF if no devices connected to it */
                lpfc_unregister_unused_fcf(phba);
                break;

        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0288 Unknown FCoE event type 0x%x event tag "
                        "0x%x\n", event_type, acqe_fcoe->event_tag);
                break;
        }
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
                         struct lpfc_acqe_dcbx *acqe_dcbx)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0290 The SLI4 DCBX asynchronous event is not "
                        "handled yet\n");
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
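 *
 * Events are dequeued from the sp_asynce_work_queue under hbalock and
 * dispatched on the CQE trailer code (LINK, FCOE or DCBX); each completion
 * event is returned to the free pool once processed.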
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
        struct lpfc_cq_event *cq_event;

        /* First, declare the async event has been handled */
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~ASYNC_EVENT;
        spin_unlock_irq(&phba->hbalock);
        /* Now, handle all the async events */
        while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
                /* Get the first event from the head of the event queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);
                /* Process the asynchronous event */
                switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
                case LPFC_TRAILER_CODE_LINK:
                        lpfc_sli4_async_link_evt(phba,
                                                 &cq_event->cqe.acqe_link);
                        break;
                case LPFC_TRAILER_CODE_FCOE:
                        lpfc_sli4_async_fcoe_evt(phba,
                                                 &cq_event->cqe.acqe_fcoe);
                        break;
                case LPFC_TRAILER_CODE_DCBX:
                        lpfc_sli4_async_dcbx_evt(phba,
                                                 &cq_event->cqe.acqe_dcbx);
                        break;
                default:
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "1804 Invalid asynchronous event code: "
                                        "x%x\n", bf_get(lpfc_trailer_code,
                                        &cq_event->cqe.mcqe_cmpl));
                        break;
                }
                /* Free the completion event processed to the free pool */
                lpfc_sli4_cq_event_release(phba, cq_event);
        }
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if successful, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
        int rc;

        /* Set up lpfc PCI-device group */
        phba->pci_dev_grp = dev_grp;

        /* The LPFC_PCI_DEV_OC uses SLI4 */
        if (dev_grp == LPFC_PCI_DEV_OC)
                phba->sli_rev = LPFC_SLI_REV4;

        /* Set up device INIT API function jump table */
        rc = lpfc_init_api_table_setup(phba, dev_grp);
        if (rc)
                return -ENODEV;
        /* Set up SCSI API function jump table */
        rc = lpfc_scsi_api_table_setup(phba, dev_grp);
        if (rc)
                return -ENODEV;
        /* Set up SLI API function jump table */
        rc = lpfc_sli_api_table_setup(phba, dev_grp);
        if (rc)
                return -ENODEV;
        /* Set up MBOX API function jump table */
        rc = lpfc_mbox_api_table_setup(phba, dev_grp);
        if (rc)
                return -ENODEV;

        return 0;
}

/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
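 *
 * The mode values map to the PCI interrupt methods: 0 - INTx, 1 - MSI,
 * 2 - MSI-X; any other value is reported as illegal.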
3068 **/ 3069 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 3070 { 3071 switch (intr_mode) { 3072 case 0: 3073 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3074 "0470 Enable INTx interrupt mode.\n"); 3075 break; 3076 case 1: 3077 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3078 "0481 Enabled MSI interrupt mode.\n"); 3079 break; 3080 case 2: 3081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3082 "0480 Enabled MSI-X interrupt mode.\n"); 3083 break; 3084 default: 3085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3086 "0482 Illegal interrupt mode.\n"); 3087 break; 3088 } 3089 return; 3090 } 3091 3092 /** 3093 * lpfc_enable_pci_dev - Enable a generic PCI device. 3094 * @phba: pointer to lpfc hba data structure. 3095 * 3096 * This routine is invoked to enable the PCI device that is common to all 3097 * PCI devices. 3098 * 3099 * Return codes 3100 * 0 - sucessful 3101 * other values - error 3102 **/ 3103 static int 3104 lpfc_enable_pci_dev(struct lpfc_hba *phba) 3105 { 3106 struct pci_dev *pdev; 3107 int bars; 3108 3109 /* Obtain PCI device reference */ 3110 if (!phba->pcidev) 3111 goto out_error; 3112 else 3113 pdev = phba->pcidev; 3114 /* Select PCI BARs */ 3115 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3116 /* Enable PCI device */ 3117 if (pci_enable_device_mem(pdev)) 3118 goto out_error; 3119 /* Request PCI resource for the device */ 3120 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 3121 goto out_disable_device; 3122 /* Set up device as PCI master and save state for EEH */ 3123 pci_set_master(pdev); 3124 pci_try_set_mwi(pdev); 3125 pci_save_state(pdev); 3126 3127 return 0; 3128 3129 out_disable_device: 3130 pci_disable_device(pdev); 3131 out_error: 3132 return -ENODEV; 3133 } 3134 3135 /** 3136 * lpfc_disable_pci_dev - Disable a generic PCI device. 3137 * @phba: pointer to lpfc hba data structure. 3138 * 3139 * This routine is invoked to disable the PCI device that is common to all 3140 * PCI devices. 3141 **/ 3142 static void 3143 lpfc_disable_pci_dev(struct lpfc_hba *phba) 3144 { 3145 struct pci_dev *pdev; 3146 int bars; 3147 3148 /* Obtain PCI device reference */ 3149 if (!phba->pcidev) 3150 return; 3151 else 3152 pdev = phba->pcidev; 3153 /* Select PCI BARs */ 3154 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3155 /* Release PCI resource and disable PCI device */ 3156 pci_release_selected_regions(pdev, bars); 3157 pci_disable_device(pdev); 3158 /* Null out PCI private reference to driver */ 3159 pci_set_drvdata(pdev, NULL); 3160 3161 return; 3162 } 3163 3164 /** 3165 * lpfc_reset_hba - Reset a hba 3166 * @phba: pointer to lpfc hba data structure. 3167 * 3168 * This routine is invoked to reset a hba device. It brings the HBA 3169 * offline, performs a board restart, and then brings the board back 3170 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 3171 * on outstanding mailbox commands. 3172 **/ 3173 void 3174 lpfc_reset_hba(struct lpfc_hba *phba) 3175 { 3176 /* If resets are disabled then set error state and return. */ 3177 if (!phba->cfg_enable_hba_reset) { 3178 phba->link_state = LPFC_HBA_ERROR; 3179 return; 3180 } 3181 lpfc_offline_prep(phba); 3182 lpfc_offline(phba); 3183 lpfc_sli_brdrestart(phba); 3184 lpfc_online(phba); 3185 lpfc_unblock_mgmt_io(phba); 3186 } 3187 3188 /** 3189 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 3190 * @phba: pointer to lpfc hba data structure. 
3191 * 3192 * This routine is invoked to set up the driver internal resources specific to 3193 * support the SLI-3 HBA device it attached to. 3194 * 3195 * Return codes 3196 * 0 - sucessful 3197 * other values - error 3198 **/ 3199 static int 3200 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 3201 { 3202 struct lpfc_sli *psli; 3203 3204 /* 3205 * Initialize timers used by driver 3206 */ 3207 3208 /* Heartbeat timer */ 3209 init_timer(&phba->hb_tmofunc); 3210 phba->hb_tmofunc.function = lpfc_hb_timeout; 3211 phba->hb_tmofunc.data = (unsigned long)phba; 3212 3213 psli = &phba->sli; 3214 /* MBOX heartbeat timer */ 3215 init_timer(&psli->mbox_tmo); 3216 psli->mbox_tmo.function = lpfc_mbox_timeout; 3217 psli->mbox_tmo.data = (unsigned long) phba; 3218 /* FCP polling mode timer */ 3219 init_timer(&phba->fcp_poll_timer); 3220 phba->fcp_poll_timer.function = lpfc_poll_timeout; 3221 phba->fcp_poll_timer.data = (unsigned long) phba; 3222 /* Fabric block timer */ 3223 init_timer(&phba->fabric_block_timer); 3224 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3225 phba->fabric_block_timer.data = (unsigned long) phba; 3226 /* EA polling mode timer */ 3227 init_timer(&phba->eratt_poll); 3228 phba->eratt_poll.function = lpfc_poll_eratt; 3229 phba->eratt_poll.data = (unsigned long) phba; 3230 3231 /* Host attention work mask setup */ 3232 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 3233 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 3234 3235 /* Get all the module params for configuring this host */ 3236 lpfc_get_cfgparam(phba); 3237 /* 3238 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 3239 * used to create the sg_dma_buf_pool must be dynamically calculated. 3240 * 2 segments are added since the IOCB needs a command and response bde. 3241 */ 3242 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 3243 sizeof(struct fcp_rsp) + 3244 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 3245 3246 if (phba->cfg_enable_bg) { 3247 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 3248 phba->cfg_sg_dma_buf_size += 3249 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 3250 } 3251 3252 /* Also reinitialize the host templates with new values. */ 3253 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3254 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3255 3256 phba->max_vpi = LPFC_MAX_VPI; 3257 /* This will be set to correct value after config_port mbox */ 3258 phba->max_vports = 0; 3259 3260 /* 3261 * Initialize the SLI Layer to run with lpfc HBAs. 3262 */ 3263 lpfc_sli_setup(phba); 3264 lpfc_sli_queue_setup(phba); 3265 3266 /* Allocate device driver memory */ 3267 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 3268 return -ENOMEM; 3269 3270 return 0; 3271 } 3272 3273 /** 3274 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 3275 * @phba: pointer to lpfc hba data structure. 3276 * 3277 * This routine is invoked to unset the driver internal resources set up 3278 * specific for supporting the SLI-3 HBA device it attached to. 3279 **/ 3280 static void 3281 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 3282 { 3283 /* Free device driver memory allocated */ 3284 lpfc_mem_free_all(phba); 3285 3286 return; 3287 } 3288 3289 /** 3290 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 3291 * @phba: pointer to lpfc hba data structure. 3292 * 3293 * This routine is invoked to set up the driver internal resources specific to 3294 * support the SLI-4 HBA device it attached to. 
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli;
        int rc;
        int i, hbq_count;

        /* Before proceeding, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
        if (rc)
                return -ENODEV;

        /*
         * Initialize timers used by driver
         */

        /* Heartbeat timer */
        init_timer(&phba->hb_tmofunc);
        phba->hb_tmofunc.function = lpfc_hb_timeout;
        phba->hb_tmofunc.data = (unsigned long)phba;

        psli = &phba->sli;
        /* MBOX heartbeat timer */
        init_timer(&psli->mbox_tmo);
        psli->mbox_tmo.function = lpfc_mbox_timeout;
        psli->mbox_tmo.data = (unsigned long) phba;
        /* Fabric block timer */
        init_timer(&phba->fabric_block_timer);
        phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
        phba->fabric_block_timer.data = (unsigned long) phba;
        /* EA polling mode timer */
        init_timer(&phba->eratt_poll);
        phba->eratt_poll.function = lpfc_poll_eratt;
        phba->eratt_poll.data = (unsigned long) phba;
        /*
         * We need to do a READ_CONFIG mailbox command here before
         * calling lpfc_get_cfgparam. For VFs this will report the
         * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
         * All of the resources allocated
         * for this Port are tied to these values.
         */
        /* Get all the module params for configuring this host */
        lpfc_get_cfgparam(phba);
        phba->max_vpi = LPFC_MAX_VPI;
        /* This will be set to the correct value after the read_config mbox */
        phba->max_vports = 0;

        /* Program the default value of vlan_id and fc_map */
        phba->valid_vlan = 0;
        phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

        /*
         * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
         * 2 segments are added since the IOCB needs a command and response bde.
         * To ensure that the scsi sgl does not cross a 4k page boundary only
         * sgl sizes of 1k, 2k, 4k, and 8k are supported.
         * Table of sgl sizes and seg_cnt:
         *      sgl size        sg_seg_cnt      total seg
         *      1k              50              52
         *      2k              114             116
         *      4k              242             244
         *      8k              498             500
         * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
         * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
         * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
         * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
         */
        if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
                phba->cfg_sg_seg_cnt = 50;
        else if (phba->cfg_sg_seg_cnt <= 114)
                phba->cfg_sg_seg_cnt = 114;
        else if (phba->cfg_sg_seg_cnt <= 242)
                phba->cfg_sg_seg_cnt = 242;
        else
                phba->cfg_sg_seg_cnt = 498;

        phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
                                        + sizeof(struct fcp_rsp);
        phba->cfg_sg_dma_buf_size +=
                ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));

        /* Initialize buffer queue management fields */
        hbq_count = lpfc_sli_hbq_count();
        for (i = 0; i < hbq_count; ++i)
                INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
        INIT_LIST_HEAD(&phba->rb_pend_list);
        phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
        phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

        /*
         * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
         */
        /* Initialize the Abort scsi buffer list used by driver */
        spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
        /* This abort list used by worker thread */
        spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);

        /*
         * Initialize driver internal slow-path work queues
         */

        /* Driver internal slow-path CQ Event pool */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
        /* Response IOCB work queue list */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
        /* Asynchronous event CQ Event work queue list */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
        /* Fast-path XRI aborted CQ Event work queue list */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
        /* Slow-path XRI aborted CQ Event work queue list */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
        /* Receive queue CQ Event work queue list */
        INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

        /* Initialize the driver internal SLI layer lists. */
        lpfc_sli_setup(phba);
        lpfc_sli_queue_setup(phba);

        /* Allocate device driver memory */
        rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
        if (rc)
                return -ENOMEM;

        /* Create the bootstrap mailbox command */
        rc = lpfc_create_bootstrap_mbox(phba);
        if (unlikely(rc))
                goto out_free_mem;

        /* Set up the host's endian order with the device. */
        rc = lpfc_setup_endian_order(phba);
        if (unlikely(rc))
                goto out_free_bsmbx;

        /* Set up the hba's configuration parameters.
*/ 3438 rc = lpfc_sli4_read_config(phba); 3439 if (unlikely(rc)) 3440 goto out_free_bsmbx; 3441 3442 /* Perform a function reset */ 3443 rc = lpfc_pci_function_reset(phba); 3444 if (unlikely(rc)) 3445 goto out_free_bsmbx; 3446 3447 /* Create all the SLI4 queues */ 3448 rc = lpfc_sli4_queue_create(phba); 3449 if (rc) 3450 goto out_free_bsmbx; 3451 3452 /* Create driver internal CQE event pool */ 3453 rc = lpfc_sli4_cq_event_pool_create(phba); 3454 if (rc) 3455 goto out_destroy_queue; 3456 3457 /* Initialize and populate the iocb list per host */ 3458 rc = lpfc_init_sgl_list(phba); 3459 if (rc) { 3460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3461 "1400 Failed to initialize sgl list.\n"); 3462 goto out_destroy_cq_event_pool; 3463 } 3464 rc = lpfc_init_active_sgl_array(phba); 3465 if (rc) { 3466 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3467 "1430 Failed to initialize sgl list.\n"); 3468 goto out_free_sgl_list; 3469 } 3470 3471 rc = lpfc_sli4_init_rpi_hdrs(phba); 3472 if (rc) { 3473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3474 "1432 Failed to initialize rpi headers.\n"); 3475 goto out_free_active_sgl; 3476 } 3477 3478 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 3479 phba->cfg_fcp_eq_count), GFP_KERNEL); 3480 if (!phba->sli4_hba.fcp_eq_hdl) { 3481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3482 "2572 Failed allocate memory for fast-path " 3483 "per-EQ handle array\n"); 3484 goto out_remove_rpi_hdrs; 3485 } 3486 3487 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 3488 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 3489 if (!phba->sli4_hba.msix_entries) { 3490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3491 "2573 Failed allocate memory for msi-x " 3492 "interrupt vector entries\n"); 3493 goto out_free_fcp_eq_hdl; 3494 } 3495 3496 return rc; 3497 3498 out_free_fcp_eq_hdl: 3499 kfree(phba->sli4_hba.fcp_eq_hdl); 3500 out_remove_rpi_hdrs: 3501 lpfc_sli4_remove_rpi_hdrs(phba); 3502 out_free_active_sgl: 3503 lpfc_free_active_sgl(phba); 3504 out_free_sgl_list: 3505 lpfc_free_sgl_list(phba); 3506 out_destroy_cq_event_pool: 3507 lpfc_sli4_cq_event_pool_destroy(phba); 3508 out_destroy_queue: 3509 lpfc_sli4_queue_destroy(phba); 3510 out_free_bsmbx: 3511 lpfc_destroy_bootstrap_mbox(phba); 3512 out_free_mem: 3513 lpfc_mem_free(phba); 3514 return rc; 3515 } 3516 3517 /** 3518 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 3519 * @phba: pointer to lpfc hba data structure. 3520 * 3521 * This routine is invoked to unset the driver internal resources set up 3522 * specific for supporting the SLI-4 HBA device it attached to. 3523 **/ 3524 static void 3525 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 3526 { 3527 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 3528 3529 /* unregister default FCFI from the HBA */ 3530 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); 3531 3532 /* Free the default FCR table */ 3533 lpfc_sli_remove_dflt_fcf(phba); 3534 3535 /* Free memory allocated for msi-x interrupt vector entries */ 3536 kfree(phba->sli4_hba.msix_entries); 3537 3538 /* Free memory allocated for fast-path work queue handles */ 3539 kfree(phba->sli4_hba.fcp_eq_hdl); 3540 3541 /* Free the allocated rpi headers. 
 */
        lpfc_sli4_remove_rpi_hdrs(phba);
        lpfc_sli4_remove_rpis(phba);

        /* Free the ELS sgl list */
        lpfc_free_active_sgl(phba);
        lpfc_free_sgl_list(phba);

        /* Free the SCSI sgl management array */
        kfree(phba->sli4_hba.lpfc_scsi_psb_array);

        /* Free the SLI4 queues */
        lpfc_sli4_queue_destroy(phba);

        /* Free the completion queue EQ event pool */
        lpfc_sli4_cq_event_release_all(phba);
        lpfc_sli4_cq_event_pool_destroy(phba);

        /* Reset SLI4 HBA FCoE function */
        lpfc_pci_function_reset(phba);

        /* Free the bsmbx region. */
        lpfc_destroy_bootstrap_mbox(phba);

        /* Free the SLI Layer memory with SLI4 HBAs */
        lpfc_mem_free_all(phba);

        /* Free the current connect table */
        list_for_each_entry_safe(conn_entry, next_conn_entry,
                                 &phba->fcf_conn_rec_list, list)
                kfree(conn_entry);

        return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
        switch (dev_grp) {
        case LPFC_PCI_DEV_LP:
                phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
                phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
                phba->lpfc_stop_port = lpfc_stop_port_s3;
                break;
        case LPFC_PCI_DEV_OC:
                phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
                phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
                phba->lpfc_stop_port = lpfc_stop_port_s4;
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1431 Invalid HBA PCI-device group: 0x%x\n",
                                dev_grp);
                return -ENODEV;
        }
        return 0;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it is attached to.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
        /*
         * Driver resources common to all SLI revisions
         */
        atomic_set(&phba->fast_event_count, 0);
        spin_lock_init(&phba->hbalock);

        /* Initialize ndlp management spinlock */
        spin_lock_init(&phba->ndlp_lock);

        INIT_LIST_HEAD(&phba->port_list);
        INIT_LIST_HEAD(&phba->work_list);
        init_waitqueue_head(&phba->wait_4_mlo_m_q);

        /* Initialize the wait queue head for the kernel thread */
        init_waitqueue_head(&phba->work_waitq);

        /* Initialize the scsi buffer list used by driver for scsi IO */
        spin_lock_init(&phba->scsi_buf_list_lock);
        INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

        /* Initialize the fabric iocb list */
        INIT_LIST_HEAD(&phba->fabric_iocb_list);

        /* Initialize list to save ELS buffers */
        INIT_LIST_HEAD(&phba->elsbuf);

        /* Initialize FCF connection rec list */
        INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

        return 0;
}

/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it is attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it is
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
static void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCBs to allocate.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, LPFC_IOCB_LIST_CNT);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
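/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): the iotag handed out by lpfc_sli_next_iotag()
 * above doubles as an index into a lookup table, so a completion that
 * carries only the tag resolves to its lpfc_iocbq in O(1). All names in
 * this sketch are hypothetical.
 */
#ifdef LPFC_IOTAG_EXAMPLE
struct example_tag_table {
	struct lpfc_iocbq **slot;	/* slot[iotag] -> outstanding iocbq */
	uint16_t max_tag;		/* tag 0 is reserved as "invalid" */
};

static struct lpfc_iocbq *example_lookup(struct example_tag_table *t,
					 uint16_t iotag)
{
	/* Reject tag 0 and anything beyond the table bound */
	return (iotag && iotag < t->max_tag) ? t->slot[iotag] : NULL;
}
#endif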
/**
 * lpfc_free_sgl_list - Free sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's sgl list and memory.
 **/
static void
lpfc_free_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);
	int rc = 0;

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
		phba->sli4_hba.total_sglq_bufs--;
	}
	rc = lpfc_sli4_remove_all_sgl_pages(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2005 Unable to deregister pages from HBA: %x\n", rc);
	}
	kfree(phba->sli4_hba.lpfc_els_sgl_array);
}

/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}

/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}
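/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): lpfc_sglq_active_list allocated above is a
 * flat array of sglq pointers, so the sglq that owns an exchange can be
 * found directly when its completion arrives. This sketch assumes the
 * xritag indexes the array from zero; the real driver may adjust by the
 * port's XRI base. Helper names are hypothetical.
 */
#ifdef LPFC_ACTIVE_SGL_EXAMPLE
static void example_mark_active(struct lpfc_hba *phba,
				struct lpfc_sglq *sglq)
{
	/* Publish the sglq under its own XRI while the I/O is outstanding */
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_xritag] = sglq;
}

static struct lpfc_sglq *example_find_active(struct lpfc_hba *phba,
					     uint16_t xritag)
{
	return phba->sli4_hba.lpfc_sglq_active_list[xritag];
}
#endif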
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	int i;
	int els_xri_cnt;

	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2400 lpfc_init_sgl_list els %d.\n",
				els_xri_cnt);
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Sanity check on XRI management */
	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2562 No room left for SCSI XRI allocation: "
				"max_xri=%d, els_xri=%d\n",
				phba->sli4_hba.max_cfg_param.max_xri,
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Allocate memory for the ELS XRI management array */
	phba->sli4_hba.lpfc_els_sgl_array =
			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
			GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_els_sgl_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2401 Failed to allocate memory for ELS "
				"XRI management array of size %d.\n",
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Keep the SCSI XRI into the XRI management array */
	phba->sli4_hba.scsi_xri_max =
			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.scsi_xri_cnt = 0;

	phba->sli4_hba.lpfc_scsi_psb_array =
			kzalloc((sizeof(struct lpfc_scsi_buf *) *
			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2563 Failed to allocate memory for SCSI "
				"XRI management array of size %d.\n",
				phba->sli4_hba.scsi_xri_max);
		kfree(phba->sli4_hba.lpfc_els_sgl_array);
		return -ENOMEM;
	}

	for (i = 0; i < els_xri_cnt; i++) {
		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
		if (sglq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d sgls of "
				"expected %d count. Unloading driver.\n",
				__func__, i, els_xri_cnt);
			goto out_free_mem;
		}

		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (sglq_entry->sli4_xritag == NO_XRI) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate XRI. "
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->buff_type = GEN_BUFF_TYPE;
		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
		if (sglq_entry->virt == NULL) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate mbuf. "
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->sgl = sglq_entry->virt;
		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);

		/* The list order is used by later block SGL registration */
		spin_lock_irq(&phba->hbalock);
		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
		phba->sli4_hba.total_sglq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}
	return 0;

out_free_mem:
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
	lpfc_free_sgl_list(phba);
	return -ENOMEM;
}
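/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): the XRI split computed above. The port exposes
 * max_xri exchange IDs; the first els_xri_cnt are set aside for ELS traffic
 * and the remainder become the SCSI pool, so e.g. max_xri = 1024 with
 * els_xri_cnt = 256 leaves scsi_xri_max = 768. A hypothetical helper
 * restating that arithmetic:
 */
#ifdef LPFC_XRI_SPLIT_EXAMPLE
static int example_scsi_xri_pool(uint16_t max_xri, uint16_t els_xri_cnt,
				 uint16_t *scsi_xri_max)
{
	/* Mirrors the sanity check in lpfc_init_sgl_list() */
	if (max_xri <= els_xri_cnt)
		return -ENOMEM;
	*scsi_xri_max = max_xri - els_xri_cnt;
	return 0;
}
#endif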
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers.
 * No locks are held here because this is an initialization routine
 * called only from probe or lpfc_online when interrupts are not
 * enabled and the driver is reinitializing the device.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	int longs;
	uint16_t rpi_count;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * Provision an rpi bitmask range for discovery. The bitmask must
	 * be large enough to cover rpis up to rpi_base + max_rpi - 1.
	 */
	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
		    phba->sli4_hba.max_cfg_param.max_rpi - 1;

	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
					   GFP_KERNEL);
	if (!phba->sli4_hba.rpi_bmask)
		return -ENOMEM;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba. This single region
 * provides support for up to 64 rpis. The region is used globally
 * by the device.
 *
 * Returns:
 *	A valid rpi hdr on success.
 *	A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
		    phba->sli4_hba.max_cfg_param.max_rpi - 1;

	spin_lock_irq(&phba->hbalock);
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis. The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port. The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);
	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next modulo-64 rpi value to post
	 * in any subsequent rpi memory region postings.
	 */
	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}

/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis. This routine presumes the caller has released all
 * rpis consumed by fabric or port logins and is prepared to have
 * the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt,
				  rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}

	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
	/* Clear the whole bitmask, not just its first word */
	memset(phba->sli4_hba.rpi_bmask, 0,
	       BITS_TO_LONGS(phba->sli4_hba.max_cfg_param.rpi_base +
			     phba->sli4_hba.max_cfg_param.max_rpi - 1) *
	       sizeof(unsigned long));
}

/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		/* phba is NULL here, so log against the PCI device instead */
		dev_err(&pdev->dev, "1417 Failed to allocate hba struct.\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	kfree(phba);
	return;
}
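/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): each posted rpi header page covers
 * LPFC_RPI_HDR_COUNT (64) rpis, so next_rpi advances in 64-rpi strides
 * and a new page is admitted only while the stride still fits under the
 * rpi limit. A hypothetical restatement of that bound check:
 */
#ifdef LPFC_RPI_PAGE_EXAMPLE
static int example_rpi_page_fits(uint16_t next_rpi, uint16_t rpi_limit)
{
	/* Mirrors the test in lpfc_sli4_create_rpi_hdr() */
	return (next_rpi + (LPFC_RPI_HDR_COUNT - 1)) <= rpi_limit;
}
#endif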
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;
	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	int pagecnt = 10;

	if (lpfc_prot_mask && lpfc_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");
		scsi_host_set_prot(shost, lpfc_prot_mask);
		scsi_host_set_guard(shost, lpfc_prot_guard);
	}
	if (!_dump_buf_data) {
		/* Initialize the lock once, not on every retry */
		spin_lock_init(&_dump_buf_lock);
		while (pagecnt) {
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				printk(KERN_ERR "BLKGRD allocated %d pages for "
				       "_dump_buf_data at 0x%p\n",
				       (1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
			       "memory for hexdump\n");
	} else
		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
		       "\n", _dump_buf_data);
	if (!_dump_buf_dif) {
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				printk(KERN_ERR "BLKGRD allocated %d pages for "
				       "_dump_buf_dif at 0x%p\n",
				       (1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
			       "memory for hexdump\n");
	} else
		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
		       _dump_buf_dif);
}
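/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): the fallback strategy used in lpfc_setup_bg()
 * above. __get_free_pages() allocates 2^order contiguous pages, so on
 * failure the order is stepped down until some block (or nothing) can be
 * had. A hypothetical standalone version:
 */
#ifdef LPFC_ORDER_FALLBACK_EXAMPLE
static char *example_alloc_largest_block(int max_order, int *got_order)
{
	int order;
	char *buf;

	for (order = max_order; order > 0; order--) {
		buf = (char *) __get_free_pages(GFP_KERNEL, order);
		if (buf) {
			*got_order = order;	/* PAGE_SIZE << order bytes */
			return buf;
		}
	}
	return NULL;	/* caller must tolerate running without the buffer */
}
#endif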
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
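/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): the 64-then-32 bit DMA mask negotiation used
 * by lpfc_sli_pci_mem_setup() below. The wide mask is preferred; if the
 * platform rejects it, the driver falls back to 32-bit addressing before
 * giving up. A hypothetical standalone version:
 */
#ifdef LPFC_DMA_MASK_EXAMPLE
static int example_set_dma_mask(struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
		return 0;	/* full 64-bit addressing available */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;	/* fall back to 32-bit addressing */
	return -ENODEV;		/* no usable DMA addressing */
}
#endif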
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
			return error;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
	uint32_t onlnreg0, onlnreg1;
	int i, port_error = -ENODEV;

	if (!phba->sli4_hba.STAregaddr)
		return -ENODEV;

	/* With an unrecoverable error, log the error message and return */
	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1422 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"online0_reg=0x%x, online1_reg=0x%x\n",
					uerrlo_reg.word0, uerrhi_reg.word0,
					onlnreg0, onlnreg1);
		}
		return -ENODEV;
	}

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
		/* Encounter fatal POST error, break out */
		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_ARMFW_READY ==
		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
			port_error = 0;
			break;
		}
		msleep(10);
	}

	if (port_error)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Failure HBA POST Status: sta_reg=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
			bf_get(lpfc_hst_state_perr, &sta_reg),
			bf_get(lpfc_hst_state_sfi, &sta_reg),
			bf_get(lpfc_hst_state_nip, &sta_reg),
			bf_get(lpfc_hst_state_ipc, &sta_reg),
			bf_get(lpfc_hst_state_xrom, &sta_reg),
			bf_get(lpfc_hst_state_dl, &sta_reg),
			bf_get(lpfc_hst_state_port_status, &sta_reg));

	/* Log device information */
	scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
			"FeatureL1=0x%x, FeatureL2=0x%x\n",
			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
			bf_get(lpfc_scratchpad_slirev, &scratchpad),
			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));

	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_UERR_STATUS_LO;
	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_UERR_STATUS_HI;
	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_ONLINE0;
	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_ONLINE1;
	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_SCRATCHPAD;
}
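/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): the bounded poll used by
 * lpfc_sli4_post_status_check() above. 3000 iterations of msleep(10)
 * cap the wait at roughly 30 seconds while still noticing readiness (or
 * a fatal state) on the next sample. A hypothetical distilled version:
 */
#ifdef LPFC_POLL_EXAMPLE
static int example_poll_ready(void __iomem *reg, uint32_t ready_val)
{
	int i;

	for (i = 0; i < 3000; i++) {	/* 3000 * 10ms is about a 30s cap */
		if (readl(reg) == ready_val)
			return 0;
		msleep(10);		/* sleep, don't spin: init context */
	}
	return -ENODEV;			/* port never came ready */
}
#endif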
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_STATE;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				     LPFC_HST_ISCR0;
	return;
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
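/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): BAR2 is carved into one LPFC_VFR_PAGE_SIZE
 * page of doorbells per virtual function, so every doorbell address set
 * above is base + vf * page + fixed offset. A hypothetical helper
 * restating that layout:
 */
#ifdef LPFC_DOORBELL_OFFSET_EXAMPLE
static void __iomem *example_vf_doorbell(void __iomem *bar2_base,
					 uint32_t vf, uint32_t doorbell_off)
{
	/* Same arithmetic as lpfc_sli4_bar2_register_memmap() */
	return bar2_base + vf * LPFC_VFR_PAGE_SIZE + doorbell_off;
}
#endif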
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands have been recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
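/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): the bootstrap mailbox address split performed
 * above. A 16-byte-aligned 64-bit bus address is posted as two 30-bit
 * words; bits 63:34 form the high word and bits 33:4 the low word (bits
 * 3:0 are zero by alignment), each shifted up and tagged with its
 * high/low marker bit. A hypothetical restatement:
 */
#ifdef LPFC_BMBX_SPLIT_EXAMPLE
static void example_bmbx_split(uint64_t aligned_phys,
			       uint32_t *addr_hi, uint32_t *addr_lo)
{
	uint32_t pa;

	pa = (uint32_t)((aligned_phys >> 34) & 0x3fffffff);
	*addr_hi = (pa << 2) | LPFC_BMBX_BIT1_ADDR_HI;

	pa = (uint32_t)((aligned_phys >> 4) & 0x3fffffff);
	*addr_lo = (pa << 2) | LPFC_BMBX_BIT1_ADDR_LO;
}
#endif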
/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the
 * resource allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	uint32_t rc = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2012 Mailbox failed, mbxCmd x%x "
			"READ_CONFIG, mbxStatus x%x\n",
			bf_get(lpfc_mqe_command, &pmb->u.mqe),
			bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.fcfi_base =
			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(B:%d M:%d)\n",
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.fcfi_base,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
		phba->cfg_hba_queue_depth =
			phba->sli4_hba.max_cfg_param.max_xri;
	return rc;
}
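/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): why the two fixed words sent by
 * lpfc_setup_endian_order() below are enough for the port to learn the
 * host byte order: a known constant arrives with its bytes laid out in
 * host order, so the receiver can compare against both candidate
 * layouts. A hypothetical host-side analogue of that test:
 */
#ifdef LPFC_ENDIAN_EXAMPLE
static int example_host_is_little_endian(void)
{
	uint32_t probe = 0x01020304;
	uint8_t first_byte = *(uint8_t *)&probe;

	/* Little-endian memory puts the least significant byte first */
	return first_byte == 0x04;
}
#endif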
/**
 * lpfc_setup_endian_order - Notify the port of the host's endian order.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the host-side endian order to the
 * HBA consistent with the SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0492 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/*
	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
	 * words to contain special data values and no other data.
	 */
	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
				"status x%x\n",
				rc);
		rc = -EIO;
	}

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
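/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): the clamping pattern used by
 * lpfc_sli4_queue_create() below. A requested fast-path queue count is
 * capped by what the PCI function exposes minus the slow-path reserve,
 * and rejected outright if even the minimum no longer fits. All
 * parameter names here are hypothetical:
 */
#ifdef LPFC_QUEUE_CLAMP_EXAMPLE
static int example_clamp_queue_count(int requested, int max_from_port,
				     int sp_reserved, int fp_minimum)
{
	int avail = max_from_port - sp_reserved;

	if (requested > avail)
		requested = avail;	/* fall back to what fits */
	if (requested < fp_minimum)
		return -ENODEV;		/* cannot run even the minimum */
	return requested;
}
#endif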
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int cfg_fcp_wq_count;
	int cfg_fcp_eq_count;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on FCP fast-path WQ parameters */
	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
	if (cfg_fcp_wq_count >
	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
				   LPFC_SP_WQN_DEF;
		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2581 Not enough WQs (%d) from "
					"the pci function for supporting "
					"FCP WQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->cfg_fcp_wq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2582 Not enough WQs (%d) from the pci "
				"function for supporting the requested "
				"FCP WQs (%d), the actual FCP WQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
	}
	/* The actual number of FCP work queues adopted */
	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;

	/* Sanity check on FCP fast-path EQ parameters */
	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
	if (cfg_fcp_eq_count >
	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
				   LPFC_SP_EQN_DEF;
		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_eq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2575 Not enough EQs (%d) from the pci "
				"function for supporting the requested "
				"FCP EQs (%d), the actual FCP EQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
	}
	/* It does not make sense to have more EQs than WQs */
	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2593 The number of FCP EQs (%d) is more "
				"than the number of FCP WQs (%d), using "
				"the same number of FCP EQs as the "
				"WQs (%d)\n", cfg_fcp_eq_count,
				phba->cfg_fcp_wq_count,
				phba->cfg_fcp_wq_count);
		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
	}
	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
	/* The overall number of event queues used */
	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;

	/*
	 * Create Event Queues (EQs)
	 */

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Create slow path event queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0496 Failed allocate slow-path EQ\n");
		goto out_error;
	}
	phba->sli4_hba.sp_eq = qdesc;

	/* Create fast-path FCP Event Queue(s) */
	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
				       phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2576 Failed allocate memory for fast-path "
				"EQ record array\n");
		goto out_free_sp_eq;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate fast-path EQ\n");
			goto out_free_fp_eq;
		}
		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
	}

	/*
	 * Create Complete Queues (CQs)
	 */

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_free_fp_eq;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_free_mbx_cq;
	}
	phba->sli4_hba.els_cq = qdesc;

	/* Create slow-path Unsolicited Receive Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0502 Failed allocate slow-path USOL RX CQ\n");
		goto out_free_els_cq;
	}
	phba->sli4_hba.rxq_cq = qdesc;

	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
					phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_free_rxq_cq;
	}
	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", fcp_cqidx);
			goto out_free_fcp_cq;
		}
		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
	}

	/* Create Mailbox Command Queue */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_free_fcp_cq;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create all the Work Queues (WQs)
	 */
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_free_mbx_wq;
	}
	phba->sli4_hba.els_wq = qdesc;

	/* Create fast-path FCP Work Queue(s) */
	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
					phba->cfg_fcp_wq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_free_els_wq;
	}
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", fcp_wqidx);
			goto out_free_fcp_wq;
		}
		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_free_fcp_wq;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_free_hdr_rq;
	}
	phba->sli4_hba.dat_rq = qdesc;

	return 0;

out_free_hdr_rq:
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
out_free_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_wq);
out_free_els_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;
out_free_mbx_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;
out_free_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_cq);
out_free_rxq_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
	phba->sli4_hba.rxq_cq = NULL;
out_free_els_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;
out_free_mbx_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;
out_free_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
	}
	kfree(phba->sli4_hba.fp_eq);
out_free_sp_eq:
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
out_error:
	return -ENOMEM;
}
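/*
 * Illustrative sketch (editorial addition, not part of the driver and
 * compiled out by default): the partial-array unwind used in the error
 * ladder above. On a mid-loop failure, the loop index names the first
 * slot that was NOT filled, so cleanup starts one below it and walks
 * back to zero. A hypothetical distilled version:
 */
#ifdef LPFC_PARTIAL_UNWIND_EXAMPLE
static void example_unwind_queues(struct lpfc_queue **q, int failed_idx)
{
	int i;

	/* failed_idx itself was never allocated; free [0, failed_idx) */
	for (i = failed_idx - 1; i >= 0; i--) {
		lpfc_sli4_queue_free(q[i]);
		q[i] = NULL;
	}
}
#endif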
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues created for the
 * FCoE HBA operation.
 **/
static void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Release mailbox command work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;

	/* Release ELS work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;

	/* Release FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;

	/* Release unsolicited receive queue */
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
	phba->sli4_hba.dat_rq = NULL;

	/* Release unsolicited receive complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
	phba->sli4_hba.rxq_cq = NULL;

	/* Release ELS complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;

	/* Release mailbox command complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;

	/* Release FCP response complete queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;

	/* Release fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;

	/* Release slow-path event queue */
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;

	/*
	 * Set up Event Queues (EQs)
	 */

	/* Set up slow-path event queue */
	if (!phba->sli4_hba.sp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0520 Slow-path EQ not allocated\n");
		goto out_error;
	}
	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
			    LPFC_SP_DEF_IMAX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0521 Failed setup of slow-path EQ: "
				"rc = 0x%x\n", rc);
		goto out_error;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2583 Slow-path EQ setup: queue-id=%d\n",
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path event queue */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			goto out_destroy_fp_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_fp_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 Fast-path EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
	}

	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		goto out_destroy_fp_eq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
			    LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fp_eq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
			    LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path Unsolicited Receive Complete Queue */
	if (!phba->sli4_hba.rxq_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0532 USOL RX CQ not allocated\n");
		goto out_destroy_els_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
			    LPFC_RCQ, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0533 Failed setup of slow-path USOL RX CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_els_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
eq-id=%d\n", 5373 phba->sli4_hba.rxq_cq->queue_id, 5374 phba->sli4_hba.sp_eq->queue_id); 5375 5376 /* Set up fast-path FCP Response Complete Queue */ 5377 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5378 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 5379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5380 "0526 Fast-path FCP CQ (%d) not " 5381 "allocated\n", fcp_cqidx); 5382 goto out_destroy_fcp_cq; 5383 } 5384 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 5385 phba->sli4_hba.fp_eq[fcp_cqidx], 5386 LPFC_WCQ, LPFC_FCP); 5387 if (rc) { 5388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5389 "0527 Failed setup of fast-path FCP " 5390 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 5391 goto out_destroy_fcp_cq; 5392 } 5393 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5394 "2588 FCP CQ setup: cq[%d]-id=%d, " 5395 "parent eq[%d]-id=%d\n", 5396 fcp_cqidx, 5397 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 5398 fcp_cqidx, 5399 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 5400 } 5401 5402 /* 5403 * Set up all the Work Queues (WQs) 5404 */ 5405 5406 /* Set up Mailbox Command Queue */ 5407 if (!phba->sli4_hba.mbx_wq) { 5408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5409 "0538 Slow-path MQ not allocated\n"); 5410 goto out_destroy_fcp_cq; 5411 } 5412 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 5413 phba->sli4_hba.mbx_cq, LPFC_MBOX); 5414 if (rc) { 5415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5416 "0539 Failed setup of slow-path MQ: " 5417 "rc = 0x%x\n", rc); 5418 goto out_destroy_fcp_cq; 5419 } 5420 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5421 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 5422 phba->sli4_hba.mbx_wq->queue_id, 5423 phba->sli4_hba.mbx_cq->queue_id); 5424 5425 /* Set up slow-path ELS Work Queue */ 5426 if (!phba->sli4_hba.els_wq) { 5427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5428 "0536 Slow-path ELS WQ not allocated\n"); 5429 goto out_destroy_mbx_wq; 5430 } 5431 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 5432 phba->sli4_hba.els_cq, LPFC_ELS); 5433 if (rc) { 5434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5435 "0537 Failed setup of slow-path ELS WQ: " 5436 "rc = 0x%x\n", rc); 5437 goto out_destroy_mbx_wq; 5438 } 5439 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5440 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 5441 phba->sli4_hba.els_wq->queue_id, 5442 phba->sli4_hba.els_cq->queue_id); 5443 5444 /* Set up fast-path FCP Work Queue */ 5445 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5446 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 5447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5448 "0534 Fast-path FCP WQ (%d) not " 5449 "allocated\n", fcp_wqidx); 5450 goto out_destroy_fcp_wq; 5451 } 5452 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 5453 phba->sli4_hba.fcp_cq[fcp_cq_index], 5454 LPFC_FCP); 5455 if (rc) { 5456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5457 "0535 Failed setup of fast-path FCP " 5458 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 5459 goto out_destroy_fcp_wq; 5460 } 5461 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5462 "2591 FCP WQ setup: wq[%d]-id=%d, " 5463 "parent cq[%d]-id=%d\n", 5464 fcp_wqidx, 5465 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 5466 fcp_cq_index, 5467 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 5468 /* Round robin FCP Work Queue's Completion Queue assignment */ 5469 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 5470 } 5471 5472 /* 5473 * Create Receive Queue (RQ) 5474 */ 5475 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 5476 lpfc_printf_log(phba, KERN_ERR, 
LOG_INIT, 5477 "0540 Receive Queue not allocated\n"); 5478 goto out_destroy_fcp_wq; 5479 } 5480 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 5481 phba->sli4_hba.rxq_cq, LPFC_USOL); 5482 if (rc) { 5483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5484 "0541 Failed setup of Receive Queue: " 5485 "rc = 0x%x\n", rc); 5486 goto out_destroy_fcp_wq; 5487 } 5488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5489 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 5490 "parent cq-id=%d\n", 5491 phba->sli4_hba.hdr_rq->queue_id, 5492 phba->sli4_hba.dat_rq->queue_id, 5493 phba->sli4_hba.rxq_cq->queue_id); 5494 return 0; 5495 5496 out_destroy_fcp_wq: 5497 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 5498 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 5499 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 5500 out_destroy_mbx_wq: 5501 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 5502 out_destroy_fcp_cq: 5503 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 5504 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 5505 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); 5506 out_destroy_els_cq: 5507 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5508 out_destroy_mbx_cq: 5509 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5510 out_destroy_fp_eq: 5511 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 5512 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 5513 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 5514 out_error: 5515 return rc; 5516 } 5517 5518 /** 5519 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 5520 * @phba: pointer to lpfc hba data structure. 5521 * 5522 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 5523 * operation. 5524 * 5525 * Return codes 5526 * 0 - sucessful 5527 * ENOMEM - No availble memory 5528 * EIO - The mailbox failed to complete successfully. 5529 **/ 5530 void 5531 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 5532 { 5533 int fcp_qidx; 5534 5535 /* Unset mailbox command work queue */ 5536 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 5537 /* Unset ELS work queue */ 5538 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 5539 /* Unset unsolicited receive queue */ 5540 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 5541 /* Unset FCP work queue */ 5542 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 5543 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 5544 /* Unset mailbox command complete queue */ 5545 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5546 /* Unset ELS complete queue */ 5547 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5548 /* Unset unsolicited receive complete queue */ 5549 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); 5550 /* Unset FCP response complete queue */ 5551 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5552 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 5553 /* Unset fast-path event queue */ 5554 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5555 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 5556 /* Unset slow-path event queue */ 5557 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 5558 } 5559 5560 /** 5561 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 5562 * @phba: pointer to lpfc hba data structure. 5563 * 5564 * This routine is invoked to allocate and set up a pool of completion queue 5565 * events. The body of the completion queue event is a completion queue entry 5566 * CQE. 
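
/*
 * Illustrative sketch of the round-robin WQ-to-CQ binding performed by
 * lpfc_sli4_queue_setup() above. Assuming hypothetical values of
 * cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2, fcp_cq_index advances
 * modulo the CQ count, so the FCP work queues are bound as:
 *
 *	fcp_wq[0] -> fcp_cq[0]
 *	fcp_wq[1] -> fcp_cq[1]
 *	fcp_wq[2] -> fcp_cq[0]
 *	fcp_wq[3] -> fcp_cq[1]
 *
 * This spreads work-queue completions evenly across the FCP CQs (and hence
 * the fast-path EQs) whenever there are more work queues than event queues.
 */
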
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * operation.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Unset mailbox command work queue */
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
	/* Unset ELS work queue */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
	/* Unset unsolicited receive queue */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
	/* Unset FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
	/* Unset mailbox command complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset unsolicited receive complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
	/* Unset FCP response complete queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
	/* Unset fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
	/* Unset slow-path event queue */
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue
 * entry (CQE). For now, this pool is used for the interrupt service routine
 * to queue the following HBA completion queue events for the worker thread
 * to process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the locked version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the locked version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the pending completion-queue events
 * back into the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}
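
/*
 * Minimal usage sketch for the completion-queue event pool above
 * (illustrative only; error handling and the actual CQE payload handling
 * are omitted). A typical pairing is: the interrupt handler allocates an
 * event and queues it for the worker thread, which releases it back to
 * the pool once processed:
 *
 *	struct lpfc_cq_event *cq_event;
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);	(interrupt context)
 *	if (cq_event) {
 *		copy the CQE into the event, queue it on one of the
 *		sp_*_work_queue lists, and wake the worker thread;
 *	}
 *	...
 *	lpfc_sli4_cq_event_release(phba, cq_event);	(worker thread)
 */
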
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys all
 * resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0494 Unable to allocate memory for issuing "
				"SLI_FUNCTION_RESET mailbox command\n");
		return -ENOMEM;
	}

	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
			 LPFC_SLI4_MBX_EMBED);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0495 SLI_FUNCTION_RESET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
 * @phba: pointer to lpfc hba data structure.
 * @cnt: number of nop mailbox commands to send.
 *
 * This routine is invoked to send @cnt NOP mailbox commands and wait for
 * each command to complete.
 *
 * Return: the number of NOP mailbox commands completed.
 **/
static int
lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
{
	LPFC_MBOXQ_t *mboxq;
	int length, cmdsent;
	uint32_t mbox_tmo;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (cnt == 0) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2518 Requested to send 0 NOP mailbox cmd\n");
		return cnt;
	}

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2519 Unable to allocate memory for issuing "
				"NOP mailbox command\n");
		return 0;
	}

	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
	length = (sizeof(struct lpfc_mbx_nop) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);

	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
		if (!phba->sli4_hba.intr_enable)
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		else
			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
		if (rc == MBX_TIMEOUT)
			break;
		/* Check return status */
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2520 NOP mailbox command failed "
					"status x%x add_status x%x mbx "
					"status x%x\n", shdr_status,
					shdr_add_status, rc);
			break;
		}
	}

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	return cmdsent;
}
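
/*
 * Usage sketch (illustrative): since lpfc_sli4_send_nop_mbox_cmds() above
 * returns the number of NOP commands that actually completed, a caller can
 * probe the health of the mailbox path by comparing the count requested
 * with the count completed, e.g. with a hypothetical count of 3:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, 3) != 3)
 *		the mailbox path did not complete all NOPs;
 */
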
/**
 * lpfc_sli4_fcfi_unreg - Unregister an FCFI from the device
 * @phba: pointer to lpfc hba data structure.
 * @fcfi: fcf index.
 *
 * This routine is invoked to unregister an FCFI from the device.
 **/
void
lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
{
	LPFC_MBOXQ_t *mbox;
	uint32_t mbox_tmo;
	int rc;
	unsigned long flags;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;

	lpfc_unreg_fcfi(mbox, fcfi);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (rc != MBX_SUCCESS)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2517 Unregister FCFI command failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
	else {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* Mark the FCFI as no longer registered */
		phba->fcf.fcf_flag &=
			~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
		spin_unlock_irqrestore(&phba->hbalock, flags);
	}
}

/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
			return error;

	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
	 * number of bytes required by each mapping. They actually map to
	 * the PCI BAR regions 1, 2, and 4 of the SLI4 device.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
	bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);

	phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
	bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);

	phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
	bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);

	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
	phba->sli4_hba.conf_regs_memmap_p =
		ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->sli4_hba.conf_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 PCI config registers.\n");
		goto out;
	}

	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
	phba->sli4_hba.ctrl_regs_memmap_p =
		ioremap(phba->pci_bar1_map, bar1map_len);
	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA control registers.\n");
		goto out_iounmap_conf;
	}

	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
	phba->sli4_hba.drbl_regs_memmap_p =
		ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->sli4_hba.drbl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA doorbell registers.\n");
		goto out_iounmap_ctrl;
	}

	/* Set up BAR0 PCI config space register memory map */
	lpfc_sli4_bar0_register_memmap(phba);

	/* Set up BAR1 register memory map */
	lpfc_sli4_bar1_register_memmap(phba);

	/* Set up BAR2 register memory map */
	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
	if (error)
		goto out_iounmap_all;

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;

	/* Unmap I/O memory space */
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
	iounmap(phba->sli4_hba.conf_regs_memmap_p);

	return;
}
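
/*
 * Summary of the SLI4 register mappings established above by
 * lpfc_sli4_pci_mem_setup(): the driver's logical BAR indices map onto
 * the device's PCI BAR regions as follows (per the comment in the
 * routine, the logical BARs correspond to PCI BARs 1, 2, and 4):
 *
 *	LPFC_SLI4_BAR0 -> conf_regs_memmap_p (PCI config space registers)
 *	LPFC_SLI4_BAR1 -> ctrl_regs_memmap_p (HBA control registers)
 *	LPFC_SLI4_BAR2 -> drbl_regs_memmap_p (HBA doorbell registers)
 *
 * lpfc_sli4_pci_mem_unset() tears these down with iounmap() in the
 * reverse order of the mapping.
 */
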
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
 * the device will be left with MSI-X enabled, leaking its vectors.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector,
			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector,
			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}
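
/*
 * Vector-to-handler layout established by lpfc_sli_enable_msix() above;
 * SLI-3 uses a fixed scheme of LPFC_MSIX_VECTORS entries:
 *
 *	msix_entries[0].vector -> lpfc_sli_sp_intr_handler (slow path)
 *	msix_entries[1].vector -> lpfc_sli_fp_intr_handler (fast path)
 *
 * The matching teardown in lpfc_sli_disable_msix() below must free_irq()
 * every requested vector before calling pci_disable_msix().
 */
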
/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI enabled,
 * leaking its vector.
 */
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: the configured interrupt mode (2: MSI-X, 1: MSI, 0: INTx).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the
 * driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}
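
/*
 * Interrupt-mode encoding shared by lpfc_sli_enable_intr() above and its
 * SLI-4 counterpart further below: the cfg_mode argument and the intr_mode
 * return value use the same numbering:
 *
 *	2 - MSI-X
 *	1 - MSI
 *	0 - INTx
 *	LPFC_INTR_ERROR - no interrupt mode could be enabled
 *
 * The configured mode only sets the starting point; the routine falls back
 * down this list until one mode is successfully enabled.
 */
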
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
 * enables either all or nothing, depending on the current availability of
 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later when the device
 * is unloading, the driver should always call free_irq() on all MSI-X
 * vectors it has done request_irq() on before calling pci_disable_msix().
 * Failure to do so results in a BUG_ON() and the device will be left with
 * MSI-X enabled, leaking its vectors.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int rc, index;

	/* Set up MSI-X multi-message vectors */
	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
		phba->sli4_hba.msix_entries[index].entry = index;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
			     phba->sli4_hba.cfg_eqn);
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}
	/* Log MSI-X vector assignment */
	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0489 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", index,
				phba->sli4_hba.msix_entries[index].vector,
				phba->sli4_hba.msix_entries[index].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* The first vector must be associated with the slow-path handler
	 * for the MQ */
	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0485 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* The rest of the vector(s) are associated to fast-path handler(s) */
	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
				 LPFC_FP_DRIVER_HANDLER_NAME,
				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}

	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 1; index--)
		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);

	/* free the irq already requested */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);

	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}
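
/*
 * Vector-to-handler layout established by lpfc_sli4_enable_msix() above,
 * for i = 1 .. cfg_eqn - 1:
 *
 *	msix_entries[0].vector -> lpfc_sli4_sp_intr_handler, dev_id = phba
 *	msix_entries[i].vector -> lpfc_sli4_fp_intr_handler,
 *	                          dev_id = &fcp_eq_hdl[i - 1]
 *
 * Each fast-path handler thus receives its own fcp_eq_hdl, whose idx field
 * identifies which fast-path event queue that vector services.
 */
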
/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
	}

	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	return rc;
}

/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
 * has done request_irq() on before calling pci_disable_msi(). Failure to
 * do so results in a BUG_ON() and the device will be left with MSI enabled,
 * leaking its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: the configured interrupt mode (2: MSI-X, 1: MSI, 0: INTx).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the
 * driver, the driver will try to fall back from the configured interrupt
 * mode to an interrupt mode which is supported by the platform, kernel,
 * and device in the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			for (index = 0; index < phba->cfg_fcp_eq_count;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	phba->pport->work_port_events = 0;

	lpfc_sli4_hba_down(phba);

	lpfc_sli4_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues the PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls the lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Tear down the queues in the HBA */
	lpfc_sli4_queue_unset(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}
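
/*
 * Timing sketch for the graceful mailbox drain in lpfc_sli4_hba_unset()
 * above: the wait loop sleeps 10 ms per iteration up to a limit of
 * LPFC_ACTIVE_MBOX_WAIT_CNT iterations, so the driver waits at most about
 * LPFC_ACTIVE_MBOX_WAIT_CNT * 10 ms before forcefully completing the
 * outstanding mailbox command with MBX_NOT_FINISHED.
 */
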
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and determines whether the
 * driver states that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. If this routine
 * determines it can claim the HBA, it does all the initialization that it
 * needs to do to handle the HBA properly.
 *
 * Return code
 *      0 - driver can claim the device
 *      negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1401 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Now, try to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
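
/*
 * Walkthrough of the interrupt-enable retry loop in lpfc_pci_probe_one_s3()
 * above, assuming (hypothetically) cfg_use_msi = 2 on a platform that
 * cannot deliver MSI-X or MSI interrupts:
 *
 *	1. cfg_mode = 2: MSI-X is enabled, but the active-interrupt test
 *	   fails (too few interrupts seen after the mailbox commands), so
 *	   the mode is disabled and cfg_mode drops to 1.
 *	2. cfg_mode = 1: the same enable-test-disable sequence repeats
 *	   for MSI.
 *	3. cfg_mode = 0: INTx is enabled; intr_mode == 0 exits the loop
 *	   without requiring the active-interrupt test.
 */
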
6886 */ 6887 6888 /* HBA interrupt will be diabled after this call */ 6889 lpfc_sli_hba_down(phba); 6890 /* Stop kthread signal shall trigger work_done one more time */ 6891 kthread_stop(phba->worker_thread); 6892 /* Final cleanup of txcmplq and reset the HBA */ 6893 lpfc_sli_brdrestart(phba); 6894 6895 lpfc_stop_hba_timers(phba); 6896 spin_lock_irq(&phba->hbalock); 6897 list_del_init(&vport->listentry); 6898 spin_unlock_irq(&phba->hbalock); 6899 6900 lpfc_debugfs_terminate(vport); 6901 6902 /* Disable interrupt */ 6903 lpfc_sli_disable_intr(phba); 6904 6905 pci_set_drvdata(pdev, NULL); 6906 scsi_host_put(shost); 6907 6908 /* 6909 * Call scsi_free before mem_free since scsi bufs are released to their 6910 * corresponding pools here. 6911 */ 6912 lpfc_scsi_free(phba); 6913 lpfc_mem_free_all(phba); 6914 6915 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6916 phba->hbqslimp.virt, phba->hbqslimp.phys); 6917 6918 /* Free resources associated with SLI2 interface */ 6919 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6920 phba->slim2p.virt, phba->slim2p.phys); 6921 6922 /* unmap adapter SLIM and Control Registers */ 6923 iounmap(phba->ctrl_regs_memmap_p); 6924 iounmap(phba->slim_memmap_p); 6925 6926 lpfc_hba_free(phba); 6927 6928 pci_release_selected_regions(pdev, bars); 6929 pci_disable_device(pdev); 6930 } 6931 6932 /** 6933 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 6934 * @pdev: pointer to PCI device 6935 * @msg: power management message 6936 * 6937 * This routine is to be called from the kernel's PCI subsystem to support 6938 * system Power Management (PM) to device with SLI-3 interface spec. When 6939 * PM invokes this method, it quiesces the device by stopping the driver's 6940 * worker thread for the device, turning off device's interrupt and DMA, 6941 * and bring the device offline. Note that as the driver implements the 6942 * minimum PM requirements to a power-aware driver's PM support for the 6943 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 6944 * to the suspend() method call will be treated as SUSPEND and the driver will 6945 * fully reinitialize its device during resume() method call, the driver will 6946 * set device to PCI_D3hot state in PCI config space instead of setting it 6947 * according to the @msg provided by the PM. 6948 * 6949 * Return code 6950 * 0 - driver suspended the device 6951 * Error otherwise 6952 **/ 6953 static int 6954 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 6955 { 6956 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6957 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6958 6959 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6960 "0473 PCI device Power Management suspend.\n"); 6961 6962 /* Bring down the device */ 6963 lpfc_offline_prep(phba); 6964 lpfc_offline(phba); 6965 kthread_stop(phba->worker_thread); 6966 6967 /* Disable interrupt from device */ 6968 lpfc_sli_disable_intr(phba); 6969 6970 /* Save device state to PCI config space */ 6971 pci_save_state(pdev); 6972 pci_set_power_state(pdev, PCI_D3hot); 6973 6974 return 0; 6975 } 6976 6977 /** 6978 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 6979 * @pdev: pointer to PCI device 6980 * 6981 * This routine is to be called from the kernel's PCI subsystem to support 6982 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM 6983 * invokes this method, it restores the device's PCI config space state and 6984 * fully reinitializes the device and brings it online. Note that as the 6985 * driver implements the minimum PM requirements to a power-aware driver's 6986 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 6987 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 6988 * driver will fully reinitialize its device during resume() method call, 6989 * the device will be set to PCI_D0 directly in PCI config space before 6990 * restoring the state. 6991 * 6992 * Return code 6993 * 0 - driver suspended the device 6994 * Error otherwise 6995 **/ 6996 static int 6997 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 6998 { 6999 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7000 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7001 uint32_t intr_mode; 7002 int error; 7003 7004 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7005 "0452 PCI device Power Management resume.\n"); 7006 7007 /* Restore device state from PCI config space */ 7008 pci_set_power_state(pdev, PCI_D0); 7009 pci_restore_state(pdev); 7010 if (pdev->is_busmaster) 7011 pci_set_master(pdev); 7012 7013 /* Startup the kernel thread for this host adapter. */ 7014 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7015 "lpfc_worker_%d", phba->brd_no); 7016 if (IS_ERR(phba->worker_thread)) { 7017 error = PTR_ERR(phba->worker_thread); 7018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7019 "0434 PM resume failed to start worker " 7020 "thread: error=x%x.\n", error); 7021 return error; 7022 } 7023 7024 /* Configure and enable interrupt */ 7025 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7026 if (intr_mode == LPFC_INTR_ERROR) { 7027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7028 "0430 PM resume Failed to enable interrupt\n"); 7029 return -EIO; 7030 } else 7031 phba->intr_mode = intr_mode; 7032 7033 /* Restart HBA and bring it online */ 7034 lpfc_sli_brdrestart(phba); 7035 lpfc_online(phba); 7036 7037 /* Log the current active interrupt mode */ 7038 lpfc_log_intr_mode(phba, phba->intr_mode); 7039 7040 return 0; 7041 } 7042 7043 /** 7044 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7045 * @pdev: pointer to PCI device. 7046 * @state: the current PCI connection state. 7047 * 7048 * This routine is called from the PCI subsystem for I/O error handling to 7049 * device with SLI-3 interface spec. This function is called by the PCI 7050 * subsystem after a PCI bus error affecting this device has been detected. 7051 * When this function is invoked, it will need to stop all the I/Os and 7052 * interrupt(s) to the device. Once that is done, it will return 7053 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 7054 * as desired. 
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling on a
 * device with SLI-3 interface spec. The PCI subsystem calls this function
 * after a PCI bus error affecting this device has been detected. When
 * invoked, it stops all I/O and disables interrupt(s) to the device. Once
 * that is done, it returns PCI_ERS_RESULT_NEED_RESET so that the PCI
 * subsystem can perform the proper recovery.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	if (state == pci_channel_io_perm_failure) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 PCI channel I/O permanent failure\n");
		/* Block all SCSI devices' I/Os on the host */
		lpfc_scsi_dev_block(phba);
		/* Clean up all driver's outstanding SCSI I/Os */
		lpfc_sli_flush_fcp_rings(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);
	/*
	 * The firmware may have dropped some I/Os. Fail the iocbs on the
	 * txcmplq so the SCSI layer can retry them after the link has been
	 * re-established.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-3 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs the proper error
 * recovery and then calls this routine before calling the .resume method
 * to recover the device. This function initializes the HBA device and
 * enables its interrupt, but it leaves the HBA in an offline state without
 * passing any I/O traffic.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-3 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is OK to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_online(phba);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve, scaled by the number of
 * XRIs configured for the port (max_xri); returns 0 for non-SLI4 ports.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 4;
		else if (max_xri <= 256)
			return 8;
		else if (max_xri <= 512)
			return 16;
		else if (max_xri <= 1024)
			return 32;
		else
			return 48;
	} else
		return 0;
}
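/*
 * The SLI-4 probe below proceeds in the following order; each step has a
 * matching out_* unwind label in case a later step fails:
 *   1. allocate the HBA data structure and enable the PCI device
 *   2. set up the SLI API jump table and the SLI-4 PCI memory space
 *   3. set up phase-1 common and SLI-4 specific driver resources
 *   4. build the per-host iocb list and phase-2 common resources
 *   5. create the SCSI host and its sysfs attributes
 *   6. enable interrupts and bring the port up, falling back through
 *      interrupt modes until the active-interrupt test passes
 */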
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and driver to see whether the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. If this routine determines it can claim
 * the HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	int mcnt;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1409 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba,
				    phba->sli4_hba.max_cfg_param.max_xri);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Try to enable interrupt and bring up the device, falling back to
	 * a lesser interrupt mode whenever the active-interrupt test fails.
	 */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0426 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* Set up SLI-4 HBA */
		if (lpfc_sli4_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1421 Failed to set up hba\n");
			error = -ENODEV;
			goto out_disable_intr;
		}

		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
		if (intr_mode != 0)
			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
							    LPFC_ACT_INTR_CNT);

		/* Check active interrupts received only for MSI/MSI-X */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0451 Configure interrupt mode (%d) "
				"failed active interrupt test.\n",
				intr_mode);
		/* Unset the previous SLI-4 HBA setup */
		lpfc_sli4_unset_hba(phba);
		/* Try next level of interrupt mode */
		cfg_mode = --intr_mode;
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
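/*
 * The out_* labels above unwind the probe steps in exact reverse order of
 * their setup. The removal path below performs a comparable teardown for a
 * fully probed HBA.
 */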
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that, because the driver implements
 * only the minimum PM requirements of a power-aware driver, all possible PM
 * messages (SUSPEND, HIBERNATE, FREEZE) passed to this method are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. The driver therefore sets the device to the PCI_D3hot state
 * in PCI config space rather than to a state derived from @msg.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0298 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that, because
 * the driver implements only the minimum PM requirements of a power-aware
 * driver, all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND, and the driver fully
 * reinitializes its device during this resume() call. The device is
 * therefore set to the PCI_D0 state directly in PCI config space before its
 * state is restored.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
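/*
 * Unlike their SLI-3 counterparts, the three SLI-4 error handlers below are
 * currently placeholders: error_detected always requests a slot reset,
 * slot_reset always reports the device as recovered, and io_resume performs
 * no work.
 */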
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. The PCI subsystem calls this function
 * after a PCI bus error affecting this device has been detected. When
 * invoked, it needs to stop all I/O and interrupt(s) to the device. Once
 * that is done, it returns PCI_ERS_RESULT_NEED_RESET so that the PCI
 * subsystem can perform the proper recovery.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot. During
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs the proper error
 * recovery and then calls this routine before calling the .resume method to
 * recover the device. This function initializes the HBA device and enables
 * its interrupt, but it leaves the HBA in an offline state without passing
 * any I/O traffic.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is OK to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	return;
}
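/*
 * The routines below are the entry points actually registered with the PCI
 * subsystem. Probe dispatches on the PCI device ID, since no HBA structure
 * exists yet; the remaining entry points dispatch on phba->pci_dev_grp --
 * LPFC_PCI_DEV_LP for SLI-3 HBAs, LPFC_PCI_DEV_OC for SLI-4 HBAs -- to the
 * matching _s3/_s4 implementation above.
 */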
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and driver to
 * see whether the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	uint16_t dev_id;

	if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
		return -ENODEV;

	switch (dev_id) {
	case PCI_DEVICE_ID_TIGERSHARK:
		rc = lpfc_pci_probe_one_s4(pdev, pid);
		break;
	default:
		rc = lpfc_pci_probe_one_s3(pdev, pid);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
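/*
 * PCI ID table. Each entry is {vendor, device, subvendor, subdevice}; the
 * subvendor and subdevice fields are wildcarded with PCI_ANY_ID so that any
 * board built around one of these chips matches. The all-zero entry
 * terminates the table.
 */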
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};
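/*
 * Module init/exit. Note the ordering in lpfc_init() below: the FC
 * transport template(s) are attached before the PCI driver is registered,
 * so they are available by the time the first probe runs; on any failure
 * the already-attached transports are released again.
 */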
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 * 0 - successful
 * -ENOMEM - FC attach transport failed
 * all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
				"at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data,
			   _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
				"at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);