/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
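
	/*
	 * The VPD is pulled from the adapter in chunks: each DUMP_MEMORY
	 * mailbox command below asks for data starting at 'offset', the
	 * adapter reports the amount returned in varDmp.word_cnt, and the
	 * loop accumulates the chunks into lpfc_vpd_data until the adapter
	 * has no more data or DMP_VPD_SIZE bytes have been collected.
	 */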
	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, the internal async event support flag is
 * set to 1; otherwise, it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that retrieves
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
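
/*
 * Example of the decoding above (field values are illustrative): a word 7
 * that decodes to ver=5, rev=1, lev=0, dist=1 ('a'), num=2 is rendered as
 * "5.10a2"; with dist=3 and num=0 the distribution suffix is dropped and
 * the same base version is rendered as "5.10".
 */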

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	lpfc_read_sparam(phba, pmb, 0);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		/* Each WWNN byte yields two serial number characters:
		 * nibbles 0-9 map to '0'-'9' (0x30 + j) and nibbles
		 * 10-15 map to 'a'-'f' (0x61 + j - 10).
		 */
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			mb->un.varRdConfig.max_xri + 1;

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
				"1302 Invalid speed for this board: "
				"Reset link speed to auto: x%x\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
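
/*
 * Note on the dispatch above: routines such as lpfc_hba_down_post and
 * lpfc_handle_eratt are exposed through function pointers in struct
 * lpfc_hba that are filled in elsewhere during driver setup with the
 * SLI-3 (_s3) or SLI-4 (_s4) variant, for example (illustrative):
 *
 *	phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *
 * so common code can stay unaware of which SLI revision is in use.
 */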

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back with no error conditions detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expired
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}
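
/*
 * Heart-beat timing, end to end: with no mailbox outstanding the timer
 * fires every LPFC_HB_MBOX_INTERVAL (5) seconds; when a heart-beat mailbox
 * is issued, the timer is pushed out to LPFC_HB_MBOX_TIMEOUT (30) seconds
 * and hb_outstanding is set. A completion (lpfc_hb_mbox_cmpl above) clears
 * hb_outstanding and rearms the 5 second interval; if the 30 second timer
 * fires first with the mailbox still outstanding, lpfc_hb_timeout_handler
 * below treats the adapter as dead and takes the port offline.
 */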

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!pmboxq) {
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}

			lpfc_heart_beat(phba, pmboxq);
			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
			pmboxq->vport = phba->pport;
			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
				mempool_free(pmboxq, phba->mbox_mem_pool);
				mod_timer(&phba->hb_tmofunc,
					jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
				return;
			}
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			phba->hb_outstanding = 1;
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to take the HBA offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0459 Adapter heartbeat failure, "
					"taking this port offline.\n");

			spin_lock_irq(&phba->hbalock);
			psli->sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);

			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_unblock_mgmt_io(phba);
			phba->link_state = LPFC_HBA_ERROR;
			lpfc_hba_down_post(phba);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
1128 */ 1129 lpfc_offline_prep(phba); 1130 lpfc_offline(phba); 1131 lpfc_sli_brdrestart(phba); 1132 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1133 lpfc_unblock_mgmt_io(phba); 1134 return; 1135 } 1136 lpfc_unblock_mgmt_io(phba); 1137 } else if (phba->work_hs & HS_CRIT_TEMP) { 1138 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1139 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1140 temp_event_data.event_code = LPFC_CRIT_TEMP; 1141 temp_event_data.data = (uint32_t)temperature; 1142 1143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1144 "0406 Adapter maximum temperature exceeded " 1145 "(%ld), taking this port offline " 1146 "Data: x%x x%x x%x\n", 1147 temperature, phba->work_hs, 1148 phba->work_status[0], phba->work_status[1]); 1149 1150 shost = lpfc_shost_from_vport(phba->pport); 1151 fc_host_post_vendor_event(shost, fc_get_event_number(), 1152 sizeof(temp_event_data), 1153 (char *) &temp_event_data, 1154 SCSI_NL_VID_TYPE_PCI 1155 | PCI_VENDOR_ID_EMULEX); 1156 1157 spin_lock_irq(&phba->hbalock); 1158 phba->over_temp_state = HBA_OVER_TEMP; 1159 spin_unlock_irq(&phba->hbalock); 1160 lpfc_offline_eratt(phba); 1161 1162 } else { 1163 /* The if clause above forces this code path when the status 1164 * failure is a value other than FFER6. Do not call the offline 1165 * twice. This is the adapter hardware error path. 1166 */ 1167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1168 "0457 Adapter Hardware Error " 1169 "Data: x%x x%x x%x\n", 1170 phba->work_hs, 1171 phba->work_status[0], phba->work_status[1]); 1172 1173 event_data = FC_REG_DUMP_EVENT; 1174 shost = lpfc_shost_from_vport(vport); 1175 fc_host_post_vendor_event(shost, fc_get_event_number(), 1176 sizeof(event_data), (char *) &event_data, 1177 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1178 1179 lpfc_offline_eratt(phba); 1180 } 1181 return; 1182 } 1183 1184 /** 1185 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1186 * @phba: pointer to lpfc hba data structure. 1187 * 1188 * This routine is invoked to handle the SLI4 HBA hardware error attention 1189 * conditions. 1190 **/ 1191 static void 1192 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1193 { 1194 struct lpfc_vport *vport = phba->pport; 1195 uint32_t event_data; 1196 struct Scsi_Host *shost; 1197 1198 /* If the pci channel is offline, ignore possible errors, since 1199 * we cannot communicate with the pci card anyway. 1200 */ 1201 if (pci_channel_offline(phba->pcidev)) 1202 return; 1203 /* If resets are disabled then leave the HBA alone and return */ 1204 if (!phba->cfg_enable_hba_reset) 1205 return; 1206 1207 /* Send an internal error event to mgmt application */ 1208 lpfc_board_errevt_to_mgmt(phba); 1209 1210 /* For now, the actual action for SLI4 device handling is not 1211 * specified yet, just treated it as adaptor hardware failure 1212 */ 1213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1214 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", 1215 phba->work_status[0], phba->work_status[1]); 1216 1217 event_data = FC_REG_DUMP_EVENT; 1218 shost = lpfc_shost_from_vport(vport); 1219 fc_host_post_vendor_event(shost, fc_get_event_number(), 1220 sizeof(event_data), (char *) &event_data, 1221 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1222 1223 lpfc_sli4_offline_eratt(phba); 1224 } 1225 1226 /** 1227 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1228 * @phba: pointer to lpfc HBA data structure. 
1229 * 1230 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1231 * routine from the API jump table function pointer from the lpfc_hba struct. 1232 * 1233 * Return codes 1234 * 0 - sucess. 1235 * Any other value - error. 1236 **/ 1237 void 1238 lpfc_handle_eratt(struct lpfc_hba *phba) 1239 { 1240 (*phba->lpfc_handle_eratt)(phba); 1241 } 1242 1243 /** 1244 * lpfc_handle_latt - The HBA link event handler 1245 * @phba: pointer to lpfc hba data structure. 1246 * 1247 * This routine is invoked from the worker thread to handle a HBA host 1248 * attention link event. 1249 **/ 1250 void 1251 lpfc_handle_latt(struct lpfc_hba *phba) 1252 { 1253 struct lpfc_vport *vport = phba->pport; 1254 struct lpfc_sli *psli = &phba->sli; 1255 LPFC_MBOXQ_t *pmb; 1256 volatile uint32_t control; 1257 struct lpfc_dmabuf *mp; 1258 int rc = 0; 1259 1260 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1261 if (!pmb) { 1262 rc = 1; 1263 goto lpfc_handle_latt_err_exit; 1264 } 1265 1266 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1267 if (!mp) { 1268 rc = 2; 1269 goto lpfc_handle_latt_free_pmb; 1270 } 1271 1272 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1273 if (!mp->virt) { 1274 rc = 3; 1275 goto lpfc_handle_latt_free_mp; 1276 } 1277 1278 /* Cleanup any outstanding ELS commands */ 1279 lpfc_els_flush_all_cmd(phba); 1280 1281 psli->slistat.link_event++; 1282 lpfc_read_la(phba, pmb, mp); 1283 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 1284 pmb->vport = vport; 1285 /* Block ELS IOCBs until we have processed this mbox command */ 1286 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1287 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1288 if (rc == MBX_NOT_FINISHED) { 1289 rc = 4; 1290 goto lpfc_handle_latt_free_mbuf; 1291 } 1292 1293 /* Clear Link Attention in HA REG */ 1294 spin_lock_irq(&phba->hbalock); 1295 writel(HA_LATT, phba->HAregaddr); 1296 readl(phba->HAregaddr); /* flush */ 1297 spin_unlock_irq(&phba->hbalock); 1298 1299 return; 1300 1301 lpfc_handle_latt_free_mbuf: 1302 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1303 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1304 lpfc_handle_latt_free_mp: 1305 kfree(mp); 1306 lpfc_handle_latt_free_pmb: 1307 mempool_free(pmb, phba->mbox_mem_pool); 1308 lpfc_handle_latt_err_exit: 1309 /* Enable Link attention interrupts */ 1310 spin_lock_irq(&phba->hbalock); 1311 psli->sli_flag |= LPFC_PROCESS_LA; 1312 control = readl(phba->HCregaddr); 1313 control |= HC_LAINT_ENA; 1314 writel(control, phba->HCregaddr); 1315 readl(phba->HCregaddr); /* flush */ 1316 1317 /* Clear Link Attention in HA REG */ 1318 writel(HA_LATT, phba->HAregaddr); 1319 readl(phba->HAregaddr); /* flush */ 1320 spin_unlock_irq(&phba->hbalock); 1321 lpfc_linkdown(phba); 1322 phba->link_state = LPFC_HBA_ERROR; 1323 1324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1325 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1326 1327 return; 1328 } 1329 1330 /** 1331 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1332 * @phba: pointer to lpfc hba data structure. 1333 * @vpd: pointer to the vital product data. 1334 * @len: length of the vital product data in bytes. 1335 * 1336 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1337 * an array of characters. In this routine, the ModelName, ProgramType, and 1338 * ModelDesc, etc. fields of the phba data structure will be populated. 
1339 * 1340 * Return codes 1341 * 0 - pointer to the VPD passed in is NULL 1342 * 1 - success 1343 **/ 1344 int 1345 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1346 { 1347 uint8_t lenlo, lenhi; 1348 int Length; 1349 int i, j; 1350 int finished = 0; 1351 int index = 0; 1352 1353 if (!vpd) 1354 return 0; 1355 1356 /* Vital Product */ 1357 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1358 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1359 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1360 (uint32_t) vpd[3]); 1361 while (!finished && (index < (len - 4))) { 1362 switch (vpd[index]) { 1363 case 0x82: 1364 case 0x91: 1365 index += 1; 1366 lenlo = vpd[index]; 1367 index += 1; 1368 lenhi = vpd[index]; 1369 index += 1; 1370 i = ((((unsigned short)lenhi) << 8) + lenlo); 1371 index += i; 1372 break; 1373 case 0x90: 1374 index += 1; 1375 lenlo = vpd[index]; 1376 index += 1; 1377 lenhi = vpd[index]; 1378 index += 1; 1379 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1380 if (Length > len - index) 1381 Length = len - index; 1382 while (Length > 0) { 1383 /* Look for Serial Number */ 1384 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1385 index += 2; 1386 i = vpd[index]; 1387 index += 1; 1388 j = 0; 1389 Length -= (3+i); 1390 while(i--) { 1391 phba->SerialNumber[j++] = vpd[index++]; 1392 if (j == 31) 1393 break; 1394 } 1395 phba->SerialNumber[j] = 0; 1396 continue; 1397 } 1398 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1399 phba->vpd_flag |= VPD_MODEL_DESC; 1400 index += 2; 1401 i = vpd[index]; 1402 index += 1; 1403 j = 0; 1404 Length -= (3+i); 1405 while(i--) { 1406 phba->ModelDesc[j++] = vpd[index++]; 1407 if (j == 255) 1408 break; 1409 } 1410 phba->ModelDesc[j] = 0; 1411 continue; 1412 } 1413 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1414 phba->vpd_flag |= VPD_MODEL_NAME; 1415 index += 2; 1416 i = vpd[index]; 1417 index += 1; 1418 j = 0; 1419 Length -= (3+i); 1420 while(i--) { 1421 phba->ModelName[j++] = vpd[index++]; 1422 if (j == 79) 1423 break; 1424 } 1425 phba->ModelName[j] = 0; 1426 continue; 1427 } 1428 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1429 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1430 index += 2; 1431 i = vpd[index]; 1432 index += 1; 1433 j = 0; 1434 Length -= (3+i); 1435 while(i--) { 1436 phba->ProgramType[j++] = vpd[index++]; 1437 if (j == 255) 1438 break; 1439 } 1440 phba->ProgramType[j] = 0; 1441 continue; 1442 } 1443 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1444 phba->vpd_flag |= VPD_PORT; 1445 index += 2; 1446 i = vpd[index]; 1447 index += 1; 1448 j = 0; 1449 Length -= (3+i); 1450 while(i--) { 1451 phba->Port[j++] = vpd[index++]; 1452 if (j == 19) 1453 break; 1454 } 1455 phba->Port[j] = 0; 1456 continue; 1457 } 1458 else { 1459 index += 2; 1460 i = vpd[index]; 1461 index += 1; 1462 index += i; 1463 Length -= (3 + i); 1464 } 1465 } 1466 finished = 0; 1467 break; 1468 case 0x78: 1469 finished = 1; 1470 break; 1471 default: 1472 index ++; 1473 break; 1474 } 1475 } 1476 1477 return(1); 1478 } 1479 1480 /** 1481 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 1482 * @phba: pointer to lpfc hba data structure. 1483 * @mdp: pointer to the data structure to hold the derived model name. 1484 * @descp: pointer to the data structure to hold the derived description. 1485 * 1486 * This routine retrieves HBA's description based on its registered PCI device 1487 * ID. The @descp passed into this function points to an array of 256 chars. 

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		int max_speed;
		char *bus;
	} m = {"<Unknown>", 0, ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", max_speed, "PCIe IOV"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100-F", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK_S:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100-F-S", max_speed, "PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/* oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, FCoE Initiator, Port %s",
				m.name,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, m.max_speed,
				(GE) ? "GE" : "Gb",
				m.bus,
				(GE) ? "FCoE Adapter" :
					"Fibre Channel Adapter");
	}
}
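
/*
 * Example of the strings built above: an LPe11000 (PCI_DEVICE_ID_ZEPHYR)
 * whose lmt mask reports LMT_4Gb yields mdp "LPe11000" and descp
 * "Emulex LPe11000 4Gb PCIe Fibre Channel Adapter", while a GE board such
 * as the LP21000 (HORNET, LMT_10Gb) is described as
 * "Emulex LP21000 10GE PCIe FCoE Adapter".
 */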

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
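
/*
 * S(N,V) rotates the 32-bit value V left by N bits (the bits shifted out
 * the top re-enter at the bottom): for example, S(1, 0x80000000) == 0x1.
 * This is the rotation primitive used by the SHA-1 style hash below.
 */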
1813  **/
1814 static void
1815 lpfc_sha_init(uint32_t * HashResultPointer)
1816 {
1817 	HashResultPointer[0] = 0x67452301;
1818 	HashResultPointer[1] = 0xEFCDAB89;
1819 	HashResultPointer[2] = 0x98BADCFE;
1820 	HashResultPointer[3] = 0x10325476;
1821 	HashResultPointer[4] = 0xC3D2E1F0;
1822 }
1823
1824 /**
1825  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1826  * @HashResultPointer: pointer to an initial/result hash table.
1827  * @HashWorkingPointer: pointer to a working hash table.
1828  *
1829  * This routine iterates an initial hash table pointed by @HashResultPointer
1830  * with the values from the working hash table pointed by @HashWorkingPointer.
1831  * The results are put back into the initial hash table, returned through
1832  * the @HashResultPointer as the result hash table.
1833  **/
1834 static void
1835 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1836 {
1837 	int t;
1838 	uint32_t TEMP;
1839 	uint32_t A, B, C, D, E;
1840 	t = 16;
1841 	do {
1842 		HashWorkingPointer[t] =
1843 		    S(1,
1844 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1845 								     8] ^
1846 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1847 	} while (++t <= 79);
1848 	t = 0;
1849 	A = HashResultPointer[0];
1850 	B = HashResultPointer[1];
1851 	C = HashResultPointer[2];
1852 	D = HashResultPointer[3];
1853 	E = HashResultPointer[4];
1854
1855 	do {
1856 		if (t < 20) {
1857 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1858 		} else if (t < 40) {
1859 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1860 		} else if (t < 60) {
1861 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1862 		} else {
1863 			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1864 		}
1865 		TEMP += S(5, A) + E + HashWorkingPointer[t];
1866 		E = D;
1867 		D = C;
1868 		C = S(30, B);
1869 		B = A;
1870 		A = TEMP;
1871 	} while (++t <= 79);
1872
1873 	HashResultPointer[0] += A;
1874 	HashResultPointer[1] += B;
1875 	HashResultPointer[2] += C;
1876 	HashResultPointer[3] += D;
1877 	HashResultPointer[4] += E;
1878
1879 }
1880
1881 /**
1882  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1883  * @RandomChallenge: pointer to the entry of host challenge random number array.
1884  * @HashWorking: pointer to the entry of the working hash array.
1885  *
1886  * This routine calculates the working hash array referred by @HashWorking
1887  * from the challenge random numbers associated with the host, referred by
1888  * @RandomChallenge. The result is put into the entry of the working hash
1889  * array and returned by reference through @HashWorking.
1890  **/
1891 static void
1892 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1893 {
1894 	*HashWorking = (*RandomChallenge ^ *HashWorking);
1895 }
1896
1897 /**
1898  * lpfc_hba_init - Perform special handling for LC HBA initialization
1899  * @phba: pointer to lpfc hba data structure.
1900  * @hbainit: pointer to an array of unsigned 32-bit integers.
1901  *
1902  * This routine performs the special handling for LC HBA initialization.
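 *
 * Example (illustrative only): @hbainit must hold at least five
 * 32-bit words to receive the resulting digest:
 *
 *	uint32_t hbainit[5];
 *	lpfc_hba_init(phba, hbainit);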
1903 **/ 1904 void 1905 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1906 { 1907 int t; 1908 uint32_t *HashWorking; 1909 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 1910 1911 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 1912 if (!HashWorking) 1913 return; 1914 1915 HashWorking[0] = HashWorking[78] = *pwwnn++; 1916 HashWorking[1] = HashWorking[79] = *pwwnn; 1917 1918 for (t = 0; t < 7; t++) 1919 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 1920 1921 lpfc_sha_init(hbainit); 1922 lpfc_sha_iterate(hbainit, HashWorking); 1923 kfree(HashWorking); 1924 } 1925 1926 /** 1927 * lpfc_cleanup - Performs vport cleanups before deleting a vport 1928 * @vport: pointer to a virtual N_Port data structure. 1929 * 1930 * This routine performs the necessary cleanups before deleting the @vport. 1931 * It invokes the discovery state machine to perform necessary state 1932 * transitions and to release the ndlps associated with the @vport. Note, 1933 * the physical port is treated as @vport 0. 1934 **/ 1935 void 1936 lpfc_cleanup(struct lpfc_vport *vport) 1937 { 1938 struct lpfc_hba *phba = vport->phba; 1939 struct lpfc_nodelist *ndlp, *next_ndlp; 1940 int i = 0; 1941 1942 if (phba->link_state > LPFC_LINK_DOWN) 1943 lpfc_port_link_failure(vport); 1944 1945 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1946 if (!NLP_CHK_NODE_ACT(ndlp)) { 1947 ndlp = lpfc_enable_node(vport, ndlp, 1948 NLP_STE_UNUSED_NODE); 1949 if (!ndlp) 1950 continue; 1951 spin_lock_irq(&phba->ndlp_lock); 1952 NLP_SET_FREE_REQ(ndlp); 1953 spin_unlock_irq(&phba->ndlp_lock); 1954 /* Trigger the release of the ndlp memory */ 1955 lpfc_nlp_put(ndlp); 1956 continue; 1957 } 1958 spin_lock_irq(&phba->ndlp_lock); 1959 if (NLP_CHK_FREE_REQ(ndlp)) { 1960 /* The ndlp should not be in memory free mode already */ 1961 spin_unlock_irq(&phba->ndlp_lock); 1962 continue; 1963 } else 1964 /* Indicate request for freeing ndlp memory */ 1965 NLP_SET_FREE_REQ(ndlp); 1966 spin_unlock_irq(&phba->ndlp_lock); 1967 1968 if (vport->port_type != LPFC_PHYSICAL_PORT && 1969 ndlp->nlp_DID == Fabric_DID) { 1970 /* Just free up ndlp with Fabric_DID for vports */ 1971 lpfc_nlp_put(ndlp); 1972 continue; 1973 } 1974 1975 if (ndlp->nlp_type & NLP_FABRIC) 1976 lpfc_disc_state_machine(vport, ndlp, NULL, 1977 NLP_EVT_DEVICE_RECOVERY); 1978 1979 lpfc_disc_state_machine(vport, ndlp, NULL, 1980 NLP_EVT_DEVICE_RM); 1981 1982 } 1983 1984 /* At this point, ALL ndlp's should be gone 1985 * because of the previous NLP_EVT_DEVICE_RM. 1986 * Lets wait for this to happen, if needed. 1987 */ 1988 while (!list_empty(&vport->fc_nodes)) { 1989 if (i++ > 3000) { 1990 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1991 "0233 Nodelist not empty\n"); 1992 list_for_each_entry_safe(ndlp, next_ndlp, 1993 &vport->fc_nodes, nlp_listp) { 1994 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 1995 LOG_NODE, 1996 "0282 did:x%x ndlp:x%p " 1997 "usgmap:x%x refcnt:%d\n", 1998 ndlp->nlp_DID, (void *)ndlp, 1999 ndlp->nlp_usg_map, 2000 atomic_read( 2001 &ndlp->kref.refcount)); 2002 } 2003 break; 2004 } 2005 2006 /* Wait for any activity on ndlps to settle */ 2007 msleep(10); 2008 } 2009 } 2010 2011 /** 2012 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2013 * @vport: pointer to a virtual N_Port data structure. 2014 * 2015 * This routine stops all the timers associated with a @vport. This function 2016 * is invoked before disabling or deleting a @vport. Note that the physical 2017 * port is treated as @vport 0. 
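 *
 * The timers stopped here are the vport's els_tmofunc and fc_fdmitmo
 * timers; the discovery timer is canceled via lpfc_can_disctmo().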
2018  **/
2019 void
2020 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2021 {
2022 	del_timer_sync(&vport->els_tmofunc);
2023 	del_timer_sync(&vport->fc_fdmitmo);
2024 	lpfc_can_disctmo(vport);
2025 	return;
2026 }
2027
2028 /**
2029  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2030  * @phba: pointer to lpfc hba data structure.
2031  *
2032  * This routine stops all the timers associated with a HBA. This function is
2033  * invoked before either putting a HBA offline or unloading the driver.
2034  **/
2035 void
2036 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2037 {
2038 	lpfc_stop_vport_timers(phba->pport);
2039 	del_timer_sync(&phba->sli.mbox_tmo);
2040 	del_timer_sync(&phba->fabric_block_timer);
2041 	del_timer_sync(&phba->eratt_poll);
2042 	del_timer_sync(&phba->hb_tmofunc);
2043 	phba->hb_outstanding = 0;
2044
2045 	switch (phba->pci_dev_grp) {
2046 	case LPFC_PCI_DEV_LP:
2047 		/* Stop any LightPulse device specific driver timers */
2048 		del_timer_sync(&phba->fcp_poll_timer);
2049 		break;
2050 	case LPFC_PCI_DEV_OC:
2051 		/* Stop any OneConnect device specific driver timers */
2052 		break;
2053 	default:
2054 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2055 				"0297 Invalid device group (x%x)\n",
2056 				phba->pci_dev_grp);
2057 		break;
2058 	}
2059 	return;
2060 }
2061
2062 /**
2063  * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2064  * @phba: pointer to lpfc hba data structure.
2065  *
2066  * This routine marks a HBA's management interface as blocked. Once the HBA's
2067  * management interface is marked as blocked, all user space access to
2068  * the HBA, whether from the sysfs interface or the libdfc interface, will
2069  * be blocked. The HBA is set to block the management interface when the
2070  * driver prepares the HBA interface for online or offline.
2071  **/
2072 static void
2073 lpfc_block_mgmt_io(struct lpfc_hba * phba)
2074 {
2075 	unsigned long iflag;
2076
2077 	spin_lock_irqsave(&phba->hbalock, iflag);
2078 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2079 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2080 }
2081
2082 /**
2083  * lpfc_online - Initialize and bring a HBA online
2084  * @phba: pointer to lpfc hba data structure.
2085  *
2086  * This routine initializes the HBA and brings it online. During this
2087  * process, the management interface is blocked to prevent user space access
2088  * to the HBA from interfering with the driver initialization.
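 *
 * Outline of the sequence (see the function body):
 *
 *	lpfc_block_mgmt_io(phba);
 *	lpfc_sli_hba_setup(phba);	(lpfc_sli4_hba_setup() on SLI-4)
 *	lpfc_unblock_mgmt_io(phba);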
2089  *
2090  * Return codes
2091  *	0 - successful
2092  *	1 - failed
2093  **/
2094 int
2095 lpfc_online(struct lpfc_hba *phba)
2096 {
2097 	struct lpfc_vport *vport;
2098 	struct lpfc_vport **vports;
2099 	int i;
2100
2101 	if (!phba)
2102 		return 0;
2103 	vport = phba->pport;
2104
2105 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
2106 		return 0;
2107
2108 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2109 			"0458 Bring Adapter online\n");
2110
2111 	lpfc_block_mgmt_io(phba);
2112
2113 	if (!lpfc_sli_queue_setup(phba)) {
2114 		lpfc_unblock_mgmt_io(phba);
2115 		return 1;
2116 	}
2117
2118 	if (phba->sli_rev == LPFC_SLI_REV4) {
2119 		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2120 			lpfc_unblock_mgmt_io(phba);
2121 			return 1;
2122 		}
2123 	} else {
2124 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
2125 			lpfc_unblock_mgmt_io(phba);
2126 			return 1;
2127 		}
2128 	}
2129
2130 	vports = lpfc_create_vport_work_array(phba);
2131 	if (vports != NULL)
2132 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2133 			struct Scsi_Host *shost;
2134 			shost = lpfc_shost_from_vport(vports[i]);
2135 			spin_lock_irq(shost->host_lock);
2136 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2137 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2138 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2139 			spin_unlock_irq(shost->host_lock);
2140 		}
2141 	lpfc_destroy_vport_work_array(phba, vports);
2142
2143 	lpfc_unblock_mgmt_io(phba);
2144 	return 0;
2145 }
2146
2147 /**
2148  * lpfc_unblock_mgmt_io - Mark a HBA's management interface as not blocked
2149  * @phba: pointer to lpfc hba data structure.
2150  *
2151  * This routine marks a HBA's management interface as not blocked. Once the
2152  * HBA's management interface is marked as not blocked, all user space
2153  * access to the HBA, whether from the sysfs interface or the libdfc
2154  * interface, will be allowed. The HBA is set to block the management
2155  * interface when the driver prepares the HBA interface for online or offline
2156  * and then set to unblock the management interface afterwards.
2157  **/
2158 void
2159 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2160 {
2161 	unsigned long iflag;
2162
2163 	spin_lock_irqsave(&phba->hbalock, iflag);
2164 	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2165 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2166 }
2167
2168 /**
2169  * lpfc_offline_prep - Prepare a HBA to be brought offline
2170  * @phba: pointer to lpfc hba data structure.
2171  *
2172  * This routine is invoked to prepare a HBA to be brought offline. It performs
2173  * unregistration login to all the nodes on all vports and flushes the mailbox
2174  * queue to make it ready to be brought offline.
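 *
 * Outline of the sequence (see the function body):
 *
 *	lpfc_block_mgmt_io(phba);
 *	lpfc_linkdown(phba);
 *	lpfc_unreg_rpi() for each active ndlp on each vport
 *	lpfc_sli_mbox_sys_shutdown(phba);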
2175 **/ 2176 void 2177 lpfc_offline_prep(struct lpfc_hba * phba) 2178 { 2179 struct lpfc_vport *vport = phba->pport; 2180 struct lpfc_nodelist *ndlp, *next_ndlp; 2181 struct lpfc_vport **vports; 2182 int i; 2183 2184 if (vport->fc_flag & FC_OFFLINE_MODE) 2185 return; 2186 2187 lpfc_block_mgmt_io(phba); 2188 2189 lpfc_linkdown(phba); 2190 2191 /* Issue an unreg_login to all nodes on all vports */ 2192 vports = lpfc_create_vport_work_array(phba); 2193 if (vports != NULL) { 2194 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2195 struct Scsi_Host *shost; 2196 2197 if (vports[i]->load_flag & FC_UNLOADING) 2198 continue; 2199 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 2200 shost = lpfc_shost_from_vport(vports[i]); 2201 list_for_each_entry_safe(ndlp, next_ndlp, 2202 &vports[i]->fc_nodes, 2203 nlp_listp) { 2204 if (!NLP_CHK_NODE_ACT(ndlp)) 2205 continue; 2206 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2207 continue; 2208 if (ndlp->nlp_type & NLP_FABRIC) { 2209 lpfc_disc_state_machine(vports[i], ndlp, 2210 NULL, NLP_EVT_DEVICE_RECOVERY); 2211 lpfc_disc_state_machine(vports[i], ndlp, 2212 NULL, NLP_EVT_DEVICE_RM); 2213 } 2214 spin_lock_irq(shost->host_lock); 2215 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2216 spin_unlock_irq(shost->host_lock); 2217 lpfc_unreg_rpi(vports[i], ndlp); 2218 } 2219 } 2220 } 2221 lpfc_destroy_vport_work_array(phba, vports); 2222 2223 lpfc_sli_mbox_sys_shutdown(phba); 2224 } 2225 2226 /** 2227 * lpfc_offline - Bring a HBA offline 2228 * @phba: pointer to lpfc hba data structure. 2229 * 2230 * This routine actually brings a HBA offline. It stops all the timers 2231 * associated with the HBA, brings down the SLI layer, and eventually 2232 * marks the HBA as in offline state for the upper layer protocol. 2233 **/ 2234 void 2235 lpfc_offline(struct lpfc_hba *phba) 2236 { 2237 struct Scsi_Host *shost; 2238 struct lpfc_vport **vports; 2239 int i; 2240 2241 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2242 return; 2243 2244 /* stop port and all timers associated with this hba */ 2245 lpfc_stop_port(phba); 2246 vports = lpfc_create_vport_work_array(phba); 2247 if (vports != NULL) 2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2249 lpfc_stop_vport_timers(vports[i]); 2250 lpfc_destroy_vport_work_array(phba, vports); 2251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2252 "0460 Bring Adapter offline\n"); 2253 /* Bring down the SLI Layer and cleanup. The HBA is offline 2254 now. */ 2255 lpfc_sli_hba_down(phba); 2256 spin_lock_irq(&phba->hbalock); 2257 phba->work_ha = 0; 2258 spin_unlock_irq(&phba->hbalock); 2259 vports = lpfc_create_vport_work_array(phba); 2260 if (vports != NULL) 2261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2262 shost = lpfc_shost_from_vport(vports[i]); 2263 spin_lock_irq(shost->host_lock); 2264 vports[i]->work_port_events = 0; 2265 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2266 spin_unlock_irq(shost->host_lock); 2267 } 2268 lpfc_destroy_vport_work_array(phba, vports); 2269 } 2270 2271 /** 2272 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2273 * @phba: pointer to lpfc hba data structure. 2274 * 2275 * This routine is to free all the SCSI buffers and IOCBs from the driver 2276 * list back to kernel. It is called from lpfc_pci_remove_one to free 2277 * the internal resources before the device is removed from the system. 
2278 * 2279 * Return codes 2280 * 0 - successful (for now, it always returns 0) 2281 **/ 2282 static int 2283 lpfc_scsi_free(struct lpfc_hba *phba) 2284 { 2285 struct lpfc_scsi_buf *sb, *sb_next; 2286 struct lpfc_iocbq *io, *io_next; 2287 2288 spin_lock_irq(&phba->hbalock); 2289 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2290 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2291 list_del(&sb->list); 2292 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2293 sb->dma_handle); 2294 kfree(sb); 2295 phba->total_scsi_bufs--; 2296 } 2297 2298 /* Release all the lpfc_iocbq entries maintained by this host. */ 2299 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2300 list_del(&io->list); 2301 kfree(io); 2302 phba->total_iocbq_bufs--; 2303 } 2304 2305 spin_unlock_irq(&phba->hbalock); 2306 2307 return 0; 2308 } 2309 2310 /** 2311 * lpfc_create_port - Create an FC port 2312 * @phba: pointer to lpfc hba data structure. 2313 * @instance: a unique integer ID to this FC port. 2314 * @dev: pointer to the device data structure. 2315 * 2316 * This routine creates a FC port for the upper layer protocol. The FC port 2317 * can be created on top of either a physical port or a virtual port provided 2318 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2319 * and associates the FC port created before adding the shost into the SCSI 2320 * layer. 2321 * 2322 * Return codes 2323 * @vport - pointer to the virtual N_Port data structure. 2324 * NULL - port create failed. 2325 **/ 2326 struct lpfc_vport * 2327 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2328 { 2329 struct lpfc_vport *vport; 2330 struct Scsi_Host *shost; 2331 int error = 0; 2332 2333 if (dev != &phba->pcidev->dev) 2334 shost = scsi_host_alloc(&lpfc_vport_template, 2335 sizeof(struct lpfc_vport)); 2336 else 2337 shost = scsi_host_alloc(&lpfc_template, 2338 sizeof(struct lpfc_vport)); 2339 if (!shost) 2340 goto out; 2341 2342 vport = (struct lpfc_vport *) shost->hostdata; 2343 vport->phba = phba; 2344 vport->load_flag |= FC_LOADING; 2345 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2346 vport->fc_rscn_flush = 0; 2347 2348 lpfc_get_vport_cfgparam(vport); 2349 shost->unique_id = instance; 2350 shost->max_id = LPFC_MAX_TARGET; 2351 shost->max_lun = vport->cfg_max_luns; 2352 shost->this_id = -1; 2353 shost->max_cmd_len = 16; 2354 if (phba->sli_rev == LPFC_SLI_REV4) { 2355 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; 2356 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2357 } 2358 2359 /* 2360 * Set initial can_queue value since 0 is no longer supported and 2361 * scsi_add_host will fail. This will be adjusted later based on the 2362 * max xri value determined in hba setup. 2363 */ 2364 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2365 if (dev != &phba->pcidev->dev) { 2366 shost->transportt = lpfc_vport_transport_template; 2367 vport->port_type = LPFC_NPIV_PORT; 2368 } else { 2369 shost->transportt = lpfc_transport_template; 2370 vport->port_type = LPFC_PHYSICAL_PORT; 2371 } 2372 2373 /* Initialize all internally managed lists. 
*/ 2374 INIT_LIST_HEAD(&vport->fc_nodes); 2375 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2376 spin_lock_init(&vport->work_port_lock); 2377 2378 init_timer(&vport->fc_disctmo); 2379 vport->fc_disctmo.function = lpfc_disc_timeout; 2380 vport->fc_disctmo.data = (unsigned long)vport; 2381 2382 init_timer(&vport->fc_fdmitmo); 2383 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2384 vport->fc_fdmitmo.data = (unsigned long)vport; 2385 2386 init_timer(&vport->els_tmofunc); 2387 vport->els_tmofunc.function = lpfc_els_timeout; 2388 vport->els_tmofunc.data = (unsigned long)vport; 2389 2390 error = scsi_add_host(shost, dev); 2391 if (error) 2392 goto out_put_shost; 2393 2394 spin_lock_irq(&phba->hbalock); 2395 list_add_tail(&vport->listentry, &phba->port_list); 2396 spin_unlock_irq(&phba->hbalock); 2397 return vport; 2398 2399 out_put_shost: 2400 scsi_host_put(shost); 2401 out: 2402 return NULL; 2403 } 2404 2405 /** 2406 * destroy_port - destroy an FC port 2407 * @vport: pointer to an lpfc virtual N_Port data structure. 2408 * 2409 * This routine destroys a FC port from the upper layer protocol. All the 2410 * resources associated with the port are released. 2411 **/ 2412 void 2413 destroy_port(struct lpfc_vport *vport) 2414 { 2415 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2416 struct lpfc_hba *phba = vport->phba; 2417 2418 lpfc_debugfs_terminate(vport); 2419 fc_remove_host(shost); 2420 scsi_remove_host(shost); 2421 2422 spin_lock_irq(&phba->hbalock); 2423 list_del_init(&vport->listentry); 2424 spin_unlock_irq(&phba->hbalock); 2425 2426 lpfc_cleanup(vport); 2427 return; 2428 } 2429 2430 /** 2431 * lpfc_get_instance - Get a unique integer ID 2432 * 2433 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2434 * uses the kernel idr facility to perform the task. 2435 * 2436 * Return codes: 2437 * instance - a unique integer ID allocated as the new instance. 2438 * -1 - lpfc get instance failed. 2439 **/ 2440 int 2441 lpfc_get_instance(void) 2442 { 2443 int instance = 0; 2444 2445 /* Assign an unused number */ 2446 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2447 return -1; 2448 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2449 return -1; 2450 return instance; 2451 } 2452 2453 /** 2454 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2455 * @shost: pointer to SCSI host data structure. 2456 * @time: elapsed time of the scan in jiffies. 2457 * 2458 * This routine is called by the SCSI layer with a SCSI host to determine 2459 * whether the scan host is finished. 2460 * 2461 * Note: there is no scan_start function as adapter initialization will have 2462 * asynchronously kicked off the link initialization. 2463 * 2464 * Return codes 2465 * 0 - SCSI host scan is not over yet. 2466 * 1 - SCSI host scan is over. 2467 **/ 2468 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2469 { 2470 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2471 struct lpfc_hba *phba = vport->phba; 2472 int stat = 0; 2473 2474 spin_lock_irq(shost->host_lock); 2475 2476 if (vport->load_flag & FC_UNLOADING) { 2477 stat = 1; 2478 goto finished; 2479 } 2480 if (time >= 30 * HZ) { 2481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2482 "0461 Scanning longer than 30 " 2483 "seconds. Continuing initialization\n"); 2484 stat = 1; 2485 goto finished; 2486 } 2487 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2489 "0465 Link down longer than 15 " 2490 "seconds. 
Continuing initialization\n");
2491 		stat = 1;
2492 		goto finished;
2493 	}
2494
2495 	if (vport->port_state != LPFC_VPORT_READY)
2496 		goto finished;
2497 	if (vport->num_disc_nodes || vport->fc_prli_sent)
2498 		goto finished;
2499 	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2500 		goto finished;
2501 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2502 		goto finished;
2503
2504 	stat = 1;
2505
2506 finished:
2507 	spin_unlock_irq(shost->host_lock);
2508 	return stat;
2509 }
2510
2511 /**
2512  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2513  * @shost: pointer to SCSI host data structure.
2514  *
2515  * This routine initializes a given SCSI host's attributes on a FC port. The
2516  * SCSI host can be either on top of a physical port or a virtual port.
2517  **/
2518 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2519 {
2520 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2521 	struct lpfc_hba   *phba = vport->phba;
2522 	/*
2523 	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2524 	 */
2525
2526 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2527 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2528 	fc_host_supported_classes(shost) = FC_COS_CLASS3;
2529
2530 	memset(fc_host_supported_fc4s(shost), 0,
2531 	       sizeof(fc_host_supported_fc4s(shost)));
2532 	fc_host_supported_fc4s(shost)[2] = 1;
2533 	fc_host_supported_fc4s(shost)[7] = 1;
2534
2535 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2536 				 sizeof fc_host_symbolic_name(shost));
2537
2538 	fc_host_supported_speeds(shost) = 0;
2539 	if (phba->lmt & LMT_10Gb)
2540 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2541 	if (phba->lmt & LMT_8Gb)
2542 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2543 	if (phba->lmt & LMT_4Gb)
2544 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2545 	if (phba->lmt & LMT_2Gb)
2546 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2547 	if (phba->lmt & LMT_1Gb)
2548 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2549
2550 	fc_host_maxframe_size(shost) =
2551 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2552 		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2553
2554 	/* This value is also unchanging */
2555 	memset(fc_host_active_fc4s(shost), 0,
2556 	       sizeof(fc_host_active_fc4s(shost)));
2557 	fc_host_active_fc4s(shost)[2] = 1;
2558 	fc_host_active_fc4s(shost)[7] = 1;
2559
2560 	fc_host_max_npiv_vports(shost) = phba->max_vpi;
2561 	spin_lock_irq(shost->host_lock);
2562 	vport->load_flag &= ~FC_LOADING;
2563 	spin_unlock_irq(shost->host_lock);
2564 }
2565
2566 /**
2567  * lpfc_stop_port_s3 - Stop SLI3 device port
2568  * @phba: pointer to lpfc hba data structure.
2569  *
2570  * This routine is invoked to stop an SLI3 device port; it stops the device
2571  * from generating interrupts and stops the device driver's timers for the
2572  * device.
2573  **/
2574 static void
2575 lpfc_stop_port_s3(struct lpfc_hba *phba)
2576 {
2577 	/* Clear all interrupt enable conditions */
2578 	writel(0, phba->HCregaddr);
2579 	readl(phba->HCregaddr); /* flush */
2580 	/* Clear all pending interrupts */
2581 	writel(0xffffffff, phba->HAregaddr);
2582 	readl(phba->HAregaddr); /* flush */
2583
2584 	/* Reset some HBA SLI setup states */
2585 	lpfc_stop_hba_timers(phba);
2586 	phba->pport->work_port_events = 0;
2587 }
2588
2589 /**
2590  * lpfc_stop_port_s4 - Stop SLI4 device port
2591  * @phba: pointer to lpfc hba data structure.
2592  *
2593  * This routine is invoked to stop an SLI4 device port; it stops the device
2594  * from generating interrupts and stops the device driver's timers for the
2595  * device.
2596  **/
2597 static void
2598 lpfc_stop_port_s4(struct lpfc_hba *phba)
2599 {
2600 	/* Reset some HBA SLI4 setup states */
2601 	lpfc_stop_hba_timers(phba);
2602 	phba->pport->work_port_events = 0;
2603 	phba->sli4_hba.intr_enable = 0;
2604 	/* Hard clear it for now, shall have more graceful way to wait later */
2605 	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2606 }
2607
2608 /**
2609  * lpfc_stop_port - Wrapper function for stopping hba port
2610  * @phba: Pointer to HBA context object.
2611  *
2612  * This routine wraps the actual SLI3 or SLI4 hba stop port routine through
2613  * the API jump table function pointer in the lpfc_hba struct.
2614  **/
2615 void
2616 lpfc_stop_port(struct lpfc_hba *phba)
2617 {
2618 	phba->lpfc_stop_port(phba);
2619 }
2620
2621 /**
2622  * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2623  * @phba: pointer to lpfc hba data structure.
2624  *
2625  * This routine is invoked to remove the driver default fcf record from
2626  * the port. This routine currently acts on FCF Index 0.
2627  *
2628  **/
2629 void
2630 lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2631 {
2632 	int rc = 0;
2633 	LPFC_MBOXQ_t *mboxq;
2634 	struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2635 	uint32_t mbox_tmo, req_len;
2636 	uint32_t shdr_status, shdr_add_status;
2637
2638 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2639 	if (!mboxq) {
2640 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2641 			"2020 Failed to allocate mbox for DEL_FCF cmd\n");
2642 		return;
2643 	}
2644
2645 	req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2646 		  sizeof(struct lpfc_sli4_cfg_mhdr);
2647 	rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2648 			      LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2649 			      req_len, LPFC_SLI4_MBX_EMBED);
2650 	/*
2651 	 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2652 	 * supports multiple FCF indices.
2653 	 */
2654 	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2655 	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2656 	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2657 	       phba->fcf.fcf_indx);
2658
2659 	if (!phba->sli4_hba.intr_enable)
2660 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2661 	else {
2662 		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2663 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2664 	}
2665 	/* The IOCTL status is embedded in the mailbox subheader. */
2666 	shdr_status = bf_get(lpfc_mbox_hdr_status,
2667 			     &del_fcf_record->header.cfg_shdr.response);
2668 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2669 				 &del_fcf_record->header.cfg_shdr.response);
2670 	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2671 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2672 				"2516 DEL FCF of default FCF Index failed "
2673 				"mbx status x%x, status x%x add_status x%x\n",
2674 				rc, shdr_status, shdr_add_status);
2675 	}
2676 	if (rc != MBX_TIMEOUT)
2677 		mempool_free(mboxq, phba->mbox_mem_pool);
2678 }
2679
2680 /**
2681  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2682  * @phba: pointer to lpfc hba data structure.
2683  * @acqe_link: pointer to the async link completion queue entry.
2684  *
2685  * This routine is to parse the SLI4 link-attention link fault code and
2686  * translate it into the base driver's read link attention mailbox command
2687  * status.
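 *
 * For example, LPFC_ASYNC_LINK_FAULT_NONE, _LOCAL and _REMOTE all map
 * to 0 (no mailbox error); any unrecognized fault code maps to
 * MBXERR_ERROR.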
2688 * 2689 * Return: Link-attention status in terms of base driver's coding. 2690 **/ 2691 static uint16_t 2692 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 2693 struct lpfc_acqe_link *acqe_link) 2694 { 2695 uint16_t latt_fault; 2696 2697 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 2698 case LPFC_ASYNC_LINK_FAULT_NONE: 2699 case LPFC_ASYNC_LINK_FAULT_LOCAL: 2700 case LPFC_ASYNC_LINK_FAULT_REMOTE: 2701 latt_fault = 0; 2702 break; 2703 default: 2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2705 "0398 Invalid link fault code: x%x\n", 2706 bf_get(lpfc_acqe_link_fault, acqe_link)); 2707 latt_fault = MBXERR_ERROR; 2708 break; 2709 } 2710 return latt_fault; 2711 } 2712 2713 /** 2714 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 2715 * @phba: pointer to lpfc hba data structure. 2716 * @acqe_link: pointer to the async link completion queue entry. 2717 * 2718 * This routine is to parse the SLI4 link attention type and translate it 2719 * into the base driver's link attention type coding. 2720 * 2721 * Return: Link attention type in terms of base driver's coding. 2722 **/ 2723 static uint8_t 2724 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 2725 struct lpfc_acqe_link *acqe_link) 2726 { 2727 uint8_t att_type; 2728 2729 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 2730 case LPFC_ASYNC_LINK_STATUS_DOWN: 2731 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 2732 att_type = AT_LINK_DOWN; 2733 break; 2734 case LPFC_ASYNC_LINK_STATUS_UP: 2735 /* Ignore physical link up events - wait for logical link up */ 2736 att_type = AT_RESERVED; 2737 break; 2738 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 2739 att_type = AT_LINK_UP; 2740 break; 2741 default: 2742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2743 "0399 Invalid link attention type: x%x\n", 2744 bf_get(lpfc_acqe_link_status, acqe_link)); 2745 att_type = AT_RESERVED; 2746 break; 2747 } 2748 return att_type; 2749 } 2750 2751 /** 2752 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 2753 * @phba: pointer to lpfc hba data structure. 2754 * @acqe_link: pointer to the async link completion queue entry. 2755 * 2756 * This routine is to parse the SLI4 link-attention link speed and translate 2757 * it into the base driver's link-attention link speed coding. 2758 * 2759 * Return: Link-attention link speed in terms of base driver's coding. 2760 **/ 2761 static uint8_t 2762 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 2763 struct lpfc_acqe_link *acqe_link) 2764 { 2765 uint8_t link_speed; 2766 2767 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 2768 case LPFC_ASYNC_LINK_SPEED_ZERO: 2769 link_speed = LA_UNKNW_LINK; 2770 break; 2771 case LPFC_ASYNC_LINK_SPEED_10MBPS: 2772 link_speed = LA_UNKNW_LINK; 2773 break; 2774 case LPFC_ASYNC_LINK_SPEED_100MBPS: 2775 link_speed = LA_UNKNW_LINK; 2776 break; 2777 case LPFC_ASYNC_LINK_SPEED_1GBPS: 2778 link_speed = LA_1GHZ_LINK; 2779 break; 2780 case LPFC_ASYNC_LINK_SPEED_10GBPS: 2781 link_speed = LA_10GHZ_LINK; 2782 break; 2783 default: 2784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2785 "0483 Invalid link-attention link speed: x%x\n", 2786 bf_get(lpfc_acqe_link_speed, acqe_link)); 2787 link_speed = LA_UNKNW_LINK; 2788 break; 2789 } 2790 return link_speed; 2791 } 2792 2793 /** 2794 * lpfc_sli4_async_link_evt - Process the asynchronous link event 2795 * @phba: pointer to lpfc hba data structure. 2796 * @acqe_link: pointer to the async link completion queue entry. 2797 * 2798 * This routine is to handle the SLI4 asynchronous link event. 
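 *
 * Outline (see the function body): the ACQE is turned into a pseudo
 * READ_LA mailbox completion:
 *
 *	lpfc_read_la(phba, pmb, mp);
 *	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
 *	lpfc_mbx_cmpl_read_la(phba, pmb);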
2799  **/
2800 static void
2801 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2802 			 struct lpfc_acqe_link *acqe_link)
2803 {
2804 	struct lpfc_dmabuf *mp;
2805 	LPFC_MBOXQ_t *pmb;
2806 	MAILBOX_t *mb;
2807 	READ_LA_VAR *la;
2808 	uint8_t att_type;
2809
2810 	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2811 	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2812 		return;
2813 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2814 	if (!pmb) {
2815 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2816 				"0395 The mboxq allocation failed\n");
2817 		return;
2818 	}
2819 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2820 	if (!mp) {
2821 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2822 				"0396 The lpfc_dmabuf allocation failed\n");
2823 		goto out_free_pmb;
2824 	}
2825 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2826 	if (!mp->virt) {
2827 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2828 				"0397 The mbuf allocation failed\n");
2829 		goto out_free_dmabuf;
2830 	}
2831
2832 	/* Cleanup any outstanding ELS commands */
2833 	lpfc_els_flush_all_cmd(phba);
2834
2835 	/* Block ELS IOCBs until we are done processing the link event */
2836 	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2837
2838 	/* Update link event statistics */
2839 	phba->sli.slistat.link_event++;
2840
2841 	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2842 	lpfc_read_la(phba, pmb, mp);
2843 	pmb->vport = phba->pport;
2844
2845 	/* Parse and translate status field */
2846 	mb = &pmb->u.mb;
2847 	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2848
2849 	/* Parse and translate link attention fields */
2850 	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2851 	la->eventTag = acqe_link->event_tag;
2852 	la->attType = att_type;
2853 	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2854
2855 	/* Fake the following irrelevant fields */
2856 	la->topology = TOPOLOGY_PT_PT;
2857 	la->granted_AL_PA = 0;
2858 	la->il = 0;
2859 	la->pb = 0;
2860 	la->fa = 0;
2861 	la->mm = 0;
2862
2863 	/* Keep the link status for extra SLI4 state machine reference */
2864 	phba->sli4_hba.link_state.speed =
2865 				bf_get(lpfc_acqe_link_speed, acqe_link);
2866 	phba->sli4_hba.link_state.duplex =
2867 				bf_get(lpfc_acqe_link_duplex, acqe_link);
2868 	phba->sli4_hba.link_state.status =
2869 				bf_get(lpfc_acqe_link_status, acqe_link);
2870 	phba->sli4_hba.link_state.physical =
2871 				bf_get(lpfc_acqe_link_physical, acqe_link);
2872 	phba->sli4_hba.link_state.fault =
2873 				bf_get(lpfc_acqe_link_fault, acqe_link);
2874
2875 	/* Invoke the lpfc_handle_latt mailbox command callback function */
2876 	lpfc_mbx_cmpl_read_la(phba, pmb);
2877
2878 	return;
2879
2880 out_free_dmabuf:
2881 	kfree(mp);
2882 out_free_pmb:
2883 	mempool_free(pmb, phba->mbox_mem_pool);
2884 }
2885
2886 /**
2887  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2888  * @phba: pointer to lpfc hba data structure.
2889  * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2890  *
2891  * This routine is to handle the SLI4 asynchronous fcoe event.
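 *
 * The event types handled are NEW_FCF (re-read the FCF table),
 * FCF_TABLE_FULL (log only) and FCF_DEAD (treated as a link down,
 * since only one FCF is currently supported).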
2892  **/
2893 static void
2894 lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2895 			 struct lpfc_acqe_fcoe *acqe_fcoe)
2896 {
2897 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2898 	int rc;
2899
2900 	switch (event_type) {
2901 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2902 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903 			"2546 New FCF found index 0x%x tag 0x%x\n",
2904 			acqe_fcoe->fcf_index,
2905 			acqe_fcoe->event_tag);
2906 		/*
2907 		 * If the current FCF is in discovered state,
2908 		 * do nothing.
2909 		 */
2910 		spin_lock_irq(&phba->hbalock);
2911 		if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2912 			spin_unlock_irq(&phba->hbalock);
2913 			break;
2914 		}
2915 		spin_unlock_irq(&phba->hbalock);
2916
2917 		/* Read the FCF table and re-discover SAN. */
2918 		rc = lpfc_sli4_read_fcf_record(phba,
2919 			LPFC_FCOE_FCF_GET_FIRST);
2920 		if (rc)
2921 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2922 				"2547 Read FCF record failed 0x%x\n",
2923 				rc);
2924 		break;
2925
2926 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2927 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 			"2548 FCF Table full count 0x%x tag 0x%x\n",
2929 			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2930 			acqe_fcoe->event_tag);
2931 		break;
2932
2933 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2934 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2935 			"2549 FCF disconnected from network index 0x%x"
2936 			" tag 0x%x\n", acqe_fcoe->fcf_index,
2937 			acqe_fcoe->event_tag);
2938 		/* If the event is not for currently used fcf do nothing */
2939 		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2940 			break;
2941 		/*
2942 		 * Currently, the driver supports only one FCF - so treat
2943 		 * this as a link down.
2944 		 */
2945 		lpfc_linkdown(phba);
2946 		/* Unregister FCF if no devices connected to it */
2947 		lpfc_unregister_unused_fcf(phba);
2948 		break;
2949
2950 	default:
2951 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 			"0288 Unknown FCoE event type 0x%x event tag "
2953 			"0x%x\n", event_type, acqe_fcoe->event_tag);
2954 		break;
2955 	}
2956 }
2957
2958 /**
2959  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2960  * @phba: pointer to lpfc hba data structure.
2961  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2962  *
2963  * This routine is to handle the SLI4 asynchronous dcbx event.
2964  **/
2965 static void
2966 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2967 			 struct lpfc_acqe_dcbx *acqe_dcbx)
2968 {
2969 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2970 			"0290 The SLI4 DCBX asynchronous event is not "
2971 			"handled yet\n");
2972 }
2973
2974 /**
2975  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
2976  * @phba: pointer to lpfc hba data structure.
2977  *
2978  * This routine is invoked by the worker thread to process all the pending
2979  * SLI4 asynchronous events.
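 *
 * Dispatch sketch (see the function body):
 *
 *	switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
 *	case LPFC_TRAILER_CODE_LINK:	link event
 *	case LPFC_TRAILER_CODE_FCOE:	fcoe event
 *	case LPFC_TRAILER_CODE_DCBX:	dcbx event
 *	}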
2980  **/
2981 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2982 {
2983 	struct lpfc_cq_event *cq_event;
2984
2985 	/* First, declare the async event has been handled */
2986 	spin_lock_irq(&phba->hbalock);
2987 	phba->hba_flag &= ~ASYNC_EVENT;
2988 	spin_unlock_irq(&phba->hbalock);
2989 	/* Now, handle all the async events */
2990 	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2991 		/* Get the first event from the head of the event queue */
2992 		spin_lock_irq(&phba->hbalock);
2993 		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2994 				 cq_event, struct lpfc_cq_event, list);
2995 		spin_unlock_irq(&phba->hbalock);
2996 		/* Process the asynchronous event */
2997 		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2998 		case LPFC_TRAILER_CODE_LINK:
2999 			lpfc_sli4_async_link_evt(phba,
3000 						 &cq_event->cqe.acqe_link);
3001 			break;
3002 		case LPFC_TRAILER_CODE_FCOE:
3003 			lpfc_sli4_async_fcoe_evt(phba,
3004 						 &cq_event->cqe.acqe_fcoe);
3005 			break;
3006 		case LPFC_TRAILER_CODE_DCBX:
3007 			lpfc_sli4_async_dcbx_evt(phba,
3008 						 &cq_event->cqe.acqe_dcbx);
3009 			break;
3010 		default:
3011 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3012 					"1804 Invalid asynchronous event code: "
3013 					"x%x\n", bf_get(lpfc_trailer_code,
3014 					&cq_event->cqe.mcqe_cmpl));
3015 			break;
3016 		}
3017 		/* Free the completion event processed to the free pool */
3018 		lpfc_sli4_cq_event_release(phba, cq_event);
3019 	}
3020 }
3021
3022 /**
3023  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3024  * @phba: pointer to lpfc hba data structure.
3025  * @dev_grp: The HBA PCI-Device group number.
3026  *
3027  * This routine is invoked to set up the per HBA PCI-Device group function
3028  * API jump table entries.
3029  *
3030  * Return: 0 if success, otherwise -ENODEV
3031  **/
3032 int
3033 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3034 {
3035 	int rc;
3036
3037 	/* Set up lpfc PCI-device group */
3038 	phba->pci_dev_grp = dev_grp;
3039
3040 	/* The LPFC_PCI_DEV_OC uses SLI4 */
3041 	if (dev_grp == LPFC_PCI_DEV_OC)
3042 		phba->sli_rev = LPFC_SLI_REV4;
3043
3044 	/* Set up device INIT API function jump table */
3045 	rc = lpfc_init_api_table_setup(phba, dev_grp);
3046 	if (rc)
3047 		return -ENODEV;
3048 	/* Set up SCSI API function jump table */
3049 	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3050 	if (rc)
3051 		return -ENODEV;
3052 	/* Set up SLI API function jump table */
3053 	rc = lpfc_sli_api_table_setup(phba, dev_grp);
3054 	if (rc)
3055 		return -ENODEV;
3056 	/* Set up MBOX API function jump table */
3057 	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3058 	if (rc)
3059 		return -ENODEV;
3060
3061 	return 0;
3062 }
3063
3064 /**
3065  * lpfc_log_intr_mode - Log the active interrupt mode
3066  * @phba: pointer to lpfc hba data structure.
3067  * @intr_mode: active interrupt mode adopted.
3068  *
3069  * This routine is invoked to log the currently used active interrupt mode
3070  * to the device.
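 *
 * @intr_mode values 0, 1 and 2 correspond to INTx, MSI and MSI-X
 * respectively; any other value is logged as illegal.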
3071  **/
3072 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3073 {
3074 	switch (intr_mode) {
3075 	case 0:
3076 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3077 				"0470 Enabled INTx interrupt mode.\n");
3078 		break;
3079 	case 1:
3080 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3081 				"0481 Enabled MSI interrupt mode.\n");
3082 		break;
3083 	case 2:
3084 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3085 				"0480 Enabled MSI-X interrupt mode.\n");
3086 		break;
3087 	default:
3088 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3089 				"0482 Illegal interrupt mode.\n");
3090 		break;
3091 	}
3092 	return;
3093 }
3094
3095 /**
3096  * lpfc_enable_pci_dev - Enable a generic PCI device.
3097  * @phba: pointer to lpfc hba data structure.
3098  *
3099  * This routine is invoked to enable the PCI device that is common to all
3100  * PCI devices.
3101  *
3102  * Return codes
3103  *	0 - successful
3104  *	other values - error
3105  **/
3106 static int
3107 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3108 {
3109 	struct pci_dev *pdev;
3110 	int bars;
3111
3112 	/* Obtain PCI device reference */
3113 	if (!phba->pcidev)
3114 		goto out_error;
3115 	else
3116 		pdev = phba->pcidev;
3117 	/* Select PCI BARs */
3118 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3119 	/* Enable PCI device */
3120 	if (pci_enable_device_mem(pdev))
3121 		goto out_error;
3122 	/* Request PCI resource for the device */
3123 	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3124 		goto out_disable_device;
3125 	/* Set up device as PCI master and save state for EEH */
3126 	pci_set_master(pdev);
3127 	pci_try_set_mwi(pdev);
3128 	pci_save_state(pdev);
3129
3130 	return 0;
3131
3132 out_disable_device:
3133 	pci_disable_device(pdev);
3134 out_error:
3135 	return -ENODEV;
3136 }
3137
3138 /**
3139  * lpfc_disable_pci_dev - Disable a generic PCI device.
3140  * @phba: pointer to lpfc hba data structure.
3141  *
3142  * This routine is invoked to disable the PCI device that is common to all
3143  * PCI devices.
3144  **/
3145 static void
3146 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3147 {
3148 	struct pci_dev *pdev;
3149 	int bars;
3150
3151 	/* Obtain PCI device reference */
3152 	if (!phba->pcidev)
3153 		return;
3154 	else
3155 		pdev = phba->pcidev;
3156 	/* Select PCI BARs */
3157 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
3158 	/* Release PCI resource and disable PCI device */
3159 	pci_release_selected_regions(pdev, bars);
3160 	pci_disable_device(pdev);
3161 	/* Null out PCI private reference to driver */
3162 	pci_set_drvdata(pdev, NULL);
3163
3164 	return;
3165 }
3166
3167 /**
3168  * lpfc_reset_hba - Reset a hba
3169  * @phba: pointer to lpfc hba data structure.
3170  *
3171  * This routine is invoked to reset a hba device. It brings the HBA
3172  * offline, performs a board restart, and then brings the board back
3173  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3174  * any outstanding mailbox commands.
3175  **/
3176 void
3177 lpfc_reset_hba(struct lpfc_hba *phba)
3178 {
3179 	/* If resets are disabled then set error state and return. */
3180 	if (!phba->cfg_enable_hba_reset) {
3181 		phba->link_state = LPFC_HBA_ERROR;
3182 		return;
3183 	}
3184 	lpfc_offline_prep(phba);
3185 	lpfc_offline(phba);
3186 	lpfc_sli_brdrestart(phba);
3187 	lpfc_online(phba);
3188 	lpfc_unblock_mgmt_io(phba);
3189 }
3190
3191 /**
3192  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3193  * @phba: pointer to lpfc hba data structure.
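 *
 * The buffer sizing below computes (see the function body):
 *
 *	sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)
 *		+ (cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)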
3194  *
3195  * This routine is invoked to set up the driver internal resources specific to
3196  * support the SLI-3 HBA device it attached to.
3197  *
3198  * Return codes
3199  *	0 - successful
3200  *	other values - error
3201  **/
3202 static int
3203 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3204 {
3205 	struct lpfc_sli *psli;
3206
3207 	/*
3208 	 * Initialize timers used by driver
3209 	 */
3210
3211 	/* Heartbeat timer */
3212 	init_timer(&phba->hb_tmofunc);
3213 	phba->hb_tmofunc.function = lpfc_hb_timeout;
3214 	phba->hb_tmofunc.data = (unsigned long)phba;
3215
3216 	psli = &phba->sli;
3217 	/* MBOX heartbeat timer */
3218 	init_timer(&psli->mbox_tmo);
3219 	psli->mbox_tmo.function = lpfc_mbox_timeout;
3220 	psli->mbox_tmo.data = (unsigned long) phba;
3221 	/* FCP polling mode timer */
3222 	init_timer(&phba->fcp_poll_timer);
3223 	phba->fcp_poll_timer.function = lpfc_poll_timeout;
3224 	phba->fcp_poll_timer.data = (unsigned long) phba;
3225 	/* Fabric block timer */
3226 	init_timer(&phba->fabric_block_timer);
3227 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3228 	phba->fabric_block_timer.data = (unsigned long) phba;
3229 	/* EA polling mode timer */
3230 	init_timer(&phba->eratt_poll);
3231 	phba->eratt_poll.function = lpfc_poll_eratt;
3232 	phba->eratt_poll.data = (unsigned long) phba;
3233
3234 	/* Host attention work mask setup */
3235 	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3236 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3237
3238 	/* Get all the module params for configuring this host */
3239 	lpfc_get_cfgparam(phba);
3240 	/*
3241 	 * Since sg_tablesize is a module parameter, the sg_dma_buf_size
3242 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3243 	 * 2 segments are added since the IOCB needs a command and response bde.
3244 	 */
3245 	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3246 		sizeof(struct fcp_rsp) +
3247 		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3248
3249 	if (phba->cfg_enable_bg) {
3250 		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3251 		phba->cfg_sg_dma_buf_size +=
3252 			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3253 	}
3254
3255 	/* Also reinitialize the host templates with new values. */
3256 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3257 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3258
3259 	phba->max_vpi = LPFC_MAX_VPI;
3260 	/* This will be set to correct value after config_port mbox */
3261 	phba->max_vports = 0;
3262
3263 	/*
3264 	 * Initialize the SLI Layer to run with lpfc HBAs.
3265 	 */
3266 	lpfc_sli_setup(phba);
3267 	lpfc_sli_queue_setup(phba);
3268
3269 	/* Allocate device driver memory */
3270 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3271 		return -ENOMEM;
3272
3273 	return 0;
3274 }
3275
3276 /**
3277  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3278  * @phba: pointer to lpfc hba data structure.
3279  *
3280  * This routine is invoked to unset the driver internal resources set up
3281  * specific for supporting the SLI-3 HBA device it attached to.
3282  **/
3283 static void
3284 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3285 {
3286 	/* Free device driver memory allocated */
3287 	lpfc_mem_free_all(phba);
3288
3289 	return;
3290 }
3291
3292 /**
3293  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3294  * @phba: pointer to lpfc hba data structure.
3295  *
3296  * This routine is invoked to set up the driver internal resources specific to
3297  * support the SLI-4 HBA device it attached to.
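 *
 * Setup order (see the function body): POST status check, timers,
 * config parameters and sgl sizing, then bootstrap mailbox, endian
 * order, READ_CONFIG, function reset, queue creation, and the
 * sgl/rpi bookkeeping arrays.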
3298  *
3299  * Return codes
3300  *	0 - successful
3301  *	other values - error
3302  **/
3303 static int
3304 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3305 {
3306 	struct lpfc_sli *psli;
3307 	int rc;
3308 	int i, hbq_count;
3309
3310 	/* Before proceeding, wait for POST done and device ready */
3311 	rc = lpfc_sli4_post_status_check(phba);
3312 	if (rc)
3313 		return -ENODEV;
3314
3315 	/*
3316 	 * Initialize timers used by driver
3317 	 */
3318
3319 	/* Heartbeat timer */
3320 	init_timer(&phba->hb_tmofunc);
3321 	phba->hb_tmofunc.function = lpfc_hb_timeout;
3322 	phba->hb_tmofunc.data = (unsigned long)phba;
3323
3324 	psli = &phba->sli;
3325 	/* MBOX heartbeat timer */
3326 	init_timer(&psli->mbox_tmo);
3327 	psli->mbox_tmo.function = lpfc_mbox_timeout;
3328 	psli->mbox_tmo.data = (unsigned long) phba;
3329 	/* Fabric block timer */
3330 	init_timer(&phba->fabric_block_timer);
3331 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3332 	phba->fabric_block_timer.data = (unsigned long) phba;
3333 	/* EA polling mode timer */
3334 	init_timer(&phba->eratt_poll);
3335 	phba->eratt_poll.function = lpfc_poll_eratt;
3336 	phba->eratt_poll.data = (unsigned long) phba;
3337 	/*
3338 	 * We need to do a READ_CONFIG mailbox command here before
3339 	 * calling lpfc_get_cfgparam. For VFs this will report the
3340 	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3341 	 * All of the resources allocated
3342 	 * for this Port are tied to these values.
3343 	 */
3344 	/* Get all the module params for configuring this host */
3345 	lpfc_get_cfgparam(phba);
3346 	phba->max_vpi = LPFC_MAX_VPI;
3347 	/* This will be set to correct value after the read_config mbox */
3348 	phba->max_vports = 0;
3349
3350 	/* Program the default value of vlan_id and fc_map */
3351 	phba->valid_vlan = 0;
3352 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3353 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3354 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3355
3356 	/*
3357 	 * Since sg_tablesize is a module parameter, the sg_dma_buf_size
3358 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
3359 	 * 2 segments are added since the IOCB needs a command and response bde.
3360 	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3361 	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3362 	 * Table of sgl sizes and seg_cnt:
3363 	 * sgl size,	sg_seg_cnt	total seg
3364 	 * 1k		50		52
3365 	 * 2k		114		116
3366 	 * 4k		242		244
3367 	 * 8k		498		500
3368 	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3369 	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3370 	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3371 	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3372 	 */
3373 	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3374 		phba->cfg_sg_seg_cnt = 50;
3375 	else if (phba->cfg_sg_seg_cnt <= 114)
3376 		phba->cfg_sg_seg_cnt = 114;
3377 	else if (phba->cfg_sg_seg_cnt <= 242)
3378 		phba->cfg_sg_seg_cnt = 242;
3379 	else
3380 		phba->cfg_sg_seg_cnt = 498;
3381
3382 	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3383 					+ sizeof(struct fcp_rsp);
3384 	phba->cfg_sg_dma_buf_size +=
3385 		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3386
3387 	/* Initialize buffer queue management fields */
3388 	hbq_count = lpfc_sli_hbq_count();
3389 	for (i = 0; i < hbq_count; ++i)
3390 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3391 	INIT_LIST_HEAD(&phba->rb_pend_list);
3392 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3393 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3394
3395 	/*
3396 	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3397 	 */
3398 	/* Initialize the Abort scsi buffer list used by driver */
3399 	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3400 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3401 	/* This abort list used by worker thread */
3402 	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3403
3404 	/*
3405 	 * Initialize driver internal slow-path work queues
3406 	 */
3407
3408 	/* Driver internal slow-path CQ Event pool */
3409 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3410 	/* Response IOCB work queue list */
3411 	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3412 	/* Asynchronous event CQ Event work queue list */
3413 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3414 	/* Fast-path XRI aborted CQ Event work queue list */
3415 	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3416 	/* Slow-path XRI aborted CQ Event work queue list */
3417 	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3418 	/* Receive queue CQ Event work queue list */
3419 	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3420
3421 	/* Initialize the driver internal SLI layer lists. */
3422 	lpfc_sli_setup(phba);
3423 	lpfc_sli_queue_setup(phba);
3424
3425 	/* Allocate device driver memory */
3426 	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3427 	if (rc)
3428 		return -ENOMEM;
3429
3430 	/* Create the bootstrap mailbox command */
3431 	rc = lpfc_create_bootstrap_mbox(phba);
3432 	if (unlikely(rc))
3433 		goto out_free_mem;
3434
3435 	/* Set up the host's endian order with the device. */
3436 	rc = lpfc_setup_endian_order(phba);
3437 	if (unlikely(rc))
3438 		goto out_free_bsmbx;
3439
3440 	/* Set up the hba's configuration parameters.
*/ 3441 rc = lpfc_sli4_read_config(phba); 3442 if (unlikely(rc)) 3443 goto out_free_bsmbx; 3444 3445 /* Perform a function reset */ 3446 rc = lpfc_pci_function_reset(phba); 3447 if (unlikely(rc)) 3448 goto out_free_bsmbx; 3449 3450 /* Create all the SLI4 queues */ 3451 rc = lpfc_sli4_queue_create(phba); 3452 if (rc) 3453 goto out_free_bsmbx; 3454 3455 /* Create driver internal CQE event pool */ 3456 rc = lpfc_sli4_cq_event_pool_create(phba); 3457 if (rc) 3458 goto out_destroy_queue; 3459 3460 /* Initialize and populate the iocb list per host */ 3461 rc = lpfc_init_sgl_list(phba); 3462 if (rc) { 3463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3464 "1400 Failed to initialize sgl list.\n"); 3465 goto out_destroy_cq_event_pool; 3466 } 3467 rc = lpfc_init_active_sgl_array(phba); 3468 if (rc) { 3469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3470 "1430 Failed to initialize sgl list.\n"); 3471 goto out_free_sgl_list; 3472 } 3473 3474 rc = lpfc_sli4_init_rpi_hdrs(phba); 3475 if (rc) { 3476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3477 "1432 Failed to initialize rpi headers.\n"); 3478 goto out_free_active_sgl; 3479 } 3480 3481 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 3482 phba->cfg_fcp_eq_count), GFP_KERNEL); 3483 if (!phba->sli4_hba.fcp_eq_hdl) { 3484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3485 "2572 Failed allocate memory for fast-path " 3486 "per-EQ handle array\n"); 3487 goto out_remove_rpi_hdrs; 3488 } 3489 3490 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 3491 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 3492 if (!phba->sli4_hba.msix_entries) { 3493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3494 "2573 Failed allocate memory for msi-x " 3495 "interrupt vector entries\n"); 3496 goto out_free_fcp_eq_hdl; 3497 } 3498 3499 return rc; 3500 3501 out_free_fcp_eq_hdl: 3502 kfree(phba->sli4_hba.fcp_eq_hdl); 3503 out_remove_rpi_hdrs: 3504 lpfc_sli4_remove_rpi_hdrs(phba); 3505 out_free_active_sgl: 3506 lpfc_free_active_sgl(phba); 3507 out_free_sgl_list: 3508 lpfc_free_sgl_list(phba); 3509 out_destroy_cq_event_pool: 3510 lpfc_sli4_cq_event_pool_destroy(phba); 3511 out_destroy_queue: 3512 lpfc_sli4_queue_destroy(phba); 3513 out_free_bsmbx: 3514 lpfc_destroy_bootstrap_mbox(phba); 3515 out_free_mem: 3516 lpfc_mem_free(phba); 3517 return rc; 3518 } 3519 3520 /** 3521 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 3522 * @phba: pointer to lpfc hba data structure. 3523 * 3524 * This routine is invoked to unset the driver internal resources set up 3525 * specific for supporting the SLI-4 HBA device it attached to. 3526 **/ 3527 static void 3528 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 3529 { 3530 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 3531 3532 /* unregister default FCFI from the HBA */ 3533 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); 3534 3535 /* Free the default FCR table */ 3536 lpfc_sli_remove_dflt_fcf(phba); 3537 3538 /* Free memory allocated for msi-x interrupt vector entries */ 3539 kfree(phba->sli4_hba.msix_entries); 3540 3541 /* Free memory allocated for fast-path work queue handles */ 3542 kfree(phba->sli4_hba.fcp_eq_hdl); 3543 3544 /* Free the allocated rpi headers. 
	 */
3545 	lpfc_sli4_remove_rpi_hdrs(phba);
3546
3547 	/* Free the ELS sgl list */
3548 	lpfc_free_active_sgl(phba);
3549 	lpfc_free_sgl_list(phba);
3550
3551 	/* Free the SCSI sgl management array */
3552 	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3553
3554 	/* Free the SLI4 queues */
3555 	lpfc_sli4_queue_destroy(phba);
3556
3557 	/* Free the completion queue EQ event pool */
3558 	lpfc_sli4_cq_event_release_all(phba);
3559 	lpfc_sli4_cq_event_pool_destroy(phba);
3560
3561 	/* Reset SLI4 HBA FCoE function */
3562 	lpfc_pci_function_reset(phba);
3563
3564 	/* Free the bsmbx region. */
3565 	lpfc_destroy_bootstrap_mbox(phba);
3566
3567 	/* Free the SLI Layer memory with SLI4 HBAs */
3568 	lpfc_mem_free_all(phba);
3569
3570 	/* Free the current connect table */
3571 	list_for_each_entry_safe(conn_entry, next_conn_entry,
3572 				 &phba->fcf_conn_rec_list, list)
3573 		kfree(conn_entry);
3574
3575 	return;
3576 }
3577
3578 /**
3579  * lpfc_init_api_table_setup - Set up init api function jump table
3580  * @phba: The hba struct for which this call is being executed.
3581  * @dev_grp: The HBA PCI-Device group number.
3582  *
3583  * This routine sets up the device INIT interface API function jump table
3584  * in @phba struct.
3585  *
3586  * Returns: 0 - success, -ENODEV - failure.
3587  **/
3588 int
3589 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3590 {
3591 	switch (dev_grp) {
3592 	case LPFC_PCI_DEV_LP:
3593 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3594 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3595 		phba->lpfc_stop_port = lpfc_stop_port_s3;
3596 		break;
3597 	case LPFC_PCI_DEV_OC:
3598 		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3599 		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3600 		phba->lpfc_stop_port = lpfc_stop_port_s4;
3601 		break;
3602 	default:
3603 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3604 				"1431 Invalid HBA PCI-device group: 0x%x\n",
3605 				dev_grp);
3606 		return -ENODEV;
3607 		break;
3608 	}
3609 	return 0;
3610 }
3611
3612 /**
3613  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3614  * @phba: pointer to lpfc hba data structure.
3615  *
3616  * This routine is invoked to set up the driver internal resources before the
3617  * device specific resource setup to support the HBA device it attached to.
3618  *
3619  * Return codes
3620  *	0 - successful
3621  *	other values - error
3622  **/
3623 static int
3624 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3625 {
3626 	/*
3627 	 * Driver resources common to all SLI revisions
3628 	 */
3629 	atomic_set(&phba->fast_event_count, 0);
3630 	spin_lock_init(&phba->hbalock);
3631
3632 	/* Initialize ndlp management spinlock */
3633 	spin_lock_init(&phba->ndlp_lock);
3634
3635 	INIT_LIST_HEAD(&phba->port_list);
3636 	INIT_LIST_HEAD(&phba->work_list);
3637 	init_waitqueue_head(&phba->wait_4_mlo_m_q);
3638
3639 	/* Initialize the wait queue head for the kernel thread */
3640 	init_waitqueue_head(&phba->work_waitq);
3641
3642 	/* Initialize the scsi buffer list used by driver for scsi IO */
3643 	spin_lock_init(&phba->scsi_buf_list_lock);
3644 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3645
3646 	/* Initialize the fabric iocb list */
3647 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
3648
3649 	/* Initialize list to save ELS buffers */
3650 	INIT_LIST_HEAD(&phba->elsbuf);
3651
3652 	/* Initialize FCF connection rec list */
3653 	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3654
3655 	return 0;
3656 }
3657
3658 /**
3659  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it is attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	/* Initialize the scsi buffer list used by driver for scsi IO */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	return 0;
}

/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it is attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it is
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
static void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCBs to allocate.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host.  */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, LPFC_IOCB_LIST_CNT);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
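
/*
 * Note (illustrative): the teardown above must use
 * list_for_each_entry_safe() because each entry is freed while the list
 * is being walked; the _safe variant caches the next pointer before the
 * current node is deleted:
 *
 *	list_for_each_entry_safe(pos, n, head, list) {
 *		list_del(&pos->list);
 *		kfree(pos);	// 'n' still points at the next entry
 *	}
 */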
3780 **/ 3781 static void 3782 lpfc_free_sgl_list(struct lpfc_hba *phba) 3783 { 3784 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 3785 LIST_HEAD(sglq_list); 3786 int rc = 0; 3787 3788 spin_lock_irq(&phba->hbalock); 3789 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 3790 spin_unlock_irq(&phba->hbalock); 3791 3792 list_for_each_entry_safe(sglq_entry, sglq_next, 3793 &sglq_list, list) { 3794 list_del(&sglq_entry->list); 3795 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 3796 kfree(sglq_entry); 3797 phba->sli4_hba.total_sglq_bufs--; 3798 } 3799 rc = lpfc_sli4_remove_all_sgl_pages(phba); 3800 if (rc) { 3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3802 "2005 Unable to deregister pages from HBA: %x", rc); 3803 } 3804 kfree(phba->sli4_hba.lpfc_els_sgl_array); 3805 } 3806 3807 /** 3808 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 3809 * @phba: pointer to lpfc hba data structure. 3810 * 3811 * This routine is invoked to allocate the driver's active sgl memory. 3812 * This array will hold the sglq_entry's for active IOs. 3813 **/ 3814 static int 3815 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 3816 { 3817 int size; 3818 size = sizeof(struct lpfc_sglq *); 3819 size *= phba->sli4_hba.max_cfg_param.max_xri; 3820 3821 phba->sli4_hba.lpfc_sglq_active_list = 3822 kzalloc(size, GFP_KERNEL); 3823 if (!phba->sli4_hba.lpfc_sglq_active_list) 3824 return -ENOMEM; 3825 return 0; 3826 } 3827 3828 /** 3829 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 3830 * @phba: pointer to lpfc hba data structure. 3831 * 3832 * This routine is invoked to walk through the array of active sglq entries 3833 * and free all of the resources. 3834 * This is just a place holder for now. 3835 **/ 3836 static void 3837 lpfc_free_active_sgl(struct lpfc_hba *phba) 3838 { 3839 kfree(phba->sli4_hba.lpfc_sglq_active_list); 3840 } 3841 3842 /** 3843 * lpfc_init_sgl_list - Allocate and initialize sgl list. 3844 * @phba: pointer to lpfc hba data structure. 3845 * 3846 * This routine is invoked to allocate and initizlize the driver's sgl 3847 * list and set up the sgl xritag tag array accordingly. 3848 * 3849 * Return codes 3850 * 0 - sucessful 3851 * other values - error 3852 **/ 3853 static int 3854 lpfc_init_sgl_list(struct lpfc_hba *phba) 3855 { 3856 struct lpfc_sglq *sglq_entry = NULL; 3857 int i; 3858 int els_xri_cnt; 3859 3860 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3862 "2400 lpfc_init_sgl_list els %d.\n", 3863 els_xri_cnt); 3864 /* Initialize and populate the sglq list per host/VF. 

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	int i;
	int els_xri_cnt;

	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2400 lpfc_init_sgl_list els %d.\n",
			els_xri_cnt);
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Sanity check on XRI management */
	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2562 No room left for SCSI XRI allocation: "
				"max_xri=%d, els_xri=%d\n",
				phba->sli4_hba.max_cfg_param.max_xri,
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Allocate memory for the ELS XRI management array */
	phba->sli4_hba.lpfc_els_sgl_array =
			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
				GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_els_sgl_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2401 Failed to allocate memory for ELS "
				"XRI management array of size %d.\n",
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Keep the SCSI XRI into the XRI management array */
	phba->sli4_hba.scsi_xri_max =
			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.scsi_xri_cnt = 0;

	phba->sli4_hba.lpfc_scsi_psb_array =
			kzalloc((sizeof(struct lpfc_scsi_buf *) *
				phba->sli4_hba.scsi_xri_max), GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2563 Failed to allocate memory for SCSI "
				"XRI management array of size %d.\n",
				phba->sli4_hba.scsi_xri_max);
		kfree(phba->sli4_hba.lpfc_els_sgl_array);
		return -ENOMEM;
	}

	for (i = 0; i < els_xri_cnt; i++) {
		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
		if (sglq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d sgls of "
				"expected %d count. Unloading driver.\n",
				__func__, i, els_xri_cnt);
			goto out_free_mem;
		}

		sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (sglq_entry->sli4_xritag == NO_XRI) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate XRI. "
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->buff_type = GEN_BUFF_TYPE;
		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
		if (sglq_entry->virt == NULL) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate mbuf. "
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->sgl = sglq_entry->virt;
		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);

		/* The list order is used by later block SGL registration */
		spin_lock_irq(&phba->hbalock);
		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
		phba->sli4_hba.total_sglq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}
	return 0;

out_free_mem:
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
	lpfc_free_sgl_list(phba);
	return -ENOMEM;
}
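
/*
 * Worked example of the XRI split above (numbers hypothetical): with
 * max_xri = 1024 reported by READ_CONFIG and els_xri_cnt = 64, the
 * first 64 XRIs back ELS sgls and the remainder go to SCSI:
 *
 *	scsi_xri_max = 1024 - 64 = 960
 *
 * so lpfc_scsi_psb_array is sized to hold 960 buffer pointers.
 */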

/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers.
 * No locks are held here because this is an initialization routine
 * called only from probe or lpfc_online when interrupts are not
 * enabled and the driver is reinitializing the device.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	int longs;
	uint16_t rpi_count;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * Provision an rpi bitmask range for discovery. The total count
	 * is the difference between max and base + 1.
	 */
	rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
		    phba->sli4_hba.max_cfg_param.max_rpi - 1;

	longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
					   GFP_KERNEL);
	if (!phba->sli4_hba.rpi_bmask)
		return -ENOMEM;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
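
/*
 * Sizing note (illustrative): the bitmap above needs one bit per rpi,
 * rounded up to whole longs; the expression is the usual
 * DIV_ROUND_UP(rpi_count, BITS_PER_LONG). For example, rpi_count = 200
 * on a 64-bit host yields (200 + 63) / 64 = 4 longs, i.e. 256 bits.
 */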

/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba. This single region
 * provides support for up to 64 rpis. The region is used globally
 * by the device.
 *
 * Returns:
 *	A valid rpi hdr on success.
 *	A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
		    phba->sli4_hba.max_cfg_param.max_rpi - 1;

	spin_lock_irq(&phba->hbalock);
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The port has a limited number of rpis. The increment here
	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
	 * and to allow the full max_rpi range per port.
	 */
	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port. The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);
	rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	/*
	 * The next_rpi stores the next module-64 rpi value to post
	 * in any subsequent rpi memory region postings.
	 */
	phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}

/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis. This routine presumes the caller has released all
 * rpis consumed by fabric or port logins and is prepared to have
 * the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt,
				  rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}

	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
	memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
}

/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1417 Failed to allocate hba struct.\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	kfree(phba);
	return;
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;
	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	int pagecnt = 10;
	if (lpfc_prot_mask && lpfc_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");
		scsi_host_set_prot(shost, lpfc_prot_mask);
		scsi_host_set_guard(shost, lpfc_prot_guard);
	}
	if (!_dump_buf_data) {
		/* Initialize the lock once, not once per loop iteration */
		spin_lock_init(&_dump_buf_lock);
		while (pagecnt) {
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				printk(KERN_ERR "BLKGRD allocated %d pages for "
				       "_dump_buf_data at 0x%p\n",
				       (1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
			       "memory for hexdump\n");
	} else
		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
		       "\n", _dump_buf_data);
	if (!_dump_buf_dif) {
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				printk(KERN_ERR "BLKGRD allocated %d pages for "
				       "_dump_buf_dif at 0x%p\n",
				       (1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
			       "memory for hexdump\n");
	} else
		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
		       _dump_buf_dif);
}
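
/*
 * Note on the fallback loop above (illustrative): the second argument to
 * __get_free_pages() is an allocation *order*, so order 10 requests
 * 2^10 = 1024 contiguous pages (4 MiB with 4 KiB pages). Each failed
 * attempt halves the request by decrementing the order:
 *
 *	order 10 -> 1024 pages, order 9 -> 512 pages, ... order 0 -> 1 page
 *
 * which is why the success message prints (1 << pagecnt) pages.
 */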

/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
			return error;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
					       SLI2_SLIM_SIZE,
					       &phba->slim2p.phys,
					       GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	INIT_LIST_HEAD(&phba->rb_pend_list);

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}
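
/*
 * Example (illustrative only): after lpfc_sli_pci_mem_setup() the SLI-3
 * registers are plain MMIO offsets from the BAR2 mapping, so a register
 * access reduces to readl()/writel() on the cached addresses, e.g.:
 *
 *	uint32_t ha = readl(phba->HAregaddr);	// host attention
 *	writel(CA_MBATT, phba->CAregaddr);	// ring chip attention
 *
 * (CA_MBATT shown for illustration; see lpfc_hw.h for the actual bit
 * definitions.)
 */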

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
	uint32_t onlnreg0, onlnreg1;
	int i, port_error = -ENODEV;

	if (!phba->sli4_hba.STAregaddr)
		return -ENODEV;

	/* With unrecoverable error, log the error message and return error */
	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1422 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"online0_reg=0x%x, online1_reg=0x%x\n",
					uerrlo_reg.word0, uerrhi_reg.word0,
					onlnreg0, onlnreg1);
		}
		return -ENODEV;
	}

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
		/* Encounter fatal POST error, break out */
		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_ARMFW_READY ==
		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
			port_error = 0;
			break;
		}
		msleep(10);
	}

	if (port_error)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Failure HBA POST Status: sta_reg=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
			bf_get(lpfc_hst_state_perr, &sta_reg),
			bf_get(lpfc_hst_state_sfi, &sta_reg),
			bf_get(lpfc_hst_state_nip, &sta_reg),
			bf_get(lpfc_hst_state_ipc, &sta_reg),
			bf_get(lpfc_hst_state_xrom, &sta_reg),
			bf_get(lpfc_hst_state_dl, &sta_reg),
			bf_get(lpfc_hst_state_port_status, &sta_reg));

	/* Log device information */
	scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
			"FeatureL1=0x%x, FeatureL2=0x%x\n",
			bf_get(lpfc_scratchpad_chiptype, &scratchpad),
			bf_get(lpfc_scratchpad_slirev, &scratchpad),
			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));

	return port_error;
}

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_UERR_STATUS_LO;
	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_UERR_STATUS_HI;
	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_ONLINE0;
	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_ONLINE1;
	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
					LPFC_SCRATCHPAD;
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_STATE;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
				    LPFC_HST_ISCR0;
	return;
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
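
/*
 * Illustrative layout (offsets hypothetical): each virtual function owns
 * one doorbell page of size LPFC_VFR_PAGE_SIZE inside BAR2, so for a
 * page size of 0x1000 the RQ doorbell of VF 2 would live at
 *
 *	drbl_regs_memmap_p + 2 * 0x1000 + LPFC_RQ_DOORBELL
 *
 * i.e. the same register set repeats once per VF page.
 */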

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  bmbx_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, bmbx_size);

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later. The mailbox dma address is required
	 * to be 16-byte aligned. Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now. The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses. The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
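
/*
 * Worked example of the address split above (address hypothetical): for
 * a 16-byte aligned aphys of 0x0000001234567890, the two doorbell words
 * carry bits 63:34 and 33:4 of the address:
 *
 *	addr_hi payload = (aphys >> 34) & 0x3fffffff
 *	addr_lo payload = (aphys >>  4) & 0x3fffffff
 *
 * Each payload is shifted left by 2 and tagged with the BIT1 marker so
 * the port can tell the high word from the low word; the address bits
 * below bit 4 are guaranteed zero by the 16-byte alignment.
 */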

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	uint32_t rc = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.fcfi_base =
			bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
		phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI(B:%d M:%d)\n",
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.fcfi_base,
				phba->sli4_hba.max_cfg_param.max_fcfi);
	}
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
		phba->cfg_hba_queue_depth =
			phba->sli4_hba.max_cfg_param.max_xri;
	return rc;
}

/**
 * lpfc_setup_endian_order - Notify the port of the host's endian order.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the host-side endian order to the
 * HBA consistent with the SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0492 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/*
	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
	 * words to contain special data values and no other data.
	 */
	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
				"status x%x\n",
				rc);
		rc = -EIO;
	}

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
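
/*
 * Note (illustrative, not from the SLI-4 spec text): the two words
 * above form a fixed byte pattern that the host writes as 32-bit values
 * in its native byte order; a big-endian host would present the same
 * bytes to the port in the reverse order of a little-endian host, which
 * is what lets the port deduce the host's endianness. The actual
 * pattern values are HOST_ENDIAN_LOW_WORD0/HOST_ENDIAN_HIGH_WORD1 in
 * lpfc_hw4.h.
 */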

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int cfg_fcp_wq_count;
	int cfg_fcp_eq_count;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on FCP fast-path WQ parameters */
	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
	if (cfg_fcp_wq_count >
	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
				   LPFC_SP_WQN_DEF;
		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2581 Not enough WQs (%d) from "
					"the pci function for supporting "
					"FCP WQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->cfg_fcp_wq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2582 Not enough WQs (%d) from the pci "
				"function for supporting the requested "
				"FCP WQs (%d), the actual FCP WQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
	}
	/* The actual number of FCP work queues adopted */
	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;

	/* Sanity check on FCP fast-path EQ parameters */
	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
	if (cfg_fcp_eq_count >
	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
				   LPFC_SP_EQN_DEF;
		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2574 Not enough EQs (%d) from the "
					"pci function for supporting FCP "
					"EQs (%d)\n",
					phba->sli4_hba.max_cfg_param.max_eq,
					phba->cfg_fcp_eq_count);
			goto out_error;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2575 Not enough EQs (%d) from the pci "
				"function for supporting the requested "
				"FCP EQs (%d), the actual FCP EQs can "
				"be supported: %d\n",
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
	}
	/* It does not make sense to have more EQs than WQs */
	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2593 The number of FCP EQs (%d) is more "
				"than the number of FCP WQs (%d), take "
				"the number of FCP EQs same as than of "
				"WQs (%d)\n", cfg_fcp_eq_count,
				phba->cfg_fcp_wq_count,
				phba->cfg_fcp_wq_count);
		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
	}
	/* The actual number of FCP event queues adopted */
	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
	/* The overall number of event queues used */
	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
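
	/*
	 * Worked example of the clamping above (numbers hypothetical):
	 * with max_wq = 36 reported by READ_CONFIG, LPFC_SP_WQN_DEF
	 * slow-path WQs reserved (say 4) and 64 FCP WQs requested, the
	 * request is clamped to 36 - 4 = 32 fast-path WQs. The EQ count
	 * is then capped at the WQ count, since an EQ with no WQ to feed
	 * it would never see completions.
	 */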

	/*
	 * Create Event Queues (EQs)
	 */

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Create slow path event queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0496 Failed allocate slow-path EQ\n");
		goto out_error;
	}
	phba->sli4_hba.sp_eq = qdesc;

	/* Create fast-path FCP Event Queue(s) */
	phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
			       phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2576 Failed allocate memory for fast-path "
				"EQ record array\n");
		goto out_free_sp_eq;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate fast-path EQ\n");
			goto out_free_fp_eq;
		}
		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
	}

	/*
	 * Create Complete Queues (CQs)
	 */

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_free_fp_eq;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_free_mbx_cq;
	}
	phba->sli4_hba.els_cq = qdesc;

	/* Create slow-path Unsolicited Receive Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0502 Failed allocate slow-path USOL RX CQ\n");
		goto out_free_els_cq;
	}
	phba->sli4_hba.rxq_cq = qdesc;

	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_eq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2577 Failed allocate memory for fast-path "
				"CQ record array\n");
		goto out_free_rxq_cq;
	}
	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0499 Failed allocate fast-path FCP "
					"CQ (%d)\n", fcp_cqidx);
			goto out_free_fcp_cq;
		}
		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
	}

	/* Create Mailbox Command Queue */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_free_fcp_cq;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create all the Work Queues (WQs)
	 */
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_free_mbx_wq;
	}
	phba->sli4_hba.els_wq = qdesc;

	/* Create fast-path FCP Work Queue(s) */
	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
				phba->cfg_fcp_wq_count), GFP_KERNEL);
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2578 Failed allocate memory for fast-path "
				"WQ record array\n");
		goto out_free_els_wq;
	}
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0503 Failed allocate fast-path FCP "
					"WQ (%d)\n", fcp_wqidx);
			goto out_free_fcp_wq;
		}
		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
	}

	/*
	 * Create Receive Queue (RQ)
	 */
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_free_fcp_wq;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_free_hdr_rq;
	}
	phba->sli4_hba.dat_rq = qdesc;

	return 0;

out_free_hdr_rq:
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
out_free_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_wq);
out_free_els_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;
out_free_mbx_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;
out_free_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_cq);
out_free_rxq_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
	phba->sli4_hba.rxq_cq = NULL;
out_free_els_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;
out_free_mbx_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;
out_free_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
	}
	kfree(phba->sli4_hba.fp_eq);
out_free_sp_eq:
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
out_error:
	return -ENOMEM;
}
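
/*
 * Note on the unwind labels above (illustrative): partially-filled queue
 * arrays are released with a reverse-index loop so only the entries that
 * were actually allocated get freed. The idiom is:
 *
 *	for (i = 0; i < n; i++)
 *		if (!(q[i] = alloc()))
 *			goto out_free;
 *	...
 *	out_free:
 *		for (--i; i >= 0; i--)	// skip q[i] itself: it failed
 *			free(q[i]);
 */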

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 **/
static void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	int fcp_qidx;

	/* Release mailbox command work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
	phba->sli4_hba.mbx_wq = NULL;

	/* Release ELS work queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;

	/* Release FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;

	/* Release unsolicited receive queue */
	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
	phba->sli4_hba.hdr_rq = NULL;
	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
	phba->sli4_hba.dat_rq = NULL;

	/* Release unsolicited receive complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
	phba->sli4_hba.rxq_cq = NULL;

	/* Release ELS complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;

	/* Release mailbox command complete queue */
	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
	phba->sli4_hba.mbx_cq = NULL;

	/* Release FCP response complete queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;

	/* Release fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;

	/* Release slow-path event queue */
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;

	return;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	ENOMEM - No available memory
 *	EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	int rc = -ENOMEM;
	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
	int fcp_cq_index = 0;

	/*
	 * Set up Event Queues (EQs)
	 */

	/* Set up slow-path event queue */
	if (!phba->sli4_hba.sp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0520 Slow-path EQ not allocated\n");
		goto out_error;
	}
	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
			    LPFC_SP_DEF_IMAX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0521 Failed setup of slow-path EQ: "
				"rc = 0x%x\n", rc);
		goto out_error;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2583 Slow-path EQ setup: queue-id=%d\n",
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path event queue */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", fcp_eqidx);
			goto out_destroy_fp_eq;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
			goto out_destroy_fp_eq;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 Fast-path EQ setup: "
				"queue[%d]-id=%d\n", fcp_eqidx,
				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
	}

	/*
	 * Set up Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX Complete Queue as the first CQ */
	if (!phba->sli4_hba.mbx_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 Mailbox CQ not allocated\n");
		goto out_destroy_fp_eq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
			    LPFC_MCQ, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of slow-path mailbox CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_fp_eq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.mbx_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path ELS Complete Queue */
	if (!phba->sli4_hba.els_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS CQ not allocated\n");
		goto out_destroy_mbx_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
			    LPFC_WCQ, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0531 Failed setup of slow-path ELS CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_mbx_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.els_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up slow-path Unsolicited Receive Complete Queue */
	if (!phba->sli4_hba.rxq_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0532 USOL RX CQ not allocated\n");
		goto out_destroy_els_cq;
	}
	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
			    LPFC_RCQ, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0533 Failed setup of slow-path USOL RX CQ: "
				"rc = 0x%x\n", rc);
		goto out_destroy_els_cq;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
			phba->sli4_hba.rxq_cq->queue_id,
			phba->sli4_hba.sp_eq->queue_id);
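
	/*
	 * At this point the slow-path half of the queue hierarchy is
	 * live. Illustrative parenting (every CQ is bound to an EQ,
	 * every WQ/MQ/RQ to a CQ):
	 *
	 *	sp_eq ---+-- mbx_cq --- mbx_wq (MQ)
	 *	         +-- els_cq --- els_wq
	 *	         +-- rxq_cq --- hdr_rq/dat_rq
	 *	fp_eq[i] --- fcp_cq[i] --- fcp_wq[j] (round-robin over i)
	 */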
eq-id=%d\n", 5375 phba->sli4_hba.rxq_cq->queue_id, 5376 phba->sli4_hba.sp_eq->queue_id); 5377 5378 /* Set up fast-path FCP Response Complete Queue */ 5379 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5380 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 5381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5382 "0526 Fast-path FCP CQ (%d) not " 5383 "allocated\n", fcp_cqidx); 5384 goto out_destroy_fcp_cq; 5385 } 5386 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 5387 phba->sli4_hba.fp_eq[fcp_cqidx], 5388 LPFC_WCQ, LPFC_FCP); 5389 if (rc) { 5390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5391 "0527 Failed setup of fast-path FCP " 5392 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 5393 goto out_destroy_fcp_cq; 5394 } 5395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5396 "2588 FCP CQ setup: cq[%d]-id=%d, " 5397 "parent eq[%d]-id=%d\n", 5398 fcp_cqidx, 5399 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 5400 fcp_cqidx, 5401 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 5402 } 5403 5404 /* 5405 * Set up all the Work Queues (WQs) 5406 */ 5407 5408 /* Set up Mailbox Command Queue */ 5409 if (!phba->sli4_hba.mbx_wq) { 5410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5411 "0538 Slow-path MQ not allocated\n"); 5412 goto out_destroy_fcp_cq; 5413 } 5414 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 5415 phba->sli4_hba.mbx_cq, LPFC_MBOX); 5416 if (rc) { 5417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5418 "0539 Failed setup of slow-path MQ: " 5419 "rc = 0x%x\n", rc); 5420 goto out_destroy_fcp_cq; 5421 } 5422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5423 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 5424 phba->sli4_hba.mbx_wq->queue_id, 5425 phba->sli4_hba.mbx_cq->queue_id); 5426 5427 /* Set up slow-path ELS Work Queue */ 5428 if (!phba->sli4_hba.els_wq) { 5429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5430 "0536 Slow-path ELS WQ not allocated\n"); 5431 goto out_destroy_mbx_wq; 5432 } 5433 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 5434 phba->sli4_hba.els_cq, LPFC_ELS); 5435 if (rc) { 5436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5437 "0537 Failed setup of slow-path ELS WQ: " 5438 "rc = 0x%x\n", rc); 5439 goto out_destroy_mbx_wq; 5440 } 5441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5442 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 5443 phba->sli4_hba.els_wq->queue_id, 5444 phba->sli4_hba.els_cq->queue_id); 5445 5446 /* Set up fast-path FCP Work Queue */ 5447 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5448 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5450 "0534 Fast-path FCP WQ (%d) not " 5451 "allocated\n", fcp_wqidx); 5452 goto out_destroy_fcp_wq; 5453 } 5454 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 5455 phba->sli4_hba.fcp_cq[fcp_cq_index], 5456 LPFC_FCP); 5457 if (rc) { 5458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5459 "0535 Failed setup of fast-path FCP " 5460 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 5461 goto out_destroy_fcp_wq; 5462 } 5463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5464 "2591 FCP WQ setup: wq[%d]-id=%d, " 5465 "parent cq[%d]-id=%d\n", 5466 fcp_wqidx, 5467 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 5468 fcp_cq_index, 5469 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 5470 /* Round robin FCP Work Queue's Completion Queue assignment */ 5471 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 5472 } 5473 5474 /* 5475 * Create Receive Queue (RQ) 5476 */ 5477 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 5478 lpfc_printf_log(phba, KERN_ERR, 
LOG_INIT,
5479 "0540 Receive Queue not allocated\n");
5480 goto out_destroy_fcp_wq;
5481 }
5482 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5483 phba->sli4_hba.rxq_cq, LPFC_USOL);
5484 if (rc) {
5485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5486 "0541 Failed setup of Receive Queue: "
5487 "rc = 0x%x\n", rc);
5488 goto out_destroy_fcp_wq;
5489 }
5490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5491 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5492 "parent cq-id=%d\n",
5493 phba->sli4_hba.hdr_rq->queue_id,
5494 phba->sli4_hba.dat_rq->queue_id,
5495 phba->sli4_hba.rxq_cq->queue_id);
5496 return 0;
5497
5498 out_destroy_fcp_wq:
5499 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5500 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5501 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5502 out_destroy_mbx_wq:
5503 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5504 out_destroy_fcp_cq:
5505 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5506 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5507 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5508 out_destroy_els_cq:
5509 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5510 out_destroy_mbx_cq:
5511 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5512 out_destroy_fp_eq:
5513 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5514 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5515 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5516 out_error:
5517 return rc;
5518 }
5519
5520 /**
5521 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5522 * @phba: pointer to lpfc hba data structure.
5523 *
5524 * This routine is invoked to tear down all the SLI4 queues previously set
5525 * up for the FCoE HBA operation. It is the inverse of
5526 * lpfc_sli4_queue_setup().
5527 *
5528 * This routine returns no value; the queues are torn down on the port
5529 * unconditionally.
5530 *
5531 **/
5532 void
5533 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5534 {
5535 int fcp_qidx;
5536
5537 /* Unset mailbox command work queue */
5538 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5539 /* Unset ELS work queue */
5540 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5541 /* Unset unsolicited receive queue */
5542 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5543 /* Unset FCP work queue */
5544 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5545 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5546 /* Unset mailbox command complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5548 /* Unset ELS complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5550 /* Unset unsolicited receive complete queue */
5551 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5552 /* Unset FCP response complete queue */
5553 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5554 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5555 /* Unset fast-path event queue */
5556 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5557 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5558 /* Unset slow-path event queue */
5559 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5560 }
5561
5562 /**
5563 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5564 * @phba: pointer to lpfc hba data structure.
5565 *
5566 * This routine is invoked to allocate and set up a pool of completion queue
5567 * events. The body of a completion queue event is a completion queue entry
5568 * (CQE).
For now, this pool is used by the interrupt service routine to queue
5569 * the following HBA completion queue events for the worker thread to process:
5570 * - Mailbox asynchronous events
5571 * - Receive queue completion unsolicited events
5572 * Later, this can be used for all the slow-path events.
5573 *
5574 * Return codes
5575 * 0 - successful
5576 * -ENOMEM - No available memory
5577 **/
5578 static int
5579 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5580 {
5581 struct lpfc_cq_event *cq_event;
5582 int i;
5583
5584 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5585 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5586 if (!cq_event)
5587 goto out_pool_create_fail;
5588 list_add_tail(&cq_event->list,
5589 &phba->sli4_hba.sp_cqe_event_pool);
5590 }
5591 return 0;
5592
5593 out_pool_create_fail:
5594 lpfc_sli4_cq_event_pool_destroy(phba);
5595 return -ENOMEM;
5596 }
5597
5598 /**
5599 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5600 * @phba: pointer to lpfc hba data structure.
5601 *
5602 * This routine is invoked to free the pool of completion queue events at
5603 * driver unload time. Note that it is the responsibility of the driver
5604 * cleanup routine to free all the outstanding completion-queue events
5605 * allocated from this pool back into the pool before invoking this routine
5606 * to destroy the pool.
5607 **/
5608 static void
5609 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5610 {
5611 struct lpfc_cq_event *cq_event, *next_cq_event;
5612
5613 list_for_each_entry_safe(cq_event, next_cq_event,
5614 &phba->sli4_hba.sp_cqe_event_pool, list) {
5615 list_del(&cq_event->list);
5616 kfree(cq_event);
5617 }
5618 }
5619
5620 /**
5621 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5622 * @phba: pointer to lpfc hba data structure.
5623 *
5624 * This routine is the lock-free version of the API invoked to allocate a
5625 * completion-queue event from the free pool.
5626 *
5627 * Return: Pointer to the newly allocated completion-queue event if successful
5628 * NULL otherwise.
5629 **/
5630 struct lpfc_cq_event *
5631 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5632 {
5633 struct lpfc_cq_event *cq_event = NULL;
5634
5635 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5636 struct lpfc_cq_event, list);
5637 return cq_event;
5638 }
5639
5640 /**
5641 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5642 * @phba: pointer to lpfc hba data structure.
5643 *
5644 * This routine is the locked version of the API invoked to allocate a
5645 * completion-queue event from the free pool.
5646 *
5647 * Return: Pointer to the newly allocated completion-queue event if successful
5648 * NULL otherwise.
5649 **/
5650 struct lpfc_cq_event *
5651 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5652 {
5653 struct lpfc_cq_event *cq_event;
5654 unsigned long iflags;
5655
5656 spin_lock_irqsave(&phba->hbalock, iflags);
5657 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5658 spin_unlock_irqrestore(&phba->hbalock, iflags);
5659 return cq_event;
5660 }
5661
5662 /**
5663 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5664 * @phba: pointer to lpfc hba data structure.
5665 * @cq_event: pointer to the completion queue event to be freed.
5666 *
5667 * This routine is the lock-free version of the API invoked to release a
5668 * completion-queue event back into the free pool.
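*
* A minimal sketch of the typical locked alloc/release pairing built on
* these helpers (illustrative only):
*
*   cq_event = lpfc_sli4_cq_event_alloc(phba);
*   if (cq_event) {
*           ... fill in the event and queue it for the worker thread ...
*           lpfc_sli4_cq_event_release(phba, cq_event);
*   }
*
* The lock-free __ variants assume the caller already holds
* phba->hbalock, as done by the locked wrappers in this file.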
5669 **/
5670 void
5671 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5672 struct lpfc_cq_event *cq_event)
5673 {
5674 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5675 }
5676
5677 /**
5678 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5679 * @phba: pointer to lpfc hba data structure.
5680 * @cq_event: pointer to the completion queue event to be freed.
5681 *
5682 * This routine is the locked version of the API invoked to release a
5683 * completion-queue event back into the free pool.
5684 **/
5685 void
5686 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5687 struct lpfc_cq_event *cq_event)
5688 {
5689 unsigned long iflags;
5690 spin_lock_irqsave(&phba->hbalock, iflags);
5691 __lpfc_sli4_cq_event_release(phba, cq_event);
5692 spin_unlock_irqrestore(&phba->hbalock, iflags);
5693 }
5694
5695 /**
5696 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5697 * @phba: pointer to lpfc hba data structure.
5698 *
5699 * This routine is invoked to release all the pending completion-queue events
5700 * back into the free pool for device reset.
5701 **/
5702 static void
5703 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5704 {
5705 LIST_HEAD(cqelist);
5706 struct lpfc_cq_event *cqe;
5707 unsigned long iflags;
5708
5709 /* Retrieve all the pending WCQEs from pending WCQE lists */
5710 spin_lock_irqsave(&phba->hbalock, iflags);
5711 /* Pending FCP XRI abort events */
5712 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5713 &cqelist);
5714 /* Pending ELS XRI abort events */
5715 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5716 &cqelist);
5717 /* Pending async events */
5718 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5719 &cqelist);
5720 spin_unlock_irqrestore(&phba->hbalock, iflags);
5721
5722 while (!list_empty(&cqelist)) {
5723 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5724 lpfc_sli4_cq_event_release(phba, cqe);
5725 }
5726 }
5727
5728 /**
5729 * lpfc_pci_function_reset - Reset PCI function.
5730 * @phba: pointer to lpfc hba data structure.
5731 *
5732 * This routine is invoked to request a PCI function reset. It destroys
5733 * all resources assigned to the PCI function that originates this request.
5734 *
5735 * Return codes
5736 * 0 - successful
5737 * -ENOMEM - No available memory
5738 * -ENXIO - The mailbox failed to complete successfully.
5739 **/ 5740 int 5741 lpfc_pci_function_reset(struct lpfc_hba *phba) 5742 { 5743 LPFC_MBOXQ_t *mboxq; 5744 uint32_t rc = 0; 5745 uint32_t shdr_status, shdr_add_status; 5746 union lpfc_sli4_cfg_shdr *shdr; 5747 5748 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5749 if (!mboxq) { 5750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5751 "0494 Unable to allocate memory for issuing " 5752 "SLI_FUNCTION_RESET mailbox command\n"); 5753 return -ENOMEM; 5754 } 5755 5756 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ 5757 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5758 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 5759 LPFC_SLI4_MBX_EMBED); 5760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5761 shdr = (union lpfc_sli4_cfg_shdr *) 5762 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 5763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5765 if (rc != MBX_TIMEOUT) 5766 mempool_free(mboxq, phba->mbox_mem_pool); 5767 if (shdr_status || shdr_add_status || rc) { 5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5769 "0495 SLI_FUNCTION_RESET mailbox failed with " 5770 "status x%x add_status x%x, mbx status x%x\n", 5771 shdr_status, shdr_add_status, rc); 5772 rc = -ENXIO; 5773 } 5774 return rc; 5775 } 5776 5777 /** 5778 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 5779 * @phba: pointer to lpfc hba data structure. 5780 * @cnt: number of nop mailbox commands to send. 5781 * 5782 * This routine is invoked to send a number @cnt of NOP mailbox command and 5783 * wait for each command to complete. 5784 * 5785 * Return: the number of NOP mailbox command completed. 5786 **/ 5787 static int 5788 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 5789 { 5790 LPFC_MBOXQ_t *mboxq; 5791 int length, cmdsent; 5792 uint32_t mbox_tmo; 5793 uint32_t rc = 0; 5794 uint32_t shdr_status, shdr_add_status; 5795 union lpfc_sli4_cfg_shdr *shdr; 5796 5797 if (cnt == 0) { 5798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5799 "2518 Requested to send 0 NOP mailbox cmd\n"); 5800 return cnt; 5801 } 5802 5803 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5804 if (!mboxq) { 5805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5806 "2519 Unable to allocate memory for issuing " 5807 "NOP mailbox command\n"); 5808 return 0; 5809 } 5810 5811 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 5812 length = (sizeof(struct lpfc_mbx_nop) - 5813 sizeof(struct lpfc_sli4_cfg_mhdr)); 5814 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5815 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 5816 5817 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5818 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 5819 if (!phba->sli4_hba.intr_enable) 5820 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5821 else 5822 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 5823 if (rc == MBX_TIMEOUT) 5824 break; 5825 /* Check return status */ 5826 shdr = (union lpfc_sli4_cfg_shdr *) 5827 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 5828 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5829 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 5830 &shdr->response); 5831 if (shdr_status || shdr_add_status || rc) { 5832 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5833 "2520 NOP mailbox command failed " 5834 "status x%x add_status x%x mbx " 5835 "status x%x\n", shdr_status, 5836 shdr_add_status, rc); 5837 break; 5838 } 5839 } 5840 5841 if (rc != MBX_TIMEOUT) 
5842 mempool_free(mboxq, phba->mbox_mem_pool); 5843 5844 return cmdsent; 5845 } 5846 5847 /** 5848 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device 5849 * @phba: pointer to lpfc hba data structure. 5850 * @fcfi: fcf index. 5851 * 5852 * This routine is invoked to unregister a FCFI from device. 5853 **/ 5854 void 5855 lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) 5856 { 5857 LPFC_MBOXQ_t *mbox; 5858 uint32_t mbox_tmo; 5859 int rc; 5860 unsigned long flags; 5861 5862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5863 5864 if (!mbox) 5865 return; 5866 5867 lpfc_unreg_fcfi(mbox, fcfi); 5868 5869 if (!phba->sli4_hba.intr_enable) 5870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5871 else { 5872 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5874 } 5875 if (rc != MBX_TIMEOUT) 5876 mempool_free(mbox, phba->mbox_mem_pool); 5877 if (rc != MBX_SUCCESS) 5878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5879 "2517 Unregister FCFI command failed " 5880 "status %d, mbxStatus x%x\n", rc, 5881 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 5882 else { 5883 spin_lock_irqsave(&phba->hbalock, flags); 5884 /* Mark the FCFI is no longer registered */ 5885 phba->fcf.fcf_flag &= 5886 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); 5887 spin_unlock_irqrestore(&phba->hbalock, flags); 5888 } 5889 } 5890 5891 /** 5892 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 5893 * @phba: pointer to lpfc hba data structure. 5894 * 5895 * This routine is invoked to set up the PCI device memory space for device 5896 * with SLI-4 interface spec. 5897 * 5898 * Return codes 5899 * 0 - sucessful 5900 * other values - error 5901 **/ 5902 static int 5903 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 5904 { 5905 struct pci_dev *pdev; 5906 unsigned long bar0map_len, bar1map_len, bar2map_len; 5907 int error = -ENODEV; 5908 5909 /* Obtain PCI device reference */ 5910 if (!phba->pcidev) 5911 return error; 5912 else 5913 pdev = phba->pcidev; 5914 5915 /* Set the device DMA mask size */ 5916 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 5917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5918 return error; 5919 5920 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 5921 * number of bytes required by each mapping. They are actually 5922 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. 5923 */ 5924 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); 5925 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); 5926 5927 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); 5928 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); 5929 5930 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); 5931 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); 5932 5933 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 5934 phba->sli4_hba.conf_regs_memmap_p = 5935 ioremap(phba->pci_bar0_map, bar0map_len); 5936 if (!phba->sli4_hba.conf_regs_memmap_p) { 5937 dev_printk(KERN_ERR, &pdev->dev, 5938 "ioremap failed for SLI4 PCI config registers.\n"); 5939 goto out; 5940 } 5941 5942 /* Map SLI4 HBA Control Register base to a kernel virtual address. 
*/ 5943 phba->sli4_hba.ctrl_regs_memmap_p = 5944 ioremap(phba->pci_bar1_map, bar1map_len); 5945 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 5946 dev_printk(KERN_ERR, &pdev->dev, 5947 "ioremap failed for SLI4 HBA control registers.\n"); 5948 goto out_iounmap_conf; 5949 } 5950 5951 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ 5952 phba->sli4_hba.drbl_regs_memmap_p = 5953 ioremap(phba->pci_bar2_map, bar2map_len); 5954 if (!phba->sli4_hba.drbl_regs_memmap_p) { 5955 dev_printk(KERN_ERR, &pdev->dev, 5956 "ioremap failed for SLI4 HBA doorbell registers.\n"); 5957 goto out_iounmap_ctrl; 5958 } 5959 5960 /* Set up BAR0 PCI config space register memory map */ 5961 lpfc_sli4_bar0_register_memmap(phba); 5962 5963 /* Set up BAR1 register memory map */ 5964 lpfc_sli4_bar1_register_memmap(phba); 5965 5966 /* Set up BAR2 register memory map */ 5967 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 5968 if (error) 5969 goto out_iounmap_all; 5970 5971 return 0; 5972 5973 out_iounmap_all: 5974 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 5975 out_iounmap_ctrl: 5976 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 5977 out_iounmap_conf: 5978 iounmap(phba->sli4_hba.conf_regs_memmap_p); 5979 out: 5980 return error; 5981 } 5982 5983 /** 5984 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 5985 * @phba: pointer to lpfc hba data structure. 5986 * 5987 * This routine is invoked to unset the PCI device memory space for device 5988 * with SLI-4 interface spec. 5989 **/ 5990 static void 5991 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 5992 { 5993 struct pci_dev *pdev; 5994 5995 /* Obtain PCI device reference */ 5996 if (!phba->pcidev) 5997 return; 5998 else 5999 pdev = phba->pcidev; 6000 6001 /* Free coherent DMA memory allocated */ 6002 6003 /* Unmap I/O memory space */ 6004 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6005 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6006 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6007 6008 return; 6009 } 6010 6011 /** 6012 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 6013 * @phba: pointer to lpfc hba data structure. 6014 * 6015 * This routine is invoked to enable the MSI-X interrupt vectors to device 6016 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 6017 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 6018 * invoked, enables either all or nothing, depending on the current 6019 * availability of PCI vector resources. The device driver is responsible 6020 * for calling the individual request_irq() to register each MSI-X vector 6021 * with a interrupt handler, which is done in this function. Note that 6022 * later when device is unloading, the driver should always call free_irq() 6023 * on all MSI-X vectors it has done request_irq() on before calling 6024 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6025 * will be left with MSI-X enabled and leaks its vectors. 
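*
* A condensed sketch of the matching teardown order implied here,
* mirroring lpfc_sli_disable_msix() below (illustrative only):
*
*   for (i = 0; i < LPFC_MSIX_VECTORS; i++)
*           free_irq(phba->msix_entries[i].vector, phba);
*   pci_disable_msix(phba->pcidev);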
6026 * 6027 * Return codes 6028 * 0 - sucessful 6029 * other values - error 6030 **/ 6031 static int 6032 lpfc_sli_enable_msix(struct lpfc_hba *phba) 6033 { 6034 int rc, i; 6035 LPFC_MBOXQ_t *pmb; 6036 6037 /* Set up MSI-X multi-message vectors */ 6038 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6039 phba->msix_entries[i].entry = i; 6040 6041 /* Configure MSI-X capability structure */ 6042 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 6043 ARRAY_SIZE(phba->msix_entries)); 6044 if (rc) { 6045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6046 "0420 PCI enable MSI-X failed (%d)\n", rc); 6047 goto msi_fail_out; 6048 } 6049 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6051 "0477 MSI-X entry[%d]: vector=x%x " 6052 "message=%d\n", i, 6053 phba->msix_entries[i].vector, 6054 phba->msix_entries[i].entry); 6055 /* 6056 * Assign MSI-X vectors to interrupt handlers 6057 */ 6058 6059 /* vector-0 is associated to slow-path handler */ 6060 rc = request_irq(phba->msix_entries[0].vector, 6061 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 6062 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6063 if (rc) { 6064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6065 "0421 MSI-X slow-path request_irq failed " 6066 "(%d)\n", rc); 6067 goto msi_fail_out; 6068 } 6069 6070 /* vector-1 is associated to fast-path handler */ 6071 rc = request_irq(phba->msix_entries[1].vector, 6072 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 6073 LPFC_FP_DRIVER_HANDLER_NAME, phba); 6074 6075 if (rc) { 6076 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6077 "0429 MSI-X fast-path request_irq failed " 6078 "(%d)\n", rc); 6079 goto irq_fail_out; 6080 } 6081 6082 /* 6083 * Configure HBA MSI-X attention conditions to messages 6084 */ 6085 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6086 6087 if (!pmb) { 6088 rc = -ENOMEM; 6089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6090 "0474 Unable to allocate memory for issuing " 6091 "MBOX_CONFIG_MSI command\n"); 6092 goto mem_fail_out; 6093 } 6094 rc = lpfc_config_msi(phba, pmb); 6095 if (rc) 6096 goto mbx_fail_out; 6097 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6098 if (rc != MBX_SUCCESS) { 6099 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6100 "0351 Config MSI mailbox command failed, " 6101 "mbxCmd x%x, mbxStatus x%x\n", 6102 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 6103 goto mbx_fail_out; 6104 } 6105 6106 /* Free memory allocated for mailbox command */ 6107 mempool_free(pmb, phba->mbox_mem_pool); 6108 return rc; 6109 6110 mbx_fail_out: 6111 /* Free memory allocated for mailbox command */ 6112 mempool_free(pmb, phba->mbox_mem_pool); 6113 6114 mem_fail_out: 6115 /* free the irq already requested */ 6116 free_irq(phba->msix_entries[1].vector, phba); 6117 6118 irq_fail_out: 6119 /* free the irq already requested */ 6120 free_irq(phba->msix_entries[0].vector, phba); 6121 6122 msi_fail_out: 6123 /* Unconfigure MSI-X capability structure */ 6124 pci_disable_msix(phba->pcidev); 6125 return rc; 6126 } 6127 6128 /** 6129 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 6130 * @phba: pointer to lpfc hba data structure. 6131 * 6132 * This routine is invoked to release the MSI-X vectors and then disable the 6133 * MSI-X interrupt mode to device with SLI-3 interface spec. 
6134 **/
6135 static void
6136 lpfc_sli_disable_msix(struct lpfc_hba *phba)
6137 {
6138 int i;
6139
6140 /* Free up MSI-X multi-message vectors */
6141 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6142 free_irq(phba->msix_entries[i].vector, phba);
6143 /* Disable MSI-X */
6144 pci_disable_msix(phba->pcidev);
6145
6146 return;
6147 }
6148
6149 /**
6150 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6151 * @phba: pointer to lpfc hba data structure.
6152 *
6153 * This routine is invoked to enable the MSI interrupt mode to device with
6154 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6155 * enable the MSI vector. The device driver is responsible for calling
6156 * request_irq() to register the MSI vector with an interrupt handler,
6157 * which is done in this function.
6158 *
6159 * Return codes
6160 * 0 - successful
6161 * other values - error
6162 */
6163 static int
6164 lpfc_sli_enable_msi(struct lpfc_hba *phba)
6165 {
6166 int rc;
6167
6168 rc = pci_enable_msi(phba->pcidev);
6169 if (!rc)
6170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6171 "0462 PCI enable MSI mode success.\n");
6172 else {
6173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6174 "0471 PCI enable MSI mode failed (%d)\n", rc);
6175 return rc;
6176 }
6177
6178 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6179 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6180 if (rc) {
6181 pci_disable_msi(phba->pcidev);
6182 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6183 "0478 MSI request_irq failed (%d)\n", rc);
6184 }
6185 return rc;
6186 }
6187
6188 /**
6189 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6190 * @phba: pointer to lpfc hba data structure.
6191 *
6192 * This routine is invoked to disable the MSI interrupt mode to device with
6193 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
6194 * has done request_irq() on before calling pci_disable_msi(). Failure to
6195 * do so results in a BUG_ON() and leaves the device with MSI enabled,
6196 * leaking its vector.
6197 */
6198 static void
6199 lpfc_sli_disable_msi(struct lpfc_hba *phba)
6200 {
6201 free_irq(phba->pcidev->irq, phba);
6202 pci_disable_msi(phba->pcidev);
6203 return;
6204 }
6205
6206 /**
6207 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6208 * @phba: pointer to lpfc hba data structure.
6209 *
6210 * This routine is invoked to enable device interrupt and associate driver's
6211 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6212 * spec. Depending on the interrupt mode configured for the driver, it
6213 * will try to fall back from the configured interrupt mode to an interrupt
6214 * mode which is supported by the platform, kernel, and device, in the
6215 * order of:
6216 * MSI-X -> MSI -> IRQ.
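*
* A condensed sketch of how a caller consumes this fallback chain,
* adapted from the probe path in this file (illustrative only):
*
*   intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
*   if (intr_mode == LPFC_INTR_ERROR) {
*           ... fail the bring-up, no interrupt mode could be enabled ...
*   } else {
*           phba->intr_mode = intr_mode;
*           lpfc_log_intr_mode(phba, intr_mode);
*   }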
6217 * 6218 * Return codes 6219 * 0 - sucessful 6220 * other values - error 6221 **/ 6222 static uint32_t 6223 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6224 { 6225 uint32_t intr_mode = LPFC_INTR_ERROR; 6226 int retval; 6227 6228 if (cfg_mode == 2) { 6229 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6230 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 6231 if (!retval) { 6232 /* Now, try to enable MSI-X interrupt mode */ 6233 retval = lpfc_sli_enable_msix(phba); 6234 if (!retval) { 6235 /* Indicate initialization to MSI-X mode */ 6236 phba->intr_type = MSIX; 6237 intr_mode = 2; 6238 } 6239 } 6240 } 6241 6242 /* Fallback to MSI if MSI-X initialization failed */ 6243 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6244 retval = lpfc_sli_enable_msi(phba); 6245 if (!retval) { 6246 /* Indicate initialization to MSI mode */ 6247 phba->intr_type = MSI; 6248 intr_mode = 1; 6249 } 6250 } 6251 6252 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 6253 if (phba->intr_type == NONE) { 6254 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6255 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6256 if (!retval) { 6257 /* Indicate initialization to INTx mode */ 6258 phba->intr_type = INTx; 6259 intr_mode = 0; 6260 } 6261 } 6262 return intr_mode; 6263 } 6264 6265 /** 6266 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 6267 * @phba: pointer to lpfc hba data structure. 6268 * 6269 * This routine is invoked to disable device interrupt and disassociate the 6270 * driver's interrupt handler(s) from interrupt vector(s) to device with 6271 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 6272 * release the interrupt vector(s) for the message signaled interrupt. 6273 **/ 6274 static void 6275 lpfc_sli_disable_intr(struct lpfc_hba *phba) 6276 { 6277 /* Disable the currently initialized interrupt mode */ 6278 if (phba->intr_type == MSIX) 6279 lpfc_sli_disable_msix(phba); 6280 else if (phba->intr_type == MSI) 6281 lpfc_sli_disable_msi(phba); 6282 else if (phba->intr_type == INTx) 6283 free_irq(phba->pcidev->irq, phba); 6284 6285 /* Reset interrupt management states */ 6286 phba->intr_type = NONE; 6287 phba->sli.slistat.sli_intr = 0; 6288 6289 return; 6290 } 6291 6292 /** 6293 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 6294 * @phba: pointer to lpfc hba data structure. 6295 * 6296 * This routine is invoked to enable the MSI-X interrupt vectors to device 6297 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 6298 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 6299 * enables either all or nothing, depending on the current availability of 6300 * PCI vector resources. The device driver is responsible for calling the 6301 * individual request_irq() to register each MSI-X vector with a interrupt 6302 * handler, which is done in this function. Note that later when device is 6303 * unloading, the driver should always call free_irq() on all MSI-X vectors 6304 * it has done request_irq() on before calling pci_disable_msix(). Failure 6305 * to do so results in a BUG_ON() and a device will be left with MSI-X 6306 * enabled and leaks its vectors. 
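*
* Each fast-path vector is registered with its own fcp_eq_hdl[] entry as
* the dev_id cookie, so the matching free_irq() must pass the same cookie
* back; a condensed sketch (illustrative only):
*
*   rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
*                    &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
*                    LPFC_FP_DRIVER_HANDLER_NAME,
*                    &phba->sli4_hba.fcp_eq_hdl[index - 1]);
*   ...
*   free_irq(phba->sli4_hba.msix_entries[index].vector,
*            &phba->sli4_hba.fcp_eq_hdl[index - 1]);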
6307 * 6308 * Return codes 6309 * 0 - sucessful 6310 * other values - error 6311 **/ 6312 static int 6313 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 6314 { 6315 int rc, index; 6316 6317 /* Set up MSI-X multi-message vectors */ 6318 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6319 phba->sli4_hba.msix_entries[index].entry = index; 6320 6321 /* Configure MSI-X capability structure */ 6322 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 6323 phba->sli4_hba.cfg_eqn); 6324 if (rc) { 6325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6326 "0484 PCI enable MSI-X failed (%d)\n", rc); 6327 goto msi_fail_out; 6328 } 6329 /* Log MSI-X vector assignment */ 6330 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6332 "0489 MSI-X entry[%d]: vector=x%x " 6333 "message=%d\n", index, 6334 phba->sli4_hba.msix_entries[index].vector, 6335 phba->sli4_hba.msix_entries[index].entry); 6336 /* 6337 * Assign MSI-X vectors to interrupt handlers 6338 */ 6339 6340 /* The first vector must associated to slow-path handler for MQ */ 6341 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 6342 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 6343 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6344 if (rc) { 6345 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6346 "0485 MSI-X slow-path request_irq failed " 6347 "(%d)\n", rc); 6348 goto msi_fail_out; 6349 } 6350 6351 /* The rest of the vector(s) are associated to fast-path handler(s) */ 6352 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { 6353 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 6354 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 6355 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 6356 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 6357 LPFC_FP_DRIVER_HANDLER_NAME, 6358 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6359 if (rc) { 6360 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6361 "0486 MSI-X fast-path (%d) " 6362 "request_irq failed (%d)\n", index, rc); 6363 goto cfg_fail_out; 6364 } 6365 } 6366 6367 return rc; 6368 6369 cfg_fail_out: 6370 /* free the irq already requested */ 6371 for (--index; index >= 1; index--) 6372 free_irq(phba->sli4_hba.msix_entries[index - 1].vector, 6373 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6374 6375 /* free the irq already requested */ 6376 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 6377 6378 msi_fail_out: 6379 /* Unconfigure MSI-X capability structure */ 6380 pci_disable_msix(phba->pcidev); 6381 return rc; 6382 } 6383 6384 /** 6385 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 6386 * @phba: pointer to lpfc hba data structure. 6387 * 6388 * This routine is invoked to release the MSI-X vectors and then disable the 6389 * MSI-X interrupt mode to device with SLI-4 interface spec. 6390 **/ 6391 static void 6392 lpfc_sli4_disable_msix(struct lpfc_hba *phba) 6393 { 6394 int index; 6395 6396 /* Free up MSI-X multi-message vectors */ 6397 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 6398 6399 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) 6400 free_irq(phba->sli4_hba.msix_entries[index].vector, 6401 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 6402 /* Disable MSI-X */ 6403 pci_disable_msix(phba->pcidev); 6404 6405 return; 6406 } 6407 6408 /** 6409 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 6410 * @phba: pointer to lpfc hba data structure. 6411 * 6412 * This routine is invoked to enable the MSI interrupt mode to device with 6413 * SLI-4 interface spec. 
The kernel function pci_enable_msi()
6414 * is called to enable the MSI vector. The device driver is responsible
6415 * for calling request_irq() to register the MSI vector with an interrupt
6416 * handler, which is done in this function.
6417 *
6418 * Return codes
6419 * 0 - successful
6420 * other values - error
6421 **/
6422 static int
6423 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6424 {
6425 int rc, index;
6426
6427 rc = pci_enable_msi(phba->pcidev);
6428 if (!rc)
6429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6430 "0487 PCI enable MSI mode success.\n");
6431 else {
6432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6433 "0488 PCI enable MSI mode failed (%d)\n", rc);
6434 return rc;
6435 }
6436
6437 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6438 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6439 if (rc) {
6440 pci_disable_msi(phba->pcidev);
6441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6442 "0490 MSI request_irq failed (%d)\n", rc);
6443 }
6444
6445 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6446 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6447 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6448 }
6449
6450 return rc;
6451 }
6452
6453 /**
6454 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6455 * @phba: pointer to lpfc hba data structure.
6456 *
6457 * This routine is invoked to disable the MSI interrupt mode to device with
6458 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
6459 * has done request_irq() on before calling pci_disable_msi(). Failure to
6460 * do so results in a BUG_ON() and leaves the device with MSI enabled,
6461 * leaking its vector.
6462 **/
6463 static void
6464 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6465 {
6466 free_irq(phba->pcidev->irq, phba);
6467 pci_disable_msi(phba->pcidev);
6468 return;
6469 }
6470
6471 /**
6472 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
6473 * @phba: pointer to lpfc hba data structure.
6474 *
6475 * This routine is invoked to enable device interrupt and associate driver's
6476 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
6477 * interface spec. Depending on the interrupt mode configured for the
6478 * driver, it will try to fall back from the configured interrupt mode to an
6479 * interrupt mode which is supported by the platform, kernel, and device, in
6480 * the order of:
6481 * MSI-X -> MSI -> IRQ.
6482 * 6483 * Return codes 6484 * 0 - sucessful 6485 * other values - error 6486 **/ 6487 static uint32_t 6488 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6489 { 6490 uint32_t intr_mode = LPFC_INTR_ERROR; 6491 int retval, index; 6492 6493 if (cfg_mode == 2) { 6494 /* Preparation before conf_msi mbox cmd */ 6495 retval = 0; 6496 if (!retval) { 6497 /* Now, try to enable MSI-X interrupt mode */ 6498 retval = lpfc_sli4_enable_msix(phba); 6499 if (!retval) { 6500 /* Indicate initialization to MSI-X mode */ 6501 phba->intr_type = MSIX; 6502 intr_mode = 2; 6503 } 6504 } 6505 } 6506 6507 /* Fallback to MSI if MSI-X initialization failed */ 6508 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6509 retval = lpfc_sli4_enable_msi(phba); 6510 if (!retval) { 6511 /* Indicate initialization to MSI mode */ 6512 phba->intr_type = MSI; 6513 intr_mode = 1; 6514 } 6515 } 6516 6517 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 6518 if (phba->intr_type == NONE) { 6519 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 6520 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6521 if (!retval) { 6522 /* Indicate initialization to INTx mode */ 6523 phba->intr_type = INTx; 6524 intr_mode = 0; 6525 for (index = 0; index < phba->cfg_fcp_eq_count; 6526 index++) { 6527 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 6528 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 6529 } 6530 } 6531 } 6532 return intr_mode; 6533 } 6534 6535 /** 6536 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 6537 * @phba: pointer to lpfc hba data structure. 6538 * 6539 * This routine is invoked to disable device interrupt and disassociate 6540 * the driver's interrupt handler(s) from interrupt vector(s) to device 6541 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 6542 * will release the interrupt vector(s) for the message signaled interrupt. 6543 **/ 6544 static void 6545 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 6546 { 6547 /* Disable the currently initialized interrupt mode */ 6548 if (phba->intr_type == MSIX) 6549 lpfc_sli4_disable_msix(phba); 6550 else if (phba->intr_type == MSI) 6551 lpfc_sli4_disable_msi(phba); 6552 else if (phba->intr_type == INTx) 6553 free_irq(phba->pcidev->irq, phba); 6554 6555 /* Reset interrupt management states */ 6556 phba->intr_type = NONE; 6557 phba->sli.slistat.sli_intr = 0; 6558 6559 return; 6560 } 6561 6562 /** 6563 * lpfc_unset_hba - Unset SLI3 hba device initialization 6564 * @phba: pointer to lpfc hba data structure. 6565 * 6566 * This routine is invoked to unset the HBA device initialization steps to 6567 * a device with SLI-3 interface spec. 6568 **/ 6569 static void 6570 lpfc_unset_hba(struct lpfc_hba *phba) 6571 { 6572 struct lpfc_vport *vport = phba->pport; 6573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6574 6575 spin_lock_irq(shost->host_lock); 6576 vport->load_flag |= FC_UNLOADING; 6577 spin_unlock_irq(shost->host_lock); 6578 6579 lpfc_stop_hba_timers(phba); 6580 6581 phba->pport->work_port_events = 0; 6582 6583 lpfc_sli_hba_down(phba); 6584 6585 lpfc_sli_brdrestart(phba); 6586 6587 lpfc_sli_disable_intr(phba); 6588 6589 return; 6590 } 6591 6592 /** 6593 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 6594 * @phba: pointer to lpfc hba data structure. 6595 * 6596 * This routine is invoked to unset the HBA device initialization steps to 6597 * a device with SLI-4 interface spec. 
6598 **/
6599 static void
6600 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6601 {
6602 struct lpfc_vport *vport = phba->pport;
6603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6604
6605 spin_lock_irq(shost->host_lock);
6606 vport->load_flag |= FC_UNLOADING;
6607 spin_unlock_irq(shost->host_lock);
6608
6609 phba->pport->work_port_events = 0;
6610
6611 lpfc_sli4_hba_down(phba);
6612
6613 lpfc_sli4_disable_intr(phba);
6614
6615 return;
6616 }
6617
6618 /**
6619 * lpfc_sli4_hba_unset - Unset the fcoe hba
6620 * @phba: Pointer to HBA context object.
6621 *
6622 * This function is called in the SLI4 code path to reset the HBA's FCoE
6623 * function. The caller is not required to hold any lock. This routine
6624 * issues a PCI function reset mailbox command to reset the FCoE function.
6625 * At the end of the function, it calls the lpfc_hba_down_post function to
6626 * free any pending commands.
6627 **/
6628 static void
6629 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6630 {
6631 int wait_cnt = 0;
6632 LPFC_MBOXQ_t *mboxq;
6633
6634 lpfc_stop_hba_timers(phba);
6635 phba->sli4_hba.intr_enable = 0;
6636
6637 /*
6638 * Gracefully wait out the potential current outstanding asynchronous
6639 * mailbox command.
6640 */
6641
6642 /* First, block any pending async mailbox command from being posted */
6643 spin_lock_irq(&phba->hbalock);
6644 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6645 spin_unlock_irq(&phba->hbalock);
6646 /* Now, try to wait it out if we can */
6647 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6648 msleep(10);
6649 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6650 break;
6651 }
6652 /* Forcefully release the outstanding mailbox command if timed out */
6653 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6654 spin_lock_irq(&phba->hbalock);
6655 mboxq = phba->sli.mbox_active;
6656 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
6657 __lpfc_mbox_cmpl_put(phba, mboxq);
6658 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6659 phba->sli.mbox_active = NULL;
6660 spin_unlock_irq(&phba->hbalock);
6661 }
6662
6663 /* Tear down the queues in the HBA */
6664 lpfc_sli4_queue_unset(phba);
6665
6666 /* Disable PCI subsystem interrupt */
6667 lpfc_sli4_disable_intr(phba);
6668
6669 /* Stop kthread signal shall trigger work_done one more time */
6670 kthread_stop(phba->worker_thread);
6671
6672 /* Stop the SLI4 device port */
6673 phba->pport->work_port_events = 0;
6674 }
6675
6676 /**
6677 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6678 * @pdev: pointer to PCI device
6679 * @pid: pointer to PCI device identifier
6680 *
6681 * This routine is to be called to attach a device with SLI-3 interface spec
6682 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6683 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
6684 * device-specific information of the device and driver to see whether the
6685 * driver can support this kind of device. If the match is successful, the
6686 * driver core invokes this routine. If this routine determines it can claim
6687 * the HBA, it does all the initialization that it needs to handle the HBA properly.
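*
* This entry point is reached through the driver's struct pci_driver
* table; a condensed sketch of that wiring (illustrative only -- the
* lpfc-specific symbol names here are assumptions, the actual table is
* defined elsewhere in the driver):
*
*   static struct pci_driver lpfc_driver = {
*           .name     = LPFC_DRIVER_NAME,
*           .id_table = lpfc_id_table,
*           .probe    = lpfc_pci_probe_one,
*           .remove   = __devexit_p(lpfc_pci_remove_one),
*   };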
6688 * 6689 * Return code 6690 * 0 - driver can claim the device 6691 * negative value - driver can not claim the device 6692 **/ 6693 static int __devinit 6694 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 6695 { 6696 struct lpfc_hba *phba; 6697 struct lpfc_vport *vport = NULL; 6698 int error; 6699 uint32_t cfg_mode, intr_mode; 6700 6701 /* Allocate memory for HBA structure */ 6702 phba = lpfc_hba_alloc(pdev); 6703 if (!phba) 6704 return -ENOMEM; 6705 6706 /* Perform generic PCI device enabling operation */ 6707 error = lpfc_enable_pci_dev(phba); 6708 if (error) { 6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6710 "1401 Failed to enable pci device.\n"); 6711 goto out_free_phba; 6712 } 6713 6714 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 6715 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 6716 if (error) 6717 goto out_disable_pci_dev; 6718 6719 /* Set up SLI-3 specific device PCI memory space */ 6720 error = lpfc_sli_pci_mem_setup(phba); 6721 if (error) { 6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6723 "1402 Failed to set up pci memory space.\n"); 6724 goto out_disable_pci_dev; 6725 } 6726 6727 /* Set up phase-1 common device driver resources */ 6728 error = lpfc_setup_driver_resource_phase1(phba); 6729 if (error) { 6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6731 "1403 Failed to set up driver resource.\n"); 6732 goto out_unset_pci_mem_s3; 6733 } 6734 6735 /* Set up SLI-3 specific device driver resources */ 6736 error = lpfc_sli_driver_resource_setup(phba); 6737 if (error) { 6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6739 "1404 Failed to set up driver resource.\n"); 6740 goto out_unset_pci_mem_s3; 6741 } 6742 6743 /* Initialize and populate the iocb list per host */ 6744 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 6745 if (error) { 6746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6747 "1405 Failed to initialize iocb list.\n"); 6748 goto out_unset_driver_resource_s3; 6749 } 6750 6751 /* Set up common device driver resources */ 6752 error = lpfc_setup_driver_resource_phase2(phba); 6753 if (error) { 6754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6755 "1406 Failed to set up driver resource.\n"); 6756 goto out_free_iocb_list; 6757 } 6758 6759 /* Create SCSI host to the physical port */ 6760 error = lpfc_create_shost(phba); 6761 if (error) { 6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6763 "1407 Failed to create scsi host.\n"); 6764 goto out_unset_driver_resource; 6765 } 6766 6767 /* Configure sysfs attributes */ 6768 vport = phba->pport; 6769 error = lpfc_alloc_sysfs_attr(vport); 6770 if (error) { 6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6772 "1476 Failed to allocate sysfs attr\n"); 6773 goto out_destroy_shost; 6774 } 6775 6776 /* Now, trying to enable interrupt and bring up the device */ 6777 cfg_mode = phba->cfg_use_msi; 6778 while (true) { 6779 /* Put device to a known state before enabling interrupt */ 6780 lpfc_stop_port(phba); 6781 /* Configure and enable interrupt */ 6782 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 6783 if (intr_mode == LPFC_INTR_ERROR) { 6784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6785 "0431 Failed to enable interrupt.\n"); 6786 error = -ENODEV; 6787 goto out_free_sysfs_attr; 6788 } 6789 /* SLI-3 HBA setup */ 6790 if (lpfc_sli_hba_setup(phba)) { 6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6792 "1477 Failed to set up hba\n"); 6793 error = -ENODEV; 6794 goto out_remove_device; 6795 } 6796 6797 /* Wait 50ms for the interrupts of previous mailbox commands */ 6798 
msleep(50);
6799 /* Check active interrupts on message signaled interrupts */
6800 if (intr_mode == 0 ||
6801 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
6802 /* Log the current active interrupt mode */
6803 phba->intr_mode = intr_mode;
6804 lpfc_log_intr_mode(phba, intr_mode);
6805 break;
6806 } else {
6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6808 "0447 Configure interrupt mode (%d) "
6809 "failed active interrupt test.\n",
6810 intr_mode);
6811 /* Disable the current interrupt mode */
6812 lpfc_sli_disable_intr(phba);
6813 /* Try next level of interrupt mode */
6814 cfg_mode = --intr_mode;
6815 }
6816 }
6817
6818 /* Perform post initialization setup */
6819 lpfc_post_init_setup(phba);
6820
6821 /* Check if there are static vports to be created. */
6822 lpfc_create_static_vport(phba);
6823
6824 return 0;
6825
6826 out_remove_device:
6827 lpfc_unset_hba(phba);
6828 out_free_sysfs_attr:
6829 lpfc_free_sysfs_attr(vport);
6830 out_destroy_shost:
6831 lpfc_destroy_shost(phba);
6832 out_unset_driver_resource:
6833 lpfc_unset_driver_resource_phase2(phba);
6834 out_free_iocb_list:
6835 lpfc_free_iocb_list(phba);
6836 out_unset_driver_resource_s3:
6837 lpfc_sli_driver_resource_unset(phba);
6838 out_unset_pci_mem_s3:
6839 lpfc_sli_pci_mem_unset(phba);
6840 out_disable_pci_dev:
6841 lpfc_disable_pci_dev(phba);
6842 out_free_phba:
6843 lpfc_hba_free(phba);
6844 return error;
6845 }
6846
6847 /**
6848 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
6849 * @pdev: pointer to PCI device
6850 *
6851 * This routine is to be called to detach a device with SLI-3 interface
6852 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6853 * removed from the PCI bus, it performs all the necessary cleanup for the HBA
6854 * device to be removed from the PCI subsystem properly.
6855 **/
6856 static void __devexit
6857 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
6858 {
6859 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6860 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6861 struct lpfc_vport **vports;
6862 struct lpfc_hba *phba = vport->phba;
6863 int i;
6864 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
6865
6866 spin_lock_irq(&phba->hbalock);
6867 vport->load_flag |= FC_UNLOADING;
6868 spin_unlock_irq(&phba->hbalock);
6869
6870 lpfc_free_sysfs_attr(vport);
6871
6872 /* Release all the vports against this physical port */
6873 vports = lpfc_create_vport_work_array(phba);
6874 if (vports != NULL)
6875 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
6876 fc_vport_terminate(vports[i]->fc_vport);
6877 lpfc_destroy_vport_work_array(phba, vports);
6878
6879 /* Remove FC host and then SCSI host with the physical port */
6880 fc_remove_host(shost);
6881 scsi_remove_host(shost);
6882 lpfc_cleanup(vport);
6883
6884 /*
6885 * Bring down the SLI Layer. This step disables all interrupts,
6886 * clears the rings, discards all mailbox commands, and resets
6887 * the HBA.
6888 */ 6889 6890 /* HBA interrupt will be diabled after this call */ 6891 lpfc_sli_hba_down(phba); 6892 /* Stop kthread signal shall trigger work_done one more time */ 6893 kthread_stop(phba->worker_thread); 6894 /* Final cleanup of txcmplq and reset the HBA */ 6895 lpfc_sli_brdrestart(phba); 6896 6897 lpfc_stop_hba_timers(phba); 6898 spin_lock_irq(&phba->hbalock); 6899 list_del_init(&vport->listentry); 6900 spin_unlock_irq(&phba->hbalock); 6901 6902 lpfc_debugfs_terminate(vport); 6903 6904 /* Disable interrupt */ 6905 lpfc_sli_disable_intr(phba); 6906 6907 pci_set_drvdata(pdev, NULL); 6908 scsi_host_put(shost); 6909 6910 /* 6911 * Call scsi_free before mem_free since scsi bufs are released to their 6912 * corresponding pools here. 6913 */ 6914 lpfc_scsi_free(phba); 6915 lpfc_mem_free_all(phba); 6916 6917 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6918 phba->hbqslimp.virt, phba->hbqslimp.phys); 6919 6920 /* Free resources associated with SLI2 interface */ 6921 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6922 phba->slim2p.virt, phba->slim2p.phys); 6923 6924 /* unmap adapter SLIM and Control Registers */ 6925 iounmap(phba->ctrl_regs_memmap_p); 6926 iounmap(phba->slim_memmap_p); 6927 6928 lpfc_hba_free(phba); 6929 6930 pci_release_selected_regions(pdev, bars); 6931 pci_disable_device(pdev); 6932 } 6933 6934 /** 6935 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 6936 * @pdev: pointer to PCI device 6937 * @msg: power management message 6938 * 6939 * This routine is to be called from the kernel's PCI subsystem to support 6940 * system Power Management (PM) to device with SLI-3 interface spec. When 6941 * PM invokes this method, it quiesces the device by stopping the driver's 6942 * worker thread for the device, turning off device's interrupt and DMA, 6943 * and bring the device offline. Note that as the driver implements the 6944 * minimum PM requirements to a power-aware driver's PM support for the 6945 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 6946 * to the suspend() method call will be treated as SUSPEND and the driver will 6947 * fully reinitialize its device during resume() method call, the driver will 6948 * set device to PCI_D3hot state in PCI config space instead of setting it 6949 * according to the @msg provided by the PM. 6950 * 6951 * Return code 6952 * 0 - driver suspended the device 6953 * Error otherwise 6954 **/ 6955 static int 6956 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 6957 { 6958 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6960 6961 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6962 "0473 PCI device Power Management suspend.\n"); 6963 6964 /* Bring down the device */ 6965 lpfc_offline_prep(phba); 6966 lpfc_offline(phba); 6967 kthread_stop(phba->worker_thread); 6968 6969 /* Disable interrupt from device */ 6970 lpfc_sli_disable_intr(phba); 6971 6972 /* Save device state to PCI config space */ 6973 pci_save_state(pdev); 6974 pci_set_power_state(pdev, PCI_D3hot); 6975 6976 return 0; 6977 } 6978 6979 /** 6980 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 6981 * @pdev: pointer to PCI device 6982 * 6983 * This routine is to be called from the kernel's PCI subsystem to support 6984 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM
6985 * invokes this method, it restores the device's PCI config space state and
6986 * fully reinitializes the device and brings it online. Note that as the
6987 * driver implements the minimum PM requirements to a power-aware driver's
6988 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
6989 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
6990 * driver will fully reinitialize its device during resume() method call,
6991 * the device will be set to PCI_D0 directly in PCI config space before
6992 * restoring the state.
6993 *
6994 * Return code
6995 * 0 - driver resumed the device
6996 * Error otherwise
6997 **/
6998 static int
6999 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7000 {
7001 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7002 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7003 uint32_t intr_mode;
7004 int error;
7005
7006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7007 "0452 PCI device Power Management resume.\n");
7008
7009 /* Restore device state from PCI config space */
7010 pci_set_power_state(pdev, PCI_D0);
7011 pci_restore_state(pdev);
7012 if (pdev->is_busmaster)
7013 pci_set_master(pdev);
7014
7015 /* Startup the kernel thread for this host adapter. */
7016 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7017 "lpfc_worker_%d", phba->brd_no);
7018 if (IS_ERR(phba->worker_thread)) {
7019 error = PTR_ERR(phba->worker_thread);
7020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7021 "0434 PM resume failed to start worker "
7022 "thread: error=x%x.\n", error);
7023 return error;
7024 }
7025
7026 /* Configure and enable interrupt */
7027 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7028 if (intr_mode == LPFC_INTR_ERROR) {
7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7030 "0430 PM resume Failed to enable interrupt\n");
7031 return -EIO;
7032 } else
7033 phba->intr_mode = intr_mode;
7034
7035 /* Restart HBA and bring it online */
7036 lpfc_sli_brdrestart(phba);
7037 lpfc_online(phba);
7038
7039 /* Log the current active interrupt mode */
7040 lpfc_log_intr_mode(phba, phba->intr_mode);
7041
7042 return 0;
7043 }
7044
7045 /**
7046 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7047 * @pdev: pointer to PCI device.
7048 * @state: the current PCI connection state.
7049 *
7050 * This routine is called from the PCI subsystem for I/O error handling to
7051 * device with SLI-3 interface spec. This function is called by the PCI
7052 * subsystem after a PCI bus error affecting this device has been detected.
7053 * When this function is invoked, it will need to stop all the I/Os and
7054 * interrupt(s) to the device. Once that is done, it will return
7055 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7056 * as desired.
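*
* Together with the .slot_reset and .resume methods, this handler is
* exposed to the PCI core through a struct pci_error_handlers table; a
* condensed sketch of that wiring (illustrative only -- the lpfc-specific
* table and symbol names here are assumptions):
*
*   static struct pci_error_handlers lpfc_err_handler = {
*           .error_detected = lpfc_io_error_detected,
*           .slot_reset     = lpfc_io_slot_reset,
*           .resume         = lpfc_io_resume,
*   };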
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * devices with the SLI-3 interface spec. It is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * routine is invoked, it will stop all the I/Os and interrupt(s) to the
 * device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET for
 * the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	if (state == pci_channel_io_perm_failure) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 PCI channel I/O permanent failure\n");
		/* Block all SCSI devices' I/Os on the host */
		lpfc_scsi_dev_block(phba);
		/* Clean up all driver's outstanding SCSI I/Os */
		lpfc_sli_flush_fcp_rings(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);
	/*
	 * There may be I/Os dropped by the firmware. Error out the iocbs
	 * (I/Os) on the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
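/*
 * Illustrative sketch (not compiled as part of the driver): the channel
 * states a driver may see in its error_detected() callback, and the usual
 * responses, are roughly:
 *
 *	switch (state) {
 *	case pci_channel_io_perm_failure:	// link is dead for good
 *		return PCI_ERS_RESULT_DISCONNECT;
 *	case pci_channel_io_frozen:		// I/O blocked, reset possible
 *	case pci_channel_io_normal:
 *	default:
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 * The routine above collapses everything except permanent failure into a
 * reset request; finer-grained handling is possible but not required.
 */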
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * devices with the SLI-3 interface spec. It is called after the PCI bus
 * has been reset to restart the PCI card from scratch, as if from a
 * cold-boot. During the PCI subsystem error recovery, after the driver
 * returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper
 * error recovery and then call this routine before calling the .resume
 * method to recover the device. This function will initialize the HBA
 * device and enable its interrupt, but it will just put the HBA in the
 * offline state without passing any I/O traffic.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to
 * devices with the SLI-3 interface spec. It is called when the kernel
 * error recovery tells the lpfc driver that it is ok to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_online(phba);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve, scaled in steps by the
 * maximum number of XRIs configured for the port.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (max_xri <= 100)
		return 4;
	else if (max_xri <= 256)
		return 8;
	else if (max_xri <= 512)
		return 16;
	else if (max_xri <= 1024)
		return 32;
	else
		return 48;
}
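/*
 * Illustrative sketch (not compiled as part of the driver): the mapping
 * above is a simple step function of max_xri. For example, a port whose
 * configuration reports max_xri = 300 falls into the (256, 512] bucket,
 * so 16 ELS/CT IOCBs are reserved:
 *
 *	phba->sli4_hba.max_cfg_param.max_xri = 300;
 *	n = lpfc_sli4_get_els_iocb_cnt(phba);	// n == 16
 */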
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem for devices with
 * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface
 * spec is presented on the PCI bus, the kernel PCI subsystem looks at the
 * PCI device-specific information of the device to see whether the driver
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	int mcnt;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1409 Failed to enable pci device.\n");
		goto out_free_phba;
	}

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba,
				    phba->sli4_hba.max_cfg_param.max_xri);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0426 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* Set up SLI-4 HBA */
		if (lpfc_sli4_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1421 Failed to set up hba\n");
			error = -ENODEV;
			goto out_disable_intr;
		}

		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
		if (intr_mode != 0)
			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
							    LPFC_ACT_INTR_CNT);

		/* Check active interrupts received only for MSI/MSI-X */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0451 Configure interrupt mode (%d) "
				"failed active interrupt test.\n",
				intr_mode);
		/* Unset the previous SLI-4 HBA setup */
		lpfc_sli4_unset_hba(phba);
		/* Try next level of interrupt mode */
		cfg_mode = --intr_mode;
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem for devices with
 * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface
 * spec is removed from the PCI bus, it performs all the necessary cleanup
 * for the HBA device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
7426 */ 7427 lpfc_scsi_free(phba); 7428 lpfc_sli4_driver_resource_unset(phba); 7429 7430 /* Unmap adapter Control and Doorbell registers */ 7431 lpfc_sli4_pci_mem_unset(phba); 7432 7433 /* Release PCI resources and disable device's PCI function */ 7434 scsi_host_put(shost); 7435 lpfc_disable_pci_dev(phba); 7436 7437 /* Finally, free the driver's device data structure */ 7438 lpfc_hba_free(phba); 7439 7440 return; 7441 } 7442 7443 /** 7444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 7445 * @pdev: pointer to PCI device 7446 * @msg: power management message 7447 * 7448 * This routine is called from the kernel's PCI subsystem to support system 7449 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 7450 * this method, it quiesces the device by stopping the driver's worker 7451 * thread for the device, turning off device's interrupt and DMA, and bring 7452 * the device offline. Note that as the driver implements the minimum PM 7453 * requirements to a power-aware driver's PM support for suspend/resume -- all 7454 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 7455 * method call will be treated as SUSPEND and the driver will fully 7456 * reinitialize its device during resume() method call, the driver will set 7457 * device to PCI_D3hot state in PCI config space instead of setting it 7458 * according to the @msg provided by the PM. 7459 * 7460 * Return code 7461 * 0 - driver suspended the device 7462 * Error otherwise 7463 **/ 7464 static int 7465 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 7466 { 7467 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7469 7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7471 "0298 PCI device Power Management suspend.\n"); 7472 7473 /* Bring down the device */ 7474 lpfc_offline_prep(phba); 7475 lpfc_offline(phba); 7476 kthread_stop(phba->worker_thread); 7477 7478 /* Disable interrupt from device */ 7479 lpfc_sli4_disable_intr(phba); 7480 7481 /* Save device state to PCI config space */ 7482 pci_save_state(pdev); 7483 pci_set_power_state(pdev, PCI_D3hot); 7484 7485 return 0; 7486 } 7487 7488 /** 7489 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 7490 * @pdev: pointer to PCI device 7491 * 7492 * This routine is called from the kernel's PCI subsystem to support system 7493 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 7494 * this method, it restores the device's PCI config space state and fully 7495 * reinitializes the device and brings it online. Note that as the driver 7496 * implements the minimum PM requirements to a power-aware driver's PM for 7497 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 7498 * to the suspend() method call will be treated as SUSPEND and the driver 7499 * will fully reinitialize its device during resume() method call, the device 7500 * will be set to PCI_D0 directly in PCI config space before restoring the 7501 * state. 
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to
 * devices with the SLI-4 interface spec. It is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * routine is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper
 * recovery as desired.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * devices with the SLI-4 interface spec. It is called after the PCI bus
 * has been reset to restart the PCI card from scratch, as if from a
 * cold-boot. During the PCI subsystem error recovery, after the driver
 * returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper
 * error recovery and then call this routine before calling the .resume
 * method to recover the device. This function will initialize the HBA
 * device and enable its interrupt, but it will just put the HBA in the
 * offline state without passing any I/O traffic.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to
 * devices with the SLI-4 interface spec. It is called when the kernel
 * error recovery tells the lpfc driver that it is ok to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	return;
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device to see
 * whether the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine
 * dispatches the action to the proper SLI-3 or SLI-4 device probing
 * routine, which will do all the initialization that it needs to do to
 * handle the HBA device properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	uint16_t dev_id;

	if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
		return -ENODEV;

	switch (dev_id) {
	case PCI_DEVICE_ID_TIGERSHARK:
	case PCI_DEVICE_ID_TIGERSHARK_S:
		rc = lpfc_pci_probe_one_s4(pdev, pid);
		break;
	default:
		rc = lpfc_pci_probe_one_s3(pdev, pid);
		break;
	}
	return rc;
}
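/*
 * Illustrative sketch (not compiled as part of the driver): probing
 * dispatches on the raw PCI device ID because phba->pci_dev_grp has not
 * been set up yet; every later entry point can instead switch on the group
 * recorded during probe. An equivalent table-driven dispatch, sketched
 * here with a hypothetical lpfc_remove_fn[] array, would be:
 *
 *	static void (*lpfc_remove_fn[])(struct pci_dev *) = {
 *		[LPFC_PCI_DEV_LP] = lpfc_pci_remove_one_s3,
 *		[LPFC_PCI_DEV_OC] = lpfc_pci_remove_one_s4,
 *	};
 *	lpfc_remove_fn[phba->pci_dev_grp](pdev);
 *
 * The explicit switch statements used below have the advantage of logging
 * a message for an invalid group instead of indexing out of range.
 */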
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or
 * SLI-4 device remove routine, which will perform all the necessary
 * cleanup for the device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which
 * will suspend the device.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which
 * will resume the device.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device error detected
 * handling routine, which will perform the proper error detected operation.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 * 0 - successful
 * -ENOMEM - FC attach transport failed
 * all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
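/*
 * Illustrative sketch (not compiled as part of the driver): lpfc_init()
 * acquires its resources in order -- transport template(s), then PCI driver
 * registration -- and releases whatever it already acquired when a later
 * step fails. The same acquire/unwind discipline, reduced to a skeleton
 * with placeholder acquire_a()/acquire_b()/register_c() helpers standing in
 * for fc_attach_transport() and pci_register_driver(), is:
 *
 *	a = acquire_a();
 *	if (!a)
 *		return -ENOMEM;
 *	b = acquire_b();
 *	if (!b) {
 *		release_a(a);
 *		return -ENOMEM;
 *	}
 *	err = register_c();
 *	if (err) {
 *		release_a(a);		// undo earlier acquisitions
 *		release_b(b);
 *	}
 *	return err;
 */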
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
				"at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data,
			   _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
				"at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
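/*
 * Illustrative sketch (not compiled as part of the driver): the
 * _dump_buf_*_order values used in lpfc_exit() are page-allocation orders,
 * so an order-n buffer spans (1 << n) pages. For example, with 4 KiB pages:
 *
 *	char *buf = (char *)__get_free_pages(GFP_KERNEL, 5);	// order 5
 *	// 1 << 5 == 32 pages == 128 KiB
 *	free_pages((unsigned long)buf, 5);	// order must match allocation
 *
 * free_pages() must be passed the same order used at allocation time, which
 * is why the orders are kept alongside the buffer pointers.
 */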