/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"

static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/************************************************************************/
/*                                                                      */
/*    lpfc_config_port_prep                                             */
/*    This routine will do LPFC initialization prior to the             */
/*    CONFIG_PORT mailbox command. This will be initialized             */
/*    as a SLI layer callback routine.                                  */
/*    This routine returns 0 on success or -ERESTART if it wants        */
/*    the SLI layer to reset the HBA and try again. Any                 */
/*    other return value indicates an error.                            */
/*                                                                      */
/************************************************************************/
int
lpfc_config_port_prep(struct lpfc_hba * phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->mb;
	phba->hba_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX,
					"%d:0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof (mb->un.varRDnvp.nodename));
	}

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n",
				phba->brd_no);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
	if (!pmb->context2)
		goto out_free_mbox;
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_context2;

	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"%d:0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_context2:
	kfree(pmb->context2);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/************************************************************************/
/*                                                                      */
/*    lpfc_config_port_post                                             */
/*    This routine will do LPFC initialization after the                */
/*    CONFIG_PORT mailbox command. This will be initialized             */
/*    as a SLI layer callback routine.                                  */
/*    This routine returns 0 on success. Any other return value         */
/*    indicates an error.                                               */
/*                                                                      */
/************************************************************************/
int
lpfc_config_port_post(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j, rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->mb;

	lpfc_config_link(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0447 Adapter failed init, mbxCmd x%x "
				"CONFIG_LINK mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Get login parameters for NID.  */
	lpfc_read_sparam(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free( pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
	memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &phba->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			mb->un.varRdConfig.max_xri + 1;

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba,
			KERN_WARNING,
			LOG_LINK_EVENT,
			"%d:1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->brd_no,
			phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->hba_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ring 0 till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	lpfc_post_rcv_buf(phba);

	/* Enable appropriate host interrupts */
	spin_lock_irq(phba->host->host_lock);
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	/*
	 * Setup the ring 0 (els) timeout handler
	 */
	timeout = phba->fc_ratov << 1;
	phba->els_tmofunc.expires = jiffies + HZ * timeout;
	add_timer(&phba->els_tmofunc);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->hba_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */

	return (0);
}

static int
lpfc_discovery_wait(struct lpfc_hba *phba)
{
	int i = 0;

	while ((phba->hba_state != LPFC_HBA_READY) ||
	       (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
	       ((phba->fc_map_cnt == 0) && (i < 2)) ||
	       (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
		/* Check every second for 30 retries. */
		i++;
		if (i > 30) {
			return -ETIMEDOUT;
		}
		if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
			/* The link is down.  Set linkdown timeout */
			return -ETIMEDOUT;
		}

		/* Delay for 1 second to give discovery time to complete. */
		msleep(1000);

	}

	return 0;
}

/************************************************************************/
/*                                                                      */
/*    lpfc_hba_down_prep                                                */
/*    This routine will do LPFC uninitialization before the             */
/*    HBA is reset when bringing down the SLI Layer. This will be       */
/*    initialized as a SLI layer callback routine.                      */
/*    This routine returns 0 on success. Any other return value         */
/*    indicates an error.                                               */
/*                                                                      */
/************************************************************************/
int
lpfc_hba_down_prep(struct lpfc_hba * phba)
{
	/* Disable interrupts */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Cleanup potential discovery resources */
	lpfc_els_flush_rscn(phba);
	lpfc_els_flush_cmd(phba);
	lpfc_disc_flush_list(phba);

	return (0);
}

/************************************************************************/
/*                                                                      */
/*    lpfc_hba_down_post                                                */
/*    This routine will do uninitialization after the HBA is reset      */
/*    when bringing down the SLI Layer.                                 */
/*    This routine returns 0 on success. Any other return value         */
/*    indicates an error.                                               */
/*                                                                      */
/************************************************************************/
int
lpfc_hba_down_post(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	int i;

	/* Cleanup preposted buffers on the ELS ring */
	pring = &psli->ring[LPFC_ELS_RING];
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		list_del(&mp->list);
		pring->postbufq_cnt--;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}

	return 0;
}

/************************************************************************/
/*                                                                      */
/*    lpfc_handle_eratt                                                 */
/*    This routine will handle processing a Host Attention              */
/*    Error Status event. This will be initialized                      */
/*    as a SLI layer callback routine.                                  */
/*                                                                      */
/************************************************************************/
void
lpfc_handle_eratt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;

	if (phba->work_hs & HS_FFER6 ||
	    phba->work_hs & HS_FFER5) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag |= FC_ESTABLISH_LINK;
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);

		/*
		 * Firmware stops when it triggers an error attention with
		 * HS_FFER6.  I/Os outstanding at that point may be dropped
		 * by the firmware, so error out the iocbs on the txcmplq
		 * and let the SCSI layer retry them after the link is
		 * re-established.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
			return;
		}
	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6.  Do not call the
		 * offline twice.  This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		fc_host_post_vendor_event(phba->host, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		lpfc_offline(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		lpfc_hba_down_post(phba);
	}
}

/************************************************************************/
/*                                                                      */
/*    lpfc_handle_latt                                                  */
/*    This routine will handle processing a Host Attention              */
/*    Link Status event. This will be initialized                       */
/*    as a SLI layer callback routine.                                  */
/*                                                                      */
/************************************************************************/
void
lpfc_handle_latt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = -ENOMEM;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		goto lpfc_handle_latt_err_exit;

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp)
		goto lpfc_handle_latt_free_pmb;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt)
		goto lpfc_handle_latt_free_mp;

	rc = -EIO;

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED)
		goto lpfc_handle_latt_free_mbuf;

	/* Clear Link Attention in HA REG */
	spin_lock_irq(phba->host->host_lock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;

lpfc_handle_latt_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	/* pmb was taken from the mailbox mempool, so return it there */
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
	lpfc_linkdown(phba);
	phba->hba_state = LPFC_HBA_ERROR;

	/* The other case is an error from issue_mbox */
	if (rc == -ENOMEM)
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_MBOX,
				"%d:0300 READ_LA: no buffers\n",
				phba->brd_no);

	return;
}

/************************************************************************/
/*                                                                      */
/*   lpfc_parse_vpd                                                     */
/*   This routine will parse the VPD data                               */
/*                                                                      */
/************************************************************************/
static int
lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
{
	uint8_t lenlo, lenhi;
	uint32_t Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_INIT,
			"%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return (1);
}

static void
lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	struct {
		char * name;
		int    max_speed;
		char * bus;
	} m = {"<Unknown>", 0, ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed, "PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	if (descp && descp[0] == '\0')
		snprintf(descp, 255,
			 "Emulex %s %dGb %s Fibre Channel Adapter",
			 m.name, m.max_speed, m.bus);
}

/**************************************************/
/*   lpfc_post_buffer                             */
/*                                                */
/*   This routine will post count buffers to the  */
/*   ring with the QUE_RING_BUF_CN command. This  */
/*   allows 3 buffers / command to be posted.     */
/*   Returns the number of buffers NOT posted.    */
/**************************************************/
int
lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
		 int type)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		spin_lock_irq(phba->host->host_lock);
		iocb = lpfc_sli_get_iocbq(phba);
		spin_unlock_irq(phba->host->host_lock);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						    &mp1->phys);
		if (mp1 == 0 || mp1->virt == 0) {
			kfree(mp1);
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_release_iocbq(phba, iocb);
			spin_unlock_irq(phba->host->host_lock);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (mp2 == 0 || mp2->virt == 0) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				spin_lock_irq(phba->host->host_lock);
				lpfc_sli_release_iocbq(phba, iocb);
				spin_unlock_irq(phba->host->host_lock);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		spin_lock_irq(phba->host->host_lock);
		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			spin_unlock_irq(phba->host->host_lock);
			return cnt;
		}
		spin_unlock_irq(phba->host->host_lock);
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
		}
	}
	pring->missbufcnt = 0;
	return 0;
}

/************************************************************************/
/*                                                                      */
/*   lpfc_post_rcv_buf                                                  */
/*   This routine posts initial rcv buffers to the configured rings     */
/*                                                                      */
/************************************************************************/
static int
lpfc_post_rcv_buf(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

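/*
 * Note: S(N,V) above is a 32-bit rotate-left of V by N bits.  It is used by
 * lpfc_sha_init() and lpfc_sha_iterate() below, which implement the standard
 * SHA-1 initial constants and 80-round compression function over the
 * challenge data prepared in lpfc_hba_init().
 */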
/************************************************************************/
/*                                                                      */
/*   lpfc_sha_init                                                      */
/*                                                                      */
/************************************************************************/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/************************************************************************/
/*                                                                      */
/*   lpfc_sha_iterate                                                   */
/*                                                                      */
/************************************************************************/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}

/************************************************************************/
/*                                                                      */
/*   lpfc_challenge_key                                                 */
/*                                                                      */
/************************************************************************/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/************************************************************************/
/*                                                                      */
/*   lpfc_hba_init                                                      */
/*                                                                      */
/************************************************************************/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = phba->wwnn;

	HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	memset(HashWorking, 0, (80 * sizeof(uint32_t)));
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

static void
lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	/* clean up phba - lpfc specific */
	lpfc_can_disctmo(phba);
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	INIT_LIST_HEAD(&phba->fc_nlpmap_list);
	INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
	INIT_LIST_HEAD(&phba->fc_unused_list);
	INIT_LIST_HEAD(&phba->fc_plogi_list);
	INIT_LIST_HEAD(&phba->fc_adisc_list);
	INIT_LIST_HEAD(&phba->fc_reglogin_list);
	INIT_LIST_HEAD(&phba->fc_prli_list);
	INIT_LIST_HEAD(&phba->fc_npr_list);

	phba->fc_map_cnt = 0;
	phba->fc_unmap_cnt = 0;
	phba->fc_plogi_cnt = 0;
	phba->fc_adisc_cnt = 0;
	phba->fc_reglogin_cnt = 0;
	phba->fc_prli_cnt = 0;
	phba->fc_npr_cnt = 0;
	phba->fc_unused_cnt = 0;
	return;
}

static void
lpfc_establish_link_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;

	/* Re-establishing Link, timer expired */
	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"%d:1300 Re-establishing Link, timer expired "
			"Data: x%x x%x\n",
			phba->brd_no, phba->fc_flag, phba->hba_state);
	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag &= ~FC_ESTABLISH_LINK;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static int
lpfc_stop_timer(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Instead of a timer, this has been converted to a
	 * deferred processing list.
	 */
	while (!list_empty(&phba->freebufList)) {

		struct lpfc_dmabuf *mp = NULL;

		list_remove_head((&phba->freebufList), mp,
				 struct lpfc_dmabuf, list);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	del_timer_sync(&phba->fcp_poll_timer);
	del_timer_sync(&phba->fc_estabtmo);
	del_timer_sync(&phba->fc_disctmo);
	del_timer_sync(&phba->fc_fdmitmo);
	del_timer_sync(&phba->els_tmofunc);
	psli = &phba->sli;
	del_timer_sync(&psli->mbox_tmo);
	return (1);
}

int
lpfc_online(struct lpfc_hba * phba)
{
	if (!phba)
		return 0;

	if (!(phba->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba,
			KERN_WARNING,
			LOG_INIT,
			"%d:0458 Bring Adapter online\n",
			phba->brd_no);

	if (!lpfc_sli_queue_setup(phba))
		return 1;

	if (lpfc_sli_hba_setup(phba))	/* Initialize the HBA */
		return 1;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_OFFLINE_MODE;
	spin_unlock_irq(phba->host->host_lock);

	return 0;
}

int
lpfc_offline(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	unsigned long iflag;
	int i;
	int cnt = 0;

	if (!phba)
		return 0;

	if (phba->fc_flag & FC_OFFLINE_MODE)
		return 0;

	psli = &phba->sli;

	lpfc_linkdown(phba);
	lpfc_sli_flush_mbox_queue(phba);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* The linkdown event takes 30 seconds to timeout. */
		while (pring->txcmplq_cnt) {
			mdelay(10);
			if (cnt++ > 3000) {
				lpfc_printf_log(phba,
					KERN_WARNING, LOG_INIT,
					"%d:0466 Outstanding IO when "
					"bringing Adapter offline\n",
					phba->brd_no);
				break;
			}
		}
	}

	/* stop all timers associated with this hba */
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;
	phba->work_ha = 0;

	lpfc_printf_log(phba,
			KERN_WARNING,
			LOG_INIT,
			"%d:0460 Bring Adapter offline\n",
			phba->brd_no);

	/* Bring down the SLI Layer and cleanup.  The HBA is offline now. */
	lpfc_sli_hba_down(phba);
	lpfc_cleanup(phba, 1);
	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag |= FC_OFFLINE_MODE;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return 0;
}

/******************************************************************************
 * Function name: lpfc_scsi_free
 *
 * Description: Called from lpfc_pci_remove_one to free internal driver
 *              resources
 *
 ******************************************************************************/
static int
lpfc_scsi_free(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(phba->host->host_lock);
	/* Release all the lpfc_scsi_bufs maintained by this host. */
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(phba->host->host_lock);

	return 0;
}


static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct Scsi_Host *host;
	struct lpfc_hba  *phba;
	struct lpfc_sli  *psli;
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
	unsigned long bar0map_len, bar2map_len;
	int error = -ENODEV, retval;
	int i;
	uint16_t iotag;

	if (pci_enable_device(pdev))
		goto out;
	if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;

	host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
	if (!host)
		goto out_release_regions;

	phba = (struct lpfc_hba*)host->hostdata;
	memset(phba, 0, sizeof (struct lpfc_hba));
	phba->host = host;

	phba->fc_flag |= FC_LOADING;
	phba->pcidev = pdev;

	/* Assign an unused board number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		goto out_put_host;

	error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
	if (error)
		goto out_put_host;

	host->unique_id = phba->brd_no;
	INIT_LIST_HEAD(&phba->ctrspbuflist);
	INIT_LIST_HEAD(&phba->rnidrspbuflist);
	INIT_LIST_HEAD(&phba->freebufList);

	/* Initialize timers used by driver */
	init_timer(&phba->fc_estabtmo);
	phba->fc_estabtmo.function = lpfc_establish_link_tmo;
	phba->fc_estabtmo.data = (unsigned long)phba;
	init_timer(&phba->fc_disctmo);
	phba->fc_disctmo.function = lpfc_disc_timeout;
	phba->fc_disctmo.data = (unsigned long)phba;

	init_timer(&phba->fc_fdmitmo);
	phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
	phba->fc_fdmitmo.data = (unsigned long)phba;
	init_timer(&phba->els_tmofunc);
	phba->els_tmofunc.function = lpfc_els_timeout;
	phba->els_tmofunc.data = (unsigned long)phba;
	psli = &phba->sli;
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long)phba;

	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long)phba;

	/*
	 * Get all the module params for configuring this host and then
	 * establish the host parameters.
	 */
	lpfc_get_cfgparam(phba);

	host->max_id = LPFC_MAX_TARGET;
	host->max_lun = phba->cfg_max_luns;
	host->this_id = -1;

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&phba->fc_nlpmap_list);
	INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
	INIT_LIST_HEAD(&phba->fc_unused_list);
	INIT_LIST_HEAD(&phba->fc_plogi_list);
	INIT_LIST_HEAD(&phba->fc_adisc_list);
	INIT_LIST_HEAD(&phba->fc_reglogin_list);
	INIT_LIST_HEAD(&phba->fc_prli_list);
	INIT_LIST_HEAD(&phba->fc_npr_list);


	pci_set_master(pdev);
	retval = pci_set_mwi(pdev);
	if (retval)
		dev_printk(KERN_WARNING, &pdev->dev,
			   "Warning: pci_set_mwi returned %d\n", retval);

	if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
		if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
			goto out_idr_remove;

	/*
	 * Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
	bar0map_len = pci_resource_len(phba->pcidev, 0);

	phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
	bar2map_len = pci_resource_len(phba->pcidev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		error = -ENODEV;
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out_idr_remove;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		error = -ENODEV;
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
					  &phba->slim2p_mapping, GFP_KERNEL);
	if (!phba->slim2p)
		goto out_iounmap;

	memset(phba->slim2p, 0, SLI2_SLIM_SIZE);

	/* Initialize the SLI Layer to run with lpfc HBAs. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	error = lpfc_mem_alloc(phba);
	if (error)
		goto out_free_slim;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
		iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__FUNCTION__, i, LPFC_IOCB_LIST_CNT);
			error = -ENOMEM;
			goto out_free_iocbq;
		}

		memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
			       "Unloading driver.\n",
				__FUNCTION__);
			error = -ENOMEM;
			goto out_free_iocbq;
		}
		spin_lock_irq(phba->host->host_lock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(phba->host->host_lock);
	}

	/* Initialize HBA structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	INIT_LIST_HEAD(&phba->work_list);
	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		goto out_free_iocbq;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	host->can_queue = phba->cfg_hba_queue_depth - 10;

	/* Tell the midlayer we support 16 byte commands */
	host->max_cmd_len = 16;

	/* Initialize the list of scsi buffers used by driver for scsi IO. */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

	host->transportt = lpfc_transport_template;
	pci_set_drvdata(pdev, host);
	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out_kthread_stop;

	error = lpfc_alloc_sysfs_attr(phba);
	if (error)
		goto out_remove_host;

	if (phba->cfg_use_msi) {
		error = pci_enable_msi(phba->pcidev);
		if (error)
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
					"Enable MSI failed, continuing with "
					"IRQ\n", phba->brd_no);
	}

	error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
			    LPFC_DRIVER_NAME, phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0451 Enable interrupt handler failed\n",
				phba->brd_no);
		goto out_free_sysfs_attr;
	}
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	error = lpfc_sli_hba_setup(phba);
	if (error) {
		error = -ENODEV;
		goto out_free_irq;
	}

	/*
	 * hba setup may have changed the hba_queue_depth so we need to adjust
	 * the value of can_queue.
	 */
	host->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_discovery_wait(phba);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(phba->host->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(phba->host->host_lock);
	}

	/*
	 * Set fixed host attributes.
	 * Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
	       sizeof(fc_host_supported_fc4s(host)));
	fc_host_supported_fc4s(host)[2] = 1;
	fc_host_supported_fc4s(host)[7] = 1;

	lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));

	fc_host_supported_speeds(host) = 0;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(host) =
		((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(host), 0,
	       sizeof(fc_host_active_fc4s(host)));
	fc_host_active_fc4s(host)[2] = 1;
	fc_host_active_fc4s(host)[7] = 1;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LOADING;
	spin_unlock_irq(phba->host->host_lock);
	return 0;

out_free_irq:
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(phba);
out_remove_host:
	fc_remove_host(phba->host);
	scsi_remove_host(phba->host);
out_kthread_stop:
	kthread_stop(phba->worker_thread);

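/*
 * The labels below unwind the probe steps in reverse order of allocation:
 * the iocb list, the driver memory pools, the SLI-2 DMA area, the register
 * mappings, the board number and finally the SCSI host reference.
 */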
out_free_iocbq:
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		spin_lock_irq(phba->host->host_lock);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
		spin_unlock_irq(phba->host->host_lock);
	}
	lpfc_mem_free(phba);
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
			  phba->slim2p_mapping);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out_idr_remove:
	idr_remove(&lpfc_hba_index, phba->brd_no);
out_put_host:
	phba->host = NULL;
	scsi_host_put(host);
out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	pci_set_drvdata(pdev, NULL);
	return error;
}

static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba  *phba = (struct lpfc_hba *)host->hostdata;
	unsigned long iflag;

	lpfc_free_sysfs_attr(phba);

	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag |= FC_UNLOADING;

	spin_unlock_irqrestore(phba->host->host_lock, iflag);

	fc_remove_host(phba->host);
	scsi_remove_host(phba->host);

	kthread_stop(phba->worker_thread);

	/*
	 * Bring down the SLI Layer.  This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */
	lpfc_sli_hba_down(phba);
	lpfc_sli_brdrestart(phba);

	/* Release the irq reservation */
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);

	lpfc_cleanup(phba, 0);
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free(phba);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p, phba->slim2p_mapping);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	pci_release_regions(phba->pcidev);
	pci_disable_device(phba->pcidev);

	idr_remove(&lpfc_hba_index, phba->brd_no);
	scsi_host_put(phba->host);

	pci_set_drvdata(pdev, NULL);
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
};

static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	lpfc_transport_template =
		fc_attach_transport(&lpfc_transport_functions);
	if (!lpfc_transport_template)
		return -ENOMEM;
	error = pci_register_driver(&lpfc_driver);
	if (error)
		fc_release_transport(lpfc_transport_template);

	return error;
}

static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);