1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/idr.h> 26 #include <linux/interrupt.h> 27 #include <linux/module.h> 28 #include <linux/kthread.h> 29 #include <linux/pci.h> 30 #include <linux/spinlock.h> 31 #include <linux/ctype.h> 32 #include <linux/aer.h> 33 #include <linux/slab.h> 34 #include <linux/firmware.h> 35 36 #include <scsi/scsi.h> 37 #include <scsi/scsi_device.h> 38 #include <scsi/scsi_host.h> 39 #include <scsi/scsi_transport_fc.h> 40 41 #include "lpfc_hw4.h" 42 #include "lpfc_hw.h" 43 #include "lpfc_sli.h" 44 #include "lpfc_sli4.h" 45 #include "lpfc_nl.h" 46 #include "lpfc_disc.h" 47 #include "lpfc_scsi.h" 48 #include "lpfc.h" 49 #include "lpfc_logmsg.h" 50 #include "lpfc_crtn.h" 51 #include "lpfc_vport.h" 52 #include "lpfc_version.h" 53 54 char *_dump_buf_data; 55 unsigned long _dump_buf_data_order; 56 char *_dump_buf_dif; 57 unsigned long _dump_buf_dif_order; 58 spinlock_t _dump_buf_lock; 59 60 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 61 static int lpfc_post_rcv_buf(struct lpfc_hba *); 62 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 63 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 64 static int lpfc_setup_endian_order(struct lpfc_hba *); 65 static int lpfc_sli4_read_config(struct lpfc_hba *); 66 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 67 static void lpfc_free_sgl_list(struct lpfc_hba *); 68 static int lpfc_init_sgl_list(struct lpfc_hba *); 69 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 70 static void lpfc_free_active_sgl(struct lpfc_hba *); 71 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 72 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 73 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 74 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 75 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 76 77 static struct scsi_transport_template *lpfc_transport_template = NULL; 78 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 79 static DEFINE_IDR(lpfc_hba_index); 80 81 /** 82 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 83 * @phba: pointer to lpfc hba data structure. 84 * 85 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 86 * mailbox command. 
It retrieves the revision information from the HBA and 87 * collects the Vital Product Data (VPD) about the HBA for preparing the 88 * configuration of the HBA. 89 * 90 * Return codes: 91 * 0 - success. 92 * -ERESTART - requests the SLI layer to reset the HBA and try again. 93 * Any other value - indicates an error. 94 **/ 95 int 96 lpfc_config_port_prep(struct lpfc_hba *phba) 97 { 98 lpfc_vpd_t *vp = &phba->vpd; 99 int i = 0, rc; 100 LPFC_MBOXQ_t *pmb; 101 MAILBOX_t *mb; 102 char *lpfc_vpd_data = NULL; 103 uint16_t offset = 0; 104 static char licensed[56] = 105 "key unlock for use with gnu public licensed code only\0"; 106 static int init_key = 1; 107 108 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 109 if (!pmb) { 110 phba->link_state = LPFC_HBA_ERROR; 111 return -ENOMEM; 112 } 113 114 mb = &pmb->u.mb; 115 phba->link_state = LPFC_INIT_MBX_CMDS; 116 117 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 118 if (init_key) { 119 uint32_t *ptext = (uint32_t *) licensed; 120 121 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 122 *ptext = cpu_to_be32(*ptext); 123 init_key = 0; 124 } 125 126 lpfc_read_nv(phba, pmb); 127 memset((char*)mb->un.varRDnvp.rsvd3, 0, 128 sizeof (mb->un.varRDnvp.rsvd3)); 129 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 130 sizeof (licensed)); 131 132 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 133 134 if (rc != MBX_SUCCESS) { 135 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 136 "0324 Config Port initialization " 137 "error, mbxCmd x%x READ_NVPARM, " 138 "mbxStatus x%x\n", 139 mb->mbxCommand, mb->mbxStatus); 140 mempool_free(pmb, phba->mbox_mem_pool); 141 return -ERESTART; 142 } 143 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 144 sizeof(phba->wwnn)); 145 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 146 sizeof(phba->wwpn)); 147 } 148 149 phba->sli3_options = 0x0; 150 151 /* Setup and issue mailbox READ REV command */ 152 lpfc_read_rev(phba, pmb); 153 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 154 if (rc != MBX_SUCCESS) { 155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 156 "0439 Adapter failed to init, mbxCmd x%x " 157 "READ_REV, mbxStatus x%x\n", 158 mb->mbxCommand, mb->mbxStatus); 159 mempool_free( pmb, phba->mbox_mem_pool); 160 return -ERESTART; 161 } 162 163 164 /* 165 * The value of rr must be 1 since the driver set the cv field to 1. 166 * This setting requires the FW to set all revision fields. 
167 */ 168 if (mb->un.varRdRev.rr == 0) { 169 vp->rev.rBit = 0; 170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 171 "0440 Adapter failed to init, READ_REV has " 172 "missing revision information.\n"); 173 mempool_free(pmb, phba->mbox_mem_pool); 174 return -ERESTART; 175 } 176 177 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 178 mempool_free(pmb, phba->mbox_mem_pool); 179 return -EINVAL; 180 } 181 182 /* Save information as VPD data */ 183 vp->rev.rBit = 1; 184 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 185 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 186 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 187 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 188 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 189 vp->rev.biuRev = mb->un.varRdRev.biuRev; 190 vp->rev.smRev = mb->un.varRdRev.smRev; 191 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 192 vp->rev.endecRev = mb->un.varRdRev.endecRev; 193 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 194 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 195 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 196 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 197 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 198 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 199 200 /* If the sli feature level is less than 9, we must 201 * tear down all RPIs and VPIs on link down if NPIV 202 * is enabled. 203 */ 204 if (vp->rev.feaLevelHigh < 9) 205 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 206 207 if (lpfc_is_LC_HBA(phba->pcidev->device)) 208 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 209 sizeof (phba->RandomData)); 210 211 /* Get adapter VPD information */ 212 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 213 if (!lpfc_vpd_data) 214 goto out_free_mbox; 215 do { 216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 218 219 if (rc != MBX_SUCCESS) { 220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 221 "0441 VPD not present on adapter, " 222 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 223 mb->mbxCommand, mb->mbxStatus); 224 mb->un.varDmp.word_cnt = 0; 225 } 226 /* dump mem may return a zero when finished or we got a 227 * mailbox error; either way we are done. 228 */ 229 if (mb->un.varDmp.word_cnt == 0) 230 break; 231 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 232 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 233 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 234 lpfc_vpd_data + offset, 235 mb->un.varDmp.word_cnt); 236 offset += mb->un.varDmp.word_cnt; 237 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 238 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 239 240 kfree(lpfc_vpd_data); 241 out_free_mbox: 242 mempool_free(pmb, phba->mbox_mem_pool); 243 return 0; 244 } 245 246 /** 247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 248 * @phba: pointer to lpfc hba data structure. 249 * @pmboxq: pointer to the driver internal queue element for mailbox command. 250 * 251 * This is the completion handler for the driver's configure asynchronous event 252 * mailbox command to the device. If the mailbox command returns successfully, 253 * it will set the internal async event support flag to 1; otherwise, it will 254 * set the internal async event support flag to 0.
255 **/ 256 static void 257 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 258 { 259 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 260 phba->temp_sensor_support = 1; 261 else 262 phba->temp_sensor_support = 0; 263 mempool_free(pmboxq, phba->mbox_mem_pool); 264 return; 265 } 266 267 /** 268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 269 * @phba: pointer to lpfc hba data structure. 270 * @pmboxq: pointer to the driver internal queue element for mailbox command. 271 * 272 * This is the completion handler for the dump mailbox command for getting 273 * wake up parameters. When this command completes, the response contains the 274 * Option ROM version of the HBA. This function translates the version number 275 * into a human readable string and stores it in OptionROMVersion. 276 **/ 277 static void 278 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 279 { 280 struct prog_id *prg; 281 uint32_t prog_id_word; 282 char dist = ' '; 283 /* character array used for decoding dist type. */ 284 char dist_char[] = "nabx"; 285 286 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 287 mempool_free(pmboxq, phba->mbox_mem_pool); 288 return; 289 } 290 291 prg = (struct prog_id *) &prog_id_word; 292 293 /* word 7 contains the option rom version */ 294 prog_id_word = pmboxq->u.mb.un.varWords[7]; 295 296 /* Decode the Option rom version word to a readable string */ 297 if (prg->dist < 4) 298 dist = dist_char[prg->dist]; 299 300 if ((prg->dist == 3) && (prg->num == 0)) 301 sprintf(phba->OptionROMVersion, "%d.%d%d", 302 prg->ver, prg->rev, prg->lev); 303 else 304 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d", 305 prg->ver, prg->rev, prg->lev, 306 dist, prg->num); 307 mempool_free(pmboxq, phba->mbox_mem_pool); 308 return; 309 } 310 311 /** 312 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 313 * cfg_soft_wwnn, cfg_soft_wwpn 314 * @vport: pointer to lpfc vport data structure. 315 * 316 * 317 * Return codes 318 * None. 319 **/ 320 void 321 lpfc_update_vport_wwn(struct lpfc_vport *vport) 322 { 323 /* If the soft name exists then update it using the service params */ 324 if (vport->phba->cfg_soft_wwnn) 325 u64_to_wwn(vport->phba->cfg_soft_wwnn, 326 vport->fc_sparam.nodeName.u.wwn); 327 if (vport->phba->cfg_soft_wwpn) 328 u64_to_wwn(vport->phba->cfg_soft_wwpn, 329 vport->fc_sparam.portName.u.wwn); 330 331 /* 332 * If the name is empty or there exists a soft name 333 * then copy the service params name, otherwise use the fc name 334 */ 335 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 336 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 337 sizeof(struct lpfc_name)); 338 else 339 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 340 sizeof(struct lpfc_name)); 341 342 if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn) 343 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 344 sizeof(struct lpfc_name)); 345 else 346 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 347 sizeof(struct lpfc_name)); 348 } 349 350 /** 351 * lpfc_config_port_post - Perform lpfc initialization after config port 352 * @phba: pointer to lpfc hba data structure. 353 * 354 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 355 * command call. It performs all internal resource and state setups on the 356 * port: post IOCB buffers, enable appropriate host interrupt attentions, 357 * ELS ring timers, etc. 358 * 359 * Return codes 360 * 0 - success. 361 * Any other value - error.
362 **/ 363 int 364 lpfc_config_port_post(struct lpfc_hba *phba) 365 { 366 struct lpfc_vport *vport = phba->pport; 367 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 368 LPFC_MBOXQ_t *pmb; 369 MAILBOX_t *mb; 370 struct lpfc_dmabuf *mp; 371 struct lpfc_sli *psli = &phba->sli; 372 uint32_t status, timeout; 373 int i, j; 374 int rc; 375 376 spin_lock_irq(&phba->hbalock); 377 /* 378 * If the Config port completed correctly the HBA is not 379 * overheated anymore. 380 */ 381 if (phba->over_temp_state == HBA_OVER_TEMP) 382 phba->over_temp_state = HBA_NORMAL_TEMP; 383 spin_unlock_irq(&phba->hbalock); 384 385 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 386 if (!pmb) { 387 phba->link_state = LPFC_HBA_ERROR; 388 return -ENOMEM; 389 } 390 mb = &pmb->u.mb; 391 392 /* Get login parameters for NID. */ 393 rc = lpfc_read_sparam(phba, pmb, 0); 394 if (rc) { 395 mempool_free(pmb, phba->mbox_mem_pool); 396 return -ENOMEM; 397 } 398 399 pmb->vport = vport; 400 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 402 "0448 Adapter failed init, mbxCmd x%x " 403 "READ_SPARM mbxStatus x%x\n", 404 mb->mbxCommand, mb->mbxStatus); 405 phba->link_state = LPFC_HBA_ERROR; 406 mp = (struct lpfc_dmabuf *) pmb->context1; 407 mempool_free(pmb, phba->mbox_mem_pool); 408 lpfc_mbuf_free(phba, mp->virt, mp->phys); 409 kfree(mp); 410 return -EIO; 411 } 412 413 mp = (struct lpfc_dmabuf *) pmb->context1; 414 415 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 416 lpfc_mbuf_free(phba, mp->virt, mp->phys); 417 kfree(mp); 418 pmb->context1 = NULL; 419 lpfc_update_vport_wwn(vport); 420 421 /* Update the fc_host data structures with new wwn. */ 422 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 423 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 424 fc_host_max_npiv_vports(shost) = phba->max_vpi; 425 426 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 427 /* This should be consolidated into parse_vpd ?
- mr */ 428 if (phba->SerialNumber[0] == 0) { 429 uint8_t *outptr; 430 431 outptr = &vport->fc_nodename.u.s.IEEE[0]; 432 for (i = 0; i < 12; i++) { 433 status = *outptr++; 434 j = ((status & 0xf0) >> 4); 435 if (j <= 9) 436 phba->SerialNumber[i] = 437 (char)((uint8_t) 0x30 + (uint8_t) j); 438 else 439 phba->SerialNumber[i] = 440 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 441 i++; 442 j = (status & 0xf); 443 if (j <= 9) 444 phba->SerialNumber[i] = 445 (char)((uint8_t) 0x30 + (uint8_t) j); 446 else 447 phba->SerialNumber[i] = 448 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 449 } 450 } 451 452 lpfc_read_config(phba, pmb); 453 pmb->vport = vport; 454 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 456 "0453 Adapter failed to init, mbxCmd x%x " 457 "READ_CONFIG, mbxStatus x%x\n", 458 mb->mbxCommand, mb->mbxStatus); 459 phba->link_state = LPFC_HBA_ERROR; 460 mempool_free( pmb, phba->mbox_mem_pool); 461 return -EIO; 462 } 463 464 /* Check if the port is disabled */ 465 lpfc_sli_read_link_ste(phba); 466 467 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 468 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 469 phba->cfg_hba_queue_depth = 470 (mb->un.varRdConfig.max_xri + 1) - 471 lpfc_sli4_get_els_iocb_cnt(phba); 472 473 phba->lmt = mb->un.varRdConfig.lmt; 474 475 /* Get the default values for Model Name and Description */ 476 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 477 478 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G) 479 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) 480 && !(phba->lmt & LMT_1Gb)) 481 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) 482 && !(phba->lmt & LMT_2Gb)) 483 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) 484 && !(phba->lmt & LMT_4Gb)) 485 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) 486 && !(phba->lmt & LMT_8Gb)) 487 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) 488 && !(phba->lmt & LMT_10Gb)) 489 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) 490 && !(phba->lmt & LMT_16Gb))) { 491 /* Reset link speed to auto */ 492 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 493 "1302 Invalid speed for this board: " 494 "Reset link speed to auto: x%x\n", 495 phba->cfg_link_speed); 496 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 497 } 498 499 phba->link_state = LPFC_LINK_DOWN; 500 501 /* Only process IOCBs on ELS ring till hba_state is READY */ 502 if (psli->ring[psli->extra_ring].cmdringaddr) 503 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 504 if (psli->ring[psli->fcp_ring].cmdringaddr) 505 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 506 if (psli->ring[psli->next_ring].cmdringaddr) 507 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 508 509 /* Post receive buffers for desired rings */ 510 if (phba->sli_rev != 3) 511 lpfc_post_rcv_buf(phba); 512 513 /* 514 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 515 */ 516 if (phba->intr_type == MSIX) { 517 rc = lpfc_config_msi(phba, pmb); 518 if (rc) { 519 mempool_free(pmb, phba->mbox_mem_pool); 520 return -EIO; 521 } 522 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 523 if (rc != MBX_SUCCESS) { 524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 525 "0352 Config MSI mailbox command " 526 "failed, mbxCmd x%x, mbxStatus x%x\n", 527 pmb->u.mb.mbxCommand, 528 pmb->u.mb.mbxStatus); 529 mempool_free(pmb, phba->mbox_mem_pool); 530 return -EIO; 531 } 532 } 533 534 spin_lock_irq(&phba->hbalock); 535 /* Initialize ERATT handling flag */ 
536 phba->hba_flag &= ~HBA_ERATT_HANDLED; 537 538 /* Enable appropriate host interrupts */ 539 if (lpfc_readl(phba->HCregaddr, &status)) { 540 spin_unlock_irq(&phba->hbalock); 541 return -EIO; 542 } 543 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 544 if (psli->num_rings > 0) 545 status |= HC_R0INT_ENA; 546 if (psli->num_rings > 1) 547 status |= HC_R1INT_ENA; 548 if (psli->num_rings > 2) 549 status |= HC_R2INT_ENA; 550 if (psli->num_rings > 3) 551 status |= HC_R3INT_ENA; 552 553 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 554 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 555 status &= ~(HC_R0INT_ENA); 556 557 writel(status, phba->HCregaddr); 558 readl(phba->HCregaddr); /* flush */ 559 spin_unlock_irq(&phba->hbalock); 560 561 /* Set up ring-0 (ELS) timer */ 562 timeout = phba->fc_ratov * 2; 563 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 564 /* Set up heart beat (HB) timer */ 565 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 566 phba->hb_outstanding = 0; 567 phba->last_completion_time = jiffies; 568 /* Set up error attention (ERATT) polling timer */ 569 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 570 571 if (phba->hba_flag & LINK_DISABLED) { 572 lpfc_printf_log(phba, 573 KERN_ERR, LOG_INIT, 574 "2598 Adapter Link is disabled.\n"); 575 lpfc_down_link(phba, pmb); 576 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 577 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 578 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 579 lpfc_printf_log(phba, 580 KERN_ERR, LOG_INIT, 581 "2599 Adapter failed to issue DOWN_LINK" 582 " mbox command rc 0x%x\n", rc); 583 584 mempool_free(pmb, phba->mbox_mem_pool); 585 return -EIO; 586 } 587 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 588 lpfc_init_link(phba, pmb, phba->cfg_topology, 589 phba->cfg_link_speed); 590 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 591 lpfc_set_loopback_flag(phba); 592 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 593 if (rc != MBX_SUCCESS) { 594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 595 "0454 Adapter failed to init, mbxCmd x%x " 596 "INIT_LINK, mbxStatus x%x\n", 597 mb->mbxCommand, mb->mbxStatus); 598 599 /* Clear all interrupt enable conditions */ 600 writel(0, phba->HCregaddr); 601 readl(phba->HCregaddr); /* flush */ 602 /* Clear all pending interrupts */ 603 writel(0xffffffff, phba->HAregaddr); 604 readl(phba->HAregaddr); /* flush */ 605 phba->link_state = LPFC_HBA_ERROR; 606 if (rc != MBX_BUSY) 607 mempool_free(pmb, phba->mbox_mem_pool); 608 return -EIO; 609 } 610 } 611 /* MBOX buffer will be freed in mbox compl */ 612 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 613 if (!pmb) { 614 phba->link_state = LPFC_HBA_ERROR; 615 return -ENOMEM; 616 } 617 618 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 619 pmb->mbox_cmpl = lpfc_config_async_cmpl; 620 pmb->vport = phba->pport; 621 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 622 623 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 624 lpfc_printf_log(phba, 625 KERN_ERR, 626 LOG_INIT, 627 "0456 Adapter failed to issue " 628 "ASYNCEVT_ENABLE mbox status x%x\n", 629 rc); 630 mempool_free(pmb, phba->mbox_mem_pool); 631 } 632 633 /* Get Option rom version */ 634 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 635 if (!pmb) { 636 phba->link_state = LPFC_HBA_ERROR; 637 return -ENOMEM; 638 } 639 640 lpfc_dump_wakeup_param(phba, pmb); 641 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 642 pmb->vport = phba->pport; 643 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 644 645 if ((rc != MBX_BUSY) && 
(rc != MBX_SUCCESS)) { 646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 647 "to get Option ROM version status x%x\n", rc); 648 mempool_free(pmb, phba->mbox_mem_pool); 649 } 650 651 return 0; 652 } 653 654 /** 655 * lpfc_hba_init_link - Initialize the FC link 656 * @phba: pointer to lpfc hba data structure. 657 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 658 * 659 * This routine will issue the INIT_LINK mailbox command call. 660 * It is available to other drivers through the lpfc_hba data 661 * structure for use as a delayed link up mechanism with the 662 * module parameter lpfc_suppress_link_up. 663 * 664 * Return code 665 * 0 - success 666 * Any other value - error 667 **/ 668 int 669 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 670 { 671 struct lpfc_vport *vport = phba->pport; 672 LPFC_MBOXQ_t *pmb; 673 MAILBOX_t *mb; 674 int rc; 675 676 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 677 if (!pmb) { 678 phba->link_state = LPFC_HBA_ERROR; 679 return -ENOMEM; 680 } 681 mb = &pmb->u.mb; 682 pmb->vport = vport; 683 684 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 685 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 686 lpfc_set_loopback_flag(phba); 687 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 688 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 689 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 690 "0498 Adapter failed to init, mbxCmd x%x " 691 "INIT_LINK, mbxStatus x%x\n", 692 mb->mbxCommand, mb->mbxStatus); 693 if (phba->sli_rev <= LPFC_SLI_REV3) { 694 /* Clear all interrupt enable conditions */ 695 writel(0, phba->HCregaddr); 696 readl(phba->HCregaddr); /* flush */ 697 /* Clear all pending interrupts */ 698 writel(0xffffffff, phba->HAregaddr); 699 readl(phba->HAregaddr); /* flush */ 700 } 701 phba->link_state = LPFC_HBA_ERROR; 702 if (rc != MBX_BUSY || flag == MBX_POLL) 703 mempool_free(pmb, phba->mbox_mem_pool); 704 return -EIO; 705 } 706 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 707 if (flag == MBX_POLL) 708 mempool_free(pmb, phba->mbox_mem_pool); 709 710 return 0; 711 } 712 713 /** 714 * lpfc_hba_down_link - this routine downs the FC link 715 * @phba: pointer to lpfc hba data structure. 716 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 717 * 718 * This routine will issue the DOWN_LINK mailbox command call. 719 * It is available to other drivers through the lpfc_hba data 720 * structure for use to stop the link. 721 * 722 * Return code 723 * 0 - success 724 * Any other value - error 725 **/ 726 int 727 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 728 { 729 LPFC_MBOXQ_t *pmb; 730 int rc; 731 732 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 733 if (!pmb) { 734 phba->link_state = LPFC_HBA_ERROR; 735 return -ENOMEM; 736 } 737 738 lpfc_printf_log(phba, 739 KERN_ERR, LOG_INIT, 740 "0491 Adapter Link is disabled.\n"); 741 lpfc_down_link(phba, pmb); 742 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 743 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 744 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 745 lpfc_printf_log(phba, 746 KERN_ERR, LOG_INIT, 747 "2522 Adapter failed to issue DOWN_LINK" 748 " mbox command rc 0x%x\n", rc); 749 750 mempool_free(pmb, phba->mbox_mem_pool); 751 return -EIO; 752 } 753 if (flag == MBX_POLL) 754 mempool_free(pmb, phba->mbox_mem_pool); 755 756 return 0; 757 } 758 759 /** 760 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 761 * @phba: pointer to lpfc HBA data structure. 
762 * 763 * This routine will do LPFC uninitialization before the HBA is reset when 764 * bringing down the SLI Layer. 765 * 766 * Return codes 767 * 0 - success. 768 * Any other value - error. 769 **/ 770 int 771 lpfc_hba_down_prep(struct lpfc_hba *phba) 772 { 773 struct lpfc_vport **vports; 774 int i; 775 776 if (phba->sli_rev <= LPFC_SLI_REV3) { 777 /* Disable interrupts */ 778 writel(0, phba->HCregaddr); 779 readl(phba->HCregaddr); /* flush */ 780 } 781 782 if (phba->pport->load_flag & FC_UNLOADING) 783 lpfc_cleanup_discovery_resources(phba->pport); 784 else { 785 vports = lpfc_create_vport_work_array(phba); 786 if (vports != NULL) 787 for (i = 0; i <= phba->max_vports && 788 vports[i] != NULL; i++) 789 lpfc_cleanup_discovery_resources(vports[i]); 790 lpfc_destroy_vport_work_array(phba, vports); 791 } 792 return 0; 793 } 794 795 /** 796 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 797 * @phba: pointer to lpfc HBA data structure. 798 * 799 * This routine will do uninitialization after the HBA is reset when bringing 800 * down the SLI Layer. 801 * 802 * Return codes 803 * 0 - success. 804 * Any other value - error. 805 **/ 806 static int 807 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 808 { 809 struct lpfc_sli *psli = &phba->sli; 810 struct lpfc_sli_ring *pring; 811 struct lpfc_dmabuf *mp, *next_mp; 812 LIST_HEAD(completions); 813 int i; 814 815 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 816 lpfc_sli_hbqbuf_free_all(phba); 817 else { 818 /* Cleanup preposted buffers on the ELS ring */ 819 pring = &psli->ring[LPFC_ELS_RING]; 820 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 821 list_del(&mp->list); 822 pring->postbufq_cnt--; 823 lpfc_mbuf_free(phba, mp->virt, mp->phys); 824 kfree(mp); 825 } 826 } 827 828 spin_lock_irq(&phba->hbalock); 829 for (i = 0; i < psli->num_rings; i++) { 830 pring = &psli->ring[i]; 831 832 /* At this point in time the HBA is either reset or DOA. Either 833 * way, nothing should be on txcmplq as it will NEVER complete. 834 */ 835 list_splice_init(&pring->txcmplq, &completions); 836 pring->txcmplq_cnt = 0; 837 spin_unlock_irq(&phba->hbalock); 838 839 /* Cancel all the IOCBs from the completions list */ 840 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 841 IOERR_SLI_ABORTED); 842 843 lpfc_sli_abort_iocb_ring(phba, pring); 844 spin_lock_irq(&phba->hbalock); 845 } 846 spin_unlock_irq(&phba->hbalock); 847 848 return 0; 849 } 850 851 /** 852 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 853 * @phba: pointer to lpfc HBA data structure. 854 * 855 * This routine will do uninitialization after the HBA is reset when bringing 856 * down the SLI Layer. 857 * 858 * Return codes 859 * 0 - success. 860 * Any other value - error. 861 **/ 862 static int 863 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 864 { 865 struct lpfc_scsi_buf *psb, *psb_next; 866 LIST_HEAD(aborts); 867 int ret; 868 unsigned long iflag = 0; 869 struct lpfc_sglq *sglq_entry = NULL; 870 871 ret = lpfc_hba_down_post_s3(phba); 872 if (ret) 873 return ret; 874 /* At this point in time the HBA is either reset or DOA. Either 875 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be 876 * on the lpfc_sgl_list so that it can either be freed if the 877 * driver is unloading or reposted if the driver is restarting 878 * the port. 879 */ 880 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ 881 /* scsi_buf_list */ 882 /* abts_sgl_list_lock required because worker thread uses this 883 * list.
884 */ 885 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 886 list_for_each_entry(sglq_entry, 887 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 888 sglq_entry->state = SGL_FREED; 889 890 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 891 &phba->sli4_hba.lpfc_sgl_list); 892 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 893 /* abts_scsi_buf_list_lock required because worker thread uses this 894 * list. 895 */ 896 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 897 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 898 &aborts); 899 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 900 spin_unlock_irq(&phba->hbalock); 901 902 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 903 psb->pCmd = NULL; 904 psb->status = IOSTAT_SUCCESS; 905 } 906 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 907 list_splice(&aborts, &phba->lpfc_scsi_buf_list); 908 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 909 return 0; 910 } 911 912 /** 913 * lpfc_hba_down_post - Wrapper func for hba down post routine 914 * @phba: pointer to lpfc HBA data structure. 915 * 916 * This routine wraps the actual SLI3 or SLI4 routine for performing 917 * uninitialization after the HBA is reset when bring down the SLI Layer. 918 * 919 * Return codes 920 * 0 - success. 921 * Any other value - error. 922 **/ 923 int 924 lpfc_hba_down_post(struct lpfc_hba *phba) 925 { 926 return (*phba->lpfc_hba_down_post)(phba); 927 } 928 929 /** 930 * lpfc_hb_timeout - The HBA-timer timeout handler 931 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 932 * 933 * This is the HBA-timer timeout handler registered to the lpfc driver. When 934 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 935 * work-port-events bitmap and the worker thread is notified. This timeout 936 * event will be used by the worker thread to invoke the actual timeout 937 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 938 * be performed in the timeout handler and the HBA timeout event bit shall 939 * be cleared by the worker thread after it has taken the event bitmap out. 940 **/ 941 static void 942 lpfc_hb_timeout(unsigned long ptr) 943 { 944 struct lpfc_hba *phba; 945 uint32_t tmo_posted; 946 unsigned long iflag; 947 948 phba = (struct lpfc_hba *)ptr; 949 950 /* Check for heart beat timeout conditions */ 951 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 952 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 953 if (!tmo_posted) 954 phba->pport->work_port_events |= WORKER_HB_TMO; 955 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 956 957 /* Tell the worker thread there is work to do */ 958 if (!tmo_posted) 959 lpfc_worker_wake_up(phba); 960 return; 961 } 962 963 /** 964 * lpfc_rrq_timeout - The RRQ-timer timeout handler 965 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 966 * 967 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 968 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 969 * work-port-events bitmap and the worker thread is notified. This timeout 970 * event will be used by the worker thread to invoke the actual timeout 971 * handler routine, lpfc_rrq_handler. Any periodical operations will 972 * be performed in the timeout handler and the RRQ timeout event bit shall 973 * be cleared by the worker thread after it has taken the event bitmap out. 
974 **/ 975 static void 976 lpfc_rrq_timeout(unsigned long ptr) 977 { 978 struct lpfc_hba *phba; 979 unsigned long iflag; 980 981 phba = (struct lpfc_hba *)ptr; 982 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 983 phba->hba_flag |= HBA_RRQ_ACTIVE; 984 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 985 lpfc_worker_wake_up(phba); 986 } 987 988 /** 989 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 990 * @phba: pointer to lpfc hba data structure. 991 * @pmboxq: pointer to the driver internal queue element for mailbox command. 992 * 993 * This is the callback function to the lpfc heart-beat mailbox command. 994 * If configured, the lpfc driver issues the heart-beat mailbox command to 995 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the 996 * heart-beat mailbox command is issued, the driver shall set up the heart-beat 997 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the 998 * heart-beat outstanding state. Once the mailbox command comes back and 999 * no error conditions are detected, the heart-beat mailbox command timer is 1000 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1001 * state is cleared for the next heart-beat. If the timer expires with the 1002 * heart-beat outstanding state set, the driver will put the HBA offline. 1003 **/ 1004 static void 1005 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1006 { 1007 unsigned long drvr_flag; 1008 1009 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1010 phba->hb_outstanding = 0; 1011 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1012 1013 /* Check and reset heart-beat timer if necessary */ 1014 mempool_free(pmboxq, phba->mbox_mem_pool); 1015 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1016 !(phba->link_state == LPFC_HBA_ERROR) && 1017 !(phba->pport->load_flag & FC_UNLOADING)) 1018 mod_timer(&phba->hb_tmofunc, 1019 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1020 return; 1021 } 1022 1023 /** 1024 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1025 * @phba: pointer to lpfc hba data structure. 1026 * 1027 * This is the actual HBA-timer timeout handler to be invoked by the worker 1028 * thread whenever the HBA timer fires and an HBA-timeout event is posted. This 1029 * handler performs any periodic operations needed for the device. If such 1030 * periodic event has already been attended to either in the interrupt handler 1031 * or by processing slow-ring or fast-ring events within the HBA-timer 1032 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets 1033 * the timer for the next timeout period. If the lpfc heart-beat mailbox command 1034 * is configured and there is no heart-beat mailbox command outstanding, a 1035 * heart-beat mailbox is issued and the timer is set properly. Otherwise, if there 1036 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1037 * offline.
1038 **/ 1039 void 1040 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1041 { 1042 struct lpfc_vport **vports; 1043 LPFC_MBOXQ_t *pmboxq; 1044 struct lpfc_dmabuf *buf_ptr; 1045 int retval, i; 1046 struct lpfc_sli *psli = &phba->sli; 1047 LIST_HEAD(completions); 1048 1049 vports = lpfc_create_vport_work_array(phba); 1050 if (vports != NULL) 1051 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 1052 lpfc_rcv_seq_check_edtov(vports[i]); 1053 lpfc_destroy_vport_work_array(phba, vports); 1054 1055 if ((phba->link_state == LPFC_HBA_ERROR) || 1056 (phba->pport->load_flag & FC_UNLOADING) || 1057 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1058 return; 1059 1060 spin_lock_irq(&phba->pport->work_port_lock); 1061 1062 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 1063 jiffies)) { 1064 spin_unlock_irq(&phba->pport->work_port_lock); 1065 if (!phba->hb_outstanding) 1066 mod_timer(&phba->hb_tmofunc, 1067 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1068 else 1069 mod_timer(&phba->hb_tmofunc, 1070 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1071 return; 1072 } 1073 spin_unlock_irq(&phba->pport->work_port_lock); 1074 1075 if (phba->elsbuf_cnt && 1076 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1077 spin_lock_irq(&phba->hbalock); 1078 list_splice_init(&phba->elsbuf, &completions); 1079 phba->elsbuf_cnt = 0; 1080 phba->elsbuf_prev_cnt = 0; 1081 spin_unlock_irq(&phba->hbalock); 1082 1083 while (!list_empty(&completions)) { 1084 list_remove_head(&completions, buf_ptr, 1085 struct lpfc_dmabuf, list); 1086 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1087 kfree(buf_ptr); 1088 } 1089 } 1090 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1091 1092 /* If there is no heart beat outstanding, issue a heartbeat command */ 1093 if (phba->cfg_enable_hba_heartbeat) { 1094 if (!phba->hb_outstanding) { 1095 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1096 (list_empty(&psli->mboxq))) { 1097 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1098 GFP_KERNEL); 1099 if (!pmboxq) { 1100 mod_timer(&phba->hb_tmofunc, 1101 jiffies + 1102 HZ * LPFC_HB_MBOX_INTERVAL); 1103 return; 1104 } 1105 1106 lpfc_heart_beat(phba, pmboxq); 1107 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1108 pmboxq->vport = phba->pport; 1109 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1110 MBX_NOWAIT); 1111 1112 if (retval != MBX_BUSY && 1113 retval != MBX_SUCCESS) { 1114 mempool_free(pmboxq, 1115 phba->mbox_mem_pool); 1116 mod_timer(&phba->hb_tmofunc, 1117 jiffies + 1118 HZ * LPFC_HB_MBOX_INTERVAL); 1119 return; 1120 } 1121 phba->skipped_hb = 0; 1122 phba->hb_outstanding = 1; 1123 } else if (time_before_eq(phba->last_completion_time, 1124 phba->skipped_hb)) { 1125 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1126 "2857 Last completion time not " 1127 " updated in %d ms\n", 1128 jiffies_to_msecs(jiffies 1129 - phba->last_completion_time)); 1130 } else 1131 phba->skipped_hb = jiffies; 1132 1133 mod_timer(&phba->hb_tmofunc, 1134 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1135 return; 1136 } else { 1137 /* 1138 * If heart beat timeout called with hb_outstanding set 1139 * we need to give the hb mailbox cmd a chance to 1140 * complete or TMO. 
1141 */ 1142 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1143 "0459 Adapter heartbeat still out" 1144 "standing: last compl time was %d ms.\n", 1145 jiffies_to_msecs(jiffies 1146 - phba->last_completion_time)); 1147 mod_timer(&phba->hb_tmofunc, 1148 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1149 } 1150 } 1151 } 1152 1153 /** 1154 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1155 * @phba: pointer to lpfc hba data structure. 1156 * 1157 * This routine is called to bring the HBA offline when an HBA hardware error 1158 * other than Port Error 6 has been detected. 1159 **/ 1160 static void 1161 lpfc_offline_eratt(struct lpfc_hba *phba) 1162 { 1163 struct lpfc_sli *psli = &phba->sli; 1164 1165 spin_lock_irq(&phba->hbalock); 1166 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1167 spin_unlock_irq(&phba->hbalock); 1168 lpfc_offline_prep(phba); 1169 1170 lpfc_offline(phba); 1171 lpfc_reset_barrier(phba); 1172 spin_lock_irq(&phba->hbalock); 1173 lpfc_sli_brdreset(phba); 1174 spin_unlock_irq(&phba->hbalock); 1175 lpfc_hba_down_post(phba); 1176 lpfc_sli_brdready(phba, HS_MBRDY); 1177 lpfc_unblock_mgmt_io(phba); 1178 phba->link_state = LPFC_HBA_ERROR; 1179 return; 1180 } 1181 1182 /** 1183 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1184 * @phba: pointer to lpfc hba data structure. 1185 * 1186 * This routine is called to bring a SLI4 HBA offline when an HBA hardware error 1187 * other than Port Error 6 has been detected. 1188 **/ 1189 static void 1190 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1191 { 1192 lpfc_offline_prep(phba); 1193 lpfc_offline(phba); 1194 lpfc_sli4_brdreset(phba); 1195 lpfc_hba_down_post(phba); 1196 lpfc_sli4_post_status_check(phba); 1197 lpfc_unblock_mgmt_io(phba); 1198 phba->link_state = LPFC_HBA_ERROR; 1199 } 1200 1201 /** 1202 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1203 * @phba: pointer to lpfc hba data structure. 1204 * 1205 * This routine is invoked to handle the deferred HBA hardware error 1206 * conditions. This type of error is indicated by the HBA setting ER1 1207 * and another ER bit in the host status register. The driver will 1208 * wait until the ER1 bit clears before handling the error condition. 1209 **/ 1210 static void 1211 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1212 { 1213 uint32_t old_host_status = phba->work_hs; 1214 struct lpfc_sli_ring *pring; 1215 struct lpfc_sli *psli = &phba->sli; 1216 1217 /* If the pci channel is offline, ignore possible errors, 1218 * since we cannot communicate with the pci card anyway. 1219 */ 1220 if (pci_channel_offline(phba->pcidev)) { 1221 spin_lock_irq(&phba->hbalock); 1222 phba->hba_flag &= ~DEFER_ERATT; 1223 spin_unlock_irq(&phba->hbalock); 1224 return; 1225 } 1226 1227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1228 "0479 Deferred Adapter Hardware Error " 1229 "Data: x%x x%x x%x\n", 1230 phba->work_hs, 1231 phba->work_status[0], phba->work_status[1]); 1232 1233 spin_lock_irq(&phba->hbalock); 1234 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1235 spin_unlock_irq(&phba->hbalock); 1236 1237 1238 /* 1239 * Firmware stops when it triggered erratt. That could cause the I/Os 1240 * to be dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq 1241 * and let the SCSI layer retry them after re-establishing the link. 1242 */ 1243 pring = &psli->ring[psli->fcp_ring]; 1244 lpfc_sli_abort_iocb_ring(phba, pring); 1245 1246 /* 1247 * There was a firmware error. Take the hba offline and then 1248 * attempt to restart it.
1249 */ 1250 lpfc_offline_prep(phba); 1251 lpfc_offline(phba); 1252 1253 /* Wait for the ER1 bit to clear. */ 1254 while (phba->work_hs & HS_FFER1) { 1255 msleep(100); 1256 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1257 phba->work_hs = UNPLUG_ERR; 1258 break; 1259 } 1260 /* If driver is unloading let the worker thread continue */ 1261 if (phba->pport->load_flag & FC_UNLOADING) { 1262 phba->work_hs = 0; 1263 break; 1264 } 1265 } 1266 1267 /* 1268 * This is to protect against a race condition in which the 1269 * first write to the host attention register clears the 1270 * host status register. 1271 */ 1272 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1273 phba->work_hs = old_host_status & ~HS_FFER1; 1274 1275 spin_lock_irq(&phba->hbalock); 1276 phba->hba_flag &= ~DEFER_ERATT; 1277 spin_unlock_irq(&phba->hbalock); 1278 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1279 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1280 } 1281 1282 static void 1283 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1284 { 1285 struct lpfc_board_event_header board_event; 1286 struct Scsi_Host *shost; 1287 1288 board_event.event_type = FC_REG_BOARD_EVENT; 1289 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1290 shost = lpfc_shost_from_vport(phba->pport); 1291 fc_host_post_vendor_event(shost, fc_get_event_number(), 1292 sizeof(board_event), 1293 (char *) &board_event, 1294 LPFC_NL_VENDOR_ID); 1295 } 1296 1297 /** 1298 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1299 * @phba: pointer to lpfc hba data structure. 1300 * 1301 * This routine is invoked to handle the following HBA hardware error 1302 * conditions: 1303 * 1 - HBA error attention interrupt 1304 * 2 - DMA ring index out of range 1305 * 3 - Mailbox command came back as unknown 1306 **/ 1307 static void 1308 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1309 { 1310 struct lpfc_vport *vport = phba->pport; 1311 struct lpfc_sli *psli = &phba->sli; 1312 struct lpfc_sli_ring *pring; 1313 uint32_t event_data; 1314 unsigned long temperature; 1315 struct temp_event temp_event_data; 1316 struct Scsi_Host *shost; 1317 1318 /* If the pci channel is offline, ignore possible errors, 1319 * since we cannot communicate with the pci card anyway. 1320 */ 1321 if (pci_channel_offline(phba->pcidev)) { 1322 spin_lock_irq(&phba->hbalock); 1323 phba->hba_flag &= ~DEFER_ERATT; 1324 spin_unlock_irq(&phba->hbalock); 1325 return; 1326 } 1327 1328 /* If resets are disabled then leave the HBA alone and return */ 1329 if (!phba->cfg_enable_hba_reset) 1330 return; 1331 1332 /* Send an internal error event to mgmt application */ 1333 lpfc_board_errevt_to_mgmt(phba); 1334 1335 if (phba->hba_flag & DEFER_ERATT) 1336 lpfc_handle_deferred_eratt(phba); 1337 1338 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1339 if (phba->work_hs & HS_FFER6) 1340 /* Re-establishing Link */ 1341 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1342 "1301 Re-establishing Link " 1343 "Data: x%x x%x x%x\n", 1344 phba->work_hs, phba->work_status[0], 1345 phba->work_status[1]); 1346 if (phba->work_hs & HS_FFER8) 1347 /* Device Zeroization */ 1348 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1349 "2861 Host Authentication device " 1350 "zeroization Data:x%x x%x x%x\n", 1351 phba->work_hs, phba->work_status[0], 1352 phba->work_status[1]); 1353 1354 spin_lock_irq(&phba->hbalock); 1355 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1356 spin_unlock_irq(&phba->hbalock); 1357 1358 /* 1359 * Firmware stops when it triggered erratt with HS_FFER6.
* That could cause the I/Os to be dropped by the firmware. 1361 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer 1362 * retry them after re-establishing the link. 1363 */ 1364 pring = &psli->ring[psli->fcp_ring]; 1365 lpfc_sli_abort_iocb_ring(phba, pring); 1366 1367 /* 1368 * There was a firmware error. Take the hba offline and then 1369 * attempt to restart it. 1370 */ 1371 lpfc_offline_prep(phba); 1372 lpfc_offline(phba); 1373 lpfc_sli_brdrestart(phba); 1374 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1375 lpfc_unblock_mgmt_io(phba); 1376 return; 1377 } 1378 lpfc_unblock_mgmt_io(phba); 1379 } else if (phba->work_hs & HS_CRIT_TEMP) { 1380 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1381 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1382 temp_event_data.event_code = LPFC_CRIT_TEMP; 1383 temp_event_data.data = (uint32_t)temperature; 1384 1385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1386 "0406 Adapter maximum temperature exceeded " 1387 "(%ld), taking this port offline " 1388 "Data: x%x x%x x%x\n", 1389 temperature, phba->work_hs, 1390 phba->work_status[0], phba->work_status[1]); 1391 1392 shost = lpfc_shost_from_vport(phba->pport); 1393 fc_host_post_vendor_event(shost, fc_get_event_number(), 1394 sizeof(temp_event_data), 1395 (char *) &temp_event_data, 1396 SCSI_NL_VID_TYPE_PCI 1397 | PCI_VENDOR_ID_EMULEX); 1398 1399 spin_lock_irq(&phba->hbalock); 1400 phba->over_temp_state = HBA_OVER_TEMP; 1401 spin_unlock_irq(&phba->hbalock); 1402 lpfc_offline_eratt(phba); 1403 1404 } else { 1405 /* The if clause above forces this code path when the status 1406 * failure is a value other than FFER6. Do not call the offline 1407 * path twice. This is the adapter hardware error path. 1408 */ 1409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1410 "0457 Adapter Hardware Error " 1411 "Data: x%x x%x x%x\n", 1412 phba->work_hs, 1413 phba->work_status[0], phba->work_status[1]); 1414 1415 event_data = FC_REG_DUMP_EVENT; 1416 shost = lpfc_shost_from_vport(vport); 1417 fc_host_post_vendor_event(shost, fc_get_event_number(), 1418 sizeof(event_data), (char *) &event_data, 1419 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1420 1421 lpfc_offline_eratt(phba); 1422 } 1423 return; 1424 } 1425 1426 /** 1427 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1428 * @phba: pointer to lpfc hba data structure. 1429 * 1430 * This routine is invoked to handle the SLI4 HBA hardware error attention 1431 * conditions. 1432 **/ 1433 static void 1434 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1435 { 1436 struct lpfc_vport *vport = phba->pport; 1437 uint32_t event_data; 1438 struct Scsi_Host *shost; 1439 uint32_t if_type; 1440 struct lpfc_register portstat_reg; 1441 int rc; 1442 1443 /* If the pci channel is offline, ignore possible errors, since 1444 * we cannot communicate with the pci card anyway.
1445 */ 1446 if (pci_channel_offline(phba->pcidev)) 1447 return; 1448 /* If resets are disabled then leave the HBA alone and return */ 1449 if (!phba->cfg_enable_hba_reset) 1450 return; 1451 1452 /* Send an internal error event to mgmt application */ 1453 lpfc_board_errevt_to_mgmt(phba); 1454 1455 /* For now, the actual action for SLI4 device handling is not 1456 * yet specified; just treat it as an adapter hardware failure. 1457 */ 1458 event_data = FC_REG_DUMP_EVENT; 1459 shost = lpfc_shost_from_vport(vport); 1460 fc_host_post_vendor_event(shost, fc_get_event_number(), 1461 sizeof(event_data), (char *) &event_data, 1462 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1463 1464 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1465 switch (if_type) { 1466 case LPFC_SLI_INTF_IF_TYPE_0: 1467 lpfc_sli4_offline_eratt(phba); 1468 break; 1469 case LPFC_SLI_INTF_IF_TYPE_2: 1470 portstat_reg.word0 = 1471 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 1472 1473 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1474 /* TODO: Register for Overtemp async events. */ 1475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1476 "2889 Port Overtemperature event, " 1477 "taking port offline\n"); 1478 spin_lock_irq(&phba->hbalock); 1479 phba->over_temp_state = HBA_OVER_TEMP; 1480 spin_unlock_irq(&phba->hbalock); 1481 lpfc_sli4_offline_eratt(phba); 1482 return; 1483 } 1484 /* 1485 * On error status condition, the driver needs to wait for port 1486 * ready before performing reset. 1487 */ 1488 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1489 if (!rc) { 1490 /* need reset: attempt port recovery */ 1491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1492 "2887 Port Error: Attempting " 1493 "Port Recovery\n"); 1494 lpfc_offline_prep(phba); 1495 lpfc_offline(phba); 1496 lpfc_sli_brdrestart(phba); 1497 if (lpfc_online(phba) == 0) { 1498 lpfc_unblock_mgmt_io(phba); 1499 return; 1500 } 1501 /* fall through when not able to recover */ 1502 } 1503 lpfc_sli4_offline_eratt(phba); 1504 break; 1505 case LPFC_SLI_INTF_IF_TYPE_1: 1506 default: 1507 break; 1508 } 1509 } 1510 1511 /** 1512 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1513 * @phba: pointer to lpfc HBA data structure. 1514 * 1515 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1516 * routine from the API jump table function pointer from the lpfc_hba struct. 1517 * 1518 * Return codes 1519 * 0 - success. 1520 * Any other value - error. 1521 **/ 1522 void 1523 lpfc_handle_eratt(struct lpfc_hba *phba) 1524 { 1525 (*phba->lpfc_handle_eratt)(phba); 1526 } 1527 1528 /** 1529 * lpfc_handle_latt - The HBA link event handler 1530 * @phba: pointer to lpfc hba data structure. 1531 * 1532 * This routine is invoked from the worker thread to handle an HBA host 1533 * attention link event.
1534 **/ 1535 void 1536 lpfc_handle_latt(struct lpfc_hba *phba) 1537 { 1538 struct lpfc_vport *vport = phba->pport; 1539 struct lpfc_sli *psli = &phba->sli; 1540 LPFC_MBOXQ_t *pmb; 1541 volatile uint32_t control; 1542 struct lpfc_dmabuf *mp; 1543 int rc = 0; 1544 1545 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1546 if (!pmb) { 1547 rc = 1; 1548 goto lpfc_handle_latt_err_exit; 1549 } 1550 1551 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1552 if (!mp) { 1553 rc = 2; 1554 goto lpfc_handle_latt_free_pmb; 1555 } 1556 1557 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1558 if (!mp->virt) { 1559 rc = 3; 1560 goto lpfc_handle_latt_free_mp; 1561 } 1562 1563 /* Cleanup any outstanding ELS commands */ 1564 lpfc_els_flush_all_cmd(phba); 1565 1566 psli->slistat.link_event++; 1567 lpfc_read_topology(phba, pmb, mp); 1568 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 1569 pmb->vport = vport; 1570 /* Block ELS IOCBs until we have processed this mbox command */ 1571 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1572 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1573 if (rc == MBX_NOT_FINISHED) { 1574 rc = 4; 1575 goto lpfc_handle_latt_free_mbuf; 1576 } 1577 1578 /* Clear Link Attention in HA REG */ 1579 spin_lock_irq(&phba->hbalock); 1580 writel(HA_LATT, phba->HAregaddr); 1581 readl(phba->HAregaddr); /* flush */ 1582 spin_unlock_irq(&phba->hbalock); 1583 1584 return; 1585 1586 lpfc_handle_latt_free_mbuf: 1587 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1588 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1589 lpfc_handle_latt_free_mp: 1590 kfree(mp); 1591 lpfc_handle_latt_free_pmb: 1592 mempool_free(pmb, phba->mbox_mem_pool); 1593 lpfc_handle_latt_err_exit: 1594 /* Enable Link attention interrupts */ 1595 spin_lock_irq(&phba->hbalock); 1596 psli->sli_flag |= LPFC_PROCESS_LA; 1597 control = readl(phba->HCregaddr); 1598 control |= HC_LAINT_ENA; 1599 writel(control, phba->HCregaddr); 1600 readl(phba->HCregaddr); /* flush */ 1601 1602 /* Clear Link Attention in HA REG */ 1603 writel(HA_LATT, phba->HAregaddr); 1604 readl(phba->HAregaddr); /* flush */ 1605 spin_unlock_irq(&phba->hbalock); 1606 lpfc_linkdown(phba); 1607 phba->link_state = LPFC_HBA_ERROR; 1608 1609 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1610 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1611 1612 return; 1613 } 1614 1615 /** 1616 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1617 * @phba: pointer to lpfc hba data structure. 1618 * @vpd: pointer to the vital product data. 1619 * @len: length of the vital product data in bytes. 1620 * 1621 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1622 * an array of characters. In this routine, the ModelName, ProgramType, and 1623 * ModelDesc, etc. fields of the phba data structure will be populated. 
1624 * 1625 * Return codes 1626 * 0 - pointer to the VPD passed in is NULL 1627 * 1 - success 1628 **/ 1629 int 1630 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1631 { 1632 uint8_t lenlo, lenhi; 1633 int Length; 1634 int i, j; 1635 int finished = 0; 1636 int index = 0; 1637 1638 if (!vpd) 1639 return 0; 1640 1641 /* Vital Product */ 1642 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1643 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1644 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1645 (uint32_t) vpd[3]); 1646 while (!finished && (index < (len - 4))) { 1647 switch (vpd[index]) { 1648 case 0x82: 1649 case 0x91: 1650 index += 1; 1651 lenlo = vpd[index]; 1652 index += 1; 1653 lenhi = vpd[index]; 1654 index += 1; 1655 i = ((((unsigned short)lenhi) << 8) + lenlo); 1656 index += i; 1657 break; 1658 case 0x90: 1659 index += 1; 1660 lenlo = vpd[index]; 1661 index += 1; 1662 lenhi = vpd[index]; 1663 index += 1; 1664 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1665 if (Length > len - index) 1666 Length = len - index; 1667 while (Length > 0) { 1668 /* Look for Serial Number */ 1669 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1670 index += 2; 1671 i = vpd[index]; 1672 index += 1; 1673 j = 0; 1674 Length -= (3+i); 1675 while(i--) { 1676 phba->SerialNumber[j++] = vpd[index++]; 1677 if (j == 31) 1678 break; 1679 } 1680 phba->SerialNumber[j] = 0; 1681 continue; 1682 } 1683 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1684 phba->vpd_flag |= VPD_MODEL_DESC; 1685 index += 2; 1686 i = vpd[index]; 1687 index += 1; 1688 j = 0; 1689 Length -= (3+i); 1690 while(i--) { 1691 phba->ModelDesc[j++] = vpd[index++]; 1692 if (j == 255) 1693 break; 1694 } 1695 phba->ModelDesc[j] = 0; 1696 continue; 1697 } 1698 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1699 phba->vpd_flag |= VPD_MODEL_NAME; 1700 index += 2; 1701 i = vpd[index]; 1702 index += 1; 1703 j = 0; 1704 Length -= (3+i); 1705 while(i--) { 1706 phba->ModelName[j++] = vpd[index++]; 1707 if (j == 79) 1708 break; 1709 } 1710 phba->ModelName[j] = 0; 1711 continue; 1712 } 1713 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1714 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1715 index += 2; 1716 i = vpd[index]; 1717 index += 1; 1718 j = 0; 1719 Length -= (3+i); 1720 while(i--) { 1721 phba->ProgramType[j++] = vpd[index++]; 1722 if (j == 255) 1723 break; 1724 } 1725 phba->ProgramType[j] = 0; 1726 continue; 1727 } 1728 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1729 phba->vpd_flag |= VPD_PORT; 1730 index += 2; 1731 i = vpd[index]; 1732 index += 1; 1733 j = 0; 1734 Length -= (3+i); 1735 while(i--) { 1736 if ((phba->sli_rev == LPFC_SLI_REV4) && 1737 (phba->sli4_hba.pport_name_sta == 1738 LPFC_SLI4_PPNAME_GET)) { 1739 j++; 1740 index++; 1741 } else 1742 phba->Port[j++] = vpd[index++]; 1743 if (j == 19) 1744 break; 1745 } 1746 if ((phba->sli_rev != LPFC_SLI_REV4) || 1747 (phba->sli4_hba.pport_name_sta == 1748 LPFC_SLI4_PPNAME_NON)) 1749 phba->Port[j] = 0; 1750 continue; 1751 } 1752 else { 1753 index += 2; 1754 i = vpd[index]; 1755 index += 1; 1756 index += i; 1757 Length -= (3 + i); 1758 } 1759 } 1760 finished = 0; 1761 break; 1762 case 0x78: 1763 finished = 1; 1764 break; 1765 default: 1766 index ++; 1767 break; 1768 } 1769 } 1770 1771 return(1); 1772 } 1773 1774 /** 1775 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 1776 * @phba: pointer to lpfc hba data structure. 1777 * @mdp: pointer to the data structure to hold the derived model name. 
1778 * @descp: pointer to the data structure to hold the derived description. 1779 * 1780 * This routine retrieves HBA's description based on its registered PCI device 1781 * ID. The @descp passed into this function points to an array of 256 chars. It 1782 * shall be returned with the model name, maximum speed, and the host bus type. 1783 * The @mdp passed into this function points to an array of 80 chars. When the 1784 * function returns, the @mdp will be filled with the model name. 1785 **/ 1786 static void 1787 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1788 { 1789 lpfc_vpd_t *vp; 1790 uint16_t dev_id = phba->pcidev->device; 1791 int max_speed; 1792 int GE = 0; 1793 int oneConnect = 0; /* default is not a oneConnect */ 1794 struct { 1795 char *name; 1796 char *bus; 1797 char *function; 1798 } m = {"<Unknown>", "", ""}; 1799 1800 if (mdp && mdp[0] != '\0' 1801 && descp && descp[0] != '\0') 1802 return; 1803 1804 if (phba->lmt & LMT_16Gb) 1805 max_speed = 16; 1806 else if (phba->lmt & LMT_10Gb) 1807 max_speed = 10; 1808 else if (phba->lmt & LMT_8Gb) 1809 max_speed = 8; 1810 else if (phba->lmt & LMT_4Gb) 1811 max_speed = 4; 1812 else if (phba->lmt & LMT_2Gb) 1813 max_speed = 2; 1814 else 1815 max_speed = 1; 1816 1817 vp = &phba->vpd; 1818 1819 switch (dev_id) { 1820 case PCI_DEVICE_ID_FIREFLY: 1821 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 1822 break; 1823 case PCI_DEVICE_ID_SUPERFLY: 1824 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1825 m = (typeof(m)){"LP7000", "PCI", 1826 "Fibre Channel Adapter"}; 1827 else 1828 m = (typeof(m)){"LP7000E", "PCI", 1829 "Fibre Channel Adapter"}; 1830 break; 1831 case PCI_DEVICE_ID_DRAGONFLY: 1832 m = (typeof(m)){"LP8000", "PCI", 1833 "Fibre Channel Adapter"}; 1834 break; 1835 case PCI_DEVICE_ID_CENTAUR: 1836 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1837 m = (typeof(m)){"LP9002", "PCI", 1838 "Fibre Channel Adapter"}; 1839 else 1840 m = (typeof(m)){"LP9000", "PCI", 1841 "Fibre Channel Adapter"}; 1842 break; 1843 case PCI_DEVICE_ID_RFLY: 1844 m = (typeof(m)){"LP952", "PCI", 1845 "Fibre Channel Adapter"}; 1846 break; 1847 case PCI_DEVICE_ID_PEGASUS: 1848 m = (typeof(m)){"LP9802", "PCI-X", 1849 "Fibre Channel Adapter"}; 1850 break; 1851 case PCI_DEVICE_ID_THOR: 1852 m = (typeof(m)){"LP10000", "PCI-X", 1853 "Fibre Channel Adapter"}; 1854 break; 1855 case PCI_DEVICE_ID_VIPER: 1856 m = (typeof(m)){"LPX1000", "PCI-X", 1857 "Fibre Channel Adapter"}; 1858 break; 1859 case PCI_DEVICE_ID_PFLY: 1860 m = (typeof(m)){"LP982", "PCI-X", 1861 "Fibre Channel Adapter"}; 1862 break; 1863 case PCI_DEVICE_ID_TFLY: 1864 m = (typeof(m)){"LP1050", "PCI-X", 1865 "Fibre Channel Adapter"}; 1866 break; 1867 case PCI_DEVICE_ID_HELIOS: 1868 m = (typeof(m)){"LP11000", "PCI-X2", 1869 "Fibre Channel Adapter"}; 1870 break; 1871 case PCI_DEVICE_ID_HELIOS_SCSP: 1872 m = (typeof(m)){"LP11000-SP", "PCI-X2", 1873 "Fibre Channel Adapter"}; 1874 break; 1875 case PCI_DEVICE_ID_HELIOS_DCSP: 1876 m = (typeof(m)){"LP11002-SP", "PCI-X2", 1877 "Fibre Channel Adapter"}; 1878 break; 1879 case PCI_DEVICE_ID_NEPTUNE: 1880 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 1881 break; 1882 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1883 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 1884 break; 1885 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1886 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 1887 break; 1888 case PCI_DEVICE_ID_BMID: 1889 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 1890 break; 1891 
case PCI_DEVICE_ID_BSMB: 1892 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 1893 break; 1894 case PCI_DEVICE_ID_ZEPHYR: 1895 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1896 break; 1897 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1898 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1899 break; 1900 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1901 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1902 GE = 1; 1903 break; 1904 case PCI_DEVICE_ID_ZMID: 1905 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1906 break; 1907 case PCI_DEVICE_ID_ZSMB: 1908 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1909 break; 1910 case PCI_DEVICE_ID_LP101: 1911 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1912 break; 1913 case PCI_DEVICE_ID_LP10000S: 1914 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1915 break; 1916 case PCI_DEVICE_ID_LP11000S: 1917 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1918 break; 1919 case PCI_DEVICE_ID_LPE11000S: 1920 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1921 break; 1922 case PCI_DEVICE_ID_SAT: 1923 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1924 break; 1925 case PCI_DEVICE_ID_SAT_MID: 1926 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1927 break; 1928 case PCI_DEVICE_ID_SAT_SMB: 1929 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1930 break; 1931 case PCI_DEVICE_ID_SAT_DCSP: 1932 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1933 break; 1934 case PCI_DEVICE_ID_SAT_SCSP: 1935 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1936 break; 1937 case PCI_DEVICE_ID_SAT_S: 1938 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1939 break; 1940 case PCI_DEVICE_ID_HORNET: 1941 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1942 GE = 1; 1943 break; 1944 case PCI_DEVICE_ID_PROTEUS_VF: 1945 m = (typeof(m)){"LPev12000", "PCIe IOV", 1946 "Fibre Channel Adapter"}; 1947 break; 1948 case PCI_DEVICE_ID_PROTEUS_PF: 1949 m = (typeof(m)){"LPev12000", "PCIe IOV", 1950 "Fibre Channel Adapter"}; 1951 break; 1952 case PCI_DEVICE_ID_PROTEUS_S: 1953 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1954 "Fibre Channel Adapter"}; 1955 break; 1956 case PCI_DEVICE_ID_TIGERSHARK: 1957 oneConnect = 1; 1958 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1959 break; 1960 case PCI_DEVICE_ID_TOMCAT: 1961 oneConnect = 1; 1962 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1963 break; 1964 case PCI_DEVICE_ID_FALCON: 1965 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1966 "EmulexSecure Fibre"}; 1967 break; 1968 case PCI_DEVICE_ID_BALIUS: 1969 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1970 "Fibre Channel Adapter"}; 1971 break; 1972 case PCI_DEVICE_ID_LANCER_FC: 1973 case PCI_DEVICE_ID_LANCER_FC_VF: 1974 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 1975 break; 1976 case PCI_DEVICE_ID_LANCER_FCOE: 1977 case PCI_DEVICE_ID_LANCER_FCOE_VF: 1978 oneConnect = 1; 1979 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 1980 break; 1981 default: 1982 m = (typeof(m)){"Unknown", "", ""}; 1983 break; 1984 } 1985 1986 if (mdp && mdp[0] == '\0') 1987 snprintf(mdp, 79,"%s", m.name); 1988 /* 1989 * oneConnect hba requires special processing, they are all initiators 1990 * and we put the port number on the end 1991 */ 1992 if (descp && descp[0] == '\0') { 1993 if (oneConnect) 1994 snprintf(descp, 255, 1995 "Emulex OneConnect %s, %s Initiator, Port %s", 1996 m.name, m.function, 1997 phba->Port); 1998 else 1999 
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}
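/*
 * Illustration only: putPaddrHigh()/putPaddrLow() above split a 64-bit DMA
 * address into the two 32-bit BDE address words. The same operation as a
 * standalone sketch:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static uint32_t paddr_high(uint64_t addr)
 *	{
 *		return (uint32_t)(addr >> 32);
 *	}
 *
 *	static uint32_t paddr_low(uint64_t addr)
 *	{
 *		return (uint32_t)(addr & 0xffffffffULL);
 *	}
 *
 *	int main(void)
 *	{
 *		assert(paddr_high(0x0000000123456789ULL) == 0x1);
 *		assert(paddr_low(0x0000000123456789ULL) == 0x23456789);
 *		return 0;
 *	}
 */

/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.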
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are put back into the initial hash table and returned through
 * @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
								     8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}
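/*
 * Illustration only: the S(N, V) macro above is a plain 32-bit left
 * rotation, the primitive SHA-1 uses for both its message schedule and
 * its round function. A standalone sketch:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static uint32_t rotl32(uint32_t v, unsigned int n)
 *	{
 *		return (v << n) | (v >> (32 - n));
 *	}
 *
 *	int main(void)
 *	{
 *		assert(rotl32(0x80000000, 1) == 0x00000001);
 *		assert(rotl32(0x12345678, 8) == 0x34567812);
 *		return 0;
 *	}
 */

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.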
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);

	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}
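/*
 * Illustration only: the loop above bounds an otherwise open-ended wait by
 * polling, sleeping briefly, and giving up after a fixed iteration count
 * (3000 x 10ms, roughly 30 seconds). The pattern in isolation, where
 * condition_met() stands in for list_empty(&vport->fc_nodes):
 *
 *	int i = 0;
 *
 *	while (!condition_met()) {
 *		if (i++ > 3000) {
 *			pr_err("giving up after ~30 seconds\n");
 *			break;
 *		}
 *		msleep(10);
 *	}
 */

/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.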
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
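/*
 * Illustration only: the teardown above uses del_timer_sync(), which
 * guarantees the timer handler is not still running on another CPU when
 * it returns (so it must not be called under a lock the handler takes).
 * A minimal setup/teardown sketch with hypothetical names, assuming the
 * pre-4.15 timer API used throughout this file:
 *
 *	struct my_ctx {
 *		struct timer_list tmo;
 *	};
 *
 *	static void my_tmo_handler(unsigned long ptr)
 *	{
 *		struct my_ctx *ctx = (struct my_ctx *)ptr;
 *
 *		pr_info("ctx %p timed out\n", ctx);
 *	}
 *
 *	static void my_start(struct my_ctx *ctx)
 *	{
 *		init_timer(&ctx->tmo);
 *		ctx->tmo.function = my_tmo_handler;
 *		ctx->tmo.data = (unsigned long)ctx;
 *		mod_timer(&ctx->tmo, jiffies + msecs_to_jiffies(1000));
 *	}
 *
 *	static void my_stop(struct my_ctx *ctx)
 *	{
 *		del_timer_sync(&ctx->tmo);
 *	}
 */

/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as blocked.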
Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether they are from sysfs interface or libdfc interface will
 * all be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	} else {
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}
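/*
 * Illustration only: lpfc_block_mgmt_io() above uses the standard jiffies
 * deadline pattern - compute an absolute deadline, poll, and bail out with
 * time_after(), which handles jiffies wraparound. In isolation, with
 * still_pending() standing in for the phba->sli.mbox_active test:
 *
 *	unsigned long timeout = msecs_to_jiffies(5000) + jiffies;
 *
 *	while (still_pending()) {
 *		msleep(2);
 *		if (time_after(jiffies, timeout)) {
 *			pr_warn("timed out waiting\n");
 *			break;
 *		}
 *	}
 */

/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked.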
Once the 2552 * HBA's management interface is marked as not blocked, all the user space 2553 * access to the HBA, whether they are from sysfs interface or libdfc 2554 * interface will be allowed. The HBA is set to block the management interface 2555 * when the driver prepares the HBA interface for online or offline and then 2556 * set to unblock the management interface afterwards. 2557 **/ 2558 void 2559 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2560 { 2561 unsigned long iflag; 2562 2563 spin_lock_irqsave(&phba->hbalock, iflag); 2564 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2565 spin_unlock_irqrestore(&phba->hbalock, iflag); 2566 } 2567 2568 /** 2569 * lpfc_offline_prep - Prepare a HBA to be brought offline 2570 * @phba: pointer to lpfc hba data structure. 2571 * 2572 * This routine is invoked to prepare a HBA to be brought offline. It performs 2573 * unregistration login to all the nodes on all vports and flushes the mailbox 2574 * queue to make it ready to be brought offline. 2575 **/ 2576 void 2577 lpfc_offline_prep(struct lpfc_hba * phba) 2578 { 2579 struct lpfc_vport *vport = phba->pport; 2580 struct lpfc_nodelist *ndlp, *next_ndlp; 2581 struct lpfc_vport **vports; 2582 struct Scsi_Host *shost; 2583 int i; 2584 2585 if (vport->fc_flag & FC_OFFLINE_MODE) 2586 return; 2587 2588 lpfc_block_mgmt_io(phba); 2589 2590 lpfc_linkdown(phba); 2591 2592 /* Issue an unreg_login to all nodes on all vports */ 2593 vports = lpfc_create_vport_work_array(phba); 2594 if (vports != NULL) { 2595 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2596 if (vports[i]->load_flag & FC_UNLOADING) 2597 continue; 2598 shost = lpfc_shost_from_vport(vports[i]); 2599 spin_lock_irq(shost->host_lock); 2600 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2601 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2602 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2603 spin_unlock_irq(shost->host_lock); 2604 2605 shost = lpfc_shost_from_vport(vports[i]); 2606 list_for_each_entry_safe(ndlp, next_ndlp, 2607 &vports[i]->fc_nodes, 2608 nlp_listp) { 2609 if (!NLP_CHK_NODE_ACT(ndlp)) 2610 continue; 2611 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2612 continue; 2613 if (ndlp->nlp_type & NLP_FABRIC) { 2614 lpfc_disc_state_machine(vports[i], ndlp, 2615 NULL, NLP_EVT_DEVICE_RECOVERY); 2616 lpfc_disc_state_machine(vports[i], ndlp, 2617 NULL, NLP_EVT_DEVICE_RM); 2618 } 2619 spin_lock_irq(shost->host_lock); 2620 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2621 spin_unlock_irq(shost->host_lock); 2622 lpfc_unreg_rpi(vports[i], ndlp); 2623 } 2624 } 2625 } 2626 lpfc_destroy_vport_work_array(phba, vports); 2627 2628 lpfc_sli_mbox_sys_shutdown(phba); 2629 } 2630 2631 /** 2632 * lpfc_offline - Bring a HBA offline 2633 * @phba: pointer to lpfc hba data structure. 2634 * 2635 * This routine actually brings a HBA offline. It stops all the timers 2636 * associated with the HBA, brings down the SLI layer, and eventually 2637 * marks the HBA as in offline state for the upper layer protocol. 
2638 **/ 2639 void 2640 lpfc_offline(struct lpfc_hba *phba) 2641 { 2642 struct Scsi_Host *shost; 2643 struct lpfc_vport **vports; 2644 int i; 2645 2646 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2647 return; 2648 2649 /* stop port and all timers associated with this hba */ 2650 lpfc_stop_port(phba); 2651 vports = lpfc_create_vport_work_array(phba); 2652 if (vports != NULL) 2653 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2654 lpfc_stop_vport_timers(vports[i]); 2655 lpfc_destroy_vport_work_array(phba, vports); 2656 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2657 "0460 Bring Adapter offline\n"); 2658 /* Bring down the SLI Layer and cleanup. The HBA is offline 2659 now. */ 2660 lpfc_sli_hba_down(phba); 2661 spin_lock_irq(&phba->hbalock); 2662 phba->work_ha = 0; 2663 spin_unlock_irq(&phba->hbalock); 2664 vports = lpfc_create_vport_work_array(phba); 2665 if (vports != NULL) 2666 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2667 shost = lpfc_shost_from_vport(vports[i]); 2668 spin_lock_irq(shost->host_lock); 2669 vports[i]->work_port_events = 0; 2670 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2671 spin_unlock_irq(shost->host_lock); 2672 } 2673 lpfc_destroy_vport_work_array(phba, vports); 2674 } 2675 2676 /** 2677 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2678 * @phba: pointer to lpfc hba data structure. 2679 * 2680 * This routine is to free all the SCSI buffers and IOCBs from the driver 2681 * list back to kernel. It is called from lpfc_pci_remove_one to free 2682 * the internal resources before the device is removed from the system. 2683 * 2684 * Return codes 2685 * 0 - successful (for now, it always returns 0) 2686 **/ 2687 static int 2688 lpfc_scsi_free(struct lpfc_hba *phba) 2689 { 2690 struct lpfc_scsi_buf *sb, *sb_next; 2691 struct lpfc_iocbq *io, *io_next; 2692 2693 spin_lock_irq(&phba->hbalock); 2694 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2695 spin_lock(&phba->scsi_buf_list_lock); 2696 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2697 list_del(&sb->list); 2698 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2699 sb->dma_handle); 2700 kfree(sb); 2701 phba->total_scsi_bufs--; 2702 } 2703 spin_unlock(&phba->scsi_buf_list_lock); 2704 2705 /* Release all the lpfc_iocbq entries maintained by this host. */ 2706 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2707 list_del(&io->list); 2708 kfree(io); 2709 phba->total_iocbq_bufs--; 2710 } 2711 2712 spin_unlock_irq(&phba->hbalock); 2713 return 0; 2714 } 2715 2716 /** 2717 * lpfc_create_port - Create an FC port 2718 * @phba: pointer to lpfc hba data structure. 2719 * @instance: a unique integer ID to this FC port. 2720 * @dev: pointer to the device data structure. 2721 * 2722 * This routine creates a FC port for the upper layer protocol. The FC port 2723 * can be created on top of either a physical port or a virtual port provided 2724 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2725 * and associates the FC port created before adding the shost into the SCSI 2726 * layer. 2727 * 2728 * Return codes 2729 * @vport - pointer to the virtual N_Port data structure. 2730 * NULL - port create failed. 
2731 **/ 2732 struct lpfc_vport * 2733 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2734 { 2735 struct lpfc_vport *vport; 2736 struct Scsi_Host *shost; 2737 int error = 0; 2738 2739 if (dev != &phba->pcidev->dev) 2740 shost = scsi_host_alloc(&lpfc_vport_template, 2741 sizeof(struct lpfc_vport)); 2742 else 2743 shost = scsi_host_alloc(&lpfc_template, 2744 sizeof(struct lpfc_vport)); 2745 if (!shost) 2746 goto out; 2747 2748 vport = (struct lpfc_vport *) shost->hostdata; 2749 vport->phba = phba; 2750 vport->load_flag |= FC_LOADING; 2751 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2752 vport->fc_rscn_flush = 0; 2753 2754 lpfc_get_vport_cfgparam(vport); 2755 shost->unique_id = instance; 2756 shost->max_id = LPFC_MAX_TARGET; 2757 shost->max_lun = vport->cfg_max_luns; 2758 shost->this_id = -1; 2759 shost->max_cmd_len = 16; 2760 if (phba->sli_rev == LPFC_SLI_REV4) { 2761 shost->dma_boundary = 2762 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2763 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2764 } 2765 2766 /* 2767 * Set initial can_queue value since 0 is no longer supported and 2768 * scsi_add_host will fail. This will be adjusted later based on the 2769 * max xri value determined in hba setup. 2770 */ 2771 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2772 if (dev != &phba->pcidev->dev) { 2773 shost->transportt = lpfc_vport_transport_template; 2774 vport->port_type = LPFC_NPIV_PORT; 2775 } else { 2776 shost->transportt = lpfc_transport_template; 2777 vport->port_type = LPFC_PHYSICAL_PORT; 2778 } 2779 2780 /* Initialize all internally managed lists. */ 2781 INIT_LIST_HEAD(&vport->fc_nodes); 2782 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2783 spin_lock_init(&vport->work_port_lock); 2784 2785 init_timer(&vport->fc_disctmo); 2786 vport->fc_disctmo.function = lpfc_disc_timeout; 2787 vport->fc_disctmo.data = (unsigned long)vport; 2788 2789 init_timer(&vport->fc_fdmitmo); 2790 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2791 vport->fc_fdmitmo.data = (unsigned long)vport; 2792 2793 init_timer(&vport->els_tmofunc); 2794 vport->els_tmofunc.function = lpfc_els_timeout; 2795 vport->els_tmofunc.data = (unsigned long)vport; 2796 2797 init_timer(&vport->delayed_disc_tmo); 2798 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 2799 vport->delayed_disc_tmo.data = (unsigned long)vport; 2800 2801 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2802 if (error) 2803 goto out_put_shost; 2804 2805 spin_lock_irq(&phba->hbalock); 2806 list_add_tail(&vport->listentry, &phba->port_list); 2807 spin_unlock_irq(&phba->hbalock); 2808 return vport; 2809 2810 out_put_shost: 2811 scsi_host_put(shost); 2812 out: 2813 return NULL; 2814 } 2815 2816 /** 2817 * destroy_port - destroy an FC port 2818 * @vport: pointer to an lpfc virtual N_Port data structure. 2819 * 2820 * This routine destroys a FC port from the upper layer protocol. All the 2821 * resources associated with the port are released. 2822 **/ 2823 void 2824 destroy_port(struct lpfc_vport *vport) 2825 { 2826 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2827 struct lpfc_hba *phba = vport->phba; 2828 2829 lpfc_debugfs_terminate(vport); 2830 fc_remove_host(shost); 2831 scsi_remove_host(shost); 2832 2833 spin_lock_irq(&phba->hbalock); 2834 list_del_init(&vport->listentry); 2835 spin_unlock_irq(&phba->hbalock); 2836 2837 lpfc_cleanup(vport); 2838 return; 2839 } 2840 2841 /** 2842 * lpfc_get_instance - Get a unique integer ID 2843 * 2844 * This routine allocates a unique integer ID from lpfc_hba_index pool. 
It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the host scan is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
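/*
 * Illustration only: lpfc_scan_finished() above implements the SCSI host
 * template's scan_finished() contract - the midlayer polls it with the
 * elapsed scan time in jiffies until it returns nonzero. The minimal
 * shape, with link_is_ready() as a placeholder readiness test:
 *
 *	static int my_scan_finished(struct Scsi_Host *shost,
 *				    unsigned long time)
 *	{
 *		if (time >= 30 * HZ)
 *			return 1;
 *		return link_is_ready(shost);
 *	}
 */

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host's attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().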
2935 */ 2936 2937 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2938 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2939 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2940 2941 memset(fc_host_supported_fc4s(shost), 0, 2942 sizeof(fc_host_supported_fc4s(shost))); 2943 fc_host_supported_fc4s(shost)[2] = 1; 2944 fc_host_supported_fc4s(shost)[7] = 1; 2945 2946 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2947 sizeof fc_host_symbolic_name(shost)); 2948 2949 fc_host_supported_speeds(shost) = 0; 2950 if (phba->lmt & LMT_16Gb) 2951 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 2952 if (phba->lmt & LMT_10Gb) 2953 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2954 if (phba->lmt & LMT_8Gb) 2955 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2956 if (phba->lmt & LMT_4Gb) 2957 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2958 if (phba->lmt & LMT_2Gb) 2959 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2960 if (phba->lmt & LMT_1Gb) 2961 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2962 2963 fc_host_maxframe_size(shost) = 2964 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2965 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2966 2967 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 2968 2969 /* This value is also unchanging */ 2970 memset(fc_host_active_fc4s(shost), 0, 2971 sizeof(fc_host_active_fc4s(shost))); 2972 fc_host_active_fc4s(shost)[2] = 1; 2973 fc_host_active_fc4s(shost)[7] = 1; 2974 2975 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2976 spin_lock_irq(shost->host_lock); 2977 vport->load_flag &= ~FC_LOADING; 2978 spin_unlock_irq(shost->host_lock); 2979 } 2980 2981 /** 2982 * lpfc_stop_port_s3 - Stop SLI3 device port 2983 * @phba: pointer to lpfc hba data structure. 2984 * 2985 * This routine is invoked to stop an SLI3 device port, it stops the device 2986 * from generating interrupts and stops the device driver's timers for the 2987 * device. 2988 **/ 2989 static void 2990 lpfc_stop_port_s3(struct lpfc_hba *phba) 2991 { 2992 /* Clear all interrupt enable conditions */ 2993 writel(0, phba->HCregaddr); 2994 readl(phba->HCregaddr); /* flush */ 2995 /* Clear all pending interrupts */ 2996 writel(0xffffffff, phba->HAregaddr); 2997 readl(phba->HAregaddr); /* flush */ 2998 2999 /* Reset some HBA SLI setup states */ 3000 lpfc_stop_hba_timers(phba); 3001 phba->pport->work_port_events = 0; 3002 } 3003 3004 /** 3005 * lpfc_stop_port_s4 - Stop SLI4 device port 3006 * @phba: pointer to lpfc hba data structure. 3007 * 3008 * This routine is invoked to stop an SLI4 device port, it stops the device 3009 * from generating interrupts and stops the device driver's timers for the 3010 * device. 3011 **/ 3012 static void 3013 lpfc_stop_port_s4(struct lpfc_hba *phba) 3014 { 3015 /* Reset some HBA SLI4 setup states */ 3016 lpfc_stop_hba_timers(phba); 3017 phba->pport->work_port_events = 0; 3018 phba->sli4_hba.intr_enable = 0; 3019 } 3020 3021 /** 3022 * lpfc_stop_port - Wrapper function for stopping hba port 3023 * @phba: Pointer to HBA context object. 3024 * 3025 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 3026 * the API jump table function pointer from the lpfc_hba struct. 3027 **/ 3028 void 3029 lpfc_stop_port(struct lpfc_hba *phba) 3030 { 3031 phba->lpfc_stop_port(phba); 3032 } 3033 3034 /** 3035 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 3036 * @phba: Pointer to hba for which this call is being executed. 
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine is invoked when the wait for FCF table rediscovery has timed
 * out. If new FCF record(s) have been discovered during the wait period, a
 * new FCF event is added to the FCoE async event list and the worker thread
 * is then woken up to process it from the worker thread context.
 **/
void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}
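/*
 * Illustration only: the bf_get() calls above extract named bitfields from
 * an ACQE word by shift-and-mask; assuming lpfc's usual name##_SHIFT /
 * name##_MASK convention, the operation written out by hand is:
 *
 *	static uint32_t field_get(uint32_t word, unsigned int shift,
 *				  uint32_t mask)
 *	{
 *		return (word >> shift) & mask;
 *	}
 */

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.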
3129 **/ 3130 static uint8_t 3131 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3132 struct lpfc_acqe_link *acqe_link) 3133 { 3134 uint8_t att_type; 3135 3136 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3137 case LPFC_ASYNC_LINK_STATUS_DOWN: 3138 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3139 att_type = LPFC_ATT_LINK_DOWN; 3140 break; 3141 case LPFC_ASYNC_LINK_STATUS_UP: 3142 /* Ignore physical link up events - wait for logical link up */ 3143 att_type = LPFC_ATT_RESERVED; 3144 break; 3145 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3146 att_type = LPFC_ATT_LINK_UP; 3147 break; 3148 default: 3149 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3150 "0399 Invalid link attention type: x%x\n", 3151 bf_get(lpfc_acqe_link_status, acqe_link)); 3152 att_type = LPFC_ATT_RESERVED; 3153 break; 3154 } 3155 return att_type; 3156 } 3157 3158 /** 3159 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3160 * @phba: pointer to lpfc hba data structure. 3161 * @acqe_link: pointer to the async link completion queue entry. 3162 * 3163 * This routine is to parse the SLI4 link-attention link speed and translate 3164 * it into the base driver's link-attention link speed coding. 3165 * 3166 * Return: Link-attention link speed in terms of base driver's coding. 3167 **/ 3168 static uint8_t 3169 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3170 struct lpfc_acqe_link *acqe_link) 3171 { 3172 uint8_t link_speed; 3173 3174 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3175 case LPFC_ASYNC_LINK_SPEED_ZERO: 3176 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3177 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3178 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3179 break; 3180 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3181 link_speed = LPFC_LINK_SPEED_1GHZ; 3182 break; 3183 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3184 link_speed = LPFC_LINK_SPEED_10GHZ; 3185 break; 3186 default: 3187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3188 "0483 Invalid link-attention link speed: x%x\n", 3189 bf_get(lpfc_acqe_link_speed, acqe_link)); 3190 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3191 break; 3192 } 3193 return link_speed; 3194 } 3195 3196 /** 3197 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3198 * @phba: pointer to lpfc hba data structure. 3199 * @acqe_link: pointer to the async link completion queue entry. 3200 * 3201 * This routine is to handle the SLI4 asynchronous FCoE link event. 
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we are done processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_link_speed, acqe_link);
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_logical_link_speed, acqe_link);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			/* Also release the mbuf here; the unwind label
			 * below only frees the lpfc_dmabuf wrapper.
			 */
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			goto out_free_dmabuf;
		}
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply
 * log that the event was received and then issue a read_topology mailbox
 * command so that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we are done processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		/* Also release the mbuf here; the unwind label below
		 * only frees the lpfc_dmabuf wrapper.
		 */
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		goto out_free_dmabuf;
	}
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			bf_get(lpfc_trailer_type, acqe_sli));
	return;
}
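/*
 * Illustration only: the unwind labels above follow the common kernel
 * "goto out_*" idiom - each allocation failure jumps to the label that
 * releases exactly what has been acquired so far, in reverse order:
 *
 *	a = alloc_a();
 *	if (!a)
 *		return;
 *	b = alloc_b();
 *	if (!b)
 *		goto out_free_a;
 *	if (do_work(a, b))
 *		goto out_free_b;
 *	return;
 *
 *	out_free_b:
 *		free_b(b);
 *	out_free_a:
 *		free_a(a);
 *
 * alloc_a()/alloc_b()/do_work() are placeholders for the mempool, dmabuf,
 * and mbuf steps used in the functions above.
 */

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.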
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
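/*
 * Illustration only: lpfc_create_vport_work_array() returns a NULL-
 * terminated snapshot of the active vports, which is why every caller in
 * this file walks it with the same bounded loop and then releases it:
 *
 *	struct lpfc_vport **vports;
 *	int i;
 *
 *	vports = lpfc_create_vport_work_array(phba);
 *	if (vports != NULL)
 *		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 *			do_per_vport(vports[i]);
 *	lpfc_destroy_vport_work_array(phba, vports);
 *
 * do_per_vport() is a placeholder for the per-vport action.
 */

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE event.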
3518 **/ 3519 static void 3520 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 3521 struct lpfc_acqe_fip *acqe_fip) 3522 { 3523 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 3524 int rc; 3525 struct lpfc_vport *vport; 3526 struct lpfc_nodelist *ndlp; 3527 struct Scsi_Host *shost; 3528 int active_vlink_present; 3529 struct lpfc_vport **vports; 3530 int i; 3531 3532 phba->fc_eventTag = acqe_fip->event_tag; 3533 phba->fcoe_eventtag = acqe_fip->event_tag; 3534 switch (event_type) { 3535 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 3536 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 3537 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 3538 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3539 LOG_DISCOVERY, 3540 "2546 New FCF event, evt_tag:x%x, " 3541 "index:x%x\n", 3542 acqe_fip->event_tag, 3543 acqe_fip->index); 3544 else 3545 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3546 LOG_DISCOVERY, 3547 "2788 FCF param modified event, " 3548 "evt_tag:x%x, index:x%x\n", 3549 acqe_fip->event_tag, 3550 acqe_fip->index); 3551 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3552 /* 3553 * During period of FCF discovery, read the FCF 3554 * table record indexed by the event to update 3555 * FCF roundrobin failover eligible FCF bmask. 3556 */ 3557 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3558 LOG_DISCOVERY, 3559 "2779 Read FCF (x%x) for updating " 3560 "roundrobin FCF failover bmask\n", 3561 acqe_fip->index); 3562 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 3563 } 3564 3565 /* If the FCF discovery is in progress, do nothing. */ 3566 spin_lock_irq(&phba->hbalock); 3567 if (phba->hba_flag & FCF_TS_INPROG) { 3568 spin_unlock_irq(&phba->hbalock); 3569 break; 3570 } 3571 /* If fast FCF failover rescan event is pending, do nothing */ 3572 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3573 spin_unlock_irq(&phba->hbalock); 3574 break; 3575 } 3576 3577 /* If the FCF has been in discovered state, do nothing. */ 3578 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3579 spin_unlock_irq(&phba->hbalock); 3580 break; 3581 } 3582 spin_unlock_irq(&phba->hbalock); 3583 3584 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3585 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3586 "2770 Start FCF table scan per async FCF " 3587 "event, evt_tag:x%x, index:x%x\n", 3588 acqe_fip->event_tag, acqe_fip->index); 3589 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3590 LPFC_FCOE_FCF_GET_FIRST); 3591 if (rc) 3592 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3593 "2547 Issue FCF scan read FCF mailbox " 3594 "command failed (x%x)\n", rc); 3595 break; 3596 3597 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 3598 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3599 "2548 FCF Table full count 0x%x tag 0x%x\n", 3600 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 3601 acqe_fip->event_tag); 3602 break; 3603 3604 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 3605 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3606 "2549 FCF (x%x) disconnected from network, " 3607 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 3608 /* 3609 * If we are in the middle of FCF failover process, clear 3610 * the corresponding FCF bit in the roundrobin bitmap. 
	 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery in case the current FCF is no
		 * longer valid, since we are not already in the middle of
		 * an FCF failover process.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, request the port to rediscover the
			 * entire FCF table for a fast recovery in case
			 * the current FCF is no longer valid, if we are
			 * not already in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, fail "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous DCBX event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous DCBX event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
			(phba->sli4_hba.link_state.logical_speed * 10));
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
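 *
 * The expected call site is the worker thread, gated on the ASYNC_EVENT
 * hba_flag that the interrupt path sets, roughly (an illustrative sketch,
 * assuming the lpfc worker-thread context):
 *
 *	if (phba->hba_flag & ASYNC_EVENT)
 *		lpfc_sli4_async_event_proc(phba);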
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process the FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
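 *
 * A typical probe-time call looks like (illustrative only; the group value
 * comes from the matched PCI device):
 *
 *	if (lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP))
 *		goto out_free_phba;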
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enabled INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars = 0;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device, bars:x%x\n", bars);
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
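 *
 * It is the teardown counterpart of lpfc_enable_pci_dev() and returns
 * early when no pcidev reference exists, so error paths can invoke it
 * unconditionally (illustrative only):
 *
 *	lpfc_disable_pci_dev(phba);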
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
	/* Null out PCI private reference to driver */
	pci_set_drvdata(pdev, NULL);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * any outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function and returns the maximum number of virtual functions (TotalVFs)
 * that the device supports. A return of 0 means the device either has no
 * SR-IOV capability or supports no virtual functions.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions on a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the
 * pci_enable_sriov() API call is not considered an error condition for
 * most devices.
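 *
 * The resource-setup paths use it like this (a sketch of the pattern that
 * appears later in this file):
 *
 *	if (phba->cfg_sriov_nr_virtfn > 0 &&
 *	    lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn))
 *		phba->cfg_sriov_nr_virtfn = 0;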
4094 **/ 4095 int 4096 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4097 { 4098 struct pci_dev *pdev = phba->pcidev; 4099 uint16_t max_nr_vfn; 4100 int rc; 4101 4102 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 4103 if (nr_vfn > max_nr_vfn) { 4104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4105 "3057 Requested vfs (%d) greater than " 4106 "supported vfs (%d)", nr_vfn, max_nr_vfn); 4107 return -EINVAL; 4108 } 4109 4110 rc = pci_enable_sriov(pdev, nr_vfn); 4111 if (rc) { 4112 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4113 "2806 Failed to enable sriov on this device " 4114 "with vfn number nr_vf:%d, rc:%d\n", 4115 nr_vfn, rc); 4116 } else 4117 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4118 "2807 Successful enable sriov on this device " 4119 "with vfn number nr_vf:%d\n", nr_vfn); 4120 return rc; 4121 } 4122 4123 /** 4124 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 4125 * @phba: pointer to lpfc hba data structure. 4126 * 4127 * This routine is invoked to set up the driver internal resources specific to 4128 * support the SLI-3 HBA device it attached to. 4129 * 4130 * Return codes 4131 * 0 - successful 4132 * other values - error 4133 **/ 4134 static int 4135 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 4136 { 4137 struct lpfc_sli *psli; 4138 int rc; 4139 4140 /* 4141 * Initialize timers used by driver 4142 */ 4143 4144 /* Heartbeat timer */ 4145 init_timer(&phba->hb_tmofunc); 4146 phba->hb_tmofunc.function = lpfc_hb_timeout; 4147 phba->hb_tmofunc.data = (unsigned long)phba; 4148 4149 psli = &phba->sli; 4150 /* MBOX heartbeat timer */ 4151 init_timer(&psli->mbox_tmo); 4152 psli->mbox_tmo.function = lpfc_mbox_timeout; 4153 psli->mbox_tmo.data = (unsigned long) phba; 4154 /* FCP polling mode timer */ 4155 init_timer(&phba->fcp_poll_timer); 4156 phba->fcp_poll_timer.function = lpfc_poll_timeout; 4157 phba->fcp_poll_timer.data = (unsigned long) phba; 4158 /* Fabric block timer */ 4159 init_timer(&phba->fabric_block_timer); 4160 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 4161 phba->fabric_block_timer.data = (unsigned long) phba; 4162 /* EA polling mode timer */ 4163 init_timer(&phba->eratt_poll); 4164 phba->eratt_poll.function = lpfc_poll_eratt; 4165 phba->eratt_poll.data = (unsigned long) phba; 4166 4167 /* Host attention work mask setup */ 4168 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 4169 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 4170 4171 /* Get all the module params for configuring this host */ 4172 lpfc_get_cfgparam(phba); 4173 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 4174 phba->menlo_flag |= HBA_MENLO_SUPPORT; 4175 /* check for menlo minimum sg count */ 4176 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 4177 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 4178 } 4179 4180 /* 4181 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4182 * used to create the sg_dma_buf_pool must be dynamically calculated. 4183 * 2 segments are added since the IOCB needs a command and response bde. 4184 */ 4185 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 4186 sizeof(struct fcp_rsp) + 4187 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 4188 4189 if (phba->cfg_enable_bg) { 4190 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 4191 phba->cfg_sg_dma_buf_size += 4192 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 4193 } 4194 4195 /* Also reinitialize the host templates with new values. 
*/ 4196 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4197 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4198 4199 phba->max_vpi = LPFC_MAX_VPI; 4200 /* This will be set to correct value after config_port mbox */ 4201 phba->max_vports = 0; 4202 4203 /* 4204 * Initialize the SLI Layer to run with lpfc HBAs. 4205 */ 4206 lpfc_sli_setup(phba); 4207 lpfc_sli_queue_setup(phba); 4208 4209 /* Allocate device driver memory */ 4210 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 4211 return -ENOMEM; 4212 4213 /* 4214 * Enable sr-iov virtual functions if supported and configured 4215 * through the module parameter. 4216 */ 4217 if (phba->cfg_sriov_nr_virtfn > 0) { 4218 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 4219 phba->cfg_sriov_nr_virtfn); 4220 if (rc) { 4221 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4222 "2808 Requested number of SR-IOV " 4223 "virtual functions (%d) is not " 4224 "supported\n", 4225 phba->cfg_sriov_nr_virtfn); 4226 phba->cfg_sriov_nr_virtfn = 0; 4227 } 4228 } 4229 4230 return 0; 4231 } 4232 4233 /** 4234 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 4235 * @phba: pointer to lpfc hba data structure. 4236 * 4237 * This routine is invoked to unset the driver internal resources set up 4238 * specific for supporting the SLI-3 HBA device it attached to. 4239 **/ 4240 static void 4241 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 4242 { 4243 /* Free device driver memory allocated */ 4244 lpfc_mem_free_all(phba); 4245 4246 return; 4247 } 4248 4249 /** 4250 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 4251 * @phba: pointer to lpfc hba data structure. 4252 * 4253 * This routine is invoked to set up the driver internal resources specific to 4254 * support the SLI-4 HBA device it attached to. 
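 *
 * It pairs with lpfc_sli4_driver_resource_unset() on the teardown path;
 * a probe sequence is roughly (illustrative sketch only, the error label
 * name is hypothetical):
 *
 *	rc = lpfc_sli4_driver_resource_setup(phba);
 *	if (rc)
 *		goto out_unset_phase1;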
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mboxq;
	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs, sli_family;

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;
	init_timer(&phba->rrq_tmr);
	phba->rrq_tmr.function = lpfc_rrq_timeout;
	phba->rrq_tmr.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;
	/* FCF rediscover timer */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/*
	 * We need to do a READ_CONFIG mailbox command here before
	 * calling lpfc_get_cfgparam. For VFs this will report the
	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
	 * All of the resources allocated
	 * for this Port are tied to these values.
	 */
	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 * 2 segments are added since the IOCB needs a command and response bde.
	 * To ensure that the scsi sgl does not cross a 4k page boundary, the
	 * sgl sizes must be a power of 2.
	 */
	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));

	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
	switch (sli_family) {
	case LPFC_SLI_INTF_FAMILY_BE2:
	case LPFC_SLI_INTF_FAMILY_BE3:
		/* There is a single hint for BE - 2 pages per BPL.
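		 * As a worked illustration (numbers assumed, not from the
		 * hardware spec): with a 64-entry sg table, buf_size above is
		 * sizeof(fcp_cmnd) + sizeof(fcp_rsp) + 66 sges, and the loop
		 * below doubles dma_buf_size from LPFC_SLI4_MIN_BUF_SIZE until
		 * it covers buf_size or reaches max_buf_size.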
		 */
		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_SLI_HINT1_1)
			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
		break;
	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
	default:
		break;
	}
	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
	     dma_buf_size = dma_buf_size << 1)
		;
	if (dma_buf_size == max_buf_size)
		phba->cfg_sg_seg_cnt = (dma_buf_size -
			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
			(2 * sizeof(struct sli4_sge))) /
				sizeof(struct sli4_sge);
	phba->cfg_sg_dma_buf_size = dma_buf_size;

	/* Initialize buffer queue management fields */
	hbq_count = lpfc_sli_hbq_count();
	for (i = 0; i < hbq_count; ++i)
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
	INIT_LIST_HEAD(&phba->rb_pend_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort scsi buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			return -ENODEV;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now.
	 */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port */
	lpfc_supported_pages(mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (!rc) {
		mqe = &mboxq->u.mqe;
		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
		       LPFC_MAX_SUPPORTED_PAGES);
		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
			switch (pn_page[i]) {
			case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
		/* Read the port's SLI4 Parameters capabilities if supported */
		if (phba->sli4_hba.pc_sli4_params.supported)
			rc = lpfc_pc_sli4_params_get(phba, mboxq);
		if (rc) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}
	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2999 Unsupported SLI4 Parameters "
				"Extents and RPI headers enabled.\n");
			/* Free the mailbox before bailing out */
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Initialize and populate the sgl list per host */
	rc = lpfc_init_sgl_list(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1400 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_free_sgl_list;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	/*
	 * The cfg_fcp_eq_count can be zero whenever there is exactly one
	 * interrupt vector.
This is not an error 4541 */ 4542 if (phba->cfg_fcp_eq_count) { 4543 phba->sli4_hba.fcp_eq_hdl = 4544 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4545 phba->cfg_fcp_eq_count), GFP_KERNEL); 4546 if (!phba->sli4_hba.fcp_eq_hdl) { 4547 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4548 "2572 Failed allocate memory for " 4549 "fast-path per-EQ handle array\n"); 4550 rc = -ENOMEM; 4551 goto out_free_fcf_rr_bmask; 4552 } 4553 } 4554 4555 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4556 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4557 if (!phba->sli4_hba.msix_entries) { 4558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4559 "2573 Failed allocate memory for msi-x " 4560 "interrupt vector entries\n"); 4561 rc = -ENOMEM; 4562 goto out_free_fcp_eq_hdl; 4563 } 4564 4565 /* 4566 * Enable sr-iov virtual functions if supported and configured 4567 * through the module parameter. 4568 */ 4569 if (phba->cfg_sriov_nr_virtfn > 0) { 4570 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 4571 phba->cfg_sriov_nr_virtfn); 4572 if (rc) { 4573 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4574 "3020 Requested number of SR-IOV " 4575 "virtual functions (%d) is not " 4576 "supported\n", 4577 phba->cfg_sriov_nr_virtfn); 4578 phba->cfg_sriov_nr_virtfn = 0; 4579 } 4580 } 4581 4582 return 0; 4583 4584 out_free_fcp_eq_hdl: 4585 kfree(phba->sli4_hba.fcp_eq_hdl); 4586 out_free_fcf_rr_bmask: 4587 kfree(phba->fcf.fcf_rr_bmask); 4588 out_remove_rpi_hdrs: 4589 lpfc_sli4_remove_rpi_hdrs(phba); 4590 out_free_active_sgl: 4591 lpfc_free_active_sgl(phba); 4592 out_free_sgl_list: 4593 lpfc_free_sgl_list(phba); 4594 out_destroy_cq_event_pool: 4595 lpfc_sli4_cq_event_pool_destroy(phba); 4596 out_free_bsmbx: 4597 lpfc_destroy_bootstrap_mbox(phba); 4598 out_free_mem: 4599 lpfc_mem_free(phba); 4600 return rc; 4601 } 4602 4603 /** 4604 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 4605 * @phba: pointer to lpfc hba data structure. 4606 * 4607 * This routine is invoked to unset the driver internal resources set up 4608 * specific for supporting the SLI-4 HBA device it attached to. 4609 **/ 4610 static void 4611 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 4612 { 4613 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 4614 4615 /* Free memory allocated for msi-x interrupt vector entries */ 4616 kfree(phba->sli4_hba.msix_entries); 4617 4618 /* Free memory allocated for fast-path work queue handles */ 4619 kfree(phba->sli4_hba.fcp_eq_hdl); 4620 4621 /* Free the allocated rpi headers. */ 4622 lpfc_sli4_remove_rpi_hdrs(phba); 4623 lpfc_sli4_remove_rpis(phba); 4624 4625 /* Free eligible FCF index bmask */ 4626 kfree(phba->fcf.fcf_rr_bmask); 4627 4628 /* Free the ELS sgl list */ 4629 lpfc_free_active_sgl(phba); 4630 lpfc_free_sgl_list(phba); 4631 4632 /* Free the SCSI sgl management array */ 4633 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4634 4635 /* Free the completion queue EQ event pool */ 4636 lpfc_sli4_cq_event_release_all(phba); 4637 lpfc_sli4_cq_event_pool_destroy(phba); 4638 4639 /* Release resource identifiers. */ 4640 lpfc_sli4_dealloc_resource_identifiers(phba); 4641 4642 /* Free the bsmbx region. 
 */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	/* Initialize the scsi buffer list used by driver for scsi IO */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	return 0;
}

/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
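 *
 * Phase 2 runs after phase 1 and after the SLI-specific resource setup,
 * e.g. (illustrative ordering only):
 *
 *	lpfc_setup_driver_resource_phase1(phba);
 *	lpfc_sli4_driver_resource_setup(phba);
 *	lpfc_setup_driver_resource_phase2(phba);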
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
static void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCBs to allocate for this host.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, LPFC_IOCB_LIST_CNT);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}

/**
 * lpfc_free_sgl_list - Free sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's sgl list and memory.
 **/
static void
lpfc_free_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
		phba->sli4_hba.total_sglq_bufs--;
	}
	kfree(phba->sli4_hba.lpfc_els_sgl_array);
}

/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}

/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	int i;
	int els_xri_cnt;

	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2400 ELS XRI count %d.\n",
			els_xri_cnt);
	/* Initialize and populate the sglq list per host/VF.
 */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Sanity check on XRI management */
	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2562 No room left for SCSI XRI allocation: "
				"max_xri=%d, els_xri=%d\n",
				phba->sli4_hba.max_cfg_param.max_xri,
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Allocate memory for the ELS XRI management array */
	phba->sli4_hba.lpfc_els_sgl_array =
		kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
			GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_els_sgl_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2401 Failed to allocate memory for ELS "
				"XRI management array of size %d.\n",
				els_xri_cnt);
		return -ENOMEM;
	}

	/* Keep the SCSI XRI into the XRI management array */
	phba->sli4_hba.scsi_xri_max =
		phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.scsi_xri_cnt = 0;
	phba->sli4_hba.lpfc_scsi_psb_array =
		kzalloc((sizeof(struct lpfc_scsi_buf *) *
			 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);

	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2563 Failed to allocate memory for SCSI "
				"XRI management array of size %d.\n",
				phba->sli4_hba.scsi_xri_max);
		kfree(phba->sli4_hba.lpfc_els_sgl_array);
		return -ENOMEM;
	}

	for (i = 0; i < els_xri_cnt; i++) {
		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
		if (sglq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d sgls of "
				"expected %d count. Unloading driver.\n",
				__func__, i, els_xri_cnt);
			goto out_free_mem;
		}

		sglq_entry->buff_type = GEN_BUFF_TYPE;
		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
		if (sglq_entry->virt == NULL) {
			kfree(sglq_entry);
			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
				"Unloading driver.\n", __func__);
			goto out_free_mem;
		}
		sglq_entry->sgl = sglq_entry->virt;
		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);

		/* The list order is used by later block SGL registration */
		spin_lock_irq(&phba->hbalock);
		sglq_entry->state = SGL_FREED;
		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
		phba->sli4_hba.total_sglq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}
	return 0;

out_free_mem:
	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
	lpfc_free_sgl_list(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.
Set the expected maximum count and let the actual value 5046 * get set when extents are fully allocated. 5047 */ 5048 if (!phba->sli4_hba.rpi_hdrs_in_use) { 5049 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5050 return rc; 5051 } 5052 if (phba->sli4_hba.extents_in_use) 5053 return -EIO; 5054 5055 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5056 if (!rpi_hdr) { 5057 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5058 "0391 Error during rpi post operation\n"); 5059 lpfc_sli4_remove_rpis(phba); 5060 rc = -ENODEV; 5061 } 5062 5063 return rc; 5064 } 5065 5066 /** 5067 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 5068 * @phba: pointer to lpfc hba data structure. 5069 * 5070 * This routine is invoked to allocate a single 4KB memory region to 5071 * support rpis and stores them in the phba. This single region 5072 * provides support for up to 64 rpis. The region is used globally 5073 * by the device. 5074 * 5075 * Returns: 5076 * A valid rpi hdr on success. 5077 * A NULL pointer on any failure. 5078 **/ 5079 struct lpfc_rpi_hdr * 5080 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 5081 { 5082 uint16_t rpi_limit, curr_rpi_range; 5083 struct lpfc_dmabuf *dmabuf; 5084 struct lpfc_rpi_hdr *rpi_hdr; 5085 uint32_t rpi_count; 5086 5087 /* 5088 * If the SLI4 port supports extents, posting the rpi header isn't 5089 * required. Set the expected maximum count and let the actual value 5090 * get set when extents are fully allocated. 5091 */ 5092 if (!phba->sli4_hba.rpi_hdrs_in_use) 5093 return NULL; 5094 if (phba->sli4_hba.extents_in_use) 5095 return NULL; 5096 5097 /* The limit on the logical index is just the max_rpi count. */ 5098 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5099 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5100 5101 spin_lock_irq(&phba->hbalock); 5102 /* 5103 * Establish the starting RPI in this header block. The starting 5104 * rpi is normalized to a zero base because the physical rpi is 5105 * port based. 5106 */ 5107 curr_rpi_range = phba->sli4_hba.next_rpi - 5108 phba->sli4_hba.max_cfg_param.rpi_base; 5109 spin_unlock_irq(&phba->hbalock); 5110 5111 /* 5112 * The port has a limited number of rpis. The increment here 5113 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 5114 * and to allow the full max_rpi range per port. 5115 */ 5116 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 5117 rpi_count = rpi_limit - curr_rpi_range; 5118 else 5119 rpi_count = LPFC_RPI_HDR_COUNT; 5120 5121 if (!rpi_count) 5122 return NULL; 5123 /* 5124 * First allocate the protocol header region for the port. The 5125 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5126 */ 5127 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5128 if (!dmabuf) 5129 return NULL; 5130 5131 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5132 LPFC_HDR_TEMPLATE_SIZE, 5133 &dmabuf->phys, 5134 GFP_KERNEL); 5135 if (!dmabuf->virt) { 5136 rpi_hdr = NULL; 5137 goto err_free_dmabuf; 5138 } 5139 5140 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 5141 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 5142 rpi_hdr = NULL; 5143 goto err_free_coherent; 5144 } 5145 5146 /* Save the rpi header data for cleanup later. */ 5147 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 5148 if (!rpi_hdr) 5149 goto err_free_coherent; 5150 5151 rpi_hdr->dmabuf = dmabuf; 5152 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5153 rpi_hdr->page_count = 1; 5154 spin_lock_irq(&phba->hbalock); 5155 5156 /* The rpi_hdr stores the logical index only. 
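	 * For a worked example with assumed values (rpi_base = 64,
	 * LPFC_RPI_HDR_COUNT = 64): the first header block gets
	 * start_rpi = 0, covering physical rpis 64-127, and next_rpi
	 * below then advances by rpi_count for the next posting.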
*/ 5157 rpi_hdr->start_rpi = curr_rpi_range; 5158 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5159 5160 /* 5161 * The next_rpi stores the next logical module-64 rpi value used 5162 * to post physical rpis in subsequent rpi postings. 5163 */ 5164 phba->sli4_hba.next_rpi += rpi_count; 5165 spin_unlock_irq(&phba->hbalock); 5166 return rpi_hdr; 5167 5168 err_free_coherent: 5169 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 5170 dmabuf->virt, dmabuf->phys); 5171 err_free_dmabuf: 5172 kfree(dmabuf); 5173 return NULL; 5174 } 5175 5176 /** 5177 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 5178 * @phba: pointer to lpfc hba data structure. 5179 * 5180 * This routine is invoked to remove all memory resources allocated 5181 * to support rpis for SLI4 ports not supporting extents. This routine 5182 * presumes the caller has released all rpis consumed by fabric or port 5183 * logins and is prepared to have the header pages removed. 5184 **/ 5185 void 5186 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5187 { 5188 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5189 5190 if (!phba->sli4_hba.rpi_hdrs_in_use) 5191 goto exit; 5192 5193 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5194 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5195 list_del(&rpi_hdr->list); 5196 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 5197 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 5198 kfree(rpi_hdr->dmabuf); 5199 kfree(rpi_hdr); 5200 } 5201 exit: 5202 /* There are no rpis available to the port now. */ 5203 phba->sli4_hba.next_rpi = 0; 5204 } 5205 5206 /** 5207 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 5208 * @pdev: pointer to pci device data structure. 5209 * 5210 * This routine is invoked to allocate the driver hba data structure for an 5211 * HBA device. If the allocation is successful, the phba reference to the 5212 * PCI device data structure is set. 5213 * 5214 * Return codes 5215 * pointer to @phba - successful 5216 * NULL - error 5217 **/ 5218 static struct lpfc_hba * 5219 lpfc_hba_alloc(struct pci_dev *pdev) 5220 { 5221 struct lpfc_hba *phba; 5222 5223 /* Allocate memory for HBA structure */ 5224 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 5225 if (!phba) { 5226 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 5227 return NULL; 5228 } 5229 5230 /* Set reference to PCI device in HBA structure */ 5231 phba->pcidev = pdev; 5232 5233 /* Assign an unused board number */ 5234 phba->brd_no = lpfc_get_instance(); 5235 if (phba->brd_no < 0) { 5236 kfree(phba); 5237 return NULL; 5238 } 5239 5240 spin_lock_init(&phba->ct_ev_lock); 5241 INIT_LIST_HEAD(&phba->ct_ev_waiters); 5242 5243 return phba; 5244 } 5245 5246 /** 5247 * lpfc_hba_free - Free driver hba data structure with a device. 5248 * @phba: pointer to lpfc hba data structure. 5249 * 5250 * This routine is invoked to free the driver hba data structure with an 5251 * HBA device. 5252 **/ 5253 static void 5254 lpfc_hba_free(struct lpfc_hba *phba) 5255 { 5256 /* Release the driver assigned board number */ 5257 idr_remove(&lpfc_hba_index, phba->brd_no); 5258 5259 kfree(phba); 5260 return; 5261 } 5262 5263 /** 5264 * lpfc_create_shost - Create hba physical port with associated scsi host. 5265 * @phba: pointer to lpfc hba data structure. 5266 * 5267 * This routine is invoked to create HBA physical port and associate a SCSI 5268 * host with it. 
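 *
 * On success the SCSI host can later be recovered from the PCI device
 * private data, as the rest of this file does (illustrative only):
 *
 *	struct Scsi_Host *shost = pci_get_drvdata(phba->pcidev);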
5269 * 5270 * Return codes 5271 * 0 - successful 5272 * other values - error 5273 **/ 5274 static int 5275 lpfc_create_shost(struct lpfc_hba *phba) 5276 { 5277 struct lpfc_vport *vport; 5278 struct Scsi_Host *shost; 5279 5280 /* Initialize HBA FC structure */ 5281 phba->fc_edtov = FF_DEF_EDTOV; 5282 phba->fc_ratov = FF_DEF_RATOV; 5283 phba->fc_altov = FF_DEF_ALTOV; 5284 phba->fc_arbtov = FF_DEF_ARBTOV; 5285 5286 atomic_set(&phba->sdev_cnt, 0); 5287 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 5288 if (!vport) 5289 return -ENODEV; 5290 5291 shost = lpfc_shost_from_vport(vport); 5292 phba->pport = vport; 5293 lpfc_debugfs_initialize(vport); 5294 /* Put reference to SCSI host into driver's device private data */ 5295 pci_set_drvdata(phba->pcidev, shost); 5296 5297 return 0; 5298 } 5299 5300 /** 5301 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 5302 * @phba: pointer to lpfc hba data structure. 5303 * 5304 * This routine is invoked to destroy HBA physical port and the associated 5305 * SCSI host. 5306 **/ 5307 static void 5308 lpfc_destroy_shost(struct lpfc_hba *phba) 5309 { 5310 struct lpfc_vport *vport = phba->pport; 5311 5312 /* Destroy the physical port associated with the SCSI host */ 5313 destroy_port(vport); 5314 5315 return; 5316 } 5317 5318 /** 5319 * lpfc_setup_bg - Setup Block guard structures and debug areas. 5320 * @phba: pointer to lpfc hba data structure. 5321 * @shost: the shost to be used to detect Block guard settings. 5322 * 5323 * This routine sets up the local Block guard protocol settings for @shost. 5324 * This routine also allocates memory for debugging bg buffers. 5325 **/ 5326 static void 5327 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5328 { 5329 int pagecnt = 10; 5330 if (lpfc_prot_mask && lpfc_prot_guard) { 5331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5332 "1478 Registering BlockGuard with the " 5333 "SCSI layer\n"); 5334 scsi_host_set_prot(shost, lpfc_prot_mask); 5335 scsi_host_set_guard(shost, lpfc_prot_guard); 5336 } 5337 if (!_dump_buf_data) { 5338 while (pagecnt) { 5339 spin_lock_init(&_dump_buf_lock); 5340 _dump_buf_data = 5341 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5342 if (_dump_buf_data) { 5343 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5344 "9043 BLKGRD: allocated %d pages for " 5345 "_dump_buf_data at 0x%p\n", 5346 (1 << pagecnt), _dump_buf_data); 5347 _dump_buf_data_order = pagecnt; 5348 memset(_dump_buf_data, 0, 5349 ((1 << PAGE_SHIFT) << pagecnt)); 5350 break; 5351 } else 5352 --pagecnt; 5353 } 5354 if (!_dump_buf_data_order) 5355 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5356 "9044 BLKGRD: ERROR unable to allocate " 5357 "memory for hexdump\n"); 5358 } else 5359 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5360 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 5361 "\n", _dump_buf_data); 5362 if (!_dump_buf_dif) { 5363 while (pagecnt) { 5364 _dump_buf_dif = 5365 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5366 if (_dump_buf_dif) { 5367 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5368 "9046 BLKGRD: allocated %d pages for " 5369 "_dump_buf_dif at 0x%p\n", 5370 (1 << pagecnt), _dump_buf_dif); 5371 _dump_buf_dif_order = pagecnt; 5372 memset(_dump_buf_dif, 0, 5373 ((1 << PAGE_SHIFT) << pagecnt)); 5374 break; 5375 } else 5376 --pagecnt; 5377 } 5378 if (!_dump_buf_dif_order) 5379 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5380 "9047 BLKGRD: ERROR unable to allocate " 5381 "memory for hexdump\n"); 5382 } else 5383 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5384 "9048 BLKGRD: already
allocated _dump_buf_dif=0x%p\n", 5385 _dump_buf_dif); 5386 } 5387 5388 /** 5389 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5390 * @phba: pointer to lpfc hba data structure. 5391 * 5392 * This routine is invoked to perform all the necessary post initialization 5393 * setup for the device. 5394 **/ 5395 static void 5396 lpfc_post_init_setup(struct lpfc_hba *phba) 5397 { 5398 struct Scsi_Host *shost; 5399 struct lpfc_adapter_event_header adapter_event; 5400 5401 /* Get the default values for Model Name and Description */ 5402 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5403 5404 /* 5405 * hba setup may have changed the hba_queue_depth so we need to 5406 * adjust the value of can_queue. 5407 */ 5408 shost = pci_get_drvdata(phba->pcidev); 5409 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5410 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5411 lpfc_setup_bg(phba, shost); 5412 5413 lpfc_host_attrib_init(shost); 5414 5415 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5416 spin_lock_irq(shost->host_lock); 5417 lpfc_poll_start_timer(phba); 5418 spin_unlock_irq(shost->host_lock); 5419 } 5420 5421 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5422 "0428 Perform SCSI scan\n"); 5423 /* Send board arrival event to upper layer */ 5424 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5425 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5426 fc_host_post_vendor_event(shost, fc_get_event_number(), 5427 sizeof(adapter_event), 5428 (char *) &adapter_event, 5429 LPFC_NL_VENDOR_ID); 5430 return; 5431 } 5432 5433 /** 5434 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 5435 * @phba: pointer to lpfc hba data structure. 5436 * 5437 * This routine is invoked to set up the PCI device memory space for device 5438 * with SLI-3 interface spec. 5439 * 5440 * Return codes 5441 * 0 - successful 5442 * other values - error 5443 **/ 5444 static int 5445 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5446 { 5447 struct pci_dev *pdev; 5448 unsigned long bar0map_len, bar2map_len; 5449 int i, hbq_count; 5450 void *ptr; 5451 int error = -ENODEV; 5452 5453 /* Obtain PCI device reference */ 5454 if (!phba->pcidev) 5455 return error; 5456 else 5457 pdev = phba->pcidev; 5458 5459 /* Set the device DMA mask size */ 5460 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5461 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5462 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5463 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5464 return error; 5465 } 5466 } 5467 5468 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5469 * required by each mapping. 5470 */ 5471 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5472 bar0map_len = pci_resource_len(pdev, 0); 5473 5474 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5475 bar2map_len = pci_resource_len(pdev, 2); 5476 5477 /* Map HBA SLIM to a kernel virtual address. */ 5478 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5479 if (!phba->slim_memmap_p) { 5480 dev_printk(KERN_ERR, &pdev->dev, 5481 "ioremap failed for SLIM memory.\n"); 5482 goto out; 5483 } 5484 5485 /* Map HBA Control Registers to a kernel virtual address. 
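 * This mirrors the SLIM ioremap() above; the HA/CA/HS/HC register
 * offsets are applied to this mapping near the end of this routine.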
*/ 5486 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5487 if (!phba->ctrl_regs_memmap_p) { 5488 dev_printk(KERN_ERR, &pdev->dev, 5489 "ioremap failed for HBA control registers.\n"); 5490 goto out_iounmap_slim; 5491 } 5492 5493 /* Allocate memory for SLI-2 structures */ 5494 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5495 SLI2_SLIM_SIZE, 5496 &phba->slim2p.phys, 5497 GFP_KERNEL); 5498 if (!phba->slim2p.virt) 5499 goto out_iounmap; 5500 5501 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5502 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5503 phba->mbox_ext = (phba->slim2p.virt + 5504 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5505 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5506 phba->IOCBs = (phba->slim2p.virt + 5507 offsetof(struct lpfc_sli2_slim, IOCBs)); 5508 5509 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5510 lpfc_sli_hbq_size(), 5511 &phba->hbqslimp.phys, 5512 GFP_KERNEL); 5513 if (!phba->hbqslimp.virt) 5514 goto out_free_slim; 5515 5516 hbq_count = lpfc_sli_hbq_count(); 5517 ptr = phba->hbqslimp.virt; 5518 for (i = 0; i < hbq_count; ++i) { 5519 phba->hbqs[i].hbq_virt = ptr; 5520 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5521 ptr += (lpfc_hbq_defs[i]->entry_count * 5522 sizeof(struct lpfc_hbq_entry)); 5523 } 5524 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5525 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5526 5527 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5528 5529 INIT_LIST_HEAD(&phba->rb_pend_list); 5530 5531 phba->MBslimaddr = phba->slim_memmap_p; 5532 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5533 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5534 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5535 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5536 5537 return 0; 5538 5539 out_free_slim: 5540 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5541 phba->slim2p.virt, phba->slim2p.phys); 5542 out_iounmap: 5543 iounmap(phba->ctrl_regs_memmap_p); 5544 out_iounmap_slim: 5545 iounmap(phba->slim_memmap_p); 5546 out: 5547 return error; 5548 } 5549 5550 /** 5551 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5552 * @phba: pointer to lpfc hba data structure. 5553 * 5554 * This routine is invoked to unset the PCI device memory space for device 5555 * with SLI-3 interface spec. 5556 **/ 5557 static void 5558 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5559 { 5560 struct pci_dev *pdev; 5561 5562 /* Obtain PCI device reference */ 5563 if (!phba->pcidev) 5564 return; 5565 else 5566 pdev = phba->pcidev; 5567 5568 /* Free coherent DMA memory allocated */ 5569 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5570 phba->hbqslimp.virt, phba->hbqslimp.phys); 5571 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5572 phba->slim2p.virt, phba->slim2p.phys); 5573 5574 /* I/O memory unmap */ 5575 iounmap(phba->ctrl_regs_memmap_p); 5576 iounmap(phba->slim_memmap_p); 5577 5578 return; 5579 } 5580 5581 /** 5582 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5583 * @phba: pointer to lpfc hba data structure. 5584 * 5585 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5586 * done and check status. 5587 * 5588 * Return 0 if successful, otherwise -ENODEV. 
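 *
 * A typical caller simply propagates the result during SLI4 bring-up,
 * e.g. (illustrative sketch):
 *
 *   if (lpfc_sli4_post_status_check(phba))
 *           return -ENODEV;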
5589 **/ 5590 int 5591 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5592 { 5593 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 5594 struct lpfc_register reg_data; 5595 int i, port_error = 0; 5596 uint32_t if_type; 5597 5598 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 5599 memset(&reg_data, 0, sizeof(reg_data)); 5600 if (!phba->sli4_hba.PSMPHRregaddr) 5601 return -ENODEV; 5602 5603 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5604 for (i = 0; i < 3000; i++) { 5605 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 5606 &portsmphr_reg.word0) || 5607 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 5608 /* Port has a fatal POST error, break out */ 5609 port_error = -ENODEV; 5610 break; 5611 } 5612 if (LPFC_POST_STAGE_PORT_READY == 5613 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 5614 break; 5615 msleep(10); 5616 } 5617 5618 /* 5619 * If there was a port error during POST, then don't proceed with 5620 * other register reads as the data may not be valid. Just exit. 5621 */ 5622 if (port_error) { 5623 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5624 "1408 Port Failed POST - portsmphr=0x%x, " 5625 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 5626 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 5627 portsmphr_reg.word0, 5628 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 5629 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 5630 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 5631 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 5632 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 5633 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 5634 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 5635 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 5636 } else { 5637 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5638 "2534 Device Info: SLIFamily=0x%x, " 5639 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 5640 "SLIHint_2=0x%x, FT=0x%x\n", 5641 bf_get(lpfc_sli_intf_sli_family, 5642 &phba->sli4_hba.sli_intf), 5643 bf_get(lpfc_sli_intf_slirev, 5644 &phba->sli4_hba.sli_intf), 5645 bf_get(lpfc_sli_intf_if_type, 5646 &phba->sli4_hba.sli_intf), 5647 bf_get(lpfc_sli_intf_sli_hint1, 5648 &phba->sli4_hba.sli_intf), 5649 bf_get(lpfc_sli_intf_sli_hint2, 5650 &phba->sli4_hba.sli_intf), 5651 bf_get(lpfc_sli_intf_func_type, 5652 &phba->sli4_hba.sli_intf)); 5653 /* 5654 * Check for other Port errors during the initialization 5655 * process. Fail the load if the port did not come up 5656 * correctly. 5657 */ 5658 if_type = bf_get(lpfc_sli_intf_if_type, 5659 &phba->sli4_hba.sli_intf); 5660 switch (if_type) { 5661 case LPFC_SLI_INTF_IF_TYPE_0: 5662 phba->sli4_hba.ue_mask_lo = 5663 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 5664 phba->sli4_hba.ue_mask_hi = 5665 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 5666 uerrlo_reg.word0 = 5667 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 5668 uerrhi_reg.word0 = 5669 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 5670 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5671 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5672 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5673 "1422 Unrecoverable Error " 5674 "Detected during POST " 5675 "uerr_lo_reg=0x%x, " 5676 "uerr_hi_reg=0x%x, " 5677 "ue_mask_lo_reg=0x%x, " 5678 "ue_mask_hi_reg=0x%x\n", 5679 uerrlo_reg.word0, 5680 uerrhi_reg.word0, 5681 phba->sli4_hba.ue_mask_lo, 5682 phba->sli4_hba.ue_mask_hi); 5683 port_error = -ENODEV; 5684 } 5685 break; 5686 case LPFC_SLI_INTF_IF_TYPE_2: 5687 /* Final checks. The port status should be clean.
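 * An error bit set while the reset-needed (RN) bit is clear
 * indicates a genuine POST failure; RN by itself only means
 * the port is asking for a reset and is not an error here.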
*/ 5688 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 5689 &reg_data.word0) || 5690 (bf_get(lpfc_sliport_status_err, &reg_data) && 5691 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 5692 phba->work_status[0] = 5693 readl(phba->sli4_hba.u.if_type2. 5694 ERR1regaddr); 5695 phba->work_status[1] = 5696 readl(phba->sli4_hba.u.if_type2. 5697 ERR2regaddr); 5698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5699 "2888 Port Error Detected " 5700 "during POST: " 5701 "port status reg 0x%x, " 5702 "port_smphr reg 0x%x, " 5703 "error 1=0x%x, error 2=0x%x\n", 5704 reg_data.word0, 5705 portsmphr_reg.word0, 5706 phba->work_status[0], 5707 phba->work_status[1]); 5708 port_error = -ENODEV; 5709 } 5710 break; 5711 case LPFC_SLI_INTF_IF_TYPE_1: 5712 default: 5713 break; 5714 } 5715 } 5716 return port_error; 5717 } 5718 5719 /** 5720 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5721 * @phba: pointer to lpfc hba data structure. 5722 * @if_type: The SLI4 interface type getting configured. 5723 * 5724 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5725 * memory map. 5726 **/ 5727 static void 5728 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 5729 { 5730 switch (if_type) { 5731 case LPFC_SLI_INTF_IF_TYPE_0: 5732 phba->sli4_hba.u.if_type0.UERRLOregaddr = 5733 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 5734 phba->sli4_hba.u.if_type0.UERRHIregaddr = 5735 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 5736 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 5737 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 5738 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 5739 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 5740 phba->sli4_hba.SLIINTFregaddr = 5741 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5742 break; 5743 case LPFC_SLI_INTF_IF_TYPE_2: 5744 phba->sli4_hba.u.if_type2.ERR1regaddr = 5745 phba->sli4_hba.conf_regs_memmap_p + 5746 LPFC_CTL_PORT_ER1_OFFSET; 5747 phba->sli4_hba.u.if_type2.ERR2regaddr = 5748 phba->sli4_hba.conf_regs_memmap_p + 5749 LPFC_CTL_PORT_ER2_OFFSET; 5750 phba->sli4_hba.u.if_type2.CTRLregaddr = 5751 phba->sli4_hba.conf_regs_memmap_p + 5752 LPFC_CTL_PORT_CTL_OFFSET; 5753 phba->sli4_hba.u.if_type2.STATUSregaddr = 5754 phba->sli4_hba.conf_regs_memmap_p + 5755 LPFC_CTL_PORT_STA_OFFSET; 5756 phba->sli4_hba.SLIINTFregaddr = 5757 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5758 phba->sli4_hba.PSMPHRregaddr = 5759 phba->sli4_hba.conf_regs_memmap_p + 5760 LPFC_CTL_PORT_SEM_OFFSET; 5761 phba->sli4_hba.RQDBregaddr = 5762 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5763 phba->sli4_hba.WQDBregaddr = 5764 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; 5765 phba->sli4_hba.EQCQDBregaddr = 5766 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 5767 phba->sli4_hba.MQDBregaddr = 5768 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 5769 phba->sli4_hba.BMBXregaddr = 5770 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 5771 break; 5772 case LPFC_SLI_INTF_IF_TYPE_1: 5773 default: 5774 dev_printk(KERN_ERR, &phba->pcidev->dev, 5775 "FATAL - unsupported SLI4 interface type - %d\n", 5776 if_type); 5777 break; 5778 } 5779 } 5780 5781 /** 5782 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5783 * @phba: pointer to lpfc hba data structure. 5784 * 5785 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5786 * memory map.
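 * Note the IF0-prefixed offsets: this BAR1 CSR layout applies to
 * if_type 0 ports; if_type 2 ports reach the semaphore register
 * through BAR0 instead (see lpfc_sli4_bar0_register_memmap() above).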
5787 **/ 5788 static void 5789 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5790 { 5791 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5792 LPFC_SLIPORT_IF0_SMPHR; 5793 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5794 LPFC_HST_ISR0; 5795 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5796 LPFC_HST_IMR0; 5797 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5798 LPFC_HST_ISCR0; 5799 } 5800 5801 /** 5802 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 5803 * @phba: pointer to lpfc hba data structure. 5804 * @vf: virtual function number 5805 * 5806 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 5807 * based on the given virtual function number, @vf. 5808 * 5809 * Return 0 if successful, otherwise -ENODEV. 5810 **/ 5811 static int 5812 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 5813 { 5814 if (vf > LPFC_VIR_FUNC_MAX) 5815 return -ENODEV; 5816 5817 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5818 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 5819 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5820 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 5821 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5822 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 5823 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5824 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 5825 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5826 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 5827 return 0; 5828 } 5829 5830 /** 5831 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 5832 * @phba: pointer to lpfc hba data structure. 5833 * 5834 * This routine is invoked to create the bootstrap mailbox 5835 * region consistent with the SLI-4 interface spec. This 5836 * routine allocates all memory necessary to communicate 5837 * mailbox commands to the port and sets up all alignment 5838 * needs. No locks are expected to be held when calling 5839 * this routine. 5840 * 5841 * Return codes 5842 * 0 - successful 5843 * -ENOMEM - could not allocate memory. 5844 **/ 5845 static int 5846 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5847 { 5848 uint32_t bmbx_size; 5849 struct lpfc_dmabuf *dmabuf; 5850 struct dma_address *dma_address; 5851 uint32_t pa_addr; 5852 uint64_t phys_addr; 5853 5854 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5855 if (!dmabuf) 5856 return -ENOMEM; 5857 5858 /* 5859 * The bootstrap mailbox region comprises two parts 5860 * plus an alignment restriction of 16 bytes. 5861 */ 5862 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 5863 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5864 bmbx_size, 5865 &dmabuf->phys, 5866 GFP_KERNEL); 5867 if (!dmabuf->virt) { 5868 kfree(dmabuf); 5869 return -ENOMEM; 5870 } 5871 memset(dmabuf->virt, 0, bmbx_size); 5872 5873 /* 5874 * Initialize the bootstrap mailbox pointers now so that the register 5875 * operations are simple later. The mailbox dma address is required 5876 * to be 16-byte aligned. Also align the virtual memory as each 5877 * mailbox is copied into the bmbx mailbox region before issuing the 5878 * command to the port.
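 * As a worked example (addresses illustrative only): were
 * dma_alloc_coherent() to return physical address 0x1008,
 * ALIGN(0x1008, 16) yields 0x1010, and the LPFC_ALIGN_16_BYTE - 1
 * pad bytes included in bmbx_size above guarantee that the aligned
 * region still holds a complete struct lpfc_bmbx_create.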
5879 */ 5880 phba->sli4_hba.bmbx.dmabuf = dmabuf; 5881 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 5882 5883 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 5884 LPFC_ALIGN_16_BYTE); 5885 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 5886 LPFC_ALIGN_16_BYTE); 5887 5888 /* 5889 * Set the high and low physical addresses now. The SLI4 alignment 5890 * requirement is 16 bytes and the mailbox is posted to the port 5891 * as two 30-bit addresses. The other data is a bit marking whether 5892 * the 30-bit address is the high or low address. 5893 * Upcast bmbx aphys to 64 bits so the shift instruction compiles 5894 * cleanly on 32-bit machines. 5895 */ 5896 dma_address = &phba->sli4_hba.bmbx.dma_address; 5897 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 5898 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 5899 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 5900 LPFC_BMBX_BIT1_ADDR_HI); 5901 5902 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 5903 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 5904 LPFC_BMBX_BIT1_ADDR_LO); 5905 return 0; 5906 } 5907 5908 /** 5909 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 5910 * @phba: pointer to lpfc hba data structure. 5911 * 5912 * This routine is invoked to teardown the bootstrap mailbox 5913 * region and release all host resources. This routine requires 5914 * the caller to ensure all mailbox commands are recovered, no 5915 * additional mailbox commands are sent, and interrupts are disabled 5916 * before calling this routine. 5917 * 5918 **/ 5919 static void 5920 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 5921 { 5922 dma_free_coherent(&phba->pcidev->dev, 5923 phba->sli4_hba.bmbx.bmbx_size, 5924 phba->sli4_hba.bmbx.dmabuf->virt, 5925 phba->sli4_hba.bmbx.dmabuf->phys); 5926 5927 kfree(phba->sli4_hba.bmbx.dmabuf); 5928 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 5929 } 5930 5931 /** 5932 * lpfc_sli4_read_config - Get the config parameters. 5933 * @phba: pointer to lpfc hba data structure. 5934 * 5935 * This routine is invoked to read the configuration parameters from the HBA. 5936 * The configuration parameters are used to set the base and maximum values 5937 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource 5938 * allocation for the port. 5939 * 5940 * Return codes 5941 * 0 - successful 5942 * -ENOMEM - No available memory 5943 * -EIO - The mailbox failed to complete successfully.
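 *
 * The base/count pairs read here seed next_xri/next_rpi and bound the
 * queue-count sanity checking later performed by
 * lpfc_sli4_queue_verify().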
5944 **/ 5945 static int 5946 lpfc_sli4_read_config(struct lpfc_hba *phba) 5947 { 5948 LPFC_MBOXQ_t *pmb; 5949 struct lpfc_mbx_read_config *rd_config; 5950 union lpfc_sli4_cfg_shdr *shdr; 5951 uint32_t shdr_status, shdr_add_status; 5952 struct lpfc_mbx_get_func_cfg *get_func_cfg; 5953 struct lpfc_rsrc_desc_fcfcoe *desc; 5954 uint32_t desc_count; 5955 int length, i, rc = 0; 5956 5957 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5958 if (!pmb) { 5959 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5960 "2011 Unable to allocate memory for issuing " 5961 "SLI_CONFIG_SPECIAL mailbox command\n"); 5962 return -ENOMEM; 5963 } 5964 5965 lpfc_read_config(phba, pmb); 5966 5967 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5968 if (rc != MBX_SUCCESS) { 5969 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5970 "2012 Mailbox failed, mbxCmd x%x " 5971 "READ_CONFIG, mbxStatus x%x\n", 5972 bf_get(lpfc_mqe_command, &pmb->u.mqe), 5973 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 5974 rc = -EIO; 5975 } else { 5976 rd_config = &pmb->u.mqe.un.rd_config; 5977 phba->sli4_hba.extents_in_use = 5978 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 5979 phba->sli4_hba.max_cfg_param.max_xri = 5980 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5981 phba->sli4_hba.max_cfg_param.xri_base = 5982 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 5983 phba->sli4_hba.max_cfg_param.max_vpi = 5984 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 5985 phba->sli4_hba.max_cfg_param.vpi_base = 5986 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 5987 phba->sli4_hba.max_cfg_param.max_rpi = 5988 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 5989 phba->sli4_hba.max_cfg_param.rpi_base = 5990 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 5991 phba->sli4_hba.max_cfg_param.max_vfi = 5992 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 5993 phba->sli4_hba.max_cfg_param.vfi_base = 5994 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5995 phba->sli4_hba.max_cfg_param.max_fcfi = 5996 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5997 phba->sli4_hba.max_cfg_param.max_eq = 5998 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5999 phba->sli4_hba.max_cfg_param.max_rq = 6000 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 6001 phba->sli4_hba.max_cfg_param.max_wq = 6002 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 6003 phba->sli4_hba.max_cfg_param.max_cq = 6004 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 6005 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 6006 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 6007 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 6008 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 6009 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 6010 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 6011 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 6012 phba->max_vports = phba->max_vpi; 6013 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6014 "2003 cfg params Extents?
%d " 6015 "XRI(B:%d M:%d), " 6016 "VPI(B:%d M:%d) " 6017 "VFI(B:%d M:%d) " 6018 "RPI(B:%d M:%d) " 6019 "FCFI(Count:%d)\n", 6020 phba->sli4_hba.extents_in_use, 6021 phba->sli4_hba.max_cfg_param.xri_base, 6022 phba->sli4_hba.max_cfg_param.max_xri, 6023 phba->sli4_hba.max_cfg_param.vpi_base, 6024 phba->sli4_hba.max_cfg_param.max_vpi, 6025 phba->sli4_hba.max_cfg_param.vfi_base, 6026 phba->sli4_hba.max_cfg_param.max_vfi, 6027 phba->sli4_hba.max_cfg_param.rpi_base, 6028 phba->sli4_hba.max_cfg_param.max_rpi, 6029 phba->sli4_hba.max_cfg_param.max_fcfi); 6030 } 6031 6032 if (rc) 6033 goto read_cfg_out; 6034 6035 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 6036 if (phba->cfg_hba_queue_depth > 6037 (phba->sli4_hba.max_cfg_param.max_xri - 6038 lpfc_sli4_get_els_iocb_cnt(phba))) 6039 phba->cfg_hba_queue_depth = 6040 phba->sli4_hba.max_cfg_param.max_xri - 6041 lpfc_sli4_get_els_iocb_cnt(phba); 6042 6043 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 6044 LPFC_SLI_INTF_IF_TYPE_2) 6045 goto read_cfg_out; 6046 6047 /* get the pf# and vf# for SLI4 if_type 2 port */ 6048 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 6049 sizeof(struct lpfc_sli4_cfg_mhdr)); 6050 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 6051 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 6052 length, LPFC_SLI4_MBX_EMBED); 6053 6054 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6055 shdr = (union lpfc_sli4_cfg_shdr *) 6056 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 6057 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6058 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6059 if (rc || shdr_status || shdr_add_status) { 6060 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6061 "3026 Mailbox failed, mbxCmd x%x " 6062 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 6063 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6064 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6065 rc = -EIO; 6066 goto read_cfg_out; 6067 } 6068 6069 /* search for fc_fcoe resource descriptor */ 6070 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 6071 desc_count = get_func_cfg->func_cfg.rsrc_desc_count; 6072 6073 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 6074 desc = (struct lpfc_rsrc_desc_fcfcoe *) 6075 &get_func_cfg->func_cfg.desc[i]; 6076 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 6077 bf_get(lpfc_rsrc_desc_pcie_type, desc)) { 6078 phba->sli4_hba.iov.pf_number = 6079 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 6080 phba->sli4_hba.iov.vf_number = 6081 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 6082 break; 6083 } 6084 } 6085 6086 if (i < LPFC_RSRC_DESC_MAX_NUM) 6087 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6088 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 6089 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 6090 phba->sli4_hba.iov.vf_number); 6091 else { 6092 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6093 "3028 GET_FUNCTION_CONFIG: failed to find " 6094 "Resource Descriptor:x%x\n", 6095 LPFC_RSRC_DESC_TYPE_FCFCOE); 6096 rc = -EIO; 6097 } 6098 6099 read_cfg_out: 6100 mempool_free(pmb, phba->mbox_mem_pool); 6101 return rc; 6102 } 6103 6104 /** 6105 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 6106 * @phba: pointer to lpfc hba data structure. 6107 * 6108 * This routine is invoked to setup the port-side endian order when 6109 * the port if_type is 0. This routine has no function for other 6110 * if_types. 6111 * 6112 * Return codes 6113 * 0 - successful 6114 * -ENOMEM - No available memory 6115 * -EIO - The mailbox failed to complete successfully.
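 *
 * The first two mailbox words carry the fixed values
 * HOST_ENDIAN_LOW_WORD0 and HOST_ENDIAN_HIGH_WORD1; the byte order in
 * which they arrive presumably lets the port deduce the host
 * endianness, so no explicit byte-order flag is written.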
6116 **/ 6117 static int 6118 lpfc_setup_endian_order(struct lpfc_hba *phba) 6119 { 6120 LPFC_MBOXQ_t *mboxq; 6121 uint32_t if_type, rc = 0; 6122 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 6123 HOST_ENDIAN_HIGH_WORD1}; 6124 6125 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6126 switch (if_type) { 6127 case LPFC_SLI_INTF_IF_TYPE_0: 6128 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6129 GFP_KERNEL); 6130 if (!mboxq) { 6131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6132 "0492 Unable to allocate memory for " 6133 "issuing SLI_CONFIG_SPECIAL mailbox " 6134 "command\n"); 6135 return -ENOMEM; 6136 } 6137 6138 /* 6139 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 6140 * two words to contain special data values and no other data. 6141 */ 6142 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 6143 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 6144 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6145 if (rc != MBX_SUCCESS) { 6146 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6147 "0493 SLI_CONFIG_SPECIAL mailbox " 6148 "failed with status x%x\n", 6149 rc); 6150 rc = -EIO; 6151 } 6152 mempool_free(mboxq, phba->mbox_mem_pool); 6153 break; 6154 case LPFC_SLI_INTF_IF_TYPE_2: 6155 case LPFC_SLI_INTF_IF_TYPE_1: 6156 default: 6157 break; 6158 } 6159 return rc; 6160 } 6161 6162 /** 6163 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts 6164 * @phba: pointer to lpfc hba data structure. 6165 * 6166 * This routine is invoked to check the user settable queue counts for EQs and 6167 * CQs. After this routine is called, the counts will be set to valid values that 6168 * adhere to the constraints of the system's interrupt vectors and the port's 6169 * queue resources. 6170 * 6171 * Return codes 6172 * 0 - successful 6173 * -ENOMEM - No available memory 6174 **/ 6175 static int 6176 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 6177 { 6178 int cfg_fcp_wq_count; 6179 int cfg_fcp_eq_count; 6180 6181 /* 6182 * Sanity check for configured queue parameters against the run-time 6183 * device parameters 6184 */ 6185 6186 /* Sanity check on FCP fast-path WQ parameters */ 6187 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 6188 if (cfg_fcp_wq_count > 6189 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 6190 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 6191 LPFC_SP_WQN_DEF; 6192 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 6193 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6194 "2581 Not enough WQs (%d) from " 6195 "the pci function for supporting " 6196 "FCP WQs (%d)\n", 6197 phba->sli4_hba.max_cfg_param.max_wq, 6198 phba->cfg_fcp_wq_count); 6199 goto out_error; 6200 } 6201 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6202 "2582 Not enough WQs (%d) from the pci " 6203 "function for supporting the requested " 6204 "FCP WQs (%d), the actual FCP WQs can " 6205 "be supported: %d\n", 6206 phba->sli4_hba.max_cfg_param.max_wq, 6207 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 6208 } 6209 /* The actual number of FCP work queues adopted */ 6210 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 6211 6212 /* Sanity check on FCP fast-path EQ parameters */ 6213 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 6214 if (cfg_fcp_eq_count > 6215 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 6216 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 6217 LPFC_SP_EQN_DEF; 6218 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 6219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6220 "2574 Not enough EQs (%d) from the " 6221 "pci function for supporting FCP " 6222
"EQs (%d)\n", 6223 phba->sli4_hba.max_cfg_param.max_eq, 6224 phba->cfg_fcp_eq_count); 6225 goto out_error; 6226 } 6227 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6228 "2575 Not enough EQs (%d) from the pci " 6229 "function for supporting the requested " 6230 "FCP EQs (%d), the actual FCP EQs can " 6231 "be supported: %d\n", 6232 phba->sli4_hba.max_cfg_param.max_eq, 6233 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 6234 } 6235 /* It does not make sense to have more EQs than WQs */ 6236 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 6237 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6238 "2593 The FCP EQ count(%d) cannot be greater " 6239 "than the FCP WQ count(%d), limiting the " 6240 "FCP EQ count to %d\n", cfg_fcp_eq_count, 6241 phba->cfg_fcp_wq_count, 6242 phba->cfg_fcp_wq_count); 6243 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 6244 } 6245 /* The actual number of FCP event queues adopted */ 6246 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 6247 /* The overall number of event queues used */ 6248 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6249 6250 /* Get EQ depth from module parameter, fake the default for now */ 6251 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6252 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 6253 6254 /* Get CQ depth from module parameter, fake the default for now */ 6255 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 6256 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 6257 6258 return 0; 6259 out_error: 6260 return -ENOMEM; 6261 } 6262 6263 /** 6264 * lpfc_sli4_queue_create - Create all the SLI4 queues 6265 * @phba: pointer to lpfc hba data structure. 6266 * 6267 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 6268 * operation. For each SLI4 queue type, the parameters such as queue entry 6269 * count (queue depth) shall be taken from the module parameter. For now, 6270 * we just use some constant number as place holder. 6271 * 6272 * Return codes 6273 * 0 - sucessful 6274 * -ENOMEM - No availble memory 6275 * -EIO - The mailbox failed to complete successfully. 6276 **/ 6277 int 6278 lpfc_sli4_queue_create(struct lpfc_hba *phba) 6279 { 6280 struct lpfc_queue *qdesc; 6281 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6282 6283 /* 6284 * Create Event Queues (EQs) 6285 */ 6286 6287 /* Create slow path event queue */ 6288 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6289 phba->sli4_hba.eq_ecount); 6290 if (!qdesc) { 6291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6292 "0496 Failed allocate slow-path EQ\n"); 6293 goto out_error; 6294 } 6295 phba->sli4_hba.sp_eq = qdesc; 6296 6297 /* 6298 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be 6299 * zero whenever there is exactly one interrupt vector. This is not 6300 * an error. 
6301 */ 6302 if (phba->cfg_fcp_eq_count) { 6303 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 6304 phba->cfg_fcp_eq_count), GFP_KERNEL); 6305 if (!phba->sli4_hba.fp_eq) { 6306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6307 "2576 Failed allocate memory for " 6308 "fast-path EQ record array\n"); 6309 goto out_free_sp_eq; 6310 } 6311 } 6312 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6313 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6314 phba->sli4_hba.eq_ecount); 6315 if (!qdesc) { 6316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6317 "0497 Failed allocate fast-path EQ\n"); 6318 goto out_free_fp_eq; 6319 } 6320 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 6321 } 6322 6323 /* 6324 * Create Complete Queues (CQs) 6325 */ 6326 6327 /* Create slow-path Mailbox Command Complete Queue */ 6328 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6329 phba->sli4_hba.cq_ecount); 6330 if (!qdesc) { 6331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6332 "0500 Failed allocate slow-path mailbox CQ\n"); 6333 goto out_free_fp_eq; 6334 } 6335 phba->sli4_hba.mbx_cq = qdesc; 6336 6337 /* Create slow-path ELS Complete Queue */ 6338 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6339 phba->sli4_hba.cq_ecount); 6340 if (!qdesc) { 6341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6342 "0501 Failed allocate slow-path ELS CQ\n"); 6343 goto out_free_mbx_cq; 6344 } 6345 phba->sli4_hba.els_cq = qdesc; 6346 6347 6348 /* 6349 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs. 6350 * If there are no FCP EQs then create exactly one FCP CQ. 6351 */ 6352 if (phba->cfg_fcp_eq_count) 6353 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6354 phba->cfg_fcp_eq_count), 6355 GFP_KERNEL); 6356 else 6357 phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *), 6358 GFP_KERNEL); 6359 if (!phba->sli4_hba.fcp_cq) { 6360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6361 "2577 Failed allocate memory for fast-path " 6362 "CQ record array\n"); 6363 goto out_free_els_cq; 6364 } 6365 fcp_cqidx = 0; 6366 do { 6367 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6368 phba->sli4_hba.cq_ecount); 6369 if (!qdesc) { 6370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6371 "0499 Failed allocate fast-path FCP " 6372 "CQ (%d)\n", fcp_cqidx); 6373 goto out_free_fcp_cq; 6374 } 6375 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 6376 } while (++fcp_cqidx < phba->cfg_fcp_eq_count); 6377 6378 /* Create Mailbox Command Queue */ 6379 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6380 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6381 6382 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6383 phba->sli4_hba.mq_ecount); 6384 if (!qdesc) { 6385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6386 "0505 Failed allocate slow-path MQ\n"); 6387 goto out_free_fcp_cq; 6388 } 6389 phba->sli4_hba.mbx_wq = qdesc; 6390 6391 /* 6392 * Create all the Work Queues (WQs) 6393 */ 6394 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6395 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6396 6397 /* Create slow-path ELS Work Queue */ 6398 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6399 phba->sli4_hba.wq_ecount); 6400 if (!qdesc) { 6401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6402 "0504 Failed allocate slow-path ELS WQ\n"); 6403 goto out_free_mbx_wq; 6404 } 6405 phba->sli4_hba.els_wq = qdesc; 6406 6407 /* Create fast-path FCP Work Queue(s) */ 6408 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6409 phba->cfg_fcp_wq_count), GFP_KERNEL); 6410 if 
(!phba->sli4_hba.fcp_wq) { 6411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6412 "2578 Failed allocate memory for fast-path " 6413 "WQ record array\n"); 6414 goto out_free_els_wq; 6415 } 6416 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6417 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6418 phba->sli4_hba.wq_ecount); 6419 if (!qdesc) { 6420 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6421 "0503 Failed allocate fast-path FCP " 6422 "WQ (%d)\n", fcp_wqidx); 6423 goto out_free_fcp_wq; 6424 } 6425 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 6426 } 6427 6428 /* 6429 * Create Receive Queue (RQ) 6430 */ 6431 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 6432 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 6433 6434 /* Create Receive Queue for header */ 6435 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6436 phba->sli4_hba.rq_ecount); 6437 if (!qdesc) { 6438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6439 "0506 Failed allocate receive HRQ\n"); 6440 goto out_free_fcp_wq; 6441 } 6442 phba->sli4_hba.hdr_rq = qdesc; 6443 6444 /* Create Receive Queue for data */ 6445 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6446 phba->sli4_hba.rq_ecount); 6447 if (!qdesc) { 6448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6449 "0507 Failed allocate receive DRQ\n"); 6450 goto out_free_hdr_rq; 6451 } 6452 phba->sli4_hba.dat_rq = qdesc; 6453 6454 return 0; 6455 6456 out_free_hdr_rq: 6457 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6458 phba->sli4_hba.hdr_rq = NULL; 6459 out_free_fcp_wq: 6460 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 6461 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 6462 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 6463 } 6464 kfree(phba->sli4_hba.fcp_wq); 6465 out_free_els_wq: 6466 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6467 phba->sli4_hba.els_wq = NULL; 6468 out_free_mbx_wq: 6469 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6470 phba->sli4_hba.mbx_wq = NULL; 6471 out_free_fcp_cq: 6472 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 6473 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 6474 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 6475 } 6476 kfree(phba->sli4_hba.fcp_cq); 6477 out_free_els_cq: 6478 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6479 phba->sli4_hba.els_cq = NULL; 6480 out_free_mbx_cq: 6481 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6482 phba->sli4_hba.mbx_cq = NULL; 6483 out_free_fp_eq: 6484 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 6485 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 6486 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 6487 } 6488 kfree(phba->sli4_hba.fp_eq); 6489 out_free_sp_eq: 6490 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6491 phba->sli4_hba.sp_eq = NULL; 6492 out_error: 6493 return -ENOMEM; 6494 } 6495 6496 /** 6497 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 6498 * @phba: pointer to lpfc hba data structure. 6499 * 6500 * This routine is invoked to release all the SLI4 queues used by the FCoE HBA 6501 * operation. 6502 * 6503 * This routine returns no value; every queue structure allocated by 6504 * lpfc_sli4_queue_create() is freed and the corresponding pointer in 6505 * the hba structure is cleared. 6506 *
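 *
 * A full teardown sketch, in the order used elsewhere in this file:
 *
 *   lpfc_sli4_queue_unset(phba);    /* un-post the queues from the port */
 *   lpfc_sli4_queue_destroy(phba);  /* then free the host-side memory */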
6507 **/ 6508 void 6509 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6510 { 6511 int fcp_qidx; 6512 6513 /* Release mailbox command work queue */ 6514 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6515 phba->sli4_hba.mbx_wq = NULL; 6516 6517 /* Release ELS work queue */ 6518 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6519 phba->sli4_hba.els_wq = NULL; 6520 6521 /* Release FCP work queue */ 6522 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6523 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6524 kfree(phba->sli4_hba.fcp_wq); 6525 phba->sli4_hba.fcp_wq = NULL; 6526 6527 /* Release unsolicited receive queue */ 6528 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6529 phba->sli4_hba.hdr_rq = NULL; 6530 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6531 phba->sli4_hba.dat_rq = NULL; 6532 6533 /* Release ELS complete queue */ 6534 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6535 phba->sli4_hba.els_cq = NULL; 6536 6537 /* Release mailbox command complete queue */ 6538 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6539 phba->sli4_hba.mbx_cq = NULL; 6540 6541 /* Release FCP response complete queue */ 6542 fcp_qidx = 0; 6543 do 6544 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6545 while (++fcp_qidx < phba->cfg_fcp_eq_count); 6546 kfree(phba->sli4_hba.fcp_cq); 6547 phba->sli4_hba.fcp_cq = NULL; 6548 6549 /* Release fast-path event queue */ 6550 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6551 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6552 kfree(phba->sli4_hba.fp_eq); 6553 phba->sli4_hba.fp_eq = NULL; 6554 6555 /* Release slow-path event queue */ 6556 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6557 phba->sli4_hba.sp_eq = NULL; 6558 6559 return; 6560 } 6561 6562 /** 6563 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 6564 * @phba: pointer to lpfc hba data structure. 6565 * 6566 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 6567 * operation. 6568 * 6569 * Return codes 6570 * 0 - successful 6571 * -ENOMEM - No available memory 6572 * -EIO - The mailbox failed to complete successfully. 
6573 **/ 6574 int 6575 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6576 { 6577 int rc = -ENOMEM; 6578 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6579 int fcp_cq_index = 0; 6580 6581 /* 6582 * Set up Event Queues (EQs) 6583 */ 6584 6585 /* Set up slow-path event queue */ 6586 if (!phba->sli4_hba.sp_eq) { 6587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6588 "0520 Slow-path EQ not allocated\n"); 6589 goto out_error; 6590 } 6591 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6592 LPFC_SP_DEF_IMAX); 6593 if (rc) { 6594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6595 "0521 Failed setup of slow-path EQ: " 6596 "rc = 0x%x\n", rc); 6597 goto out_error; 6598 } 6599 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6600 "2583 Slow-path EQ setup: queue-id=%d\n", 6601 phba->sli4_hba.sp_eq->queue_id); 6602 6603 /* Set up fast-path event queue */ 6604 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6605 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6607 "0522 Fast-path EQ (%d) not " 6608 "allocated\n", fcp_eqidx); 6609 goto out_destroy_fp_eq; 6610 } 6611 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6612 phba->cfg_fcp_imax); 6613 if (rc) { 6614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6615 "0523 Failed setup of fast-path EQ " 6616 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6617 goto out_destroy_fp_eq; 6618 } 6619 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6620 "2584 Fast-path EQ setup: " 6621 "queue[%d]-id=%d\n", fcp_eqidx, 6622 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6623 } 6624 6625 /* 6626 * Set up Complete Queues (CQs) 6627 */ 6628 6629 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6630 if (!phba->sli4_hba.mbx_cq) { 6631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6632 "0528 Mailbox CQ not allocated\n"); 6633 goto out_destroy_fp_eq; 6634 } 6635 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6636 LPFC_MCQ, LPFC_MBOX); 6637 if (rc) { 6638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6639 "0529 Failed setup of slow-path mailbox CQ: " 6640 "rc = 0x%x\n", rc); 6641 goto out_destroy_fp_eq; 6642 } 6643 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6644 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6645 phba->sli4_hba.mbx_cq->queue_id, 6646 phba->sli4_hba.sp_eq->queue_id); 6647 6648 /* Set up slow-path ELS Complete Queue */ 6649 if (!phba->sli4_hba.els_cq) { 6650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6651 "0530 ELS CQ not allocated\n"); 6652 goto out_destroy_mbx_cq; 6653 } 6654 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6655 LPFC_WCQ, LPFC_ELS); 6656 if (rc) { 6657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6658 "0531 Failed setup of slow-path ELS CQ: " 6659 "rc = 0x%x\n", rc); 6660 goto out_destroy_mbx_cq; 6661 } 6662 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6663 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6664 phba->sli4_hba.els_cq->queue_id, 6665 phba->sli4_hba.sp_eq->queue_id); 6666 6667 /* Set up fast-path FCP Response Complete Queue */ 6668 fcp_cqidx = 0; 6669 do { 6670 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6672 "0526 Fast-path FCP CQ (%d) not " 6673 "allocated\n", fcp_cqidx); 6674 goto out_destroy_fcp_cq; 6675 } 6676 if (phba->cfg_fcp_eq_count) 6677 rc = lpfc_cq_create(phba, 6678 phba->sli4_hba.fcp_cq[fcp_cqidx], 6679 phba->sli4_hba.fp_eq[fcp_cqidx], 6680 LPFC_WCQ, LPFC_FCP); 6681 else 6682 rc = lpfc_cq_create(phba, 6683 phba->sli4_hba.fcp_cq[fcp_cqidx], 6684 phba->sli4_hba.sp_eq, 6685 LPFC_WCQ, LPFC_FCP); 6686 if (rc) 
{ 6687 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6688 "0527 Failed setup of fast-path FCP " 6689 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 6690 goto out_destroy_fcp_cq; 6691 } 6692 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6693 "2588 FCP CQ setup: cq[%d]-id=%d, " 6694 "parent %seq[%d]-id=%d\n", 6695 fcp_cqidx, 6696 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6697 (phba->cfg_fcp_eq_count) ? "" : "sp_", 6698 fcp_cqidx, 6699 (phba->cfg_fcp_eq_count) ? 6700 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id : 6701 phba->sli4_hba.sp_eq->queue_id); 6702 } while (++fcp_cqidx < phba->cfg_fcp_eq_count); 6703 6704 /* 6705 * Set up all the Work Queues (WQs) 6706 */ 6707 6708 /* Set up Mailbox Command Queue */ 6709 if (!phba->sli4_hba.mbx_wq) { 6710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6711 "0538 Slow-path MQ not allocated\n"); 6712 goto out_destroy_fcp_cq; 6713 } 6714 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6715 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6716 if (rc) { 6717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6718 "0539 Failed setup of slow-path MQ: " 6719 "rc = 0x%x\n", rc); 6720 goto out_destroy_fcp_cq; 6721 } 6722 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6723 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6724 phba->sli4_hba.mbx_wq->queue_id, 6725 phba->sli4_hba.mbx_cq->queue_id); 6726 6727 /* Set up slow-path ELS Work Queue */ 6728 if (!phba->sli4_hba.els_wq) { 6729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6730 "0536 Slow-path ELS WQ not allocated\n"); 6731 goto out_destroy_mbx_wq; 6732 } 6733 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6734 phba->sli4_hba.els_cq, LPFC_ELS); 6735 if (rc) { 6736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6737 "0537 Failed setup of slow-path ELS WQ: " 6738 "rc = 0x%x\n", rc); 6739 goto out_destroy_mbx_wq; 6740 } 6741 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6742 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6743 phba->sli4_hba.els_wq->queue_id, 6744 phba->sli4_hba.els_cq->queue_id); 6745 6746 /* Set up fast-path FCP Work Queue */ 6747 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6748 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6750 "0534 Fast-path FCP WQ (%d) not " 6751 "allocated\n", fcp_wqidx); 6752 goto out_destroy_fcp_wq; 6753 } 6754 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6755 phba->sli4_hba.fcp_cq[fcp_cq_index], 6756 LPFC_FCP); 6757 if (rc) { 6758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6759 "0535 Failed setup of fast-path FCP " 6760 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6761 goto out_destroy_fcp_wq; 6762 } 6763 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6764 "2591 FCP WQ setup: wq[%d]-id=%d, " 6765 "parent cq[%d]-id=%d\n", 6766 fcp_wqidx, 6767 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6768 fcp_cq_index, 6769 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6770 /* Round robin FCP Work Queue's Completion Queue assignment */ 6771 if (phba->cfg_fcp_eq_count) 6772 fcp_cq_index = ((fcp_cq_index + 1) % 6773 phba->cfg_fcp_eq_count); 6774 } 6775 6776 /* 6777 * Create Receive Queue (RQ) 6778 */ 6779 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6781 "0540 Receive Queue not allocated\n"); 6782 goto out_destroy_fcp_wq; 6783 } 6784 6785 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 6786 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 6787 6788 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6789 phba->sli4_hba.els_cq, LPFC_USOL); 6790 if (rc) 
{ 6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6792 "0541 Failed setup of Receive Queue: " 6793 "rc = 0x%x\n", rc); 6794 goto out_destroy_fcp_wq; 6795 } 6796 6797 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6798 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6799 "parent cq-id=%d\n", 6800 phba->sli4_hba.hdr_rq->queue_id, 6801 phba->sli4_hba.dat_rq->queue_id, 6802 phba->sli4_hba.els_cq->queue_id); 6803 return 0; 6804 6805 out_destroy_fcp_wq: 6806 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 6807 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6808 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6809 out_destroy_mbx_wq: 6810 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6811 out_destroy_fcp_cq: 6812 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6813 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6814 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6815 out_destroy_mbx_cq: 6816 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6817 out_destroy_fp_eq: 6818 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6819 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6820 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6821 out_error: 6822 return rc; 6823 } 6824 6825 /** 6826 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 6827 * @phba: pointer to lpfc hba data structure. 6828 * 6829 * This routine is invoked to unset all the SLI4 queues used by the FCoE HBA 6830 * operation. 6831 * 6832 * This routine returns no value; each queue is torn down on the port 6833 * via its destroy operation (lpfc_wq_destroy() and friends), while the 6834 * host-side queue memory is freed separately by 6835 * lpfc_sli4_queue_destroy(). 6836 **/ 6837 void 6838 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6839 { 6840 int fcp_qidx; 6841 6842 /* Unset mailbox command work queue */ 6843 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6844 /* Unset ELS work queue */ 6845 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6846 /* Unset unsolicited receive queue */ 6847 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6848 /* Unset FCP work queue */ 6849 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6850 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6851 /* Unset mailbox command complete queue */ 6852 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6853 /* Unset ELS complete queue */ 6854 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6855 /* Unset FCP response complete queue */ 6856 fcp_qidx = 0; 6857 do { 6858 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6859 } while (++fcp_qidx < phba->cfg_fcp_eq_count); 6860 /* Unset fast-path event queue */ 6861 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6862 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6863 /* Unset slow-path event queue */ 6864 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6865 } 6866 6867 /** 6868 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6869 * @phba: pointer to lpfc hba data structure. 6870 * 6871 * This routine is invoked to allocate and set up a pool of completion queue 6872 * events. The body of the completion queue event is a completion queue entry 6873 * (CQE). For now, this pool is used for the interrupt service routine to queue 6874 * the following HBA completion queue events for the worker thread to process: 6875 * - Mailbox asynchronous events 6876 * - Receive queue completion unsolicited events 6877 * Later, this can be used for all the slow-path events.
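 * The pool is sized below at four events per CQ entry (4 * cq_ecount),
 * a margin presumably chosen so the interrupt handler does not run dry
 * while the worker thread drains events.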
6878 * 6879 * Return codes 6880 * 0 - successful 6881 * -ENOMEM - No available memory 6882 **/ 6883 static int 6884 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6885 { 6886 struct lpfc_cq_event *cq_event; 6887 int i; 6888 6889 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6890 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6891 if (!cq_event) 6892 goto out_pool_create_fail; 6893 list_add_tail(&cq_event->list, 6894 &phba->sli4_hba.sp_cqe_event_pool); 6895 } 6896 return 0; 6897 6898 out_pool_create_fail: 6899 lpfc_sli4_cq_event_pool_destroy(phba); 6900 return -ENOMEM; 6901 } 6902 6903 /** 6904 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6905 * @phba: pointer to lpfc hba data structure. 6906 * 6907 * This routine is invoked to free the pool of completion queue events at 6908 * driver unload time. Note that it is the responsibility of the driver 6909 * cleanup routine to free all the outstanding completion-queue events 6910 * allocated from this pool back into the pool before invoking this routine 6911 * to destroy the pool. 6912 **/ 6913 static void 6914 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6915 { 6916 struct lpfc_cq_event *cq_event, *next_cq_event; 6917 6918 list_for_each_entry_safe(cq_event, next_cq_event, 6919 &phba->sli4_hba.sp_cqe_event_pool, list) { 6920 list_del(&cq_event->list); 6921 kfree(cq_event); 6922 } 6923 } 6924 6925 /** 6926 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6927 * @phba: pointer to lpfc hba data structure. 6928 * 6929 * This routine is the lock-free version of the API invoked to allocate a 6930 * completion-queue event from the free pool. 6931 * 6932 * Return: Pointer to the newly allocated completion-queue event if successful 6933 * NULL otherwise. 6934 **/ 6935 struct lpfc_cq_event * 6936 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6937 { 6938 struct lpfc_cq_event *cq_event = NULL; 6939 6940 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6941 struct lpfc_cq_event, list); 6942 return cq_event; 6943 } 6944 6945 /** 6946 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6947 * @phba: pointer to lpfc hba data structure. 6948 * 6949 * This routine is the locked version of the API invoked to allocate a 6950 * completion-queue event from the free pool. 6951 * 6952 * Return: Pointer to the newly allocated completion-queue event if successful 6953 * NULL otherwise. 6954 **/ 6955 struct lpfc_cq_event * 6956 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6957 { 6958 struct lpfc_cq_event *cq_event; 6959 unsigned long iflags; 6960 6961 spin_lock_irqsave(&phba->hbalock, iflags); 6962 cq_event = __lpfc_sli4_cq_event_alloc(phba); 6963 spin_unlock_irqrestore(&phba->hbalock, iflags); 6964 return cq_event; 6965 } 6966 6967 /** 6968 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6969 * @phba: pointer to lpfc hba data structure. 6970 * @cq_event: pointer to the completion queue event to be freed. 6971 * 6972 * This routine is the lock-free version of the API invoked to release a 6973 * completion-queue event back into the free pool. 6974 **/ 6975 void 6976 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6977 struct lpfc_cq_event *cq_event) 6978 { 6979 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6980 } 6981 6982 /** 6983 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6984 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the locked version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases all the pending completion-queue events back into
 * the free pool for a device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
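 *
 * A minimal caller sketch (illustrative; the SLI-4 unset paths later in
 * this file invoke it the same way):
 *
 *	rc = lpfc_pci_function_reset(phba);
 *	if (rc)
 *		... treat the port as failed and abort the bring-up ...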
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		for (num_resets = 0;
		     num_resets < MAX_IF_TYPE_2_RESETS;
		     num_resets++) {
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);

			/*
			 * Poll the Port Status Register and wait for RDY for
			 * up to 10 seconds. If the port doesn't respond, treat
			 * it as an error. If the port responds with RN, start
			 * the loop again.
			 */
			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
				msleep(10);
				if (lpfc_readl(phba->sli4_hba.u.if_type2.
					       STATUSregaddr,
					       &reg_data.word0)) {
					rc = -ENODEV;
					goto out;
				}
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
					break;
				if (bf_get(lpfc_sliport_status_rn,
					   &reg_data)) {
					reset_again++;
					break;
				}
			}

			/*
			 * If the port responds to the init request with
			 * reset needed, delay for a bit and restart the loop.
			 */
			if (reset_again) {
				msleep(10);
				reset_again = 0;
				continue;
			}

			/* Detect any port errors. */
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
			    (rdy_chk >= 1000)) {
				phba->work_status[0] = readl(
					phba->sli4_hba.u.if_type2.ERR1regaddr);
				phba->work_status[1] = readl(
					phba->sli4_hba.u.if_type2.ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2890 Port Error Detected "
						"during Port Reset: "
						"port status reg 0x%x, "
						"error 1=0x%x, error 2=0x%x\n",
						reg_data.word0,
						phba->work_status[0],
						phba->work_status[1]);
				rc = -ENODEV;
			}

			/*
			 * Terminate the outer loop provided the Port indicated
			 * ready within 10 seconds.
			 */
			if (rdy_chk < 1000)
				break;
		}
		/* delay driver action following IF_TYPE_2 function reset */
		msleep(100);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (num_resets >= MAX_IF_TYPE_2_RESETS)
		rc = -ENODEV;

	return rc;
}

/**
 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
 * @phba: pointer to lpfc hba data structure.
 * @cnt: number of nop mailbox commands to send.
 *
 * This routine is invoked to send a number @cnt of NOP mailbox commands
 * and wait for each command to complete.
 *
 * Return: the number of NOP mailbox commands completed.
 **/
static int
lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
{
	LPFC_MBOXQ_t *mboxq;
	int length, cmdsent;
	uint32_t mbox_tmo;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (cnt == 0) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2518 Requested to send 0 NOP mailbox cmd\n");
		return cnt;
	}

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2519 Unable to allocate memory for issuing "
				"NOP mailbox command\n");
		return 0;
	}

	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
	length = (sizeof(struct lpfc_mbx_nop) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);

	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
		if (!phba->sli4_hba.intr_enable)
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		else {
			mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
		}
		if (rc == MBX_TIMEOUT)
			break;
		/* Check return status */
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2520 NOP mailbox command failed "
					"status x%x add_status x%x mbx "
					"status x%x\n", shdr_status,
					shdr_add_status, rc);
			break;
		}
	}

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	return cmdsent;
}

/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
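 *
 * For reference, as coded below, an if_type 0 port maps:
 *	PCI resource 0 - SLI4 config registers (BAR0)
 *	PCI resource 2 - HBA control registers (BAR1)
 *	PCI resource 4 - doorbell registers (BAR2)
 * while an if_type 2 port must present PCI resource 0 for its config
 * registers; it is a fatal error when that BAR is absent.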
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev,
						   DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, 0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, 0);
		bar0map_len = pci_resource_len(pdev, 0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * addr
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			goto out;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, 2))) {
		/*
		 * Map SLI4 if type 0 HBA Control Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, 2);
		bar1map_len = pci_resource_len(pdev, 2);
		phba->sli4_hba.ctrl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 HBA control registers.\n");
			goto out_iounmap_conf;
		}
		lpfc_sli4_bar1_register_memmap(phba);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, 4))) {
		/*
		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, 4);
		bar2map_len = pci_resource_len(pdev, 4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */

	/* Unmap I/O memory space */
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
	iounmap(phba->sli4_hba.conf_regs_memmap_p);

	return;
}

/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
 * invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
 * later, when the device is unloading, the driver should always call
 * free_irq() on all MSI-X vectors it has done request_irq() on before
 * calling pci_disable_msix(). Failure to do so results in a BUG_ON(), and
 * the device will be left with MSI-X enabled, leaking its vectors.
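 *
 * A minimal teardown sketch (illustrative; lpfc_sli_disable_msix() below
 * implements exactly this ordering):
 *
 *	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
 *		free_irq(phba->msix_entries[i].vector, phba);
 *	pci_disable_msix(phba->pcidev);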
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc, i;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		phba->msix_entries[i].entry = i;

	/* Configure MSI-X capability structure */
	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
			     ARRAY_SIZE(phba->msix_entries));
	if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0477 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", i,
				phba->msix_entries[i].vector,
				phba->msix_entries[i].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(phba->msix_entries[0].vector,
			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(phba->msix_entries[1].vector,
			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);

	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[1].vector, phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(phba->msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-3 interface spec.
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON(), and the device will be left with MSI enabled,
 * leaking its vector.
 */
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the driver
 * will try to fall back from the configured interrupt mode to an interrupt
 * mode which is supported by the platform, kernel, and device, in the order
 * of:
 *	MSI-X -> MSI -> IRQ.
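 *
 * A minimal caller sketch (illustrative; the SLI-3 probe path later in
 * this file adds an active-interrupt test and a downgrade-and-retry loop
 * on top of this):
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		... fail the bring-up with -ENODEV ...
 *	phba->intr_mode = intr_mode;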
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
 * enables either all or nothing, depending on the current availability of
 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later, when the device
 * is unloading, the driver should always call free_irq() on all MSI-X
 * vectors it has done request_irq() on before calling pci_disable_msix().
 * Failure to do so results in a BUG_ON(), and the device will be left with
 * MSI-X enabled, leaking its vectors.
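 *
 * A note on the retry logic below (illustrative): a positive return value
 * from pci_enable_msix() is the number of vectors currently available, so
 * the routine retries with that reduced count instead of failing outright:
 *
 *	rc = pci_enable_msix(pcidev, entries, vectors);
 *	if (rc > 1) {
 *		vectors = rc;
 *		... retry pci_enable_msix() with the smaller count ...
 *	}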
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;

	/* Set up MSI-X multi-message vectors */
	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
		phba->sli4_hba.msix_entries[index].entry = index;

	/* Configure MSI-X capability structure */
	vectors = phba->sli4_hba.cfg_eqn;
enable_msix_vectors:
	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
			     vectors);
	if (rc > 1) {
		vectors = rc;
		goto enable_msix_vectors;
	} else if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}

	/* Log MSI-X vector assignment */
	for (index = 0; index < vectors; index++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0489 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", index,
				phba->sli4_hba.msix_entries[index].vector,
				phba->sli4_hba.msix_entries[index].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */
	if (vectors > 1)
		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
				 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	else
		/* All Interrupts need to be handled by one EQ */
		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
				 &lpfc_sli4_intr_handler, IRQF_SHARED,
				 LPFC_DRIVER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0485 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* The rest of the vector(s) are associated to fast-path handler(s) */
	for (index = 1; index < vectors; index++) {
		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
				 LPFC_FP_DRIVER_HANDLER_NAME,
				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}
	phba->sli4_hba.msix_vec_nr = vectors;

	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 1; index--)
		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);

	/* free the irq already requested */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode to device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);

	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);

	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	return 0;
}

/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode to device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON(), and the device will be left with MSI enabled,
 * leaking its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the driver,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device, in
 * the order of:
 *	MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			for (index = 0; index < phba->cfg_fcp_eq_count;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to undo the HBA device initialization steps
 * performed on a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to undo the HBA device initialization steps
 * performed on a device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	phba->pport->work_port_events = 0;

	/* Stop the SLI4 device port */
	lpfc_stop_port(phba);

	lpfc_sli4_disable_intr(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's XRI exchange busy. It checks the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it checks the XRI exchange busy on outstanding FCP and ELS I/Os
 * every 30 seconds, logging an error message each time, and waits forever.
 * Only when all XRI exchange busy conditions have completed shall the
 * driver unload proceed with invoking the function reset ioctl mailbox
 * command to the CNA and the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	while (!fcp_xri_cmpl || !els_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues the PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls the lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
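 *
 * A minimal caller sketch (illustrative; the caller owns the mailbox
 * memory, which comes from the mbox_mem_pool):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *	mempool_free(mboxq, phba->mbox_mem_pool);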
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return 0;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device and driver to see if the driver
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
 *
 * Return code
 *      0 - driver can claim the device
 *      negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1403 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for
 * the HBA device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
			fc_vport_terminate(vports[i]->fc_vport);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	pci_set_drvdata(pdev, NULL);
	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver's suspend/resume support:
 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call are treated as SUSPEND, and the driver fully reinitializes its
 * device during the resume() method call. Consequently, the driver sets the
 * device to the PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *      0 - driver suspended the device
 *      Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec.
 * When PM invokes this method, it restores the device's PCI config space
 * state, fully reinitializes the device, and brings it online. Note that
 * the driver implements only the minimum PM requirements for a power-aware
 * driver's suspend/resume support: all possible PM messages (SUSPEND,
 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND,
 * and the driver fully reinitializes its device during the resume() method
 * call. Consequently, the device is set to PCI_D0 directly in PCI config
 * space before restoring the state.
 *
 * Return code
 *      0 - driver resumed the device
 *      Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
8722 **/ 8723 static void 8724 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 8725 { 8726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8727 "2710 PCI channel disable preparing for reset\n"); 8728 8729 /* Block any management I/Os to the device */ 8730 lpfc_block_mgmt_io(phba); 8731 8732 /* Block all SCSI devices' I/Os on the host */ 8733 lpfc_scsi_dev_block(phba); 8734 8735 /* stop all timers */ 8736 lpfc_stop_hba_timers(phba); 8737 8738 /* Disable interrupt and pci device */ 8739 lpfc_sli_disable_intr(phba); 8740 pci_disable_device(phba->pcidev); 8741 8742 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8743 lpfc_sli_flush_fcp_rings(phba); 8744 } 8745 8746 /** 8747 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 8748 * @phba: pointer to lpfc hba data structure. 8749 * 8750 * This routine is called to prepare the SLI3 device for PCI slot permanently 8751 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8752 * pending I/Os. 8753 **/ 8754 static void 8755 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8756 { 8757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8758 "2711 PCI channel permanent disable for failure\n"); 8759 /* Block all SCSI devices' I/Os on the host */ 8760 lpfc_scsi_dev_block(phba); 8761 8762 /* stop all timers */ 8763 lpfc_stop_hba_timers(phba); 8764 8765 /* Clean up all driver's outstanding SCSI I/Os */ 8766 lpfc_sli_flush_fcp_rings(phba); 8767 } 8768 8769 /** 8770 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 8771 * @pdev: pointer to PCI device. 8772 * @state: the current PCI connection state. 8773 * 8774 * This routine is called from the PCI subsystem for I/O error handling to 8775 * device with SLI-3 interface spec. This function is called by the PCI 8776 * subsystem after a PCI bus error affecting this device has been detected. 8777 * When this function is invoked, it will need to stop all the I/Os and 8778 * interrupt(s) to the device. Once that is done, it will return 8779 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 8780 * as desired. 8781 * 8782 * Return codes 8783 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 8784 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8785 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8786 **/ 8787 static pci_ers_result_t 8788 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 8789 { 8790 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8791 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8792 8793 switch (state) { 8794 case pci_channel_io_normal: 8795 /* Non-fatal error, prepare for recovery */ 8796 lpfc_sli_prep_dev_for_recover(phba); 8797 return PCI_ERS_RESULT_CAN_RECOVER; 8798 case pci_channel_io_frozen: 8799 /* Fatal error, prepare for slot reset */ 8800 lpfc_sli_prep_dev_for_reset(phba); 8801 return PCI_ERS_RESULT_NEED_RESET; 8802 case pci_channel_io_perm_failure: 8803 /* Permanent failure, prepare for device down */ 8804 lpfc_sli_prep_dev_for_perm_failure(phba); 8805 return PCI_ERS_RESULT_DISCONNECT; 8806 default: 8807 /* Unknown state, prepare and request slot reset */ 8808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8809 "0472 Unknown PCI error state: x%x\n", state); 8810 lpfc_sli_prep_dev_for_reset(phba); 8811 return PCI_ERS_RESULT_NEED_RESET; 8812 } 8813 } 8814 8815 /** 8816 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 8817 * @pdev: pointer to PCI device. 
8818 * 8819 * This routine is called from the PCI subsystem for error handling to 8820 * device with SLI-3 interface spec. This is called after PCI bus has been 8821 * reset to restart the PCI card from scratch, as if from a cold-boot. 8822 * During the PCI subsystem error recovery, after driver returns 8823 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8824 * recovery and then call this routine before calling the .resume method 8825 * to recover the device. This function will initialize the HBA device, 8826 * enable the interrupt, but it will just put the HBA to offline state 8827 * without passing any I/O traffic. 8828 * 8829 * Return codes 8830 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8831 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8832 */ 8833 static pci_ers_result_t 8834 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 8835 { 8836 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8837 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8838 struct lpfc_sli *psli = &phba->sli; 8839 uint32_t intr_mode; 8840 8841 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8842 if (pci_enable_device_mem(pdev)) { 8843 printk(KERN_ERR "lpfc: Cannot re-enable " 8844 "PCI device after reset.\n"); 8845 return PCI_ERS_RESULT_DISCONNECT; 8846 } 8847 8848 pci_restore_state(pdev); 8849 8850 /* 8851 * As the new kernel behavior of pci_restore_state() API call clears 8852 * device saved_state flag, need to save the restored state again. 8853 */ 8854 pci_save_state(pdev); 8855 8856 if (pdev->is_busmaster) 8857 pci_set_master(pdev); 8858 8859 spin_lock_irq(&phba->hbalock); 8860 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8861 spin_unlock_irq(&phba->hbalock); 8862 8863 /* Configure and enable interrupt */ 8864 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8865 if (intr_mode == LPFC_INTR_ERROR) { 8866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8867 "0427 Cannot re-enable interrupt after " 8868 "slot reset.\n"); 8869 return PCI_ERS_RESULT_DISCONNECT; 8870 } else 8871 phba->intr_mode = intr_mode; 8872 8873 /* Take device offline, it will perform cleanup */ 8874 lpfc_offline_prep(phba); 8875 lpfc_offline(phba); 8876 lpfc_sli_brdrestart(phba); 8877 8878 /* Log the current active interrupt mode */ 8879 lpfc_log_intr_mode(phba, phba->intr_mode); 8880 8881 return PCI_ERS_RESULT_RECOVERED; 8882 } 8883 8884 /** 8885 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 8886 * @pdev: pointer to PCI device 8887 * 8888 * This routine is called from the PCI subsystem for error handling to device 8889 * with SLI-3 interface spec. It is called when kernel error recovery tells 8890 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8891 * error recovery. After this call, traffic can start to flow from this device 8892 * again. 8893 */ 8894 static void 8895 lpfc_io_resume_s3(struct pci_dev *pdev) 8896 { 8897 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8898 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8899 8900 /* Bring device online, it will be no-op for non-fatal error resume */ 8901 lpfc_online(phba); 8902 8903 /* Clean up Advanced Error Reporting (AER) if needed */ 8904 if (phba->hba_flag & HBA_AER_ENABLED) 8905 pci_cleanup_aer_uncorrect_error_status(pdev); 8906 } 8907 8908 /** 8909 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 8910 * @phba: pointer to lpfc hba data structure. 
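 *
 * For example, an SLI-4 port reporting max_xri = 300 falls into the
 * (256, 512] tier in the body below, so 50 IOCBs are reserved.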
8911 * 8912 * returns the number of ELS/CT IOCBs to reserve 8913 **/ 8914 int 8915 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 8916 { 8917 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 8918 8919 if (phba->sli_rev == LPFC_SLI_REV4) { 8920 if (max_xri <= 100) 8921 return 10; 8922 else if (max_xri <= 256) 8923 return 25; 8924 else if (max_xri <= 512) 8925 return 50; 8926 else if (max_xri <= 1024) 8927 return 100; 8928 else 8929 return 150; 8930 } else 8931 return 0; 8932 } 8933 8934 /** 8935 * lpfc_write_firmware - attempt to write a firmware image to the port 8936 * @phba: pointer to lpfc hba data structure. 8937 * @fw: pointer to firmware image returned from request_firmware. 8938 * 8939 * returns the number of bytes written if write is successful. 8940 * returns a negative error value if there were errors. 8941 * returns 0 if firmware matches currently active firmware on port. 8942 **/ 8943 int 8944 lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) 8945 { 8946 char fwrev[32]; 8947 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; 8948 struct list_head dma_buffer_list; 8949 int i, rc = 0; 8950 struct lpfc_dmabuf *dmabuf, *next; 8951 uint32_t offset = 0, temp_offset = 0; 8952 8953 INIT_LIST_HEAD(&dma_buffer_list); 8954 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 8955 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 8956 LPFC_FILE_TYPE_GROUP) || 8957 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || 8958 (be32_to_cpu(image->size) != fw->size)) { 8959 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8960 "3022 Invalid FW image found. " 8961 "Magic:%x Type:%x ID:%x\n", 8962 be32_to_cpu(image->magic_number), 8963 bf_get_be32(lpfc_grp_hdr_file_type, image), 8964 bf_get_be32(lpfc_grp_hdr_id, image)); 8965 return -EINVAL; 8966 } 8967 lpfc_decode_firmware_rev(phba, fwrev, 1); 8968 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 8969 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8970 "3023 Updating Firmware. Current Version:%s " 8971 "New Version:%s\n", 8972 fwrev, image->revision); 8973 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 8974 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 8975 GFP_KERNEL); 8976 if (!dmabuf) { 8977 rc = -ENOMEM; 8978 goto out; 8979 } 8980 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8981 SLI4_PAGE_SIZE, 8982 &dmabuf->phys, 8983 GFP_KERNEL); 8984 if (!dmabuf->virt) { 8985 kfree(dmabuf); 8986 rc = -ENOMEM; 8987 goto out; 8988 } 8989 list_add_tail(&dmabuf->list, &dma_buffer_list); 8990 } 8991 while (offset < fw->size) { 8992 temp_offset = offset; 8993 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 8994 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 8995 memcpy(dmabuf->virt, 8996 fw->data + temp_offset, 8997 fw->size - temp_offset); 8998 temp_offset = fw->size; 8999 break; 9000 } 9001 memcpy(dmabuf->virt, fw->data + temp_offset, 9002 SLI4_PAGE_SIZE); 9003 temp_offset += SLI4_PAGE_SIZE; 9004 } 9005 rc = lpfc_wr_object(phba, &dma_buffer_list, 9006 (fw->size - offset), &offset); 9007 if (rc) { 9008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9009 "3024 Firmware update failed. 
" 9010 "%d\n", rc); 9011 goto out; 9012 } 9013 } 9014 rc = offset; 9015 } 9016 out: 9017 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 9018 list_del(&dmabuf->list); 9019 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 9020 dmabuf->virt, dmabuf->phys); 9021 kfree(dmabuf); 9022 } 9023 return rc; 9024 } 9025 9026 /** 9027 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 9028 * @pdev: pointer to PCI device 9029 * @pid: pointer to PCI device identifier 9030 * 9031 * This routine is called from the kernel's PCI subsystem to device with 9032 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 9033 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 9034 * information of the device and driver to see if the driver state that it 9035 * can support this kind of device. If the match is successful, the driver 9036 * core invokes this routine. If this routine determines it can claim the HBA, 9037 * it does all the initialization that it needs to do to handle the HBA 9038 * properly. 9039 * 9040 * Return code 9041 * 0 - driver can claim the device 9042 * negative value - driver can not claim the device 9043 **/ 9044 static int __devinit 9045 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 9046 { 9047 struct lpfc_hba *phba; 9048 struct lpfc_vport *vport = NULL; 9049 struct Scsi_Host *shost = NULL; 9050 int error; 9051 uint32_t cfg_mode, intr_mode; 9052 int mcnt; 9053 int adjusted_fcp_eq_count; 9054 const struct firmware *fw; 9055 uint8_t file_name[16]; 9056 9057 /* Allocate memory for HBA structure */ 9058 phba = lpfc_hba_alloc(pdev); 9059 if (!phba) 9060 return -ENOMEM; 9061 9062 /* Perform generic PCI device enabling operation */ 9063 error = lpfc_enable_pci_dev(phba); 9064 if (error) 9065 goto out_free_phba; 9066 9067 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 9068 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 9069 if (error) 9070 goto out_disable_pci_dev; 9071 9072 /* Set up SLI-4 specific device PCI memory space */ 9073 error = lpfc_sli4_pci_mem_setup(phba); 9074 if (error) { 9075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9076 "1410 Failed to set up pci memory space.\n"); 9077 goto out_disable_pci_dev; 9078 } 9079 9080 /* Set up phase-1 common device driver resources */ 9081 error = lpfc_setup_driver_resource_phase1(phba); 9082 if (error) { 9083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9084 "1411 Failed to set up driver resource.\n"); 9085 goto out_unset_pci_mem_s4; 9086 } 9087 9088 /* Set up SLI-4 Specific device driver resources */ 9089 error = lpfc_sli4_driver_resource_setup(phba); 9090 if (error) { 9091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9092 "1412 Failed to set up driver resource.\n"); 9093 goto out_unset_pci_mem_s4; 9094 } 9095 9096 /* Initialize and populate the iocb list per host */ 9097 9098 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9099 "2821 initialize iocb list %d.\n", 9100 phba->cfg_iocb_cnt*1024); 9101 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 9102 9103 if (error) { 9104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9105 "1413 Failed to initialize iocb list.\n"); 9106 goto out_unset_driver_resource_s4; 9107 } 9108 9109 INIT_LIST_HEAD(&phba->active_rrq_list); 9110 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 9111 9112 /* Set up common device driver resources */ 9113 error = lpfc_setup_driver_resource_phase2(phba); 9114 if (error) { 9115 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9116 "1414 Failed to set up driver 
resource.\n"); 9117 goto out_free_iocb_list; 9118 } 9119 9120 /* Get the default values for Model Name and Description */ 9121 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9122 9123 /* Create SCSI host to the physical port */ 9124 error = lpfc_create_shost(phba); 9125 if (error) { 9126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9127 "1415 Failed to create scsi host.\n"); 9128 goto out_unset_driver_resource; 9129 } 9130 9131 /* Configure sysfs attributes */ 9132 vport = phba->pport; 9133 error = lpfc_alloc_sysfs_attr(vport); 9134 if (error) { 9135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9136 "1416 Failed to allocate sysfs attr\n"); 9137 goto out_destroy_shost; 9138 } 9139 9140 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9141 /* Now, trying to enable interrupt and bring up the device */ 9142 cfg_mode = phba->cfg_use_msi; 9143 while (true) { 9144 /* Put device to a known state before enabling interrupt */ 9145 lpfc_stop_port(phba); 9146 /* Configure and enable interrupt */ 9147 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 9148 if (intr_mode == LPFC_INTR_ERROR) { 9149 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9150 "0426 Failed to enable interrupt.\n"); 9151 error = -ENODEV; 9152 goto out_free_sysfs_attr; 9153 } 9154 /* Default to single EQ for non-MSI-X */ 9155 if (phba->intr_type != MSIX) 9156 adjusted_fcp_eq_count = 0; 9157 else if (phba->sli4_hba.msix_vec_nr < 9158 phba->cfg_fcp_eq_count + 1) 9159 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9160 else 9161 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count; 9162 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count; 9163 /* Set up SLI-4 HBA */ 9164 if (lpfc_sli4_hba_setup(phba)) { 9165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9166 "1421 Failed to set up hba\n"); 9167 error = -ENODEV; 9168 goto out_disable_intr; 9169 } 9170 9171 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 9172 if (intr_mode != 0) 9173 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 9174 LPFC_ACT_INTR_CNT); 9175 9176 /* Check active interrupts received only for MSI/MSI-X */ 9177 if (intr_mode == 0 || 9178 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 9179 /* Log the current active interrupt mode */ 9180 phba->intr_mode = intr_mode; 9181 lpfc_log_intr_mode(phba, intr_mode); 9182 break; 9183 } 9184 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9185 "0451 Configure interrupt mode (%d) " 9186 "failed active interrupt test.\n", 9187 intr_mode); 9188 /* Unset the previous SLI-4 HBA setup. */ 9189 /* 9190 * TODO: Is this operation compatible with IF TYPE 2 9191 * devices? All port state is deleted and cleared. 9192 */ 9193 lpfc_sli4_unset_hba(phba); 9194 /* Try next level of interrupt mode */ 9195 cfg_mode = --intr_mode; 9196 } 9197 9198 /* Perform post initialization setup */ 9199 lpfc_post_init_setup(phba); 9200 9201 /* check for firmware upgrade or downgrade */ 9202 snprintf(file_name, 16, "%s.grp", phba->ModelName); 9203 error = request_firmware(&fw, file_name, &phba->pcidev->dev); 9204 if (!error) { 9205 lpfc_write_firmware(phba, fw); 9206 release_firmware(fw); 9207 } 9208 9209 /* Check if there are static vports to be created. 
*/ 9210 lpfc_create_static_vport(phba); 9211 return 0; 9212 9213 out_disable_intr: 9214 lpfc_sli4_disable_intr(phba); 9215 out_free_sysfs_attr: 9216 lpfc_free_sysfs_attr(vport); 9217 out_destroy_shost: 9218 lpfc_destroy_shost(phba); 9219 out_unset_driver_resource: 9220 lpfc_unset_driver_resource_phase2(phba); 9221 out_free_iocb_list: 9222 lpfc_free_iocb_list(phba); 9223 out_unset_driver_resource_s4: 9224 lpfc_sli4_driver_resource_unset(phba); 9225 out_unset_pci_mem_s4: 9226 lpfc_sli4_pci_mem_unset(phba); 9227 out_disable_pci_dev: 9228 lpfc_disable_pci_dev(phba); 9229 if (shost) 9230 scsi_host_put(shost); 9231 out_free_phba: 9232 lpfc_hba_free(phba); 9233 return error; 9234 } 9235 9236 /** 9237 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 9238 * @pdev: pointer to PCI device 9239 * 9240 * This routine is called from the kernel's PCI subsystem to device with 9241 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 9242 * removed from PCI bus, it performs all the necessary cleanup for the HBA 9243 * device to be removed from the PCI subsystem properly. 9244 **/ 9245 static void __devexit 9246 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 9247 { 9248 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9249 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 9250 struct lpfc_vport **vports; 9251 struct lpfc_hba *phba = vport->phba; 9252 int i; 9253 9254 /* Mark the device unloading flag */ 9255 spin_lock_irq(&phba->hbalock); 9256 vport->load_flag |= FC_UNLOADING; 9257 spin_unlock_irq(&phba->hbalock); 9258 9259 /* Free the HBA sysfs attributes */ 9260 lpfc_free_sysfs_attr(vport); 9261 9262 /* Release all the vports against this physical port */ 9263 vports = lpfc_create_vport_work_array(phba); 9264 if (vports != NULL) 9265 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 9266 fc_vport_terminate(vports[i]->fc_vport); 9267 lpfc_destroy_vport_work_array(phba, vports); 9268 9269 /* Remove FC host and then SCSI host with the physical port */ 9270 fc_remove_host(shost); 9271 scsi_remove_host(shost); 9272 9273 /* Perform cleanup on the physical port */ 9274 lpfc_cleanup(vport); 9275 9276 /* 9277 * Bring down the SLI Layer. This step disables all interrupts, 9278 * clears the rings, discards all mailbox commands, and resets 9279 * the HBA FCoE function. 9280 */ 9281 lpfc_debugfs_terminate(vport); 9282 lpfc_sli4_hba_unset(phba); 9283 9284 spin_lock_irq(&phba->hbalock); 9285 list_del_init(&vport->listentry); 9286 spin_unlock_irq(&phba->hbalock); 9287 9288 /* Perform scsi free before driver resource_unset since scsi 9289 * buffers are released to their corresponding pools here. 9290 */ 9291 lpfc_scsi_free(phba); 9292 lpfc_sli4_driver_resource_unset(phba); 9293 9294 /* Unmap adapter Control and Doorbell registers */ 9295 lpfc_sli4_pci_mem_unset(phba); 9296 9297 /* Release PCI resources and disable device's PCI function */ 9298 scsi_host_put(shost); 9299 lpfc_disable_pci_dev(phba); 9300 9301 /* Finally, free the driver's device data structure */ 9302 lpfc_hba_free(phba); 9303 9304 return; 9305 } 9306 9307 /** 9308 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 9309 * @pdev: pointer to PCI device 9310 * @msg: power management message 9311 * 9312 * This routine is called from the kernel's PCI subsystem to support system 9313 * Power Management (PM) to device with SLI-4 interface spec. 
When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off the device's interrupt and DMA, and
 * bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated
 * as SUSPEND, and the driver fully reinitializes its device during the
 * resume() method call. Therefore the driver sets the device to the
 * PCI_D3hot state in PCI config space instead of setting it according to
 * the @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
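/*
 * The resume path below undoes the suspend above. Note the save/restore
 * pattern it relies on: pci_restore_state() consumes the saved_state flag
 * on newer kernels, so the state must be saved again right after it is
 * restored. A minimal sketch of the restore side:
 *
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 *	pci_save_state(pdev);
 */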
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with the SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the
 * driver implements only the minimum PM requirements of a power-aware
 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND, and the driver fully
 * reinitializes its device during the resume() method call. Therefore the
 * device is set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
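 *
 * Unlike the reset path above, the interrupt and the PCI device itself are
 * left untouched here: with the slot permanently disabled, the only useful
 * work left is to fail the outstanding I/Os back to the SCSI mid-layer
 * quickly.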
9488 **/ 9489 static void 9490 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 9491 { 9492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9493 "2827 PCI channel permanent disable for failure\n"); 9494 9495 /* Block all SCSI devices' I/Os on the host */ 9496 lpfc_scsi_dev_block(phba); 9497 9498 /* stop all timers */ 9499 lpfc_stop_hba_timers(phba); 9500 9501 /* Clean up all driver's outstanding SCSI I/Os */ 9502 lpfc_sli_flush_fcp_rings(phba); 9503 } 9504 9505 /** 9506 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 9507 * @pdev: pointer to PCI device. 9508 * @state: the current PCI connection state. 9509 * 9510 * This routine is called from the PCI subsystem for error handling to device 9511 * with SLI-4 interface spec. This function is called by the PCI subsystem 9512 * after a PCI bus error affecting this device has been detected. When this 9513 * function is invoked, it will need to stop all the I/Os and interrupt(s) 9514 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 9515 * for the PCI subsystem to perform proper recovery as desired. 9516 * 9517 * Return codes 9518 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9519 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9520 **/ 9521 static pci_ers_result_t 9522 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 9523 { 9524 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9525 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9526 9527 switch (state) { 9528 case pci_channel_io_normal: 9529 /* Non-fatal error, prepare for recovery */ 9530 lpfc_sli4_prep_dev_for_recover(phba); 9531 return PCI_ERS_RESULT_CAN_RECOVER; 9532 case pci_channel_io_frozen: 9533 /* Fatal error, prepare for slot reset */ 9534 lpfc_sli4_prep_dev_for_reset(phba); 9535 return PCI_ERS_RESULT_NEED_RESET; 9536 case pci_channel_io_perm_failure: 9537 /* Permanent failure, prepare for device down */ 9538 lpfc_sli4_prep_dev_for_perm_failure(phba); 9539 return PCI_ERS_RESULT_DISCONNECT; 9540 default: 9541 /* Unknown state, prepare and request slot reset */ 9542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9543 "2825 Unknown PCI error state: x%x\n", state); 9544 lpfc_sli4_prep_dev_for_reset(phba); 9545 return PCI_ERS_RESULT_NEED_RESET; 9546 } 9547 } 9548 9549 /** 9550 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 9551 * @pdev: pointer to PCI device. 9552 * 9553 * This routine is called from the PCI subsystem for error handling to device 9554 * with SLI-4 interface spec. It is called after PCI bus has been reset to 9555 * restart the PCI card from scratch, as if from a cold-boot. During the 9556 * PCI subsystem error recovery, after the driver returns 9557 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 9558 * recovery and then call this routine before calling the .resume method to 9559 * recover the device. This function will initialize the HBA device, enable 9560 * the interrupt, but it will just put the HBA to offline state without 9561 * passing any I/O traffic. 
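 *
 * A sketch of the usual AER recovery sequence as this driver sees it (the
 * ordering is imposed by the PCI error recovery core, not by lpfc):
 *
 *	lpfc_io_error_detected_s4(pdev, pci_channel_io_frozen);
 *	lpfc_io_slot_reset_s4(pdev);
 *	lpfc_io_resume_s4(pdev);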
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, the function reset is performed through a
	 * mailbox command, which requires DMA to be enabled, so this
	 * operation has to be deferred to the io resume phase. Taking the
	 * device offline will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to determine
 * whether the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. 
This routine dispatches 9659 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 9660 * do all the initialization that it needs to do to handle the HBA device 9661 * properly. 9662 * 9663 * Return code 9664 * 0 - driver can claim the device 9665 * negative value - driver can not claim the device 9666 **/ 9667 static int __devinit 9668 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 9669 { 9670 int rc; 9671 struct lpfc_sli_intf intf; 9672 9673 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 9674 return -ENODEV; 9675 9676 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 9677 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 9678 rc = lpfc_pci_probe_one_s4(pdev, pid); 9679 else 9680 rc = lpfc_pci_probe_one_s3(pdev, pid); 9681 9682 return rc; 9683 } 9684 9685 /** 9686 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 9687 * @pdev: pointer to PCI device 9688 * 9689 * This routine is to be registered to the kernel's PCI subsystem. When an 9690 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 9691 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 9692 * remove routine, which will perform all the necessary cleanup for the 9693 * device to be removed from the PCI subsystem properly. 9694 **/ 9695 static void __devexit 9696 lpfc_pci_remove_one(struct pci_dev *pdev) 9697 { 9698 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9699 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9700 9701 switch (phba->pci_dev_grp) { 9702 case LPFC_PCI_DEV_LP: 9703 lpfc_pci_remove_one_s3(pdev); 9704 break; 9705 case LPFC_PCI_DEV_OC: 9706 lpfc_pci_remove_one_s4(pdev); 9707 break; 9708 default: 9709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9710 "1424 Invalid PCI device group: 0x%x\n", 9711 phba->pci_dev_grp); 9712 break; 9713 } 9714 return; 9715 } 9716 9717 /** 9718 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 9719 * @pdev: pointer to PCI device 9720 * @msg: power management message 9721 * 9722 * This routine is to be registered to the kernel's PCI subsystem to support 9723 * system Power Management (PM). When PM invokes this method, it dispatches 9724 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 9725 * suspend the device. 9726 * 9727 * Return code 9728 * 0 - driver suspended the device 9729 * Error otherwise 9730 **/ 9731 static int 9732 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 9733 { 9734 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9735 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9736 int rc = -ENODEV; 9737 9738 switch (phba->pci_dev_grp) { 9739 case LPFC_PCI_DEV_LP: 9740 rc = lpfc_pci_suspend_one_s3(pdev, msg); 9741 break; 9742 case LPFC_PCI_DEV_OC: 9743 rc = lpfc_pci_suspend_one_s4(pdev, msg); 9744 break; 9745 default: 9746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9747 "1425 Invalid PCI device group: 0x%x\n", 9748 phba->pci_dev_grp); 9749 break; 9750 } 9751 return rc; 9752 } 9753 9754 /** 9755 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 9756 * @pdev: pointer to PCI device 9757 * 9758 * This routine is to be registered to the kernel's PCI subsystem to support 9759 * system Power Management (PM). When PM invokes this method, it dispatches 9760 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 9761 * resume the device. 
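 *
 * For example, an HBA probed as an SLI-3 device (phba->pci_dev_grp ==
 * LPFC_PCI_DEV_LP) resumes through lpfc_pci_resume_one_s3(), while an
 * SLI-4 device (LPFC_PCI_DEV_OC) resumes through lpfc_pci_resume_one_s4().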
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting a PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
9837 * 9838 * Return codes 9839 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9840 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9841 **/ 9842 static pci_ers_result_t 9843 lpfc_io_slot_reset(struct pci_dev *pdev) 9844 { 9845 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9846 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9847 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9848 9849 switch (phba->pci_dev_grp) { 9850 case LPFC_PCI_DEV_LP: 9851 rc = lpfc_io_slot_reset_s3(pdev); 9852 break; 9853 case LPFC_PCI_DEV_OC: 9854 rc = lpfc_io_slot_reset_s4(pdev); 9855 break; 9856 default: 9857 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9858 "1428 Invalid PCI device group: 0x%x\n", 9859 phba->pci_dev_grp); 9860 break; 9861 } 9862 return rc; 9863 } 9864 9865 /** 9866 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 9867 * @pdev: pointer to PCI device 9868 * 9869 * This routine is registered to the PCI subsystem for error handling. It 9870 * is called when kernel error recovery tells the lpfc driver that it is 9871 * OK to resume normal PCI operation after PCI bus error recovery. When 9872 * this routine is invoked, it dispatches the action to the proper SLI-3 9873 * or SLI-4 device io_resume routine, which will resume the device operation. 9874 **/ 9875 static void 9876 lpfc_io_resume(struct pci_dev *pdev) 9877 { 9878 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9879 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9880 9881 switch (phba->pci_dev_grp) { 9882 case LPFC_PCI_DEV_LP: 9883 lpfc_io_resume_s3(pdev); 9884 break; 9885 case LPFC_PCI_DEV_OC: 9886 lpfc_io_resume_s4(pdev); 9887 break; 9888 default: 9889 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9890 "1429 Invalid PCI device group: 0x%x\n", 9891 phba->pci_dev_grp); 9892 break; 9893 } 9894 return; 9895 } 9896 9897 static struct pci_device_id lpfc_id_table[] = { 9898 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 9899 PCI_ANY_ID, PCI_ANY_ID, }, 9900 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 9901 PCI_ANY_ID, PCI_ANY_ID, }, 9902 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 9903 PCI_ANY_ID, PCI_ANY_ID, }, 9904 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 9905 PCI_ANY_ID, PCI_ANY_ID, }, 9906 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 9907 PCI_ANY_ID, PCI_ANY_ID, }, 9908 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 9909 PCI_ANY_ID, PCI_ANY_ID, }, 9910 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 9911 PCI_ANY_ID, PCI_ANY_ID, }, 9912 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 9913 PCI_ANY_ID, PCI_ANY_ID, }, 9914 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 9915 PCI_ANY_ID, PCI_ANY_ID, }, 9916 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 9917 PCI_ANY_ID, PCI_ANY_ID, }, 9918 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 9919 PCI_ANY_ID, PCI_ANY_ID, }, 9920 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 9921 PCI_ANY_ID, PCI_ANY_ID, }, 9922 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 9923 PCI_ANY_ID, PCI_ANY_ID, }, 9924 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 9925 PCI_ANY_ID, PCI_ANY_ID, }, 9926 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 9927 PCI_ANY_ID, PCI_ANY_ID, }, 9928 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 9929 PCI_ANY_ID, PCI_ANY_ID, }, 9930 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 9931 PCI_ANY_ID, PCI_ANY_ID, }, 9932 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 9933 PCI_ANY_ID, PCI_ANY_ID, }, 9934 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 9935 PCI_ANY_ID, PCI_ANY_ID, }, 9936 {PCI_VENDOR_ID_EMULEX, 
PCI_DEVICE_ID_ZEPHYR_SCSP, 9937 PCI_ANY_ID, PCI_ANY_ID, }, 9938 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 9939 PCI_ANY_ID, PCI_ANY_ID, }, 9940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 9941 PCI_ANY_ID, PCI_ANY_ID, }, 9942 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 9943 PCI_ANY_ID, PCI_ANY_ID, }, 9944 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 9945 PCI_ANY_ID, PCI_ANY_ID, }, 9946 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 9947 PCI_ANY_ID, PCI_ANY_ID, }, 9948 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 9949 PCI_ANY_ID, PCI_ANY_ID, }, 9950 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 9951 PCI_ANY_ID, PCI_ANY_ID, }, 9952 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 9953 PCI_ANY_ID, PCI_ANY_ID, }, 9954 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 9955 PCI_ANY_ID, PCI_ANY_ID, }, 9956 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 9957 PCI_ANY_ID, PCI_ANY_ID, }, 9958 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 9959 PCI_ANY_ID, PCI_ANY_ID, }, 9960 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 9961 PCI_ANY_ID, PCI_ANY_ID, }, 9962 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 9963 PCI_ANY_ID, PCI_ANY_ID, }, 9964 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 9965 PCI_ANY_ID, PCI_ANY_ID, }, 9966 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 9967 PCI_ANY_ID, PCI_ANY_ID, }, 9968 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 9969 PCI_ANY_ID, PCI_ANY_ID, }, 9970 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 9971 PCI_ANY_ID, PCI_ANY_ID, }, 9972 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 9973 PCI_ANY_ID, PCI_ANY_ID, }, 9974 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 9975 PCI_ANY_ID, PCI_ANY_ID, }, 9976 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 9977 PCI_ANY_ID, PCI_ANY_ID, }, 9978 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, 9979 PCI_ANY_ID, PCI_ANY_ID, }, 9980 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, 9981 PCI_ANY_ID, PCI_ANY_ID, }, 9982 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, 9983 PCI_ANY_ID, PCI_ANY_ID, }, 9984 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, 9985 PCI_ANY_ID, PCI_ANY_ID, }, 9986 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, 9987 PCI_ANY_ID, PCI_ANY_ID, }, 9988 { 0 } 9989 }; 9990 9991 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 9992 9993 static struct pci_error_handlers lpfc_err_handler = { 9994 .error_detected = lpfc_io_error_detected, 9995 .slot_reset = lpfc_io_slot_reset, 9996 .resume = lpfc_io_resume, 9997 }; 9998 9999 static struct pci_driver lpfc_driver = { 10000 .name = LPFC_DRIVER_NAME, 10001 .id_table = lpfc_id_table, 10002 .probe = lpfc_pci_probe_one, 10003 .remove = __devexit_p(lpfc_pci_remove_one), 10004 .suspend = lpfc_pci_suspend_one, 10005 .resume = lpfc_pci_resume_one, 10006 .err_handler = &lpfc_err_handler, 10007 }; 10008 10009 /** 10010 * lpfc_init - lpfc module initialization routine 10011 * 10012 * This routine is to be invoked when the lpfc module is loaded into the 10013 * kernel. The special kernel macro module_init() is used to indicate the 10014 * role of this routine to the kernel as lpfc module entry point. 
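 *
 * For example, loading the driver from user space with NPIV support turned
 * on (assuming the standard lpfc_enable_npiv module parameter):
 *
 *	# modprobe lpfc lpfc_enable_npiv=1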
10015 * 10016 * Return codes 10017 * 0 - successful 10018 * -ENOMEM - FC attach transport failed 10019 * all others - failed 10020 */ 10021 static int __init 10022 lpfc_init(void) 10023 { 10024 int error = 0; 10025 10026 printk(LPFC_MODULE_DESC "\n"); 10027 printk(LPFC_COPYRIGHT "\n"); 10028 10029 if (lpfc_enable_npiv) { 10030 lpfc_transport_functions.vport_create = lpfc_vport_create; 10031 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 10032 } 10033 lpfc_transport_template = 10034 fc_attach_transport(&lpfc_transport_functions); 10035 if (lpfc_transport_template == NULL) 10036 return -ENOMEM; 10037 if (lpfc_enable_npiv) { 10038 lpfc_vport_transport_template = 10039 fc_attach_transport(&lpfc_vport_transport_functions); 10040 if (lpfc_vport_transport_template == NULL) { 10041 fc_release_transport(lpfc_transport_template); 10042 return -ENOMEM; 10043 } 10044 } 10045 error = pci_register_driver(&lpfc_driver); 10046 if (error) { 10047 fc_release_transport(lpfc_transport_template); 10048 if (lpfc_enable_npiv) 10049 fc_release_transport(lpfc_vport_transport_template); 10050 } 10051 10052 return error; 10053 } 10054 10055 /** 10056 * lpfc_exit - lpfc module removal routine 10057 * 10058 * This routine is invoked when the lpfc module is removed from the kernel. 10059 * The special kernel macro module_exit() is used to indicate the role of 10060 * this routine to the kernel as lpfc module exit point. 10061 */ 10062 static void __exit 10063 lpfc_exit(void) 10064 { 10065 pci_unregister_driver(&lpfc_driver); 10066 fc_release_transport(lpfc_transport_template); 10067 if (lpfc_enable_npiv) 10068 fc_release_transport(lpfc_vport_transport_template); 10069 if (_dump_buf_data) { 10070 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 10071 "_dump_buf_data at 0x%p\n", 10072 (1L << _dump_buf_data_order), _dump_buf_data); 10073 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 10074 } 10075 10076 if (_dump_buf_dif) { 10077 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 10078 "_dump_buf_dif at 0x%p\n", 10079 (1L << _dump_buf_dif_order), _dump_buf_dif); 10080 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 10081 } 10082 } 10083 10084 module_init(lpfc_init); 10085 module_exit(lpfc_exit); 10086 MODULE_LICENSE("GPL"); 10087 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 10088 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 10089 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 10090