1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/idr.h> 26 #include <linux/interrupt.h> 27 #include <linux/kthread.h> 28 #include <linux/pci.h> 29 #include <linux/spinlock.h> 30 #include <linux/ctype.h> 31 #include <linux/aer.h> 32 #include <linux/slab.h> 33 #include <linux/firmware.h> 34 35 #include <scsi/scsi.h> 36 #include <scsi/scsi_device.h> 37 #include <scsi/scsi_host.h> 38 #include <scsi/scsi_transport_fc.h> 39 40 #include "lpfc_hw4.h" 41 #include "lpfc_hw.h" 42 #include "lpfc_sli.h" 43 #include "lpfc_sli4.h" 44 #include "lpfc_nl.h" 45 #include "lpfc_disc.h" 46 #include "lpfc_scsi.h" 47 #include "lpfc.h" 48 #include "lpfc_logmsg.h" 49 #include "lpfc_crtn.h" 50 #include "lpfc_vport.h" 51 #include "lpfc_version.h" 52 53 char *_dump_buf_data; 54 unsigned long _dump_buf_data_order; 55 char *_dump_buf_dif; 56 unsigned long _dump_buf_dif_order; 57 spinlock_t _dump_buf_lock; 58 59 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 60 static int lpfc_post_rcv_buf(struct lpfc_hba *); 61 static int lpfc_sli4_queue_create(struct lpfc_hba *); 62 static void lpfc_sli4_queue_destroy(struct lpfc_hba *); 63 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 64 static int lpfc_setup_endian_order(struct lpfc_hba *); 65 static int lpfc_sli4_read_config(struct lpfc_hba *); 66 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 67 static void lpfc_free_sgl_list(struct lpfc_hba *); 68 static int lpfc_init_sgl_list(struct lpfc_hba *); 69 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 70 static void lpfc_free_active_sgl(struct lpfc_hba *); 71 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 72 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 73 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 74 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 75 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 76 77 static struct scsi_transport_template *lpfc_transport_template = NULL; 78 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 79 static DEFINE_IDR(lpfc_hba_index); 80 81 /** 82 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 83 * @phba: pointer to lpfc hba data structure. 84 * 85 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 86 * mailbox command. It retrieves the revision information from the HBA and 87 * collects the Vital Product Data (VPD) about the HBA for preparing the 88 * configuration of the HBA. 89 * 90 * Return codes: 91 * 0 - success. 92 * -ERESTART - requests the SLI layer to reset the HBA and try again. 93 * Any other value - indicates an error. 94 **/ 95 int 96 lpfc_config_port_prep(struct lpfc_hba *phba) 97 { 98 lpfc_vpd_t *vp = &phba->vpd; 99 int i = 0, rc; 100 LPFC_MBOXQ_t *pmb; 101 MAILBOX_t *mb; 102 char *lpfc_vpd_data = NULL; 103 uint16_t offset = 0; 104 static char licensed[56] = 105 "key unlock for use with gnu public licensed code only\0"; 106 static int init_key = 1; 107 108 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 109 if (!pmb) { 110 phba->link_state = LPFC_HBA_ERROR; 111 return -ENOMEM; 112 } 113 114 mb = &pmb->u.mb; 115 phba->link_state = LPFC_INIT_MBX_CMDS; 116 117 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 118 if (init_key) { 119 uint32_t *ptext = (uint32_t *) licensed; 120 121 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 122 *ptext = cpu_to_be32(*ptext); 123 init_key = 0; 124 } 125 126 lpfc_read_nv(phba, pmb); 127 memset((char*)mb->un.varRDnvp.rsvd3, 0, 128 sizeof (mb->un.varRDnvp.rsvd3)); 129 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 130 sizeof (licensed)); 131 132 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 133 134 if (rc != MBX_SUCCESS) { 135 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 136 "0324 Config Port initialization " 137 "error, mbxCmd x%x READ_NVPARM, " 138 "mbxStatus x%x\n", 139 mb->mbxCommand, mb->mbxStatus); 140 mempool_free(pmb, phba->mbox_mem_pool); 141 return -ERESTART; 142 } 143 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 144 sizeof(phba->wwnn)); 145 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 146 sizeof(phba->wwpn)); 147 } 148 149 phba->sli3_options = 0x0; 150 151 /* Setup and issue mailbox READ REV command */ 152 lpfc_read_rev(phba, pmb); 153 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 154 if (rc != MBX_SUCCESS) { 155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 156 "0439 Adapter failed to init, mbxCmd x%x " 157 "READ_REV, mbxStatus x%x\n", 158 mb->mbxCommand, mb->mbxStatus); 159 mempool_free( pmb, phba->mbox_mem_pool); 160 return -ERESTART; 161 } 162 163 164 /* 165 * The value of rr must be 1 since the driver set the cv field to 1. 166 * This setting requires the FW to set all revision fields. 167 */ 168 if (mb->un.varRdRev.rr == 0) { 169 vp->rev.rBit = 0; 170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 171 "0440 Adapter failed to init, READ_REV has " 172 "missing revision information.\n"); 173 mempool_free(pmb, phba->mbox_mem_pool); 174 return -ERESTART; 175 } 176 177 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 178 mempool_free(pmb, phba->mbox_mem_pool); 179 return -EINVAL; 180 } 181 182 /* Save information as VPD data */ 183 vp->rev.rBit = 1; 184 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 185 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 186 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 187 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 188 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 189 vp->rev.biuRev = mb->un.varRdRev.biuRev; 190 vp->rev.smRev = mb->un.varRdRev.smRev; 191 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 192 vp->rev.endecRev = mb->un.varRdRev.endecRev; 193 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 194 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 195 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 196 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 197 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 198 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 199 200 /* If the sli feature level is less then 9, we must 201 * tear down all RPIs and VPIs on link down if NPIV 202 * is enabled. 203 */ 204 if (vp->rev.feaLevelHigh < 9) 205 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 206 207 if (lpfc_is_LC_HBA(phba->pcidev->device)) 208 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 209 sizeof (phba->RandomData)); 210 211 /* Get adapter VPD information */ 212 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 213 if (!lpfc_vpd_data) 214 goto out_free_mbox; 215 do { 216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 218 219 if (rc != MBX_SUCCESS) { 220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 221 "0441 VPD not present on adapter, " 222 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 223 mb->mbxCommand, mb->mbxStatus); 224 mb->un.varDmp.word_cnt = 0; 225 } 226 /* dump mem may return a zero when finished or we got a 227 * mailbox error, either way we are done. 228 */ 229 if (mb->un.varDmp.word_cnt == 0) 230 break; 231 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 232 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 233 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 234 lpfc_vpd_data + offset, 235 mb->un.varDmp.word_cnt); 236 offset += mb->un.varDmp.word_cnt; 237 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 238 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 239 240 kfree(lpfc_vpd_data); 241 out_free_mbox: 242 mempool_free(pmb, phba->mbox_mem_pool); 243 return 0; 244 } 245 246 /** 247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 248 * @phba: pointer to lpfc hba data structure. 249 * @pmboxq: pointer to the driver internal queue element for mailbox command. 250 * 251 * This is the completion handler for driver's configuring asynchronous event 252 * mailbox command to the device. If the mailbox command returns successfully, 253 * it will set internal async event support flag to 1; otherwise, it will 254 * set internal async event support flag to 0. 255 **/ 256 static void 257 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 258 { 259 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 260 phba->temp_sensor_support = 1; 261 else 262 phba->temp_sensor_support = 0; 263 mempool_free(pmboxq, phba->mbox_mem_pool); 264 return; 265 } 266 267 /** 268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 269 * @phba: pointer to lpfc hba data structure. 270 * @pmboxq: pointer to the driver internal queue element for mailbox command. 271 * 272 * This is the completion handler for dump mailbox command for getting 273 * wake up parameters. When this command complete, the response contain 274 * Option rom version of the HBA. This function translate the version number 275 * into a human readable string and store it in OptionROMVersion. 276 **/ 277 static void 278 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 279 { 280 struct prog_id *prg; 281 uint32_t prog_id_word; 282 char dist = ' '; 283 /* character array used for decoding dist type. */ 284 char dist_char[] = "nabx"; 285 286 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 287 mempool_free(pmboxq, phba->mbox_mem_pool); 288 return; 289 } 290 291 prg = (struct prog_id *) &prog_id_word; 292 293 /* word 7 contain option rom version */ 294 prog_id_word = pmboxq->u.mb.un.varWords[7]; 295 296 /* Decode the Option rom version word to a readable string */ 297 if (prg->dist < 4) 298 dist = dist_char[prg->dist]; 299 300 if ((prg->dist == 3) && (prg->num == 0)) 301 sprintf(phba->OptionROMVersion, "%d.%d%d", 302 prg->ver, prg->rev, prg->lev); 303 else 304 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d", 305 prg->ver, prg->rev, prg->lev, 306 dist, prg->num); 307 mempool_free(pmboxq, phba->mbox_mem_pool); 308 return; 309 } 310 311 /** 312 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname, 313 * cfg_soft_wwnn, cfg_soft_wwpn 314 * @vport: pointer to lpfc vport data structure. 315 * 316 * 317 * Return codes 318 * None. 319 **/ 320 void 321 lpfc_update_vport_wwn(struct lpfc_vport *vport) 322 { 323 /* If the soft name exists then update it using the service params */ 324 if (vport->phba->cfg_soft_wwnn) 325 u64_to_wwn(vport->phba->cfg_soft_wwnn, 326 vport->fc_sparam.nodeName.u.wwn); 327 if (vport->phba->cfg_soft_wwpn) 328 u64_to_wwn(vport->phba->cfg_soft_wwpn, 329 vport->fc_sparam.portName.u.wwn); 330 331 /* 332 * If the name is empty or there exists a soft name 333 * then copy the service params name, otherwise use the fc name 334 */ 335 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn) 336 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 337 sizeof(struct lpfc_name)); 338 else 339 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename, 340 sizeof(struct lpfc_name)); 341 342 if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn) 343 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 344 sizeof(struct lpfc_name)); 345 else 346 memcpy(&vport->fc_sparam.portName, &vport->fc_portname, 347 sizeof(struct lpfc_name)); 348 } 349 350 /** 351 * lpfc_config_port_post - Perform lpfc initialization after config port 352 * @phba: pointer to lpfc hba data structure. 353 * 354 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 355 * command call. It performs all internal resource and state setups on the 356 * port: post IOCB buffers, enable appropriate host interrupt attentions, 357 * ELS ring timers, etc. 358 * 359 * Return codes 360 * 0 - success. 361 * Any other value - error. 362 **/ 363 int 364 lpfc_config_port_post(struct lpfc_hba *phba) 365 { 366 struct lpfc_vport *vport = phba->pport; 367 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 368 LPFC_MBOXQ_t *pmb; 369 MAILBOX_t *mb; 370 struct lpfc_dmabuf *mp; 371 struct lpfc_sli *psli = &phba->sli; 372 uint32_t status, timeout; 373 int i, j; 374 int rc; 375 376 spin_lock_irq(&phba->hbalock); 377 /* 378 * If the Config port completed correctly the HBA is not 379 * over heated any more. 380 */ 381 if (phba->over_temp_state == HBA_OVER_TEMP) 382 phba->over_temp_state = HBA_NORMAL_TEMP; 383 spin_unlock_irq(&phba->hbalock); 384 385 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 386 if (!pmb) { 387 phba->link_state = LPFC_HBA_ERROR; 388 return -ENOMEM; 389 } 390 mb = &pmb->u.mb; 391 392 /* Get login parameters for NID. */ 393 rc = lpfc_read_sparam(phba, pmb, 0); 394 if (rc) { 395 mempool_free(pmb, phba->mbox_mem_pool); 396 return -ENOMEM; 397 } 398 399 pmb->vport = vport; 400 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 402 "0448 Adapter failed init, mbxCmd x%x " 403 "READ_SPARM mbxStatus x%x\n", 404 mb->mbxCommand, mb->mbxStatus); 405 phba->link_state = LPFC_HBA_ERROR; 406 mp = (struct lpfc_dmabuf *) pmb->context1; 407 mempool_free(pmb, phba->mbox_mem_pool); 408 lpfc_mbuf_free(phba, mp->virt, mp->phys); 409 kfree(mp); 410 return -EIO; 411 } 412 413 mp = (struct lpfc_dmabuf *) pmb->context1; 414 415 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 416 lpfc_mbuf_free(phba, mp->virt, mp->phys); 417 kfree(mp); 418 pmb->context1 = NULL; 419 lpfc_update_vport_wwn(vport); 420 421 /* Update the fc_host data structures with new wwn. */ 422 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 423 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 424 fc_host_max_npiv_vports(shost) = phba->max_vpi; 425 426 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 427 /* This should be consolidated into parse_vpd ? - mr */ 428 if (phba->SerialNumber[0] == 0) { 429 uint8_t *outptr; 430 431 outptr = &vport->fc_nodename.u.s.IEEE[0]; 432 for (i = 0; i < 12; i++) { 433 status = *outptr++; 434 j = ((status & 0xf0) >> 4); 435 if (j <= 9) 436 phba->SerialNumber[i] = 437 (char)((uint8_t) 0x30 + (uint8_t) j); 438 else 439 phba->SerialNumber[i] = 440 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 441 i++; 442 j = (status & 0xf); 443 if (j <= 9) 444 phba->SerialNumber[i] = 445 (char)((uint8_t) 0x30 + (uint8_t) j); 446 else 447 phba->SerialNumber[i] = 448 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 449 } 450 } 451 452 lpfc_read_config(phba, pmb); 453 pmb->vport = vport; 454 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 456 "0453 Adapter failed to init, mbxCmd x%x " 457 "READ_CONFIG, mbxStatus x%x\n", 458 mb->mbxCommand, mb->mbxStatus); 459 phba->link_state = LPFC_HBA_ERROR; 460 mempool_free( pmb, phba->mbox_mem_pool); 461 return -EIO; 462 } 463 464 /* Check if the port is disabled */ 465 lpfc_sli_read_link_ste(phba); 466 467 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 468 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 469 phba->cfg_hba_queue_depth = 470 (mb->un.varRdConfig.max_xri + 1) - 471 lpfc_sli4_get_els_iocb_cnt(phba); 472 473 phba->lmt = mb->un.varRdConfig.lmt; 474 475 /* Get the default values for Model Name and Description */ 476 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 477 478 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G) 479 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) 480 && !(phba->lmt & LMT_1Gb)) 481 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) 482 && !(phba->lmt & LMT_2Gb)) 483 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) 484 && !(phba->lmt & LMT_4Gb)) 485 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) 486 && !(phba->lmt & LMT_8Gb)) 487 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) 488 && !(phba->lmt & LMT_10Gb)) 489 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) 490 && !(phba->lmt & LMT_16Gb))) { 491 /* Reset link speed to auto */ 492 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 493 "1302 Invalid speed for this board: " 494 "Reset link speed to auto: x%x\n", 495 phba->cfg_link_speed); 496 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 497 } 498 499 phba->link_state = LPFC_LINK_DOWN; 500 501 /* Only process IOCBs on ELS ring till hba_state is READY */ 502 if (psli->ring[psli->extra_ring].cmdringaddr) 503 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 504 if (psli->ring[psli->fcp_ring].cmdringaddr) 505 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 506 if (psli->ring[psli->next_ring].cmdringaddr) 507 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 508 509 /* Post receive buffers for desired rings */ 510 if (phba->sli_rev != 3) 511 lpfc_post_rcv_buf(phba); 512 513 /* 514 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 515 */ 516 if (phba->intr_type == MSIX) { 517 rc = lpfc_config_msi(phba, pmb); 518 if (rc) { 519 mempool_free(pmb, phba->mbox_mem_pool); 520 return -EIO; 521 } 522 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 523 if (rc != MBX_SUCCESS) { 524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 525 "0352 Config MSI mailbox command " 526 "failed, mbxCmd x%x, mbxStatus x%x\n", 527 pmb->u.mb.mbxCommand, 528 pmb->u.mb.mbxStatus); 529 mempool_free(pmb, phba->mbox_mem_pool); 530 return -EIO; 531 } 532 } 533 534 spin_lock_irq(&phba->hbalock); 535 /* Initialize ERATT handling flag */ 536 phba->hba_flag &= ~HBA_ERATT_HANDLED; 537 538 /* Enable appropriate host interrupts */ 539 if (lpfc_readl(phba->HCregaddr, &status)) { 540 spin_unlock_irq(&phba->hbalock); 541 return -EIO; 542 } 543 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 544 if (psli->num_rings > 0) 545 status |= HC_R0INT_ENA; 546 if (psli->num_rings > 1) 547 status |= HC_R1INT_ENA; 548 if (psli->num_rings > 2) 549 status |= HC_R2INT_ENA; 550 if (psli->num_rings > 3) 551 status |= HC_R3INT_ENA; 552 553 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 554 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 555 status &= ~(HC_R0INT_ENA); 556 557 writel(status, phba->HCregaddr); 558 readl(phba->HCregaddr); /* flush */ 559 spin_unlock_irq(&phba->hbalock); 560 561 /* Set up ring-0 (ELS) timer */ 562 timeout = phba->fc_ratov * 2; 563 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 564 /* Set up heart beat (HB) timer */ 565 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 566 phba->hb_outstanding = 0; 567 phba->last_completion_time = jiffies; 568 /* Set up error attention (ERATT) polling timer */ 569 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 570 571 if (phba->hba_flag & LINK_DISABLED) { 572 lpfc_printf_log(phba, 573 KERN_ERR, LOG_INIT, 574 "2598 Adapter Link is disabled.\n"); 575 lpfc_down_link(phba, pmb); 576 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 577 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 578 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 579 lpfc_printf_log(phba, 580 KERN_ERR, LOG_INIT, 581 "2599 Adapter failed to issue DOWN_LINK" 582 " mbox command rc 0x%x\n", rc); 583 584 mempool_free(pmb, phba->mbox_mem_pool); 585 return -EIO; 586 } 587 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 588 lpfc_init_link(phba, pmb, phba->cfg_topology, 589 phba->cfg_link_speed); 590 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 591 lpfc_set_loopback_flag(phba); 592 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 593 if (rc != MBX_SUCCESS) { 594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 595 "0454 Adapter failed to init, mbxCmd x%x " 596 "INIT_LINK, mbxStatus x%x\n", 597 mb->mbxCommand, mb->mbxStatus); 598 599 /* Clear all interrupt enable conditions */ 600 writel(0, phba->HCregaddr); 601 readl(phba->HCregaddr); /* flush */ 602 /* Clear all pending interrupts */ 603 writel(0xffffffff, phba->HAregaddr); 604 readl(phba->HAregaddr); /* flush */ 605 phba->link_state = LPFC_HBA_ERROR; 606 if (rc != MBX_BUSY) 607 mempool_free(pmb, phba->mbox_mem_pool); 608 return -EIO; 609 } 610 } 611 /* MBOX buffer will be freed in mbox compl */ 612 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 613 if (!pmb) { 614 phba->link_state = LPFC_HBA_ERROR; 615 return -ENOMEM; 616 } 617 618 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 619 pmb->mbox_cmpl = lpfc_config_async_cmpl; 620 pmb->vport = phba->pport; 621 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 622 623 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 624 lpfc_printf_log(phba, 625 KERN_ERR, 626 LOG_INIT, 627 "0456 Adapter failed to issue " 628 "ASYNCEVT_ENABLE mbox status x%x\n", 629 rc); 630 mempool_free(pmb, phba->mbox_mem_pool); 631 } 632 633 /* Get Option rom version */ 634 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 635 if (!pmb) { 636 phba->link_state = LPFC_HBA_ERROR; 637 return -ENOMEM; 638 } 639 640 lpfc_dump_wakeup_param(phba, pmb); 641 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 642 pmb->vport = phba->pport; 643 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 644 645 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 647 "to get Option ROM version status x%x\n", rc); 648 mempool_free(pmb, phba->mbox_mem_pool); 649 } 650 651 return 0; 652 } 653 654 /** 655 * lpfc_hba_init_link - Initialize the FC link 656 * @phba: pointer to lpfc hba data structure. 657 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 658 * 659 * This routine will issue the INIT_LINK mailbox command call. 660 * It is available to other drivers through the lpfc_hba data 661 * structure for use as a delayed link up mechanism with the 662 * module parameter lpfc_suppress_link_up. 663 * 664 * Return code 665 * 0 - success 666 * Any other value - error 667 **/ 668 int 669 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 670 { 671 struct lpfc_vport *vport = phba->pport; 672 LPFC_MBOXQ_t *pmb; 673 MAILBOX_t *mb; 674 int rc; 675 676 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 677 if (!pmb) { 678 phba->link_state = LPFC_HBA_ERROR; 679 return -ENOMEM; 680 } 681 mb = &pmb->u.mb; 682 pmb->vport = vport; 683 684 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 685 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 686 lpfc_set_loopback_flag(phba); 687 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 688 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 689 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 690 "0498 Adapter failed to init, mbxCmd x%x " 691 "INIT_LINK, mbxStatus x%x\n", 692 mb->mbxCommand, mb->mbxStatus); 693 if (phba->sli_rev <= LPFC_SLI_REV3) { 694 /* Clear all interrupt enable conditions */ 695 writel(0, phba->HCregaddr); 696 readl(phba->HCregaddr); /* flush */ 697 /* Clear all pending interrupts */ 698 writel(0xffffffff, phba->HAregaddr); 699 readl(phba->HAregaddr); /* flush */ 700 } 701 phba->link_state = LPFC_HBA_ERROR; 702 if (rc != MBX_BUSY || flag == MBX_POLL) 703 mempool_free(pmb, phba->mbox_mem_pool); 704 return -EIO; 705 } 706 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 707 if (flag == MBX_POLL) 708 mempool_free(pmb, phba->mbox_mem_pool); 709 710 return 0; 711 } 712 713 /** 714 * lpfc_hba_down_link - this routine downs the FC link 715 * @phba: pointer to lpfc hba data structure. 716 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 717 * 718 * This routine will issue the DOWN_LINK mailbox command call. 719 * It is available to other drivers through the lpfc_hba data 720 * structure for use to stop the link. 721 * 722 * Return code 723 * 0 - success 724 * Any other value - error 725 **/ 726 int 727 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 728 { 729 LPFC_MBOXQ_t *pmb; 730 int rc; 731 732 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 733 if (!pmb) { 734 phba->link_state = LPFC_HBA_ERROR; 735 return -ENOMEM; 736 } 737 738 lpfc_printf_log(phba, 739 KERN_ERR, LOG_INIT, 740 "0491 Adapter Link is disabled.\n"); 741 lpfc_down_link(phba, pmb); 742 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 743 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 744 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 745 lpfc_printf_log(phba, 746 KERN_ERR, LOG_INIT, 747 "2522 Adapter failed to issue DOWN_LINK" 748 " mbox command rc 0x%x\n", rc); 749 750 mempool_free(pmb, phba->mbox_mem_pool); 751 return -EIO; 752 } 753 if (flag == MBX_POLL) 754 mempool_free(pmb, phba->mbox_mem_pool); 755 756 return 0; 757 } 758 759 /** 760 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 761 * @phba: pointer to lpfc HBA data structure. 762 * 763 * This routine will do LPFC uninitialization before the HBA is reset when 764 * bringing down the SLI Layer. 765 * 766 * Return codes 767 * 0 - success. 768 * Any other value - error. 769 **/ 770 int 771 lpfc_hba_down_prep(struct lpfc_hba *phba) 772 { 773 struct lpfc_vport **vports; 774 int i; 775 776 if (phba->sli_rev <= LPFC_SLI_REV3) { 777 /* Disable interrupts */ 778 writel(0, phba->HCregaddr); 779 readl(phba->HCregaddr); /* flush */ 780 } 781 782 if (phba->pport->load_flag & FC_UNLOADING) 783 lpfc_cleanup_discovery_resources(phba->pport); 784 else { 785 vports = lpfc_create_vport_work_array(phba); 786 if (vports != NULL) 787 for (i = 0; i <= phba->max_vports && 788 vports[i] != NULL; i++) 789 lpfc_cleanup_discovery_resources(vports[i]); 790 lpfc_destroy_vport_work_array(phba, vports); 791 } 792 return 0; 793 } 794 795 /** 796 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 797 * @phba: pointer to lpfc HBA data structure. 798 * 799 * This routine will do uninitialization after the HBA is reset when bring 800 * down the SLI Layer. 801 * 802 * Return codes 803 * 0 - success. 804 * Any other value - error. 805 **/ 806 static int 807 lpfc_hba_down_post_s3(struct lpfc_hba *phba) 808 { 809 struct lpfc_sli *psli = &phba->sli; 810 struct lpfc_sli_ring *pring; 811 struct lpfc_dmabuf *mp, *next_mp; 812 LIST_HEAD(completions); 813 int i; 814 815 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 816 lpfc_sli_hbqbuf_free_all(phba); 817 else { 818 /* Cleanup preposted buffers on the ELS ring */ 819 pring = &psli->ring[LPFC_ELS_RING]; 820 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 821 list_del(&mp->list); 822 pring->postbufq_cnt--; 823 lpfc_mbuf_free(phba, mp->virt, mp->phys); 824 kfree(mp); 825 } 826 } 827 828 spin_lock_irq(&phba->hbalock); 829 for (i = 0; i < psli->num_rings; i++) { 830 pring = &psli->ring[i]; 831 832 /* At this point in time the HBA is either reset or DOA. Either 833 * way, nothing should be on txcmplq as it will NEVER complete. 834 */ 835 list_splice_init(&pring->txcmplq, &completions); 836 pring->txcmplq_cnt = 0; 837 spin_unlock_irq(&phba->hbalock); 838 839 /* Cancel all the IOCBs from the completions list */ 840 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 841 IOERR_SLI_ABORTED); 842 843 lpfc_sli_abort_iocb_ring(phba, pring); 844 spin_lock_irq(&phba->hbalock); 845 } 846 spin_unlock_irq(&phba->hbalock); 847 848 return 0; 849 } 850 851 /** 852 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 853 * @phba: pointer to lpfc HBA data structure. 854 * 855 * This routine will do uninitialization after the HBA is reset when bring 856 * down the SLI Layer. 857 * 858 * Return codes 859 * 0 - success. 860 * Any other value - error. 861 **/ 862 static int 863 lpfc_hba_down_post_s4(struct lpfc_hba *phba) 864 { 865 struct lpfc_scsi_buf *psb, *psb_next; 866 LIST_HEAD(aborts); 867 int ret; 868 unsigned long iflag = 0; 869 struct lpfc_sglq *sglq_entry = NULL; 870 871 ret = lpfc_hba_down_post_s3(phba); 872 if (ret) 873 return ret; 874 /* At this point in time the HBA is either reset or DOA. Either 875 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 876 * on the lpfc_sgl_list so that it can either be freed if the 877 * driver is unloading or reposted if the driver is restarting 878 * the port. 879 */ 880 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ 881 /* scsl_buf_list */ 882 /* abts_sgl_list_lock required because worker thread uses this 883 * list. 884 */ 885 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 886 list_for_each_entry(sglq_entry, 887 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 888 sglq_entry->state = SGL_FREED; 889 890 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 891 &phba->sli4_hba.lpfc_sgl_list); 892 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 893 /* abts_scsi_buf_list_lock required because worker thread uses this 894 * list. 895 */ 896 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 897 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 898 &aborts); 899 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 900 spin_unlock_irq(&phba->hbalock); 901 902 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 903 psb->pCmd = NULL; 904 psb->status = IOSTAT_SUCCESS; 905 } 906 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 907 list_splice(&aborts, &phba->lpfc_scsi_buf_list); 908 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 909 return 0; 910 } 911 912 /** 913 * lpfc_hba_down_post - Wrapper func for hba down post routine 914 * @phba: pointer to lpfc HBA data structure. 915 * 916 * This routine wraps the actual SLI3 or SLI4 routine for performing 917 * uninitialization after the HBA is reset when bring down the SLI Layer. 918 * 919 * Return codes 920 * 0 - success. 921 * Any other value - error. 922 **/ 923 int 924 lpfc_hba_down_post(struct lpfc_hba *phba) 925 { 926 return (*phba->lpfc_hba_down_post)(phba); 927 } 928 929 /** 930 * lpfc_hb_timeout - The HBA-timer timeout handler 931 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 932 * 933 * This is the HBA-timer timeout handler registered to the lpfc driver. When 934 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 935 * work-port-events bitmap and the worker thread is notified. This timeout 936 * event will be used by the worker thread to invoke the actual timeout 937 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 938 * be performed in the timeout handler and the HBA timeout event bit shall 939 * be cleared by the worker thread after it has taken the event bitmap out. 940 **/ 941 static void 942 lpfc_hb_timeout(unsigned long ptr) 943 { 944 struct lpfc_hba *phba; 945 uint32_t tmo_posted; 946 unsigned long iflag; 947 948 phba = (struct lpfc_hba *)ptr; 949 950 /* Check for heart beat timeout conditions */ 951 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 952 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 953 if (!tmo_posted) 954 phba->pport->work_port_events |= WORKER_HB_TMO; 955 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 956 957 /* Tell the worker thread there is work to do */ 958 if (!tmo_posted) 959 lpfc_worker_wake_up(phba); 960 return; 961 } 962 963 /** 964 * lpfc_rrq_timeout - The RRQ-timer timeout handler 965 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 966 * 967 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 968 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 969 * work-port-events bitmap and the worker thread is notified. This timeout 970 * event will be used by the worker thread to invoke the actual timeout 971 * handler routine, lpfc_rrq_handler. Any periodical operations will 972 * be performed in the timeout handler and the RRQ timeout event bit shall 973 * be cleared by the worker thread after it has taken the event bitmap out. 974 **/ 975 static void 976 lpfc_rrq_timeout(unsigned long ptr) 977 { 978 struct lpfc_hba *phba; 979 unsigned long iflag; 980 981 phba = (struct lpfc_hba *)ptr; 982 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 983 phba->hba_flag |= HBA_RRQ_ACTIVE; 984 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 985 lpfc_worker_wake_up(phba); 986 } 987 988 /** 989 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 990 * @phba: pointer to lpfc hba data structure. 991 * @pmboxq: pointer to the driver internal queue element for mailbox command. 992 * 993 * This is the callback function to the lpfc heart-beat mailbox command. 994 * If configured, the lpfc driver issues the heart-beat mailbox command to 995 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 996 * heart-beat mailbox command is issued, the driver shall set up heart-beat 997 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 998 * heart-beat outstanding state. Once the mailbox command comes back and 999 * no error conditions detected, the heart-beat mailbox command timer is 1000 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1001 * state is cleared for the next heart-beat. If the timer expired with the 1002 * heart-beat outstanding state set, the driver will put the HBA offline. 1003 **/ 1004 static void 1005 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1006 { 1007 unsigned long drvr_flag; 1008 1009 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1010 phba->hb_outstanding = 0; 1011 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1012 1013 /* Check and reset heart-beat timer is necessary */ 1014 mempool_free(pmboxq, phba->mbox_mem_pool); 1015 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1016 !(phba->link_state == LPFC_HBA_ERROR) && 1017 !(phba->pport->load_flag & FC_UNLOADING)) 1018 mod_timer(&phba->hb_tmofunc, 1019 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1020 return; 1021 } 1022 1023 /** 1024 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1025 * @phba: pointer to lpfc hba data structure. 1026 * 1027 * This is the actual HBA-timer timeout handler to be invoked by the worker 1028 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1029 * handler performs any periodic operations needed for the device. If such 1030 * periodic event has already been attended to either in the interrupt handler 1031 * or by processing slow-ring or fast-ring events within the HBA-timer 1032 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1033 * the timer for the next timeout period. If lpfc heart-beat mailbox command 1034 * is configured and there is no heart-beat mailbox command outstanding, a 1035 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1036 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1037 * to offline. 1038 **/ 1039 void 1040 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1041 { 1042 struct lpfc_vport **vports; 1043 LPFC_MBOXQ_t *pmboxq; 1044 struct lpfc_dmabuf *buf_ptr; 1045 int retval, i; 1046 struct lpfc_sli *psli = &phba->sli; 1047 LIST_HEAD(completions); 1048 1049 vports = lpfc_create_vport_work_array(phba); 1050 if (vports != NULL) 1051 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 1052 lpfc_rcv_seq_check_edtov(vports[i]); 1053 lpfc_destroy_vport_work_array(phba, vports); 1054 1055 if ((phba->link_state == LPFC_HBA_ERROR) || 1056 (phba->pport->load_flag & FC_UNLOADING) || 1057 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1058 return; 1059 1060 spin_lock_irq(&phba->pport->work_port_lock); 1061 1062 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 1063 jiffies)) { 1064 spin_unlock_irq(&phba->pport->work_port_lock); 1065 if (!phba->hb_outstanding) 1066 mod_timer(&phba->hb_tmofunc, 1067 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1068 else 1069 mod_timer(&phba->hb_tmofunc, 1070 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1071 return; 1072 } 1073 spin_unlock_irq(&phba->pport->work_port_lock); 1074 1075 if (phba->elsbuf_cnt && 1076 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1077 spin_lock_irq(&phba->hbalock); 1078 list_splice_init(&phba->elsbuf, &completions); 1079 phba->elsbuf_cnt = 0; 1080 phba->elsbuf_prev_cnt = 0; 1081 spin_unlock_irq(&phba->hbalock); 1082 1083 while (!list_empty(&completions)) { 1084 list_remove_head(&completions, buf_ptr, 1085 struct lpfc_dmabuf, list); 1086 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1087 kfree(buf_ptr); 1088 } 1089 } 1090 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1091 1092 /* If there is no heart beat outstanding, issue a heartbeat command */ 1093 if (phba->cfg_enable_hba_heartbeat) { 1094 if (!phba->hb_outstanding) { 1095 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1096 (list_empty(&psli->mboxq))) { 1097 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1098 GFP_KERNEL); 1099 if (!pmboxq) { 1100 mod_timer(&phba->hb_tmofunc, 1101 jiffies + 1102 HZ * LPFC_HB_MBOX_INTERVAL); 1103 return; 1104 } 1105 1106 lpfc_heart_beat(phba, pmboxq); 1107 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1108 pmboxq->vport = phba->pport; 1109 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1110 MBX_NOWAIT); 1111 1112 if (retval != MBX_BUSY && 1113 retval != MBX_SUCCESS) { 1114 mempool_free(pmboxq, 1115 phba->mbox_mem_pool); 1116 mod_timer(&phba->hb_tmofunc, 1117 jiffies + 1118 HZ * LPFC_HB_MBOX_INTERVAL); 1119 return; 1120 } 1121 phba->skipped_hb = 0; 1122 phba->hb_outstanding = 1; 1123 } else if (time_before_eq(phba->last_completion_time, 1124 phba->skipped_hb)) { 1125 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1126 "2857 Last completion time not " 1127 " updated in %d ms\n", 1128 jiffies_to_msecs(jiffies 1129 - phba->last_completion_time)); 1130 } else 1131 phba->skipped_hb = jiffies; 1132 1133 mod_timer(&phba->hb_tmofunc, 1134 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1135 return; 1136 } else { 1137 /* 1138 * If heart beat timeout called with hb_outstanding set 1139 * we need to give the hb mailbox cmd a chance to 1140 * complete or TMO. 1141 */ 1142 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1143 "0459 Adapter heartbeat still out" 1144 "standing:last compl time was %d ms.\n", 1145 jiffies_to_msecs(jiffies 1146 - phba->last_completion_time)); 1147 mod_timer(&phba->hb_tmofunc, 1148 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1149 } 1150 } 1151 } 1152 1153 /** 1154 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1155 * @phba: pointer to lpfc hba data structure. 1156 * 1157 * This routine is called to bring the HBA offline when HBA hardware error 1158 * other than Port Error 6 has been detected. 1159 **/ 1160 static void 1161 lpfc_offline_eratt(struct lpfc_hba *phba) 1162 { 1163 struct lpfc_sli *psli = &phba->sli; 1164 1165 spin_lock_irq(&phba->hbalock); 1166 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1167 spin_unlock_irq(&phba->hbalock); 1168 lpfc_offline_prep(phba); 1169 1170 lpfc_offline(phba); 1171 lpfc_reset_barrier(phba); 1172 spin_lock_irq(&phba->hbalock); 1173 lpfc_sli_brdreset(phba); 1174 spin_unlock_irq(&phba->hbalock); 1175 lpfc_hba_down_post(phba); 1176 lpfc_sli_brdready(phba, HS_MBRDY); 1177 lpfc_unblock_mgmt_io(phba); 1178 phba->link_state = LPFC_HBA_ERROR; 1179 return; 1180 } 1181 1182 /** 1183 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1184 * @phba: pointer to lpfc hba data structure. 1185 * 1186 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1187 * other than Port Error 6 has been detected. 1188 **/ 1189 static void 1190 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1191 { 1192 lpfc_offline_prep(phba); 1193 lpfc_offline(phba); 1194 lpfc_sli4_brdreset(phba); 1195 lpfc_hba_down_post(phba); 1196 lpfc_sli4_post_status_check(phba); 1197 lpfc_unblock_mgmt_io(phba); 1198 phba->link_state = LPFC_HBA_ERROR; 1199 } 1200 1201 /** 1202 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1203 * @phba: pointer to lpfc hba data structure. 1204 * 1205 * This routine is invoked to handle the deferred HBA hardware error 1206 * conditions. This type of error is indicated by HBA by setting ER1 1207 * and another ER bit in the host status register. The driver will 1208 * wait until the ER1 bit clears before handling the error condition. 1209 **/ 1210 static void 1211 lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1212 { 1213 uint32_t old_host_status = phba->work_hs; 1214 struct lpfc_sli_ring *pring; 1215 struct lpfc_sli *psli = &phba->sli; 1216 1217 /* If the pci channel is offline, ignore possible errors, 1218 * since we cannot communicate with the pci card anyway. 1219 */ 1220 if (pci_channel_offline(phba->pcidev)) { 1221 spin_lock_irq(&phba->hbalock); 1222 phba->hba_flag &= ~DEFER_ERATT; 1223 spin_unlock_irq(&phba->hbalock); 1224 return; 1225 } 1226 1227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1228 "0479 Deferred Adapter Hardware Error " 1229 "Data: x%x x%x x%x\n", 1230 phba->work_hs, 1231 phba->work_status[0], phba->work_status[1]); 1232 1233 spin_lock_irq(&phba->hbalock); 1234 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1235 spin_unlock_irq(&phba->hbalock); 1236 1237 1238 /* 1239 * Firmware stops when it triggred erratt. That could cause the I/Os 1240 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1241 * SCSI layer retry it after re-establishing link. 1242 */ 1243 pring = &psli->ring[psli->fcp_ring]; 1244 lpfc_sli_abort_iocb_ring(phba, pring); 1245 1246 /* 1247 * There was a firmware error. Take the hba offline and then 1248 * attempt to restart it. 1249 */ 1250 lpfc_offline_prep(phba); 1251 lpfc_offline(phba); 1252 1253 /* Wait for the ER1 bit to clear.*/ 1254 while (phba->work_hs & HS_FFER1) { 1255 msleep(100); 1256 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { 1257 phba->work_hs = UNPLUG_ERR ; 1258 break; 1259 } 1260 /* If driver is unloading let the worker thread continue */ 1261 if (phba->pport->load_flag & FC_UNLOADING) { 1262 phba->work_hs = 0; 1263 break; 1264 } 1265 } 1266 1267 /* 1268 * This is to ptrotect against a race condition in which 1269 * first write to the host attention register clear the 1270 * host status register. 1271 */ 1272 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1273 phba->work_hs = old_host_status & ~HS_FFER1; 1274 1275 spin_lock_irq(&phba->hbalock); 1276 phba->hba_flag &= ~DEFER_ERATT; 1277 spin_unlock_irq(&phba->hbalock); 1278 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1279 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1280 } 1281 1282 static void 1283 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1284 { 1285 struct lpfc_board_event_header board_event; 1286 struct Scsi_Host *shost; 1287 1288 board_event.event_type = FC_REG_BOARD_EVENT; 1289 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1290 shost = lpfc_shost_from_vport(phba->pport); 1291 fc_host_post_vendor_event(shost, fc_get_event_number(), 1292 sizeof(board_event), 1293 (char *) &board_event, 1294 LPFC_NL_VENDOR_ID); 1295 } 1296 1297 /** 1298 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1299 * @phba: pointer to lpfc hba data structure. 1300 * 1301 * This routine is invoked to handle the following HBA hardware error 1302 * conditions: 1303 * 1 - HBA error attention interrupt 1304 * 2 - DMA ring index out of range 1305 * 3 - Mailbox command came back as unknown 1306 **/ 1307 static void 1308 lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1309 { 1310 struct lpfc_vport *vport = phba->pport; 1311 struct lpfc_sli *psli = &phba->sli; 1312 struct lpfc_sli_ring *pring; 1313 uint32_t event_data; 1314 unsigned long temperature; 1315 struct temp_event temp_event_data; 1316 struct Scsi_Host *shost; 1317 1318 /* If the pci channel is offline, ignore possible errors, 1319 * since we cannot communicate with the pci card anyway. 1320 */ 1321 if (pci_channel_offline(phba->pcidev)) { 1322 spin_lock_irq(&phba->hbalock); 1323 phba->hba_flag &= ~DEFER_ERATT; 1324 spin_unlock_irq(&phba->hbalock); 1325 return; 1326 } 1327 1328 /* If resets are disabled then leave the HBA alone and return */ 1329 if (!phba->cfg_enable_hba_reset) 1330 return; 1331 1332 /* Send an internal error event to mgmt application */ 1333 lpfc_board_errevt_to_mgmt(phba); 1334 1335 if (phba->hba_flag & DEFER_ERATT) 1336 lpfc_handle_deferred_eratt(phba); 1337 1338 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { 1339 if (phba->work_hs & HS_FFER6) 1340 /* Re-establishing Link */ 1341 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1342 "1301 Re-establishing Link " 1343 "Data: x%x x%x x%x\n", 1344 phba->work_hs, phba->work_status[0], 1345 phba->work_status[1]); 1346 if (phba->work_hs & HS_FFER8) 1347 /* Device Zeroization */ 1348 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1349 "2861 Host Authentication device " 1350 "zeroization Data:x%x x%x x%x\n", 1351 phba->work_hs, phba->work_status[0], 1352 phba->work_status[1]); 1353 1354 spin_lock_irq(&phba->hbalock); 1355 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1356 spin_unlock_irq(&phba->hbalock); 1357 1358 /* 1359 * Firmware stops when it triggled erratt with HS_FFER6. 1360 * That could cause the I/Os dropped by the firmware. 1361 * Error iocb (I/O) on txcmplq and let the SCSI layer 1362 * retry it after re-establishing link. 1363 */ 1364 pring = &psli->ring[psli->fcp_ring]; 1365 lpfc_sli_abort_iocb_ring(phba, pring); 1366 1367 /* 1368 * There was a firmware error. Take the hba offline and then 1369 * attempt to restart it. 1370 */ 1371 lpfc_offline_prep(phba); 1372 lpfc_offline(phba); 1373 lpfc_sli_brdrestart(phba); 1374 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1375 lpfc_unblock_mgmt_io(phba); 1376 return; 1377 } 1378 lpfc_unblock_mgmt_io(phba); 1379 } else if (phba->work_hs & HS_CRIT_TEMP) { 1380 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1381 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1382 temp_event_data.event_code = LPFC_CRIT_TEMP; 1383 temp_event_data.data = (uint32_t)temperature; 1384 1385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1386 "0406 Adapter maximum temperature exceeded " 1387 "(%ld), taking this port offline " 1388 "Data: x%x x%x x%x\n", 1389 temperature, phba->work_hs, 1390 phba->work_status[0], phba->work_status[1]); 1391 1392 shost = lpfc_shost_from_vport(phba->pport); 1393 fc_host_post_vendor_event(shost, fc_get_event_number(), 1394 sizeof(temp_event_data), 1395 (char *) &temp_event_data, 1396 SCSI_NL_VID_TYPE_PCI 1397 | PCI_VENDOR_ID_EMULEX); 1398 1399 spin_lock_irq(&phba->hbalock); 1400 phba->over_temp_state = HBA_OVER_TEMP; 1401 spin_unlock_irq(&phba->hbalock); 1402 lpfc_offline_eratt(phba); 1403 1404 } else { 1405 /* The if clause above forces this code path when the status 1406 * failure is a value other than FFER6. Do not call the offline 1407 * twice. This is the adapter hardware error path. 1408 */ 1409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1410 "0457 Adapter Hardware Error " 1411 "Data: x%x x%x x%x\n", 1412 phba->work_hs, 1413 phba->work_status[0], phba->work_status[1]); 1414 1415 event_data = FC_REG_DUMP_EVENT; 1416 shost = lpfc_shost_from_vport(vport); 1417 fc_host_post_vendor_event(shost, fc_get_event_number(), 1418 sizeof(event_data), (char *) &event_data, 1419 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1420 1421 lpfc_offline_eratt(phba); 1422 } 1423 return; 1424 } 1425 1426 /** 1427 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1428 * @phba: pointer to lpfc hba data structure. 1429 * 1430 * This routine is invoked to handle the SLI4 HBA hardware error attention 1431 * conditions. 1432 **/ 1433 static void 1434 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1435 { 1436 struct lpfc_vport *vport = phba->pport; 1437 uint32_t event_data; 1438 struct Scsi_Host *shost; 1439 uint32_t if_type; 1440 struct lpfc_register portstat_reg; 1441 1442 /* If the pci channel is offline, ignore possible errors, since 1443 * we cannot communicate with the pci card anyway. 1444 */ 1445 if (pci_channel_offline(phba->pcidev)) 1446 return; 1447 /* If resets are disabled then leave the HBA alone and return */ 1448 if (!phba->cfg_enable_hba_reset) 1449 return; 1450 1451 /* Send an internal error event to mgmt application */ 1452 lpfc_board_errevt_to_mgmt(phba); 1453 1454 /* For now, the actual action for SLI4 device handling is not 1455 * specified yet, just treated it as adaptor hardware failure 1456 */ 1457 event_data = FC_REG_DUMP_EVENT; 1458 shost = lpfc_shost_from_vport(vport); 1459 fc_host_post_vendor_event(shost, fc_get_event_number(), 1460 sizeof(event_data), (char *) &event_data, 1461 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1462 1463 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1464 switch (if_type) { 1465 case LPFC_SLI_INTF_IF_TYPE_0: 1466 lpfc_sli4_offline_eratt(phba); 1467 break; 1468 case LPFC_SLI_INTF_IF_TYPE_2: 1469 portstat_reg.word0 = 1470 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 1471 1472 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1473 /* TODO: Register for Overtemp async events. */ 1474 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1475 "2889 Port Overtemperature event, " 1476 "taking port\n"); 1477 spin_lock_irq(&phba->hbalock); 1478 phba->over_temp_state = HBA_OVER_TEMP; 1479 spin_unlock_irq(&phba->hbalock); 1480 lpfc_sli4_offline_eratt(phba); 1481 return; 1482 } 1483 if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) { 1484 /* 1485 * TODO: Attempt port recovery via a port reset. 1486 * When fully implemented, the driver should 1487 * attempt to recover the port here and return. 1488 * For now, log an error and take the port offline. 1489 */ 1490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1491 "2887 Port Error: Attempting " 1492 "Port Recovery\n"); 1493 } 1494 lpfc_sli4_offline_eratt(phba); 1495 break; 1496 case LPFC_SLI_INTF_IF_TYPE_1: 1497 default: 1498 break; 1499 } 1500 } 1501 1502 /** 1503 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1504 * @phba: pointer to lpfc HBA data structure. 1505 * 1506 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1507 * routine from the API jump table function pointer from the lpfc_hba struct. 1508 * 1509 * Return codes 1510 * 0 - success. 1511 * Any other value - error. 1512 **/ 1513 void 1514 lpfc_handle_eratt(struct lpfc_hba *phba) 1515 { 1516 (*phba->lpfc_handle_eratt)(phba); 1517 } 1518 1519 /** 1520 * lpfc_handle_latt - The HBA link event handler 1521 * @phba: pointer to lpfc hba data structure. 1522 * 1523 * This routine is invoked from the worker thread to handle a HBA host 1524 * attention link event. 1525 **/ 1526 void 1527 lpfc_handle_latt(struct lpfc_hba *phba) 1528 { 1529 struct lpfc_vport *vport = phba->pport; 1530 struct lpfc_sli *psli = &phba->sli; 1531 LPFC_MBOXQ_t *pmb; 1532 volatile uint32_t control; 1533 struct lpfc_dmabuf *mp; 1534 int rc = 0; 1535 1536 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1537 if (!pmb) { 1538 rc = 1; 1539 goto lpfc_handle_latt_err_exit; 1540 } 1541 1542 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1543 if (!mp) { 1544 rc = 2; 1545 goto lpfc_handle_latt_free_pmb; 1546 } 1547 1548 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1549 if (!mp->virt) { 1550 rc = 3; 1551 goto lpfc_handle_latt_free_mp; 1552 } 1553 1554 /* Cleanup any outstanding ELS commands */ 1555 lpfc_els_flush_all_cmd(phba); 1556 1557 psli->slistat.link_event++; 1558 lpfc_read_topology(phba, pmb, mp); 1559 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 1560 pmb->vport = vport; 1561 /* Block ELS IOCBs until we have processed this mbox command */ 1562 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1563 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1564 if (rc == MBX_NOT_FINISHED) { 1565 rc = 4; 1566 goto lpfc_handle_latt_free_mbuf; 1567 } 1568 1569 /* Clear Link Attention in HA REG */ 1570 spin_lock_irq(&phba->hbalock); 1571 writel(HA_LATT, phba->HAregaddr); 1572 readl(phba->HAregaddr); /* flush */ 1573 spin_unlock_irq(&phba->hbalock); 1574 1575 return; 1576 1577 lpfc_handle_latt_free_mbuf: 1578 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1579 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1580 lpfc_handle_latt_free_mp: 1581 kfree(mp); 1582 lpfc_handle_latt_free_pmb: 1583 mempool_free(pmb, phba->mbox_mem_pool); 1584 lpfc_handle_latt_err_exit: 1585 /* Enable Link attention interrupts */ 1586 spin_lock_irq(&phba->hbalock); 1587 psli->sli_flag |= LPFC_PROCESS_LA; 1588 control = readl(phba->HCregaddr); 1589 control |= HC_LAINT_ENA; 1590 writel(control, phba->HCregaddr); 1591 readl(phba->HCregaddr); /* flush */ 1592 1593 /* Clear Link Attention in HA REG */ 1594 writel(HA_LATT, phba->HAregaddr); 1595 readl(phba->HAregaddr); /* flush */ 1596 spin_unlock_irq(&phba->hbalock); 1597 lpfc_linkdown(phba); 1598 phba->link_state = LPFC_HBA_ERROR; 1599 1600 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1601 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1602 1603 return; 1604 } 1605 1606 /** 1607 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1608 * @phba: pointer to lpfc hba data structure. 1609 * @vpd: pointer to the vital product data. 1610 * @len: length of the vital product data in bytes. 1611 * 1612 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1613 * an array of characters. In this routine, the ModelName, ProgramType, and 1614 * ModelDesc, etc. fields of the phba data structure will be populated. 1615 * 1616 * Return codes 1617 * 0 - pointer to the VPD passed in is NULL 1618 * 1 - success 1619 **/ 1620 int 1621 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1622 { 1623 uint8_t lenlo, lenhi; 1624 int Length; 1625 int i, j; 1626 int finished = 0; 1627 int index = 0; 1628 1629 if (!vpd) 1630 return 0; 1631 1632 /* Vital Product */ 1633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1634 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1635 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1636 (uint32_t) vpd[3]); 1637 while (!finished && (index < (len - 4))) { 1638 switch (vpd[index]) { 1639 case 0x82: 1640 case 0x91: 1641 index += 1; 1642 lenlo = vpd[index]; 1643 index += 1; 1644 lenhi = vpd[index]; 1645 index += 1; 1646 i = ((((unsigned short)lenhi) << 8) + lenlo); 1647 index += i; 1648 break; 1649 case 0x90: 1650 index += 1; 1651 lenlo = vpd[index]; 1652 index += 1; 1653 lenhi = vpd[index]; 1654 index += 1; 1655 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1656 if (Length > len - index) 1657 Length = len - index; 1658 while (Length > 0) { 1659 /* Look for Serial Number */ 1660 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1661 index += 2; 1662 i = vpd[index]; 1663 index += 1; 1664 j = 0; 1665 Length -= (3+i); 1666 while(i--) { 1667 phba->SerialNumber[j++] = vpd[index++]; 1668 if (j == 31) 1669 break; 1670 } 1671 phba->SerialNumber[j] = 0; 1672 continue; 1673 } 1674 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1675 phba->vpd_flag |= VPD_MODEL_DESC; 1676 index += 2; 1677 i = vpd[index]; 1678 index += 1; 1679 j = 0; 1680 Length -= (3+i); 1681 while(i--) { 1682 phba->ModelDesc[j++] = vpd[index++]; 1683 if (j == 255) 1684 break; 1685 } 1686 phba->ModelDesc[j] = 0; 1687 continue; 1688 } 1689 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1690 phba->vpd_flag |= VPD_MODEL_NAME; 1691 index += 2; 1692 i = vpd[index]; 1693 index += 1; 1694 j = 0; 1695 Length -= (3+i); 1696 while(i--) { 1697 phba->ModelName[j++] = vpd[index++]; 1698 if (j == 79) 1699 break; 1700 } 1701 phba->ModelName[j] = 0; 1702 continue; 1703 } 1704 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1705 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1706 index += 2; 1707 i = vpd[index]; 1708 index += 1; 1709 j = 0; 1710 Length -= (3+i); 1711 while(i--) { 1712 phba->ProgramType[j++] = vpd[index++]; 1713 if (j == 255) 1714 break; 1715 } 1716 phba->ProgramType[j] = 0; 1717 continue; 1718 } 1719 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1720 phba->vpd_flag |= VPD_PORT; 1721 index += 2; 1722 i = vpd[index]; 1723 index += 1; 1724 j = 0; 1725 Length -= (3+i); 1726 while(i--) { 1727 phba->Port[j++] = vpd[index++]; 1728 if (j == 19) 1729 break; 1730 } 1731 phba->Port[j] = 0; 1732 continue; 1733 } 1734 else { 1735 index += 2; 1736 i = vpd[index]; 1737 index += 1; 1738 index += i; 1739 Length -= (3 + i); 1740 } 1741 } 1742 finished = 0; 1743 break; 1744 case 0x78: 1745 finished = 1; 1746 break; 1747 default: 1748 index ++; 1749 break; 1750 } 1751 } 1752 1753 return(1); 1754 } 1755 1756 /** 1757 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 1758 * @phba: pointer to lpfc hba data structure. 1759 * @mdp: pointer to the data structure to hold the derived model name. 1760 * @descp: pointer to the data structure to hold the derived description. 1761 * 1762 * This routine retrieves HBA's description based on its registered PCI device 1763 * ID. The @descp passed into this function points to an array of 256 chars. It 1764 * shall be returned with the model name, maximum speed, and the host bus type. 1765 * The @mdp passed into this function points to an array of 80 chars. When the 1766 * function returns, the @mdp will be filled with the model name. 1767 **/ 1768 static void 1769 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1770 { 1771 lpfc_vpd_t *vp; 1772 uint16_t dev_id = phba->pcidev->device; 1773 int max_speed; 1774 int GE = 0; 1775 int oneConnect = 0; /* default is not a oneConnect */ 1776 struct { 1777 char *name; 1778 char *bus; 1779 char *function; 1780 } m = {"<Unknown>", "", ""}; 1781 1782 if (mdp && mdp[0] != '\0' 1783 && descp && descp[0] != '\0') 1784 return; 1785 1786 if (phba->lmt & LMT_16Gb) 1787 max_speed = 16; 1788 else if (phba->lmt & LMT_10Gb) 1789 max_speed = 10; 1790 else if (phba->lmt & LMT_8Gb) 1791 max_speed = 8; 1792 else if (phba->lmt & LMT_4Gb) 1793 max_speed = 4; 1794 else if (phba->lmt & LMT_2Gb) 1795 max_speed = 2; 1796 else 1797 max_speed = 1; 1798 1799 vp = &phba->vpd; 1800 1801 switch (dev_id) { 1802 case PCI_DEVICE_ID_FIREFLY: 1803 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 1804 break; 1805 case PCI_DEVICE_ID_SUPERFLY: 1806 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1807 m = (typeof(m)){"LP7000", "PCI", 1808 "Fibre Channel Adapter"}; 1809 else 1810 m = (typeof(m)){"LP7000E", "PCI", 1811 "Fibre Channel Adapter"}; 1812 break; 1813 case PCI_DEVICE_ID_DRAGONFLY: 1814 m = (typeof(m)){"LP8000", "PCI", 1815 "Fibre Channel Adapter"}; 1816 break; 1817 case PCI_DEVICE_ID_CENTAUR: 1818 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1819 m = (typeof(m)){"LP9002", "PCI", 1820 "Fibre Channel Adapter"}; 1821 else 1822 m = (typeof(m)){"LP9000", "PCI", 1823 "Fibre Channel Adapter"}; 1824 break; 1825 case PCI_DEVICE_ID_RFLY: 1826 m = (typeof(m)){"LP952", "PCI", 1827 "Fibre Channel Adapter"}; 1828 break; 1829 case PCI_DEVICE_ID_PEGASUS: 1830 m = (typeof(m)){"LP9802", "PCI-X", 1831 "Fibre Channel Adapter"}; 1832 break; 1833 case PCI_DEVICE_ID_THOR: 1834 m = (typeof(m)){"LP10000", "PCI-X", 1835 "Fibre Channel Adapter"}; 1836 break; 1837 case PCI_DEVICE_ID_VIPER: 1838 m = (typeof(m)){"LPX1000", "PCI-X", 1839 "Fibre Channel Adapter"}; 1840 break; 1841 case PCI_DEVICE_ID_PFLY: 1842 m = (typeof(m)){"LP982", "PCI-X", 1843 "Fibre Channel Adapter"}; 1844 break; 1845 case PCI_DEVICE_ID_TFLY: 1846 m = (typeof(m)){"LP1050", "PCI-X", 1847 "Fibre Channel Adapter"}; 1848 break; 1849 case PCI_DEVICE_ID_HELIOS: 1850 m = (typeof(m)){"LP11000", "PCI-X2", 1851 "Fibre Channel Adapter"}; 1852 break; 1853 case PCI_DEVICE_ID_HELIOS_SCSP: 1854 m = (typeof(m)){"LP11000-SP", "PCI-X2", 1855 "Fibre Channel Adapter"}; 1856 break; 1857 case PCI_DEVICE_ID_HELIOS_DCSP: 1858 m = (typeof(m)){"LP11002-SP", "PCI-X2", 1859 "Fibre Channel Adapter"}; 1860 break; 1861 case PCI_DEVICE_ID_NEPTUNE: 1862 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 1863 break; 1864 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1865 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 1866 break; 1867 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1868 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 1869 break; 1870 case PCI_DEVICE_ID_BMID: 1871 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 1872 break; 1873 case PCI_DEVICE_ID_BSMB: 1874 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 1875 break; 1876 case PCI_DEVICE_ID_ZEPHYR: 1877 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1878 break; 1879 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1880 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1881 break; 1882 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1883 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1884 GE = 1; 1885 break; 1886 case PCI_DEVICE_ID_ZMID: 1887 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1888 break; 1889 case PCI_DEVICE_ID_ZSMB: 1890 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1891 break; 1892 case PCI_DEVICE_ID_LP101: 1893 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1894 break; 1895 case PCI_DEVICE_ID_LP10000S: 1896 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1897 break; 1898 case PCI_DEVICE_ID_LP11000S: 1899 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1900 break; 1901 case PCI_DEVICE_ID_LPE11000S: 1902 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1903 break; 1904 case PCI_DEVICE_ID_SAT: 1905 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1906 break; 1907 case PCI_DEVICE_ID_SAT_MID: 1908 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1909 break; 1910 case PCI_DEVICE_ID_SAT_SMB: 1911 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1912 break; 1913 case PCI_DEVICE_ID_SAT_DCSP: 1914 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1915 break; 1916 case PCI_DEVICE_ID_SAT_SCSP: 1917 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1918 break; 1919 case PCI_DEVICE_ID_SAT_S: 1920 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1921 break; 1922 case PCI_DEVICE_ID_HORNET: 1923 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1924 GE = 1; 1925 break; 1926 case PCI_DEVICE_ID_PROTEUS_VF: 1927 m = (typeof(m)){"LPev12000", "PCIe IOV", 1928 "Fibre Channel Adapter"}; 1929 break; 1930 case PCI_DEVICE_ID_PROTEUS_PF: 1931 m = (typeof(m)){"LPev12000", "PCIe IOV", 1932 "Fibre Channel Adapter"}; 1933 break; 1934 case PCI_DEVICE_ID_PROTEUS_S: 1935 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1936 "Fibre Channel Adapter"}; 1937 break; 1938 case PCI_DEVICE_ID_TIGERSHARK: 1939 oneConnect = 1; 1940 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1941 break; 1942 case PCI_DEVICE_ID_TOMCAT: 1943 oneConnect = 1; 1944 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1945 break; 1946 case PCI_DEVICE_ID_FALCON: 1947 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1948 "EmulexSecure Fibre"}; 1949 break; 1950 case PCI_DEVICE_ID_BALIUS: 1951 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1952 "Fibre Channel Adapter"}; 1953 break; 1954 case PCI_DEVICE_ID_LANCER_FC: 1955 case PCI_DEVICE_ID_LANCER_FC_VF: 1956 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 1957 break; 1958 case PCI_DEVICE_ID_LANCER_FCOE: 1959 case PCI_DEVICE_ID_LANCER_FCOE_VF: 1960 oneConnect = 1; 1961 m = (typeof(m)){"OCe50100", "PCIe", "FCoE"}; 1962 break; 1963 default: 1964 m = (typeof(m)){"Unknown", "", ""}; 1965 break; 1966 } 1967 1968 if (mdp && mdp[0] == '\0') 1969 snprintf(mdp, 79,"%s", m.name); 1970 /* 1971 * oneConnect hba requires special processing, they are all initiators 1972 * and we put the port number on the end 1973 */ 1974 if (descp && descp[0] == '\0') { 1975 if (oneConnect) 1976 snprintf(descp, 255, 1977 "Emulex OneConnect %s, %s Initiator, Port %s", 1978 m.name, m.function, 1979 phba->Port); 1980 else 1981 snprintf(descp, 255, 1982 "Emulex %s %d%s %s %s", 1983 m.name, max_speed, (GE) ? "GE" : "Gb", 1984 m.bus, m.function); 1985 } 1986 } 1987 1988 /** 1989 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1990 * @phba: pointer to lpfc hba data structure. 1991 * @pring: pointer to a IOCB ring. 1992 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1993 * 1994 * This routine posts a given number of IOCBs with the associated DMA buffer 1995 * descriptors specified by the cnt argument to the given IOCB ring. 1996 * 1997 * Return codes 1998 * The number of IOCBs NOT able to be posted to the IOCB ring. 1999 **/ 2000 int 2001 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2002 { 2003 IOCB_t *icmd; 2004 struct lpfc_iocbq *iocb; 2005 struct lpfc_dmabuf *mp1, *mp2; 2006 2007 cnt += pring->missbufcnt; 2008 2009 /* While there are buffers to post */ 2010 while (cnt > 0) { 2011 /* Allocate buffer for command iocb */ 2012 iocb = lpfc_sli_get_iocbq(phba); 2013 if (iocb == NULL) { 2014 pring->missbufcnt = cnt; 2015 return cnt; 2016 } 2017 icmd = &iocb->iocb; 2018 2019 /* 2 buffers can be posted per command */ 2020 /* Allocate buffer to post */ 2021 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2022 if (mp1) 2023 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2024 if (!mp1 || !mp1->virt) { 2025 kfree(mp1); 2026 lpfc_sli_release_iocbq(phba, iocb); 2027 pring->missbufcnt = cnt; 2028 return cnt; 2029 } 2030 2031 INIT_LIST_HEAD(&mp1->list); 2032 /* Allocate buffer to post */ 2033 if (cnt > 1) { 2034 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2035 if (mp2) 2036 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2037 &mp2->phys); 2038 if (!mp2 || !mp2->virt) { 2039 kfree(mp2); 2040 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2041 kfree(mp1); 2042 lpfc_sli_release_iocbq(phba, iocb); 2043 pring->missbufcnt = cnt; 2044 return cnt; 2045 } 2046 2047 INIT_LIST_HEAD(&mp2->list); 2048 } else { 2049 mp2 = NULL; 2050 } 2051 2052 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2053 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2054 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2055 icmd->ulpBdeCount = 1; 2056 cnt--; 2057 if (mp2) { 2058 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2059 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2060 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2061 cnt--; 2062 icmd->ulpBdeCount = 2; 2063 } 2064 2065 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2066 icmd->ulpLe = 1; 2067 2068 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2069 IOCB_ERROR) { 2070 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2071 kfree(mp1); 2072 cnt++; 2073 if (mp2) { 2074 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2075 kfree(mp2); 2076 cnt++; 2077 } 2078 lpfc_sli_release_iocbq(phba, iocb); 2079 pring->missbufcnt = cnt; 2080 return cnt; 2081 } 2082 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2083 if (mp2) 2084 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2085 } 2086 pring->missbufcnt = 0; 2087 return 0; 2088 } 2089 2090 /** 2091 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2092 * @phba: pointer to lpfc hba data structure. 2093 * 2094 * This routine posts initial receive IOCB buffers to the ELS ring. The 2095 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2096 * set to 64 IOCBs. 2097 * 2098 * Return codes 2099 * 0 - success (currently always success) 2100 **/ 2101 static int 2102 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2103 { 2104 struct lpfc_sli *psli = &phba->sli; 2105 2106 /* Ring 0, ELS / CT buffers */ 2107 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2108 /* Ring 2 - FCP no buffers needed */ 2109 2110 return 0; 2111 } 2112 2113 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2114 2115 /** 2116 * lpfc_sha_init - Set up initial array of hash table entries 2117 * @HashResultPointer: pointer to an array as hash table. 2118 * 2119 * This routine sets up the initial values to the array of hash table entries 2120 * for the LC HBAs. 2121 **/ 2122 static void 2123 lpfc_sha_init(uint32_t * HashResultPointer) 2124 { 2125 HashResultPointer[0] = 0x67452301; 2126 HashResultPointer[1] = 0xEFCDAB89; 2127 HashResultPointer[2] = 0x98BADCFE; 2128 HashResultPointer[3] = 0x10325476; 2129 HashResultPointer[4] = 0xC3D2E1F0; 2130 } 2131 2132 /** 2133 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2134 * @HashResultPointer: pointer to an initial/result hash table. 2135 * @HashWorkingPointer: pointer to an working hash table. 2136 * 2137 * This routine iterates an initial hash table pointed by @HashResultPointer 2138 * with the values from the working hash table pointeed by @HashWorkingPointer. 2139 * The results are putting back to the initial hash table, returned through 2140 * the @HashResultPointer as the result hash table. 2141 **/ 2142 static void 2143 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2144 { 2145 int t; 2146 uint32_t TEMP; 2147 uint32_t A, B, C, D, E; 2148 t = 16; 2149 do { 2150 HashWorkingPointer[t] = 2151 S(1, 2152 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2153 8] ^ 2154 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2155 } while (++t <= 79); 2156 t = 0; 2157 A = HashResultPointer[0]; 2158 B = HashResultPointer[1]; 2159 C = HashResultPointer[2]; 2160 D = HashResultPointer[3]; 2161 E = HashResultPointer[4]; 2162 2163 do { 2164 if (t < 20) { 2165 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2166 } else if (t < 40) { 2167 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2168 } else if (t < 60) { 2169 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2170 } else { 2171 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2172 } 2173 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2174 E = D; 2175 D = C; 2176 C = S(30, B); 2177 B = A; 2178 A = TEMP; 2179 } while (++t <= 79); 2180 2181 HashResultPointer[0] += A; 2182 HashResultPointer[1] += B; 2183 HashResultPointer[2] += C; 2184 HashResultPointer[3] += D; 2185 HashResultPointer[4] += E; 2186 2187 } 2188 2189 /** 2190 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2191 * @RandomChallenge: pointer to the entry of host challenge random number array. 2192 * @HashWorking: pointer to the entry of the working hash array. 2193 * 2194 * This routine calculates the working hash array referred by @HashWorking 2195 * from the challenge random numbers associated with the host, referred by 2196 * @RandomChallenge. The result is put into the entry of the working hash 2197 * array and returned by reference through @HashWorking. 2198 **/ 2199 static void 2200 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2201 { 2202 *HashWorking = (*RandomChallenge ^ *HashWorking); 2203 } 2204 2205 /** 2206 * lpfc_hba_init - Perform special handling for LC HBA initialization 2207 * @phba: pointer to lpfc hba data structure. 2208 * @hbainit: pointer to an array of unsigned 32-bit integers. 2209 * 2210 * This routine performs the special handling for LC HBA initialization. 2211 **/ 2212 void 2213 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2214 { 2215 int t; 2216 uint32_t *HashWorking; 2217 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2218 2219 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2220 if (!HashWorking) 2221 return; 2222 2223 HashWorking[0] = HashWorking[78] = *pwwnn++; 2224 HashWorking[1] = HashWorking[79] = *pwwnn; 2225 2226 for (t = 0; t < 7; t++) 2227 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2228 2229 lpfc_sha_init(hbainit); 2230 lpfc_sha_iterate(hbainit, HashWorking); 2231 kfree(HashWorking); 2232 } 2233 2234 /** 2235 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2236 * @vport: pointer to a virtual N_Port data structure. 2237 * 2238 * This routine performs the necessary cleanups before deleting the @vport. 2239 * It invokes the discovery state machine to perform necessary state 2240 * transitions and to release the ndlps associated with the @vport. Note, 2241 * the physical port is treated as @vport 0. 2242 **/ 2243 void 2244 lpfc_cleanup(struct lpfc_vport *vport) 2245 { 2246 struct lpfc_hba *phba = vport->phba; 2247 struct lpfc_nodelist *ndlp, *next_ndlp; 2248 int i = 0; 2249 2250 if (phba->link_state > LPFC_LINK_DOWN) 2251 lpfc_port_link_failure(vport); 2252 2253 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2254 if (!NLP_CHK_NODE_ACT(ndlp)) { 2255 ndlp = lpfc_enable_node(vport, ndlp, 2256 NLP_STE_UNUSED_NODE); 2257 if (!ndlp) 2258 continue; 2259 spin_lock_irq(&phba->ndlp_lock); 2260 NLP_SET_FREE_REQ(ndlp); 2261 spin_unlock_irq(&phba->ndlp_lock); 2262 /* Trigger the release of the ndlp memory */ 2263 lpfc_nlp_put(ndlp); 2264 continue; 2265 } 2266 spin_lock_irq(&phba->ndlp_lock); 2267 if (NLP_CHK_FREE_REQ(ndlp)) { 2268 /* The ndlp should not be in memory free mode already */ 2269 spin_unlock_irq(&phba->ndlp_lock); 2270 continue; 2271 } else 2272 /* Indicate request for freeing ndlp memory */ 2273 NLP_SET_FREE_REQ(ndlp); 2274 spin_unlock_irq(&phba->ndlp_lock); 2275 2276 if (vport->port_type != LPFC_PHYSICAL_PORT && 2277 ndlp->nlp_DID == Fabric_DID) { 2278 /* Just free up ndlp with Fabric_DID for vports */ 2279 lpfc_nlp_put(ndlp); 2280 continue; 2281 } 2282 2283 if (ndlp->nlp_type & NLP_FABRIC) 2284 lpfc_disc_state_machine(vport, ndlp, NULL, 2285 NLP_EVT_DEVICE_RECOVERY); 2286 2287 lpfc_disc_state_machine(vport, ndlp, NULL, 2288 NLP_EVT_DEVICE_RM); 2289 2290 } 2291 2292 /* At this point, ALL ndlp's should be gone 2293 * because of the previous NLP_EVT_DEVICE_RM. 2294 * Lets wait for this to happen, if needed. 2295 */ 2296 while (!list_empty(&vport->fc_nodes)) { 2297 if (i++ > 3000) { 2298 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2299 "0233 Nodelist not empty\n"); 2300 list_for_each_entry_safe(ndlp, next_ndlp, 2301 &vport->fc_nodes, nlp_listp) { 2302 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2303 LOG_NODE, 2304 "0282 did:x%x ndlp:x%p " 2305 "usgmap:x%x refcnt:%d\n", 2306 ndlp->nlp_DID, (void *)ndlp, 2307 ndlp->nlp_usg_map, 2308 atomic_read( 2309 &ndlp->kref.refcount)); 2310 } 2311 break; 2312 } 2313 2314 /* Wait for any activity on ndlps to settle */ 2315 msleep(10); 2316 } 2317 lpfc_cleanup_vports_rrqs(vport, NULL); 2318 } 2319 2320 /** 2321 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2322 * @vport: pointer to a virtual N_Port data structure. 2323 * 2324 * This routine stops all the timers associated with a @vport. This function 2325 * is invoked before disabling or deleting a @vport. Note that the physical 2326 * port is treated as @vport 0. 2327 **/ 2328 void 2329 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2330 { 2331 del_timer_sync(&vport->els_tmofunc); 2332 del_timer_sync(&vport->fc_fdmitmo); 2333 del_timer_sync(&vport->delayed_disc_tmo); 2334 lpfc_can_disctmo(vport); 2335 return; 2336 } 2337 2338 /** 2339 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2340 * @phba: pointer to lpfc hba data structure. 2341 * 2342 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2343 * caller of this routine should already hold the host lock. 2344 **/ 2345 void 2346 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2347 { 2348 /* Clear pending FCF rediscovery wait flag */ 2349 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2350 2351 /* Now, try to stop the timer */ 2352 del_timer(&phba->fcf.redisc_wait); 2353 } 2354 2355 /** 2356 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2357 * @phba: pointer to lpfc hba data structure. 2358 * 2359 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2360 * checks whether the FCF rediscovery wait timer is pending with the host 2361 * lock held before proceeding with disabling the timer and clearing the 2362 * wait timer pendig flag. 2363 **/ 2364 void 2365 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2366 { 2367 spin_lock_irq(&phba->hbalock); 2368 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2369 /* FCF rediscovery timer already fired or stopped */ 2370 spin_unlock_irq(&phba->hbalock); 2371 return; 2372 } 2373 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2374 /* Clear failover in progress flags */ 2375 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2376 spin_unlock_irq(&phba->hbalock); 2377 } 2378 2379 /** 2380 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2381 * @phba: pointer to lpfc hba data structure. 2382 * 2383 * This routine stops all the timers associated with a HBA. This function is 2384 * invoked before either putting a HBA offline or unloading the driver. 2385 **/ 2386 void 2387 lpfc_stop_hba_timers(struct lpfc_hba *phba) 2388 { 2389 lpfc_stop_vport_timers(phba->pport); 2390 del_timer_sync(&phba->sli.mbox_tmo); 2391 del_timer_sync(&phba->fabric_block_timer); 2392 del_timer_sync(&phba->eratt_poll); 2393 del_timer_sync(&phba->hb_tmofunc); 2394 if (phba->sli_rev == LPFC_SLI_REV4) { 2395 del_timer_sync(&phba->rrq_tmr); 2396 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 2397 } 2398 phba->hb_outstanding = 0; 2399 2400 switch (phba->pci_dev_grp) { 2401 case LPFC_PCI_DEV_LP: 2402 /* Stop any LightPulse device specific driver timers */ 2403 del_timer_sync(&phba->fcp_poll_timer); 2404 break; 2405 case LPFC_PCI_DEV_OC: 2406 /* Stop any OneConnect device sepcific driver timers */ 2407 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2408 break; 2409 default: 2410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2411 "0297 Invalid device group (x%x)\n", 2412 phba->pci_dev_grp); 2413 break; 2414 } 2415 return; 2416 } 2417 2418 /** 2419 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2420 * @phba: pointer to lpfc hba data structure. 2421 * 2422 * This routine marks a HBA's management interface as blocked. Once the HBA's 2423 * management interface is marked as blocked, all the user space access to 2424 * the HBA, whether they are from sysfs interface or libdfc interface will 2425 * all be blocked. The HBA is set to block the management interface when the 2426 * driver prepares the HBA interface for online or offline. 2427 **/ 2428 static void 2429 lpfc_block_mgmt_io(struct lpfc_hba * phba) 2430 { 2431 unsigned long iflag; 2432 uint8_t actcmd = MBX_HEARTBEAT; 2433 unsigned long timeout; 2434 2435 2436 spin_lock_irqsave(&phba->hbalock, iflag); 2437 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2438 if (phba->sli.mbox_active) 2439 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2440 spin_unlock_irqrestore(&phba->hbalock, iflag); 2441 /* Determine how long we might wait for the active mailbox 2442 * command to be gracefully completed by firmware. 2443 */ 2444 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 2445 jiffies; 2446 /* Wait for the outstnading mailbox command to complete */ 2447 while (phba->sli.mbox_active) { 2448 /* Check active mailbox complete status every 2ms */ 2449 msleep(2); 2450 if (time_after(jiffies, timeout)) { 2451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2452 "2813 Mgmt IO is Blocked %x " 2453 "- mbox cmd %x still active\n", 2454 phba->sli.sli_flag, actcmd); 2455 break; 2456 } 2457 } 2458 } 2459 2460 /** 2461 * lpfc_online - Initialize and bring a HBA online 2462 * @phba: pointer to lpfc hba data structure. 2463 * 2464 * This routine initializes the HBA and brings a HBA online. During this 2465 * process, the management interface is blocked to prevent user space access 2466 * to the HBA interfering with the driver initialization. 2467 * 2468 * Return codes 2469 * 0 - successful 2470 * 1 - failed 2471 **/ 2472 int 2473 lpfc_online(struct lpfc_hba *phba) 2474 { 2475 struct lpfc_vport *vport; 2476 struct lpfc_vport **vports; 2477 int i; 2478 2479 if (!phba) 2480 return 0; 2481 vport = phba->pport; 2482 2483 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2484 return 0; 2485 2486 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2487 "0458 Bring Adapter online\n"); 2488 2489 lpfc_block_mgmt_io(phba); 2490 2491 if (!lpfc_sli_queue_setup(phba)) { 2492 lpfc_unblock_mgmt_io(phba); 2493 return 1; 2494 } 2495 2496 if (phba->sli_rev == LPFC_SLI_REV4) { 2497 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2498 lpfc_unblock_mgmt_io(phba); 2499 return 1; 2500 } 2501 } else { 2502 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2503 lpfc_unblock_mgmt_io(phba); 2504 return 1; 2505 } 2506 } 2507 2508 vports = lpfc_create_vport_work_array(phba); 2509 if (vports != NULL) 2510 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2511 struct Scsi_Host *shost; 2512 shost = lpfc_shost_from_vport(vports[i]); 2513 spin_lock_irq(shost->host_lock); 2514 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2515 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2516 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2517 if (phba->sli_rev == LPFC_SLI_REV4) 2518 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2519 spin_unlock_irq(shost->host_lock); 2520 } 2521 lpfc_destroy_vport_work_array(phba, vports); 2522 2523 lpfc_unblock_mgmt_io(phba); 2524 return 0; 2525 } 2526 2527 /** 2528 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2529 * @phba: pointer to lpfc hba data structure. 2530 * 2531 * This routine marks a HBA's management interface as not blocked. Once the 2532 * HBA's management interface is marked as not blocked, all the user space 2533 * access to the HBA, whether they are from sysfs interface or libdfc 2534 * interface will be allowed. The HBA is set to block the management interface 2535 * when the driver prepares the HBA interface for online or offline and then 2536 * set to unblock the management interface afterwards. 2537 **/ 2538 void 2539 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2540 { 2541 unsigned long iflag; 2542 2543 spin_lock_irqsave(&phba->hbalock, iflag); 2544 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2545 spin_unlock_irqrestore(&phba->hbalock, iflag); 2546 } 2547 2548 /** 2549 * lpfc_offline_prep - Prepare a HBA to be brought offline 2550 * @phba: pointer to lpfc hba data structure. 2551 * 2552 * This routine is invoked to prepare a HBA to be brought offline. It performs 2553 * unregistration login to all the nodes on all vports and flushes the mailbox 2554 * queue to make it ready to be brought offline. 2555 **/ 2556 void 2557 lpfc_offline_prep(struct lpfc_hba * phba) 2558 { 2559 struct lpfc_vport *vport = phba->pport; 2560 struct lpfc_nodelist *ndlp, *next_ndlp; 2561 struct lpfc_vport **vports; 2562 struct Scsi_Host *shost; 2563 int i; 2564 2565 if (vport->fc_flag & FC_OFFLINE_MODE) 2566 return; 2567 2568 lpfc_block_mgmt_io(phba); 2569 2570 lpfc_linkdown(phba); 2571 2572 /* Issue an unreg_login to all nodes on all vports */ 2573 vports = lpfc_create_vport_work_array(phba); 2574 if (vports != NULL) { 2575 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2576 if (vports[i]->load_flag & FC_UNLOADING) 2577 continue; 2578 shost = lpfc_shost_from_vport(vports[i]); 2579 spin_lock_irq(shost->host_lock); 2580 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2581 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2582 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2583 spin_unlock_irq(shost->host_lock); 2584 2585 shost = lpfc_shost_from_vport(vports[i]); 2586 list_for_each_entry_safe(ndlp, next_ndlp, 2587 &vports[i]->fc_nodes, 2588 nlp_listp) { 2589 if (!NLP_CHK_NODE_ACT(ndlp)) 2590 continue; 2591 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2592 continue; 2593 if (ndlp->nlp_type & NLP_FABRIC) { 2594 lpfc_disc_state_machine(vports[i], ndlp, 2595 NULL, NLP_EVT_DEVICE_RECOVERY); 2596 lpfc_disc_state_machine(vports[i], ndlp, 2597 NULL, NLP_EVT_DEVICE_RM); 2598 } 2599 spin_lock_irq(shost->host_lock); 2600 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2601 spin_unlock_irq(shost->host_lock); 2602 lpfc_unreg_rpi(vports[i], ndlp); 2603 } 2604 } 2605 } 2606 lpfc_destroy_vport_work_array(phba, vports); 2607 2608 lpfc_sli_mbox_sys_shutdown(phba); 2609 } 2610 2611 /** 2612 * lpfc_offline - Bring a HBA offline 2613 * @phba: pointer to lpfc hba data structure. 2614 * 2615 * This routine actually brings a HBA offline. It stops all the timers 2616 * associated with the HBA, brings down the SLI layer, and eventually 2617 * marks the HBA as in offline state for the upper layer protocol. 2618 **/ 2619 void 2620 lpfc_offline(struct lpfc_hba *phba) 2621 { 2622 struct Scsi_Host *shost; 2623 struct lpfc_vport **vports; 2624 int i; 2625 2626 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2627 return; 2628 2629 /* stop port and all timers associated with this hba */ 2630 lpfc_stop_port(phba); 2631 vports = lpfc_create_vport_work_array(phba); 2632 if (vports != NULL) 2633 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2634 lpfc_stop_vport_timers(vports[i]); 2635 lpfc_destroy_vport_work_array(phba, vports); 2636 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2637 "0460 Bring Adapter offline\n"); 2638 /* Bring down the SLI Layer and cleanup. The HBA is offline 2639 now. */ 2640 lpfc_sli_hba_down(phba); 2641 spin_lock_irq(&phba->hbalock); 2642 phba->work_ha = 0; 2643 spin_unlock_irq(&phba->hbalock); 2644 vports = lpfc_create_vport_work_array(phba); 2645 if (vports != NULL) 2646 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2647 shost = lpfc_shost_from_vport(vports[i]); 2648 spin_lock_irq(shost->host_lock); 2649 vports[i]->work_port_events = 0; 2650 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2651 spin_unlock_irq(shost->host_lock); 2652 } 2653 lpfc_destroy_vport_work_array(phba, vports); 2654 } 2655 2656 /** 2657 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2658 * @phba: pointer to lpfc hba data structure. 2659 * 2660 * This routine is to free all the SCSI buffers and IOCBs from the driver 2661 * list back to kernel. It is called from lpfc_pci_remove_one to free 2662 * the internal resources before the device is removed from the system. 2663 * 2664 * Return codes 2665 * 0 - successful (for now, it always returns 0) 2666 **/ 2667 static int 2668 lpfc_scsi_free(struct lpfc_hba *phba) 2669 { 2670 struct lpfc_scsi_buf *sb, *sb_next; 2671 struct lpfc_iocbq *io, *io_next; 2672 2673 spin_lock_irq(&phba->hbalock); 2674 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2675 spin_lock(&phba->scsi_buf_list_lock); 2676 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2677 list_del(&sb->list); 2678 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2679 sb->dma_handle); 2680 kfree(sb); 2681 phba->total_scsi_bufs--; 2682 } 2683 spin_unlock(&phba->scsi_buf_list_lock); 2684 2685 /* Release all the lpfc_iocbq entries maintained by this host. */ 2686 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2687 list_del(&io->list); 2688 kfree(io); 2689 phba->total_iocbq_bufs--; 2690 } 2691 2692 spin_unlock_irq(&phba->hbalock); 2693 return 0; 2694 } 2695 2696 /** 2697 * lpfc_create_port - Create an FC port 2698 * @phba: pointer to lpfc hba data structure. 2699 * @instance: a unique integer ID to this FC port. 2700 * @dev: pointer to the device data structure. 2701 * 2702 * This routine creates a FC port for the upper layer protocol. The FC port 2703 * can be created on top of either a physical port or a virtual port provided 2704 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2705 * and associates the FC port created before adding the shost into the SCSI 2706 * layer. 2707 * 2708 * Return codes 2709 * @vport - pointer to the virtual N_Port data structure. 2710 * NULL - port create failed. 2711 **/ 2712 struct lpfc_vport * 2713 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2714 { 2715 struct lpfc_vport *vport; 2716 struct Scsi_Host *shost; 2717 int error = 0; 2718 2719 if (dev != &phba->pcidev->dev) 2720 shost = scsi_host_alloc(&lpfc_vport_template, 2721 sizeof(struct lpfc_vport)); 2722 else 2723 shost = scsi_host_alloc(&lpfc_template, 2724 sizeof(struct lpfc_vport)); 2725 if (!shost) 2726 goto out; 2727 2728 vport = (struct lpfc_vport *) shost->hostdata; 2729 vport->phba = phba; 2730 vport->load_flag |= FC_LOADING; 2731 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2732 vport->fc_rscn_flush = 0; 2733 2734 lpfc_get_vport_cfgparam(vport); 2735 shost->unique_id = instance; 2736 shost->max_id = LPFC_MAX_TARGET; 2737 shost->max_lun = vport->cfg_max_luns; 2738 shost->this_id = -1; 2739 shost->max_cmd_len = 16; 2740 if (phba->sli_rev == LPFC_SLI_REV4) { 2741 shost->dma_boundary = 2742 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2743 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2744 } 2745 2746 /* 2747 * Set initial can_queue value since 0 is no longer supported and 2748 * scsi_add_host will fail. This will be adjusted later based on the 2749 * max xri value determined in hba setup. 2750 */ 2751 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2752 if (dev != &phba->pcidev->dev) { 2753 shost->transportt = lpfc_vport_transport_template; 2754 vport->port_type = LPFC_NPIV_PORT; 2755 } else { 2756 shost->transportt = lpfc_transport_template; 2757 vport->port_type = LPFC_PHYSICAL_PORT; 2758 } 2759 2760 /* Initialize all internally managed lists. */ 2761 INIT_LIST_HEAD(&vport->fc_nodes); 2762 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2763 spin_lock_init(&vport->work_port_lock); 2764 2765 init_timer(&vport->fc_disctmo); 2766 vport->fc_disctmo.function = lpfc_disc_timeout; 2767 vport->fc_disctmo.data = (unsigned long)vport; 2768 2769 init_timer(&vport->fc_fdmitmo); 2770 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2771 vport->fc_fdmitmo.data = (unsigned long)vport; 2772 2773 init_timer(&vport->els_tmofunc); 2774 vport->els_tmofunc.function = lpfc_els_timeout; 2775 vport->els_tmofunc.data = (unsigned long)vport; 2776 2777 init_timer(&vport->delayed_disc_tmo); 2778 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 2779 vport->delayed_disc_tmo.data = (unsigned long)vport; 2780 2781 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2782 if (error) 2783 goto out_put_shost; 2784 2785 spin_lock_irq(&phba->hbalock); 2786 list_add_tail(&vport->listentry, &phba->port_list); 2787 spin_unlock_irq(&phba->hbalock); 2788 return vport; 2789 2790 out_put_shost: 2791 scsi_host_put(shost); 2792 out: 2793 return NULL; 2794 } 2795 2796 /** 2797 * destroy_port - destroy an FC port 2798 * @vport: pointer to an lpfc virtual N_Port data structure. 2799 * 2800 * This routine destroys a FC port from the upper layer protocol. All the 2801 * resources associated with the port are released. 2802 **/ 2803 void 2804 destroy_port(struct lpfc_vport *vport) 2805 { 2806 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2807 struct lpfc_hba *phba = vport->phba; 2808 2809 lpfc_debugfs_terminate(vport); 2810 fc_remove_host(shost); 2811 scsi_remove_host(shost); 2812 2813 spin_lock_irq(&phba->hbalock); 2814 list_del_init(&vport->listentry); 2815 spin_unlock_irq(&phba->hbalock); 2816 2817 lpfc_cleanup(vport); 2818 return; 2819 } 2820 2821 /** 2822 * lpfc_get_instance - Get a unique integer ID 2823 * 2824 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2825 * uses the kernel idr facility to perform the task. 2826 * 2827 * Return codes: 2828 * instance - a unique integer ID allocated as the new instance. 2829 * -1 - lpfc get instance failed. 2830 **/ 2831 int 2832 lpfc_get_instance(void) 2833 { 2834 int instance = 0; 2835 2836 /* Assign an unused number */ 2837 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2838 return -1; 2839 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2840 return -1; 2841 return instance; 2842 } 2843 2844 /** 2845 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2846 * @shost: pointer to SCSI host data structure. 2847 * @time: elapsed time of the scan in jiffies. 2848 * 2849 * This routine is called by the SCSI layer with a SCSI host to determine 2850 * whether the scan host is finished. 2851 * 2852 * Note: there is no scan_start function as adapter initialization will have 2853 * asynchronously kicked off the link initialization. 2854 * 2855 * Return codes 2856 * 0 - SCSI host scan is not over yet. 2857 * 1 - SCSI host scan is over. 2858 **/ 2859 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2860 { 2861 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2862 struct lpfc_hba *phba = vport->phba; 2863 int stat = 0; 2864 2865 spin_lock_irq(shost->host_lock); 2866 2867 if (vport->load_flag & FC_UNLOADING) { 2868 stat = 1; 2869 goto finished; 2870 } 2871 if (time >= 30 * HZ) { 2872 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2873 "0461 Scanning longer than 30 " 2874 "seconds. Continuing initialization\n"); 2875 stat = 1; 2876 goto finished; 2877 } 2878 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2879 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2880 "0465 Link down longer than 15 " 2881 "seconds. Continuing initialization\n"); 2882 stat = 1; 2883 goto finished; 2884 } 2885 2886 if (vport->port_state != LPFC_VPORT_READY) 2887 goto finished; 2888 if (vport->num_disc_nodes || vport->fc_prli_sent) 2889 goto finished; 2890 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2891 goto finished; 2892 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2893 goto finished; 2894 2895 stat = 1; 2896 2897 finished: 2898 spin_unlock_irq(shost->host_lock); 2899 return stat; 2900 } 2901 2902 /** 2903 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2904 * @shost: pointer to SCSI host data structure. 2905 * 2906 * This routine initializes a given SCSI host attributes on a FC port. The 2907 * SCSI host can be either on top of a physical port or a virtual port. 2908 **/ 2909 void lpfc_host_attrib_init(struct Scsi_Host *shost) 2910 { 2911 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2912 struct lpfc_hba *phba = vport->phba; 2913 /* 2914 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 2915 */ 2916 2917 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2918 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2919 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2920 2921 memset(fc_host_supported_fc4s(shost), 0, 2922 sizeof(fc_host_supported_fc4s(shost))); 2923 fc_host_supported_fc4s(shost)[2] = 1; 2924 fc_host_supported_fc4s(shost)[7] = 1; 2925 2926 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2927 sizeof fc_host_symbolic_name(shost)); 2928 2929 fc_host_supported_speeds(shost) = 0; 2930 if (phba->lmt & LMT_16Gb) 2931 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 2932 if (phba->lmt & LMT_10Gb) 2933 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2934 if (phba->lmt & LMT_8Gb) 2935 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2936 if (phba->lmt & LMT_4Gb) 2937 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2938 if (phba->lmt & LMT_2Gb) 2939 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2940 if (phba->lmt & LMT_1Gb) 2941 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2942 2943 fc_host_maxframe_size(shost) = 2944 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2945 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2946 2947 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 2948 2949 /* This value is also unchanging */ 2950 memset(fc_host_active_fc4s(shost), 0, 2951 sizeof(fc_host_active_fc4s(shost))); 2952 fc_host_active_fc4s(shost)[2] = 1; 2953 fc_host_active_fc4s(shost)[7] = 1; 2954 2955 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2956 spin_lock_irq(shost->host_lock); 2957 vport->load_flag &= ~FC_LOADING; 2958 spin_unlock_irq(shost->host_lock); 2959 } 2960 2961 /** 2962 * lpfc_stop_port_s3 - Stop SLI3 device port 2963 * @phba: pointer to lpfc hba data structure. 2964 * 2965 * This routine is invoked to stop an SLI3 device port, it stops the device 2966 * from generating interrupts and stops the device driver's timers for the 2967 * device. 2968 **/ 2969 static void 2970 lpfc_stop_port_s3(struct lpfc_hba *phba) 2971 { 2972 /* Clear all interrupt enable conditions */ 2973 writel(0, phba->HCregaddr); 2974 readl(phba->HCregaddr); /* flush */ 2975 /* Clear all pending interrupts */ 2976 writel(0xffffffff, phba->HAregaddr); 2977 readl(phba->HAregaddr); /* flush */ 2978 2979 /* Reset some HBA SLI setup states */ 2980 lpfc_stop_hba_timers(phba); 2981 phba->pport->work_port_events = 0; 2982 } 2983 2984 /** 2985 * lpfc_stop_port_s4 - Stop SLI4 device port 2986 * @phba: pointer to lpfc hba data structure. 2987 * 2988 * This routine is invoked to stop an SLI4 device port, it stops the device 2989 * from generating interrupts and stops the device driver's timers for the 2990 * device. 2991 **/ 2992 static void 2993 lpfc_stop_port_s4(struct lpfc_hba *phba) 2994 { 2995 /* Reset some HBA SLI4 setup states */ 2996 lpfc_stop_hba_timers(phba); 2997 phba->pport->work_port_events = 0; 2998 phba->sli4_hba.intr_enable = 0; 2999 } 3000 3001 /** 3002 * lpfc_stop_port - Wrapper function for stopping hba port 3003 * @phba: Pointer to HBA context object. 3004 * 3005 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 3006 * the API jump table function pointer from the lpfc_hba struct. 3007 **/ 3008 void 3009 lpfc_stop_port(struct lpfc_hba *phba) 3010 { 3011 phba->lpfc_stop_port(phba); 3012 } 3013 3014 /** 3015 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 3016 * @phba: Pointer to hba for which this call is being executed. 3017 * 3018 * This routine starts the timer waiting for the FCF rediscovery to complete. 3019 **/ 3020 void 3021 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 3022 { 3023 unsigned long fcf_redisc_wait_tmo = 3024 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 3025 /* Start fcf rediscovery wait period timer */ 3026 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 3027 spin_lock_irq(&phba->hbalock); 3028 /* Allow action to new fcf asynchronous event */ 3029 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 3030 /* Mark the FCF rediscovery pending state */ 3031 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 3032 spin_unlock_irq(&phba->hbalock); 3033 } 3034 3035 /** 3036 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 3037 * @ptr: Map to lpfc_hba data structure pointer. 3038 * 3039 * This routine is invoked when waiting for FCF table rediscover has been 3040 * timed out. If new FCF record(s) has (have) been discovered during the 3041 * wait period, a new FCF event shall be added to the FCOE async event 3042 * list, and then worker thread shall be waked up for processing from the 3043 * worker thread context. 3044 **/ 3045 void 3046 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 3047 { 3048 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 3049 3050 /* Don't send FCF rediscovery event if timer cancelled */ 3051 spin_lock_irq(&phba->hbalock); 3052 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3053 spin_unlock_irq(&phba->hbalock); 3054 return; 3055 } 3056 /* Clear FCF rediscovery timer pending flag */ 3057 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3058 /* FCF rediscovery event to worker thread */ 3059 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 3060 spin_unlock_irq(&phba->hbalock); 3061 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3062 "2776 FCF rediscover quiescent timer expired\n"); 3063 /* wake up worker thread */ 3064 lpfc_worker_wake_up(phba); 3065 } 3066 3067 /** 3068 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3069 * @phba: pointer to lpfc hba data structure. 3070 * @acqe_link: pointer to the async link completion queue entry. 3071 * 3072 * This routine is to parse the SLI4 link-attention link fault code and 3073 * translate it into the base driver's read link attention mailbox command 3074 * status. 3075 * 3076 * Return: Link-attention status in terms of base driver's coding. 3077 **/ 3078 static uint16_t 3079 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3080 struct lpfc_acqe_link *acqe_link) 3081 { 3082 uint16_t latt_fault; 3083 3084 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3085 case LPFC_ASYNC_LINK_FAULT_NONE: 3086 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3087 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3088 latt_fault = 0; 3089 break; 3090 default: 3091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3092 "0398 Invalid link fault code: x%x\n", 3093 bf_get(lpfc_acqe_link_fault, acqe_link)); 3094 latt_fault = MBXERR_ERROR; 3095 break; 3096 } 3097 return latt_fault; 3098 } 3099 3100 /** 3101 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3102 * @phba: pointer to lpfc hba data structure. 3103 * @acqe_link: pointer to the async link completion queue entry. 3104 * 3105 * This routine is to parse the SLI4 link attention type and translate it 3106 * into the base driver's link attention type coding. 3107 * 3108 * Return: Link attention type in terms of base driver's coding. 3109 **/ 3110 static uint8_t 3111 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3112 struct lpfc_acqe_link *acqe_link) 3113 { 3114 uint8_t att_type; 3115 3116 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3117 case LPFC_ASYNC_LINK_STATUS_DOWN: 3118 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3119 att_type = LPFC_ATT_LINK_DOWN; 3120 break; 3121 case LPFC_ASYNC_LINK_STATUS_UP: 3122 /* Ignore physical link up events - wait for logical link up */ 3123 att_type = LPFC_ATT_RESERVED; 3124 break; 3125 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3126 att_type = LPFC_ATT_LINK_UP; 3127 break; 3128 default: 3129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3130 "0399 Invalid link attention type: x%x\n", 3131 bf_get(lpfc_acqe_link_status, acqe_link)); 3132 att_type = LPFC_ATT_RESERVED; 3133 break; 3134 } 3135 return att_type; 3136 } 3137 3138 /** 3139 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3140 * @phba: pointer to lpfc hba data structure. 3141 * @acqe_link: pointer to the async link completion queue entry. 3142 * 3143 * This routine is to parse the SLI4 link-attention link speed and translate 3144 * it into the base driver's link-attention link speed coding. 3145 * 3146 * Return: Link-attention link speed in terms of base driver's coding. 3147 **/ 3148 static uint8_t 3149 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3150 struct lpfc_acqe_link *acqe_link) 3151 { 3152 uint8_t link_speed; 3153 3154 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3155 case LPFC_ASYNC_LINK_SPEED_ZERO: 3156 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3157 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3158 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3159 break; 3160 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3161 link_speed = LPFC_LINK_SPEED_1GHZ; 3162 break; 3163 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3164 link_speed = LPFC_LINK_SPEED_10GHZ; 3165 break; 3166 default: 3167 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3168 "0483 Invalid link-attention link speed: x%x\n", 3169 bf_get(lpfc_acqe_link_speed, acqe_link)); 3170 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3171 break; 3172 } 3173 return link_speed; 3174 } 3175 3176 /** 3177 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3178 * @phba: pointer to lpfc hba data structure. 3179 * @acqe_link: pointer to the async link completion queue entry. 3180 * 3181 * This routine is to handle the SLI4 asynchronous FCoE link event. 3182 **/ 3183 static void 3184 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3185 struct lpfc_acqe_link *acqe_link) 3186 { 3187 struct lpfc_dmabuf *mp; 3188 LPFC_MBOXQ_t *pmb; 3189 MAILBOX_t *mb; 3190 struct lpfc_mbx_read_top *la; 3191 uint8_t att_type; 3192 int rc; 3193 3194 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3195 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3196 return; 3197 phba->fcoe_eventtag = acqe_link->event_tag; 3198 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3199 if (!pmb) { 3200 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3201 "0395 The mboxq allocation failed\n"); 3202 return; 3203 } 3204 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3205 if (!mp) { 3206 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3207 "0396 The lpfc_dmabuf allocation failed\n"); 3208 goto out_free_pmb; 3209 } 3210 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3211 if (!mp->virt) { 3212 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3213 "0397 The mbuf allocation failed\n"); 3214 goto out_free_dmabuf; 3215 } 3216 3217 /* Cleanup any outstanding ELS commands */ 3218 lpfc_els_flush_all_cmd(phba); 3219 3220 /* Block ELS IOCBs until we have done process link event */ 3221 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3222 3223 /* Update link event statistics */ 3224 phba->sli.slistat.link_event++; 3225 3226 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3227 lpfc_read_topology(phba, pmb, mp); 3228 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3229 pmb->vport = phba->pport; 3230 3231 /* Keep the link status for extra SLI4 state machine reference */ 3232 phba->sli4_hba.link_state.speed = 3233 bf_get(lpfc_acqe_link_speed, acqe_link); 3234 phba->sli4_hba.link_state.duplex = 3235 bf_get(lpfc_acqe_link_duplex, acqe_link); 3236 phba->sli4_hba.link_state.status = 3237 bf_get(lpfc_acqe_link_status, acqe_link); 3238 phba->sli4_hba.link_state.type = 3239 bf_get(lpfc_acqe_link_type, acqe_link); 3240 phba->sli4_hba.link_state.number = 3241 bf_get(lpfc_acqe_link_number, acqe_link); 3242 phba->sli4_hba.link_state.fault = 3243 bf_get(lpfc_acqe_link_fault, acqe_link); 3244 phba->sli4_hba.link_state.logical_speed = 3245 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3246 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3247 "2900 Async FC/FCoE Link event - Speed:%dGBit " 3248 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 3249 "Logical speed:%dMbps Fault:%d\n", 3250 phba->sli4_hba.link_state.speed, 3251 phba->sli4_hba.link_state.topology, 3252 phba->sli4_hba.link_state.status, 3253 phba->sli4_hba.link_state.type, 3254 phba->sli4_hba.link_state.number, 3255 phba->sli4_hba.link_state.logical_speed * 10, 3256 phba->sli4_hba.link_state.fault); 3257 /* 3258 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3259 * topology info. Note: Optional for non FC-AL ports. 3260 */ 3261 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3262 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3263 if (rc == MBX_NOT_FINISHED) 3264 goto out_free_dmabuf; 3265 return; 3266 } 3267 /* 3268 * For FCoE Mode: fill in all the topology information we need and call 3269 * the READ_TOPOLOGY completion routine to continue without actually 3270 * sending the READ_TOPOLOGY mailbox command to the port. 3271 */ 3272 /* Parse and translate status field */ 3273 mb = &pmb->u.mb; 3274 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3275 3276 /* Parse and translate link attention fields */ 3277 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3278 la->eventTag = acqe_link->event_tag; 3279 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 3280 bf_set(lpfc_mbx_read_top_link_spd, la, 3281 lpfc_sli4_parse_latt_link_speed(phba, acqe_link)); 3282 3283 /* Fake the the following irrelvant fields */ 3284 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 3285 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 3286 bf_set(lpfc_mbx_read_top_il, la, 0); 3287 bf_set(lpfc_mbx_read_top_pb, la, 0); 3288 bf_set(lpfc_mbx_read_top_fa, la, 0); 3289 bf_set(lpfc_mbx_read_top_mm, la, 0); 3290 3291 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3292 lpfc_mbx_cmpl_read_topology(phba, pmb); 3293 3294 return; 3295 3296 out_free_dmabuf: 3297 kfree(mp); 3298 out_free_pmb: 3299 mempool_free(pmb, phba->mbox_mem_pool); 3300 } 3301 3302 /** 3303 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 3304 * @phba: pointer to lpfc hba data structure. 3305 * @acqe_fc: pointer to the async fc completion queue entry. 3306 * 3307 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 3308 * that the event was received and then issue a read_topology mailbox command so 3309 * that the rest of the driver will treat it the same as SLI3. 3310 **/ 3311 static void 3312 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 3313 { 3314 struct lpfc_dmabuf *mp; 3315 LPFC_MBOXQ_t *pmb; 3316 int rc; 3317 3318 if (bf_get(lpfc_trailer_type, acqe_fc) != 3319 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 3320 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3321 "2895 Non FC link Event detected.(%d)\n", 3322 bf_get(lpfc_trailer_type, acqe_fc)); 3323 return; 3324 } 3325 /* Keep the link status for extra SLI4 state machine reference */ 3326 phba->sli4_hba.link_state.speed = 3327 bf_get(lpfc_acqe_fc_la_speed, acqe_fc); 3328 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 3329 phba->sli4_hba.link_state.topology = 3330 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 3331 phba->sli4_hba.link_state.status = 3332 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 3333 phba->sli4_hba.link_state.type = 3334 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 3335 phba->sli4_hba.link_state.number = 3336 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 3337 phba->sli4_hba.link_state.fault = 3338 bf_get(lpfc_acqe_link_fault, acqe_fc); 3339 phba->sli4_hba.link_state.logical_speed = 3340 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 3341 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3342 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 3343 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 3344 "%dMbps Fault:%d\n", 3345 phba->sli4_hba.link_state.speed, 3346 phba->sli4_hba.link_state.topology, 3347 phba->sli4_hba.link_state.status, 3348 phba->sli4_hba.link_state.type, 3349 phba->sli4_hba.link_state.number, 3350 phba->sli4_hba.link_state.logical_speed * 10, 3351 phba->sli4_hba.link_state.fault); 3352 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3353 if (!pmb) { 3354 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3355 "2897 The mboxq allocation failed\n"); 3356 return; 3357 } 3358 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3359 if (!mp) { 3360 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3361 "2898 The lpfc_dmabuf allocation failed\n"); 3362 goto out_free_pmb; 3363 } 3364 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3365 if (!mp->virt) { 3366 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3367 "2899 The mbuf allocation failed\n"); 3368 goto out_free_dmabuf; 3369 } 3370 3371 /* Cleanup any outstanding ELS commands */ 3372 lpfc_els_flush_all_cmd(phba); 3373 3374 /* Block ELS IOCBs until we have done process link event */ 3375 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3376 3377 /* Update link event statistics */ 3378 phba->sli.slistat.link_event++; 3379 3380 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3381 lpfc_read_topology(phba, pmb, mp); 3382 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3383 pmb->vport = phba->pport; 3384 3385 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3386 if (rc == MBX_NOT_FINISHED) 3387 goto out_free_dmabuf; 3388 return; 3389 3390 out_free_dmabuf: 3391 kfree(mp); 3392 out_free_pmb: 3393 mempool_free(pmb, phba->mbox_mem_pool); 3394 } 3395 3396 /** 3397 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 3398 * @phba: pointer to lpfc hba data structure. 3399 * @acqe_fc: pointer to the async SLI completion queue entry. 3400 * 3401 * This routine is to handle the SLI4 asynchronous SLI events. 3402 **/ 3403 static void 3404 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 3405 { 3406 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3407 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 3408 "x%08x SLI Event Type:%d", 3409 acqe_sli->event_data1, acqe_sli->event_data2, 3410 bf_get(lpfc_trailer_type, acqe_sli)); 3411 return; 3412 } 3413 3414 /** 3415 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3416 * @vport: pointer to vport data structure. 3417 * 3418 * This routine is to perform Clear Virtual Link (CVL) on a vport in 3419 * response to a CVL event. 3420 * 3421 * Return the pointer to the ndlp with the vport if successful, otherwise 3422 * return NULL. 3423 **/ 3424 static struct lpfc_nodelist * 3425 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3426 { 3427 struct lpfc_nodelist *ndlp; 3428 struct Scsi_Host *shost; 3429 struct lpfc_hba *phba; 3430 3431 if (!vport) 3432 return NULL; 3433 phba = vport->phba; 3434 if (!phba) 3435 return NULL; 3436 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3437 if (!ndlp) { 3438 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3439 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3440 if (!ndlp) 3441 return 0; 3442 lpfc_nlp_init(vport, ndlp, Fabric_DID); 3443 /* Set the node type */ 3444 ndlp->nlp_type |= NLP_FABRIC; 3445 /* Put ndlp onto node list */ 3446 lpfc_enqueue_node(vport, ndlp); 3447 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3448 /* re-setup ndlp without removing from node list */ 3449 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3450 if (!ndlp) 3451 return 0; 3452 } 3453 if ((phba->pport->port_state < LPFC_FLOGI) && 3454 (phba->pport->port_state != LPFC_VPORT_FAILED)) 3455 return NULL; 3456 /* If virtual link is not yet instantiated ignore CVL */ 3457 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 3458 && (vport->port_state != LPFC_VPORT_FAILED)) 3459 return NULL; 3460 shost = lpfc_shost_from_vport(vport); 3461 if (!shost) 3462 return NULL; 3463 lpfc_linkdown_port(vport); 3464 lpfc_cleanup_pending_mbox(vport); 3465 spin_lock_irq(shost->host_lock); 3466 vport->fc_flag |= FC_VPORT_CVL_RCVD; 3467 spin_unlock_irq(shost->host_lock); 3468 3469 return ndlp; 3470 } 3471 3472 /** 3473 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3474 * @vport: pointer to lpfc hba data structure. 3475 * 3476 * This routine is to perform Clear Virtual Link (CVL) on all vports in 3477 * response to a FCF dead event. 3478 **/ 3479 static void 3480 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3481 { 3482 struct lpfc_vport **vports; 3483 int i; 3484 3485 vports = lpfc_create_vport_work_array(phba); 3486 if (vports) 3487 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3488 lpfc_sli4_perform_vport_cvl(vports[i]); 3489 lpfc_destroy_vport_work_array(phba, vports); 3490 } 3491 3492 /** 3493 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 3494 * @phba: pointer to lpfc hba data structure. 3495 * @acqe_link: pointer to the async fcoe completion queue entry. 3496 * 3497 * This routine is to handle the SLI4 asynchronous fcoe event. 3498 **/ 3499 static void 3500 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 3501 struct lpfc_acqe_fip *acqe_fip) 3502 { 3503 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 3504 int rc; 3505 struct lpfc_vport *vport; 3506 struct lpfc_nodelist *ndlp; 3507 struct Scsi_Host *shost; 3508 int active_vlink_present; 3509 struct lpfc_vport **vports; 3510 int i; 3511 3512 phba->fc_eventTag = acqe_fip->event_tag; 3513 phba->fcoe_eventtag = acqe_fip->event_tag; 3514 switch (event_type) { 3515 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 3516 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 3517 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 3518 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3519 LOG_DISCOVERY, 3520 "2546 New FCF event, evt_tag:x%x, " 3521 "index:x%x\n", 3522 acqe_fip->event_tag, 3523 acqe_fip->index); 3524 else 3525 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3526 LOG_DISCOVERY, 3527 "2788 FCF param modified event, " 3528 "evt_tag:x%x, index:x%x\n", 3529 acqe_fip->event_tag, 3530 acqe_fip->index); 3531 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3532 /* 3533 * During period of FCF discovery, read the FCF 3534 * table record indexed by the event to update 3535 * FCF roundrobin failover eligible FCF bmask. 3536 */ 3537 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3538 LOG_DISCOVERY, 3539 "2779 Read FCF (x%x) for updating " 3540 "roundrobin FCF failover bmask\n", 3541 acqe_fip->index); 3542 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 3543 } 3544 3545 /* If the FCF discovery is in progress, do nothing. */ 3546 spin_lock_irq(&phba->hbalock); 3547 if (phba->hba_flag & FCF_TS_INPROG) { 3548 spin_unlock_irq(&phba->hbalock); 3549 break; 3550 } 3551 /* If fast FCF failover rescan event is pending, do nothing */ 3552 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3553 spin_unlock_irq(&phba->hbalock); 3554 break; 3555 } 3556 3557 /* If the FCF has been in discovered state, do nothing. */ 3558 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3559 spin_unlock_irq(&phba->hbalock); 3560 break; 3561 } 3562 spin_unlock_irq(&phba->hbalock); 3563 3564 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3565 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3566 "2770 Start FCF table scan per async FCF " 3567 "event, evt_tag:x%x, index:x%x\n", 3568 acqe_fip->event_tag, acqe_fip->index); 3569 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3570 LPFC_FCOE_FCF_GET_FIRST); 3571 if (rc) 3572 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3573 "2547 Issue FCF scan read FCF mailbox " 3574 "command failed (x%x)\n", rc); 3575 break; 3576 3577 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 3578 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3579 "2548 FCF Table full count 0x%x tag 0x%x\n", 3580 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 3581 acqe_fip->event_tag); 3582 break; 3583 3584 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 3585 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3586 "2549 FCF (x%x) disconnected from network, " 3587 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 3588 /* 3589 * If we are in the middle of FCF failover process, clear 3590 * the corresponding FCF bit in the roundrobin bitmap. 3591 */ 3592 spin_lock_irq(&phba->hbalock); 3593 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3594 spin_unlock_irq(&phba->hbalock); 3595 /* Update FLOGI FCF failover eligible FCF bmask */ 3596 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 3597 break; 3598 } 3599 spin_unlock_irq(&phba->hbalock); 3600 3601 /* If the event is not for currently used fcf do nothing */ 3602 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 3603 break; 3604 3605 /* 3606 * Otherwise, request the port to rediscover the entire FCF 3607 * table for a fast recovery from case that the current FCF 3608 * is no longer valid as we are not in the middle of FCF 3609 * failover process already. 3610 */ 3611 spin_lock_irq(&phba->hbalock); 3612 /* Mark the fast failover process in progress */ 3613 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 3614 spin_unlock_irq(&phba->hbalock); 3615 3616 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3617 "2771 Start FCF fast failover process due to " 3618 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3619 "\n", acqe_fip->event_tag, acqe_fip->index); 3620 rc = lpfc_sli4_redisc_fcf_table(phba); 3621 if (rc) { 3622 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3623 LOG_DISCOVERY, 3624 "2772 Issue FCF rediscover mabilbox " 3625 "command failed, fail through to FCF " 3626 "dead event\n"); 3627 spin_lock_irq(&phba->hbalock); 3628 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 3629 spin_unlock_irq(&phba->hbalock); 3630 /* 3631 * Last resort will fail over by treating this 3632 * as a link down to FCF registration. 3633 */ 3634 lpfc_sli4_fcf_dead_failthrough(phba); 3635 } else { 3636 /* Reset FCF roundrobin bmask for new discovery */ 3637 lpfc_sli4_clear_fcf_rr_bmask(phba); 3638 /* 3639 * Handling fast FCF failover to a DEAD FCF event is 3640 * considered equalivant to receiving CVL to all vports. 3641 */ 3642 lpfc_sli4_perform_all_vport_cvl(phba); 3643 } 3644 break; 3645 case LPFC_FIP_EVENT_TYPE_CVL: 3646 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3647 "2718 Clear Virtual Link Received for VPI 0x%x" 3648 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3649 3650 vport = lpfc_find_vport_by_vpid(phba, 3651 acqe_fip->index); 3652 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3653 if (!ndlp) 3654 break; 3655 active_vlink_present = 0; 3656 3657 vports = lpfc_create_vport_work_array(phba); 3658 if (vports) { 3659 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 3660 i++) { 3661 if ((!(vports[i]->fc_flag & 3662 FC_VPORT_CVL_RCVD)) && 3663 (vports[i]->port_state > LPFC_FDISC)) { 3664 active_vlink_present = 1; 3665 break; 3666 } 3667 } 3668 lpfc_destroy_vport_work_array(phba, vports); 3669 } 3670 3671 if (active_vlink_present) { 3672 /* 3673 * If there are other active VLinks present, 3674 * re-instantiate the Vlink using FDISC. 3675 */ 3676 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3677 shost = lpfc_shost_from_vport(vport); 3678 spin_lock_irq(shost->host_lock); 3679 ndlp->nlp_flag |= NLP_DELAY_TMO; 3680 spin_unlock_irq(shost->host_lock); 3681 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 3682 vport->port_state = LPFC_FDISC; 3683 } else { 3684 /* 3685 * Otherwise, we request port to rediscover 3686 * the entire FCF table for a fast recovery 3687 * from possible case that the current FCF 3688 * is no longer valid if we are not already 3689 * in the FCF failover process. 3690 */ 3691 spin_lock_irq(&phba->hbalock); 3692 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3693 spin_unlock_irq(&phba->hbalock); 3694 break; 3695 } 3696 /* Mark the fast failover process in progress */ 3697 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 3698 spin_unlock_irq(&phba->hbalock); 3699 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3700 LOG_DISCOVERY, 3701 "2773 Start FCF failover per CVL, " 3702 "evt_tag:x%x\n", acqe_fip->event_tag); 3703 rc = lpfc_sli4_redisc_fcf_table(phba); 3704 if (rc) { 3705 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3706 LOG_DISCOVERY, 3707 "2774 Issue FCF rediscover " 3708 "mabilbox command failed, " 3709 "through to CVL event\n"); 3710 spin_lock_irq(&phba->hbalock); 3711 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 3712 spin_unlock_irq(&phba->hbalock); 3713 /* 3714 * Last resort will be re-try on the 3715 * the current registered FCF entry. 3716 */ 3717 lpfc_retry_pport_discovery(phba); 3718 } else 3719 /* 3720 * Reset FCF roundrobin bmask for new 3721 * discovery. 3722 */ 3723 lpfc_sli4_clear_fcf_rr_bmask(phba); 3724 } 3725 break; 3726 default: 3727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3728 "0288 Unknown FCoE event type 0x%x event tag " 3729 "0x%x\n", event_type, acqe_fip->event_tag); 3730 break; 3731 } 3732 } 3733 3734 /** 3735 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 3736 * @phba: pointer to lpfc hba data structure. 3737 * @acqe_link: pointer to the async dcbx completion queue entry. 3738 * 3739 * This routine is to handle the SLI4 asynchronous dcbx event. 3740 **/ 3741 static void 3742 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 3743 struct lpfc_acqe_dcbx *acqe_dcbx) 3744 { 3745 phba->fc_eventTag = acqe_dcbx->event_tag; 3746 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3747 "0290 The SLI4 DCBX asynchronous event is not " 3748 "handled yet\n"); 3749 } 3750 3751 /** 3752 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 3753 * @phba: pointer to lpfc hba data structure. 3754 * @acqe_link: pointer to the async grp5 completion queue entry. 3755 * 3756 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 3757 * is an asynchronous notified of a logical link speed change. The Port 3758 * reports the logical link speed in units of 10Mbps. 3759 **/ 3760 static void 3761 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 3762 struct lpfc_acqe_grp5 *acqe_grp5) 3763 { 3764 uint16_t prev_ll_spd; 3765 3766 phba->fc_eventTag = acqe_grp5->event_tag; 3767 phba->fcoe_eventtag = acqe_grp5->event_tag; 3768 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 3769 phba->sli4_hba.link_state.logical_speed = 3770 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); 3771 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3772 "2789 GRP5 Async Event: Updating logical link speed " 3773 "from %dMbps to %dMbps\n", (prev_ll_spd * 10), 3774 (phba->sli4_hba.link_state.logical_speed*10)); 3775 } 3776 3777 /** 3778 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 3779 * @phba: pointer to lpfc hba data structure. 3780 * 3781 * This routine is invoked by the worker thread to process all the pending 3782 * SLI4 asynchronous events. 3783 **/ 3784 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 3785 { 3786 struct lpfc_cq_event *cq_event; 3787 3788 /* First, declare the async event has been handled */ 3789 spin_lock_irq(&phba->hbalock); 3790 phba->hba_flag &= ~ASYNC_EVENT; 3791 spin_unlock_irq(&phba->hbalock); 3792 /* Now, handle all the async events */ 3793 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 3794 /* Get the first event from the head of the event queue */ 3795 spin_lock_irq(&phba->hbalock); 3796 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 3797 cq_event, struct lpfc_cq_event, list); 3798 spin_unlock_irq(&phba->hbalock); 3799 /* Process the asynchronous event */ 3800 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 3801 case LPFC_TRAILER_CODE_LINK: 3802 lpfc_sli4_async_link_evt(phba, 3803 &cq_event->cqe.acqe_link); 3804 break; 3805 case LPFC_TRAILER_CODE_FCOE: 3806 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 3807 break; 3808 case LPFC_TRAILER_CODE_DCBX: 3809 lpfc_sli4_async_dcbx_evt(phba, 3810 &cq_event->cqe.acqe_dcbx); 3811 break; 3812 case LPFC_TRAILER_CODE_GRP5: 3813 lpfc_sli4_async_grp5_evt(phba, 3814 &cq_event->cqe.acqe_grp5); 3815 break; 3816 case LPFC_TRAILER_CODE_FC: 3817 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 3818 break; 3819 case LPFC_TRAILER_CODE_SLI: 3820 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 3821 break; 3822 default: 3823 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3824 "1804 Invalid asynchrous event code: " 3825 "x%x\n", bf_get(lpfc_trailer_code, 3826 &cq_event->cqe.mcqe_cmpl)); 3827 break; 3828 } 3829 /* Free the completion event processed to the free pool */ 3830 lpfc_sli4_cq_event_release(phba, cq_event); 3831 } 3832 } 3833 3834 /** 3835 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 3836 * @phba: pointer to lpfc hba data structure. 3837 * 3838 * This routine is invoked by the worker thread to process FCF table 3839 * rediscovery pending completion event. 3840 **/ 3841 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 3842 { 3843 int rc; 3844 3845 spin_lock_irq(&phba->hbalock); 3846 /* Clear FCF rediscovery timeout event */ 3847 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 3848 /* Clear driver fast failover FCF record flag */ 3849 phba->fcf.failover_rec.flag = 0; 3850 /* Set state for FCF fast failover */ 3851 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 3852 spin_unlock_irq(&phba->hbalock); 3853 3854 /* Scan FCF table from the first entry to re-discover SAN */ 3855 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3856 "2777 Start post-quiescent FCF table scan\n"); 3857 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3858 if (rc) 3859 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3860 "2747 Issue FCF scan read FCF mailbox " 3861 "command failed 0x%x\n", rc); 3862 } 3863 3864 /** 3865 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3866 * @phba: pointer to lpfc hba data structure. 3867 * @dev_grp: The HBA PCI-Device group number. 3868 * 3869 * This routine is invoked to set up the per HBA PCI-Device group function 3870 * API jump table entries. 3871 * 3872 * Return: 0 if success, otherwise -ENODEV 3873 **/ 3874 int 3875 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 3876 { 3877 int rc; 3878 3879 /* Set up lpfc PCI-device group */ 3880 phba->pci_dev_grp = dev_grp; 3881 3882 /* The LPFC_PCI_DEV_OC uses SLI4 */ 3883 if (dev_grp == LPFC_PCI_DEV_OC) 3884 phba->sli_rev = LPFC_SLI_REV4; 3885 3886 /* Set up device INIT API function jump table */ 3887 rc = lpfc_init_api_table_setup(phba, dev_grp); 3888 if (rc) 3889 return -ENODEV; 3890 /* Set up SCSI API function jump table */ 3891 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 3892 if (rc) 3893 return -ENODEV; 3894 /* Set up SLI API function jump table */ 3895 rc = lpfc_sli_api_table_setup(phba, dev_grp); 3896 if (rc) 3897 return -ENODEV; 3898 /* Set up MBOX API function jump table */ 3899 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 3900 if (rc) 3901 return -ENODEV; 3902 3903 return 0; 3904 } 3905 3906 /** 3907 * lpfc_log_intr_mode - Log the active interrupt mode 3908 * @phba: pointer to lpfc hba data structure. 3909 * @intr_mode: active interrupt mode adopted. 3910 * 3911 * This routine it invoked to log the currently used active interrupt mode 3912 * to the device. 3913 **/ 3914 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 3915 { 3916 switch (intr_mode) { 3917 case 0: 3918 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3919 "0470 Enable INTx interrupt mode.\n"); 3920 break; 3921 case 1: 3922 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3923 "0481 Enabled MSI interrupt mode.\n"); 3924 break; 3925 case 2: 3926 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3927 "0480 Enabled MSI-X interrupt mode.\n"); 3928 break; 3929 default: 3930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3931 "0482 Illegal interrupt mode.\n"); 3932 break; 3933 } 3934 return; 3935 } 3936 3937 /** 3938 * lpfc_enable_pci_dev - Enable a generic PCI device. 3939 * @phba: pointer to lpfc hba data structure. 3940 * 3941 * This routine is invoked to enable the PCI device that is common to all 3942 * PCI devices. 3943 * 3944 * Return codes 3945 * 0 - successful 3946 * other values - error 3947 **/ 3948 static int 3949 lpfc_enable_pci_dev(struct lpfc_hba *phba) 3950 { 3951 struct pci_dev *pdev; 3952 int bars; 3953 3954 /* Obtain PCI device reference */ 3955 if (!phba->pcidev) 3956 goto out_error; 3957 else 3958 pdev = phba->pcidev; 3959 /* Select PCI BARs */ 3960 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3961 /* Enable PCI device */ 3962 if (pci_enable_device_mem(pdev)) 3963 goto out_error; 3964 /* Request PCI resource for the device */ 3965 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 3966 goto out_disable_device; 3967 /* Set up device as PCI master and save state for EEH */ 3968 pci_set_master(pdev); 3969 pci_try_set_mwi(pdev); 3970 pci_save_state(pdev); 3971 3972 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 3973 if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) 3974 pdev->needs_freset = 1; 3975 3976 return 0; 3977 3978 out_disable_device: 3979 pci_disable_device(pdev); 3980 out_error: 3981 return -ENODEV; 3982 } 3983 3984 /** 3985 * lpfc_disable_pci_dev - Disable a generic PCI device. 3986 * @phba: pointer to lpfc hba data structure. 3987 * 3988 * This routine is invoked to disable the PCI device that is common to all 3989 * PCI devices. 3990 **/ 3991 static void 3992 lpfc_disable_pci_dev(struct lpfc_hba *phba) 3993 { 3994 struct pci_dev *pdev; 3995 int bars; 3996 3997 /* Obtain PCI device reference */ 3998 if (!phba->pcidev) 3999 return; 4000 else 4001 pdev = phba->pcidev; 4002 /* Select PCI BARs */ 4003 bars = pci_select_bars(pdev, IORESOURCE_MEM); 4004 /* Release PCI resource and disable PCI device */ 4005 pci_release_selected_regions(pdev, bars); 4006 pci_disable_device(pdev); 4007 /* Null out PCI private reference to driver */ 4008 pci_set_drvdata(pdev, NULL); 4009 4010 return; 4011 } 4012 4013 /** 4014 * lpfc_reset_hba - Reset a hba 4015 * @phba: pointer to lpfc hba data structure. 4016 * 4017 * This routine is invoked to reset a hba device. It brings the HBA 4018 * offline, performs a board restart, and then brings the board back 4019 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 4020 * on outstanding mailbox commands. 4021 **/ 4022 void 4023 lpfc_reset_hba(struct lpfc_hba *phba) 4024 { 4025 /* If resets are disabled then set error state and return. */ 4026 if (!phba->cfg_enable_hba_reset) { 4027 phba->link_state = LPFC_HBA_ERROR; 4028 return; 4029 } 4030 lpfc_offline_prep(phba); 4031 lpfc_offline(phba); 4032 lpfc_sli_brdrestart(phba); 4033 lpfc_online(phba); 4034 lpfc_unblock_mgmt_io(phba); 4035 } 4036 4037 /** 4038 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 4039 * @phba: pointer to lpfc hba data structure. 4040 * 4041 * This function enables the PCI SR-IOV virtual functions to a physical 4042 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 4043 * enable the number of virtual functions to the physical function. As 4044 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 4045 * API call does not considered as an error condition for most of the device. 4046 **/ 4047 uint16_t 4048 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 4049 { 4050 struct pci_dev *pdev = phba->pcidev; 4051 uint16_t nr_virtfn; 4052 int pos; 4053 4054 if (!pdev->is_physfn) 4055 return 0; 4056 4057 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4058 if (pos == 0) 4059 return 0; 4060 4061 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 4062 return nr_virtfn; 4063 } 4064 4065 /** 4066 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 4067 * @phba: pointer to lpfc hba data structure. 4068 * @nr_vfn: number of virtual functions to be enabled. 4069 * 4070 * This function enables the PCI SR-IOV virtual functions to a physical 4071 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 4072 * enable the number of virtual functions to the physical function. As 4073 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 4074 * API call does not considered as an error condition for most of the device. 4075 **/ 4076 int 4077 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4078 { 4079 struct pci_dev *pdev = phba->pcidev; 4080 uint16_t max_nr_vfn; 4081 int rc; 4082 4083 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 4084 if (nr_vfn > max_nr_vfn) { 4085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4086 "3057 Requested vfs (%d) greater than " 4087 "supported vfs (%d)", nr_vfn, max_nr_vfn); 4088 return -EINVAL; 4089 } 4090 4091 rc = pci_enable_sriov(pdev, nr_vfn); 4092 if (rc) { 4093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4094 "2806 Failed to enable sriov on this device " 4095 "with vfn number nr_vf:%d, rc:%d\n", 4096 nr_vfn, rc); 4097 } else 4098 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4099 "2807 Successful enable sriov on this device " 4100 "with vfn number nr_vf:%d\n", nr_vfn); 4101 return rc; 4102 } 4103 4104 /** 4105 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 4106 * @phba: pointer to lpfc hba data structure. 4107 * 4108 * This routine is invoked to set up the driver internal resources specific to 4109 * support the SLI-3 HBA device it attached to. 4110 * 4111 * Return codes 4112 * 0 - successful 4113 * other values - error 4114 **/ 4115 static int 4116 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 4117 { 4118 struct lpfc_sli *psli; 4119 int rc; 4120 4121 /* 4122 * Initialize timers used by driver 4123 */ 4124 4125 /* Heartbeat timer */ 4126 init_timer(&phba->hb_tmofunc); 4127 phba->hb_tmofunc.function = lpfc_hb_timeout; 4128 phba->hb_tmofunc.data = (unsigned long)phba; 4129 4130 psli = &phba->sli; 4131 /* MBOX heartbeat timer */ 4132 init_timer(&psli->mbox_tmo); 4133 psli->mbox_tmo.function = lpfc_mbox_timeout; 4134 psli->mbox_tmo.data = (unsigned long) phba; 4135 /* FCP polling mode timer */ 4136 init_timer(&phba->fcp_poll_timer); 4137 phba->fcp_poll_timer.function = lpfc_poll_timeout; 4138 phba->fcp_poll_timer.data = (unsigned long) phba; 4139 /* Fabric block timer */ 4140 init_timer(&phba->fabric_block_timer); 4141 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 4142 phba->fabric_block_timer.data = (unsigned long) phba; 4143 /* EA polling mode timer */ 4144 init_timer(&phba->eratt_poll); 4145 phba->eratt_poll.function = lpfc_poll_eratt; 4146 phba->eratt_poll.data = (unsigned long) phba; 4147 4148 /* Host attention work mask setup */ 4149 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 4150 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 4151 4152 /* Get all the module params for configuring this host */ 4153 lpfc_get_cfgparam(phba); 4154 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 4155 phba->menlo_flag |= HBA_MENLO_SUPPORT; 4156 /* check for menlo minimum sg count */ 4157 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 4158 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 4159 } 4160 4161 /* 4162 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4163 * used to create the sg_dma_buf_pool must be dynamically calculated. 4164 * 2 segments are added since the IOCB needs a command and response bde. 4165 */ 4166 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 4167 sizeof(struct fcp_rsp) + 4168 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 4169 4170 if (phba->cfg_enable_bg) { 4171 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 4172 phba->cfg_sg_dma_buf_size += 4173 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 4174 } 4175 4176 /* Also reinitialize the host templates with new values. */ 4177 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4178 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4179 4180 phba->max_vpi = LPFC_MAX_VPI; 4181 /* This will be set to correct value after config_port mbox */ 4182 phba->max_vports = 0; 4183 4184 /* 4185 * Initialize the SLI Layer to run with lpfc HBAs. 4186 */ 4187 lpfc_sli_setup(phba); 4188 lpfc_sli_queue_setup(phba); 4189 4190 /* Allocate device driver memory */ 4191 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 4192 return -ENOMEM; 4193 4194 /* 4195 * Enable sr-iov virtual functions if supported and configured 4196 * through the module parameter. 4197 */ 4198 if (phba->cfg_sriov_nr_virtfn > 0) { 4199 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 4200 phba->cfg_sriov_nr_virtfn); 4201 if (rc) { 4202 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4203 "2808 Requested number of SR-IOV " 4204 "virtual functions (%d) is not " 4205 "supported\n", 4206 phba->cfg_sriov_nr_virtfn); 4207 phba->cfg_sriov_nr_virtfn = 0; 4208 } 4209 } 4210 4211 return 0; 4212 } 4213 4214 /** 4215 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 4216 * @phba: pointer to lpfc hba data structure. 4217 * 4218 * This routine is invoked to unset the driver internal resources set up 4219 * specific for supporting the SLI-3 HBA device it attached to. 4220 **/ 4221 static void 4222 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 4223 { 4224 /* Free device driver memory allocated */ 4225 lpfc_mem_free_all(phba); 4226 4227 return; 4228 } 4229 4230 /** 4231 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 4232 * @phba: pointer to lpfc hba data structure. 4233 * 4234 * This routine is invoked to set up the driver internal resources specific to 4235 * support the SLI-4 HBA device it attached to. 4236 * 4237 * Return codes 4238 * 0 - successful 4239 * other values - error 4240 **/ 4241 static int 4242 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4243 { 4244 struct lpfc_sli *psli; 4245 LPFC_MBOXQ_t *mboxq; 4246 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4247 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4248 struct lpfc_mqe *mqe; 4249 int longs, sli_family; 4250 4251 /* Before proceed, wait for POST done and device ready */ 4252 rc = lpfc_sli4_post_status_check(phba); 4253 if (rc) 4254 return -ENODEV; 4255 4256 /* 4257 * Initialize timers used by driver 4258 */ 4259 4260 /* Heartbeat timer */ 4261 init_timer(&phba->hb_tmofunc); 4262 phba->hb_tmofunc.function = lpfc_hb_timeout; 4263 phba->hb_tmofunc.data = (unsigned long)phba; 4264 init_timer(&phba->rrq_tmr); 4265 phba->rrq_tmr.function = lpfc_rrq_timeout; 4266 phba->rrq_tmr.data = (unsigned long)phba; 4267 4268 psli = &phba->sli; 4269 /* MBOX heartbeat timer */ 4270 init_timer(&psli->mbox_tmo); 4271 psli->mbox_tmo.function = lpfc_mbox_timeout; 4272 psli->mbox_tmo.data = (unsigned long) phba; 4273 /* Fabric block timer */ 4274 init_timer(&phba->fabric_block_timer); 4275 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 4276 phba->fabric_block_timer.data = (unsigned long) phba; 4277 /* EA polling mode timer */ 4278 init_timer(&phba->eratt_poll); 4279 phba->eratt_poll.function = lpfc_poll_eratt; 4280 phba->eratt_poll.data = (unsigned long) phba; 4281 /* FCF rediscover timer */ 4282 init_timer(&phba->fcf.redisc_wait); 4283 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 4284 phba->fcf.redisc_wait.data = (unsigned long)phba; 4285 4286 /* 4287 * Control structure for handling external multi-buffer mailbox 4288 * command pass-through. 4289 */ 4290 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 4291 sizeof(struct lpfc_mbox_ext_buf_ctx)); 4292 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 4293 4294 /* 4295 * We need to do a READ_CONFIG mailbox command here before 4296 * calling lpfc_get_cfgparam. For VFs this will report the 4297 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4298 * All of the resources allocated 4299 * for this Port are tied to these values. 4300 */ 4301 /* Get all the module params for configuring this host */ 4302 lpfc_get_cfgparam(phba); 4303 phba->max_vpi = LPFC_MAX_VPI; 4304 /* This will be set to correct value after the read_config mbox */ 4305 phba->max_vports = 0; 4306 4307 /* Program the default value of vlan_id and fc_map */ 4308 phba->valid_vlan = 0; 4309 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4310 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4311 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4312 4313 /* 4314 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4315 * used to create the sg_dma_buf_pool must be dynamically calculated. 4316 * 2 segments are added since the IOCB needs a command and response bde. 4317 * To insure that the scsi sgl does not cross a 4k page boundary only 4318 * sgl sizes of must be a power of 2. 4319 */ 4320 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4321 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); 4322 4323 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 4324 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4325 switch (sli_family) { 4326 case LPFC_SLI_INTF_FAMILY_BE2: 4327 case LPFC_SLI_INTF_FAMILY_BE3: 4328 /* There is a single hint for BE - 2 pages per BPL. */ 4329 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == 4330 LPFC_SLI_INTF_SLI_HINT1_1) 4331 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4332 break; 4333 case LPFC_SLI_INTF_FAMILY_LNCR_A0: 4334 case LPFC_SLI_INTF_FAMILY_LNCR_B0: 4335 default: 4336 break; 4337 } 4338 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 4339 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 4340 dma_buf_size = dma_buf_size << 1) 4341 ; 4342 if (dma_buf_size == max_buf_size) 4343 phba->cfg_sg_seg_cnt = (dma_buf_size - 4344 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - 4345 (2 * sizeof(struct sli4_sge))) / 4346 sizeof(struct sli4_sge); 4347 phba->cfg_sg_dma_buf_size = dma_buf_size; 4348 4349 /* Initialize buffer queue management fields */ 4350 hbq_count = lpfc_sli_hbq_count(); 4351 for (i = 0; i < hbq_count; ++i) 4352 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4353 INIT_LIST_HEAD(&phba->rb_pend_list); 4354 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 4355 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 4356 4357 /* 4358 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 4359 */ 4360 /* Initialize the Abort scsi buffer list used by driver */ 4361 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 4362 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 4363 /* This abort list used by worker thread */ 4364 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4365 4366 /* 4367 * Initialize driver internal slow-path work queues 4368 */ 4369 4370 /* Driver internel slow-path CQ Event pool */ 4371 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 4372 /* Response IOCB work queue list */ 4373 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 4374 /* Asynchronous event CQ Event work queue list */ 4375 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 4376 /* Fast-path XRI aborted CQ Event work queue list */ 4377 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 4378 /* Slow-path XRI aborted CQ Event work queue list */ 4379 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 4380 /* Receive queue CQ Event work queue list */ 4381 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4382 4383 /* Initialize extent block lists. */ 4384 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 4385 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 4386 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 4387 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 4388 4389 /* Initialize the driver internal SLI layer lists. */ 4390 lpfc_sli_setup(phba); 4391 lpfc_sli_queue_setup(phba); 4392 4393 /* Allocate device driver memory */ 4394 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 4395 if (rc) 4396 return -ENOMEM; 4397 4398 /* IF Type 2 ports get initialized now. */ 4399 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 4400 LPFC_SLI_INTF_IF_TYPE_2) { 4401 rc = lpfc_pci_function_reset(phba); 4402 if (unlikely(rc)) 4403 return -ENODEV; 4404 } 4405 4406 /* Create the bootstrap mailbox command */ 4407 rc = lpfc_create_bootstrap_mbox(phba); 4408 if (unlikely(rc)) 4409 goto out_free_mem; 4410 4411 /* Set up the host's endian order with the device. */ 4412 rc = lpfc_setup_endian_order(phba); 4413 if (unlikely(rc)) 4414 goto out_free_bsmbx; 4415 4416 /* Set up the hba's configuration parameters. */ 4417 rc = lpfc_sli4_read_config(phba); 4418 if (unlikely(rc)) 4419 goto out_free_bsmbx; 4420 4421 /* IF Type 0 ports get initialized now. */ 4422 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 4423 LPFC_SLI_INTF_IF_TYPE_0) { 4424 rc = lpfc_pci_function_reset(phba); 4425 if (unlikely(rc)) 4426 goto out_free_bsmbx; 4427 } 4428 4429 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4430 GFP_KERNEL); 4431 if (!mboxq) { 4432 rc = -ENOMEM; 4433 goto out_free_bsmbx; 4434 } 4435 4436 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 4437 lpfc_supported_pages(mboxq); 4438 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4439 if (!rc) { 4440 mqe = &mboxq->u.mqe; 4441 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 4442 LPFC_MAX_SUPPORTED_PAGES); 4443 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 4444 switch (pn_page[i]) { 4445 case LPFC_SLI4_PARAMETERS: 4446 phba->sli4_hba.pc_sli4_params.supported = 1; 4447 break; 4448 default: 4449 break; 4450 } 4451 } 4452 /* Read the port's SLI4 Parameters capabilities if supported. */ 4453 if (phba->sli4_hba.pc_sli4_params.supported) 4454 rc = lpfc_pc_sli4_params_get(phba, mboxq); 4455 if (rc) { 4456 mempool_free(mboxq, phba->mbox_mem_pool); 4457 rc = -EIO; 4458 goto out_free_bsmbx; 4459 } 4460 } 4461 /* 4462 * Get sli4 parameters that override parameters from Port capabilities. 4463 * If this call fails, it isn't critical unless the SLI4 parameters come 4464 * back in conflict. 4465 */ 4466 rc = lpfc_get_sli4_parameters(phba, mboxq); 4467 if (rc) { 4468 if (phba->sli4_hba.extents_in_use && 4469 phba->sli4_hba.rpi_hdrs_in_use) { 4470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4471 "2999 Unsupported SLI4 Parameters " 4472 "Extents and RPI headers enabled.\n"); 4473 goto out_free_bsmbx; 4474 } 4475 } 4476 mempool_free(mboxq, phba->mbox_mem_pool); 4477 /* Create all the SLI4 queues */ 4478 rc = lpfc_sli4_queue_create(phba); 4479 if (rc) 4480 goto out_free_bsmbx; 4481 4482 /* Create driver internal CQE event pool */ 4483 rc = lpfc_sli4_cq_event_pool_create(phba); 4484 if (rc) 4485 goto out_destroy_queue; 4486 4487 /* Initialize and populate the iocb list per host */ 4488 rc = lpfc_init_sgl_list(phba); 4489 if (rc) { 4490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4491 "1400 Failed to initialize sgl list.\n"); 4492 goto out_destroy_cq_event_pool; 4493 } 4494 rc = lpfc_init_active_sgl_array(phba); 4495 if (rc) { 4496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4497 "1430 Failed to initialize sgl list.\n"); 4498 goto out_free_sgl_list; 4499 } 4500 rc = lpfc_sli4_init_rpi_hdrs(phba); 4501 if (rc) { 4502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4503 "1432 Failed to initialize rpi headers.\n"); 4504 goto out_free_active_sgl; 4505 } 4506 4507 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 4508 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4509 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4510 GFP_KERNEL); 4511 if (!phba->fcf.fcf_rr_bmask) { 4512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4513 "2759 Failed allocate memory for FCF round " 4514 "robin failover bmask\n"); 4515 rc = -ENOMEM; 4516 goto out_remove_rpi_hdrs; 4517 } 4518 4519 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4520 phba->cfg_fcp_eq_count), GFP_KERNEL); 4521 if (!phba->sli4_hba.fcp_eq_hdl) { 4522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4523 "2572 Failed allocate memory for fast-path " 4524 "per-EQ handle array\n"); 4525 rc = -ENOMEM; 4526 goto out_free_fcf_rr_bmask; 4527 } 4528 4529 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4530 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4531 if (!phba->sli4_hba.msix_entries) { 4532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4533 "2573 Failed allocate memory for msi-x " 4534 "interrupt vector entries\n"); 4535 rc = -ENOMEM; 4536 goto out_free_fcp_eq_hdl; 4537 } 4538 4539 /* 4540 * Enable sr-iov virtual functions if supported and configured 4541 * through the module parameter. 4542 */ 4543 if (phba->cfg_sriov_nr_virtfn > 0) { 4544 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 4545 phba->cfg_sriov_nr_virtfn); 4546 if (rc) { 4547 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4548 "3020 Requested number of SR-IOV " 4549 "virtual functions (%d) is not " 4550 "supported\n", 4551 phba->cfg_sriov_nr_virtfn); 4552 phba->cfg_sriov_nr_virtfn = 0; 4553 } 4554 } 4555 4556 return 0; 4557 4558 out_free_fcp_eq_hdl: 4559 kfree(phba->sli4_hba.fcp_eq_hdl); 4560 out_free_fcf_rr_bmask: 4561 kfree(phba->fcf.fcf_rr_bmask); 4562 out_remove_rpi_hdrs: 4563 lpfc_sli4_remove_rpi_hdrs(phba); 4564 out_free_active_sgl: 4565 lpfc_free_active_sgl(phba); 4566 out_free_sgl_list: 4567 lpfc_free_sgl_list(phba); 4568 out_destroy_cq_event_pool: 4569 lpfc_sli4_cq_event_pool_destroy(phba); 4570 out_destroy_queue: 4571 lpfc_sli4_queue_destroy(phba); 4572 out_free_bsmbx: 4573 lpfc_destroy_bootstrap_mbox(phba); 4574 out_free_mem: 4575 lpfc_mem_free(phba); 4576 return rc; 4577 } 4578 4579 /** 4580 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 4581 * @phba: pointer to lpfc hba data structure. 4582 * 4583 * This routine is invoked to unset the driver internal resources set up 4584 * specific for supporting the SLI-4 HBA device it attached to. 4585 **/ 4586 static void 4587 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 4588 { 4589 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 4590 4591 /* Free memory allocated for msi-x interrupt vector entries */ 4592 kfree(phba->sli4_hba.msix_entries); 4593 4594 /* Free memory allocated for fast-path work queue handles */ 4595 kfree(phba->sli4_hba.fcp_eq_hdl); 4596 4597 /* Free the allocated rpi headers. */ 4598 lpfc_sli4_remove_rpi_hdrs(phba); 4599 lpfc_sli4_remove_rpis(phba); 4600 4601 /* Free eligible FCF index bmask */ 4602 kfree(phba->fcf.fcf_rr_bmask); 4603 4604 /* Free the ELS sgl list */ 4605 lpfc_free_active_sgl(phba); 4606 lpfc_free_sgl_list(phba); 4607 4608 /* Free the SCSI sgl management array */ 4609 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4610 4611 /* Free the SLI4 queues */ 4612 lpfc_sli4_queue_destroy(phba); 4613 4614 /* Free the completion queue EQ event pool */ 4615 lpfc_sli4_cq_event_release_all(phba); 4616 lpfc_sli4_cq_event_pool_destroy(phba); 4617 4618 /* Release resource identifiers. */ 4619 lpfc_sli4_dealloc_resource_identifiers(phba); 4620 4621 /* Free the bsmbx region. */ 4622 lpfc_destroy_bootstrap_mbox(phba); 4623 4624 /* Free the SLI Layer memory with SLI4 HBAs */ 4625 lpfc_mem_free_all(phba); 4626 4627 /* Free the current connect table */ 4628 list_for_each_entry_safe(conn_entry, next_conn_entry, 4629 &phba->fcf_conn_rec_list, list) { 4630 list_del_init(&conn_entry->list); 4631 kfree(conn_entry); 4632 } 4633 4634 return; 4635 } 4636 4637 /** 4638 * lpfc_init_api_table_setup - Set up init api function jump table 4639 * @phba: The hba struct for which this call is being executed. 4640 * @dev_grp: The HBA PCI-Device group number. 4641 * 4642 * This routine sets up the device INIT interface API function jump table 4643 * in @phba struct. 4644 * 4645 * Returns: 0 - success, -ENODEV - failure. 4646 **/ 4647 int 4648 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4649 { 4650 phba->lpfc_hba_init_link = lpfc_hba_init_link; 4651 phba->lpfc_hba_down_link = lpfc_hba_down_link; 4652 phba->lpfc_selective_reset = lpfc_selective_reset; 4653 switch (dev_grp) { 4654 case LPFC_PCI_DEV_LP: 4655 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4656 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 4657 phba->lpfc_stop_port = lpfc_stop_port_s3; 4658 break; 4659 case LPFC_PCI_DEV_OC: 4660 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 4661 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 4662 phba->lpfc_stop_port = lpfc_stop_port_s4; 4663 break; 4664 default: 4665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4666 "1431 Invalid HBA PCI-device group: 0x%x\n", 4667 dev_grp); 4668 return -ENODEV; 4669 break; 4670 } 4671 return 0; 4672 } 4673 4674 /** 4675 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 4676 * @phba: pointer to lpfc hba data structure. 4677 * 4678 * This routine is invoked to set up the driver internal resources before the 4679 * device specific resource setup to support the HBA device it attached to. 4680 * 4681 * Return codes 4682 * 0 - successful 4683 * other values - error 4684 **/ 4685 static int 4686 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 4687 { 4688 /* 4689 * Driver resources common to all SLI revisions 4690 */ 4691 atomic_set(&phba->fast_event_count, 0); 4692 spin_lock_init(&phba->hbalock); 4693 4694 /* Initialize ndlp management spinlock */ 4695 spin_lock_init(&phba->ndlp_lock); 4696 4697 INIT_LIST_HEAD(&phba->port_list); 4698 INIT_LIST_HEAD(&phba->work_list); 4699 init_waitqueue_head(&phba->wait_4_mlo_m_q); 4700 4701 /* Initialize the wait queue head for the kernel thread */ 4702 init_waitqueue_head(&phba->work_waitq); 4703 4704 /* Initialize the scsi buffer list used by driver for scsi IO */ 4705 spin_lock_init(&phba->scsi_buf_list_lock); 4706 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 4707 4708 /* Initialize the fabric iocb list */ 4709 INIT_LIST_HEAD(&phba->fabric_iocb_list); 4710 4711 /* Initialize list to save ELS buffers */ 4712 INIT_LIST_HEAD(&phba->elsbuf); 4713 4714 /* Initialize FCF connection rec list */ 4715 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 4716 4717 return 0; 4718 } 4719 4720 /** 4721 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 4722 * @phba: pointer to lpfc hba data structure. 4723 * 4724 * This routine is invoked to set up the driver internal resources after the 4725 * device specific resource setup to support the HBA device it attached to. 4726 * 4727 * Return codes 4728 * 0 - successful 4729 * other values - error 4730 **/ 4731 static int 4732 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 4733 { 4734 int error; 4735 4736 /* Startup the kernel thread for this host adapter. */ 4737 phba->worker_thread = kthread_run(lpfc_do_work, phba, 4738 "lpfc_worker_%d", phba->brd_no); 4739 if (IS_ERR(phba->worker_thread)) { 4740 error = PTR_ERR(phba->worker_thread); 4741 return error; 4742 } 4743 4744 return 0; 4745 } 4746 4747 /** 4748 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 4749 * @phba: pointer to lpfc hba data structure. 4750 * 4751 * This routine is invoked to unset the driver internal resources set up after 4752 * the device specific resource setup for supporting the HBA device it 4753 * attached to. 4754 **/ 4755 static void 4756 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 4757 { 4758 /* Stop kernel worker thread */ 4759 kthread_stop(phba->worker_thread); 4760 } 4761 4762 /** 4763 * lpfc_free_iocb_list - Free iocb list. 4764 * @phba: pointer to lpfc hba data structure. 4765 * 4766 * This routine is invoked to free the driver's IOCB list and memory. 4767 **/ 4768 static void 4769 lpfc_free_iocb_list(struct lpfc_hba *phba) 4770 { 4771 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 4772 4773 spin_lock_irq(&phba->hbalock); 4774 list_for_each_entry_safe(iocbq_entry, iocbq_next, 4775 &phba->lpfc_iocb_list, list) { 4776 list_del(&iocbq_entry->list); 4777 kfree(iocbq_entry); 4778 phba->total_iocbq_bufs--; 4779 } 4780 spin_unlock_irq(&phba->hbalock); 4781 4782 return; 4783 } 4784 4785 /** 4786 * lpfc_init_iocb_list - Allocate and initialize iocb list. 4787 * @phba: pointer to lpfc hba data structure. 4788 * 4789 * This routine is invoked to allocate and initizlize the driver's IOCB 4790 * list and set up the IOCB tag array accordingly. 4791 * 4792 * Return codes 4793 * 0 - successful 4794 * other values - error 4795 **/ 4796 static int 4797 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 4798 { 4799 struct lpfc_iocbq *iocbq_entry = NULL; 4800 uint16_t iotag; 4801 int i; 4802 4803 /* Initialize and populate the iocb list per host. */ 4804 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 4805 for (i = 0; i < iocb_count; i++) { 4806 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 4807 if (iocbq_entry == NULL) { 4808 printk(KERN_ERR "%s: only allocated %d iocbs of " 4809 "expected %d count. Unloading driver.\n", 4810 __func__, i, LPFC_IOCB_LIST_CNT); 4811 goto out_free_iocbq; 4812 } 4813 4814 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 4815 if (iotag == 0) { 4816 kfree(iocbq_entry); 4817 printk(KERN_ERR "%s: failed to allocate IOTAG. " 4818 "Unloading driver.\n", __func__); 4819 goto out_free_iocbq; 4820 } 4821 iocbq_entry->sli4_lxritag = NO_XRI; 4822 iocbq_entry->sli4_xritag = NO_XRI; 4823 4824 spin_lock_irq(&phba->hbalock); 4825 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 4826 phba->total_iocbq_bufs++; 4827 spin_unlock_irq(&phba->hbalock); 4828 } 4829 4830 return 0; 4831 4832 out_free_iocbq: 4833 lpfc_free_iocb_list(phba); 4834 4835 return -ENOMEM; 4836 } 4837 4838 /** 4839 * lpfc_free_sgl_list - Free sgl list. 4840 * @phba: pointer to lpfc hba data structure. 4841 * 4842 * This routine is invoked to free the driver's sgl list and memory. 4843 **/ 4844 static void 4845 lpfc_free_sgl_list(struct lpfc_hba *phba) 4846 { 4847 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 4848 LIST_HEAD(sglq_list); 4849 4850 spin_lock_irq(&phba->hbalock); 4851 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 4852 spin_unlock_irq(&phba->hbalock); 4853 4854 list_for_each_entry_safe(sglq_entry, sglq_next, 4855 &sglq_list, list) { 4856 list_del(&sglq_entry->list); 4857 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 4858 kfree(sglq_entry); 4859 phba->sli4_hba.total_sglq_bufs--; 4860 } 4861 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4862 } 4863 4864 /** 4865 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 4866 * @phba: pointer to lpfc hba data structure. 4867 * 4868 * This routine is invoked to allocate the driver's active sgl memory. 4869 * This array will hold the sglq_entry's for active IOs. 4870 **/ 4871 static int 4872 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 4873 { 4874 int size; 4875 size = sizeof(struct lpfc_sglq *); 4876 size *= phba->sli4_hba.max_cfg_param.max_xri; 4877 4878 phba->sli4_hba.lpfc_sglq_active_list = 4879 kzalloc(size, GFP_KERNEL); 4880 if (!phba->sli4_hba.lpfc_sglq_active_list) 4881 return -ENOMEM; 4882 return 0; 4883 } 4884 4885 /** 4886 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 4887 * @phba: pointer to lpfc hba data structure. 4888 * 4889 * This routine is invoked to walk through the array of active sglq entries 4890 * and free all of the resources. 4891 * This is just a place holder for now. 4892 **/ 4893 static void 4894 lpfc_free_active_sgl(struct lpfc_hba *phba) 4895 { 4896 kfree(phba->sli4_hba.lpfc_sglq_active_list); 4897 } 4898 4899 /** 4900 * lpfc_init_sgl_list - Allocate and initialize sgl list. 4901 * @phba: pointer to lpfc hba data structure. 4902 * 4903 * This routine is invoked to allocate and initizlize the driver's sgl 4904 * list and set up the sgl xritag tag array accordingly. 4905 * 4906 * Return codes 4907 * 0 - successful 4908 * other values - error 4909 **/ 4910 static int 4911 lpfc_init_sgl_list(struct lpfc_hba *phba) 4912 { 4913 struct lpfc_sglq *sglq_entry = NULL; 4914 int i; 4915 int els_xri_cnt; 4916 4917 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4918 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4919 "2400 ELS XRI count %d.\n", 4920 els_xri_cnt); 4921 /* Initialize and populate the sglq list per host/VF. */ 4922 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4923 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 4924 4925 /* Sanity check on XRI management */ 4926 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { 4927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4928 "2562 No room left for SCSI XRI allocation: " 4929 "max_xri=%d, els_xri=%d\n", 4930 phba->sli4_hba.max_cfg_param.max_xri, 4931 els_xri_cnt); 4932 return -ENOMEM; 4933 } 4934 4935 /* Allocate memory for the ELS XRI management array */ 4936 phba->sli4_hba.lpfc_els_sgl_array = 4937 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), 4938 GFP_KERNEL); 4939 4940 if (!phba->sli4_hba.lpfc_els_sgl_array) { 4941 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4942 "2401 Failed to allocate memory for ELS " 4943 "XRI management array of size %d.\n", 4944 els_xri_cnt); 4945 return -ENOMEM; 4946 } 4947 4948 /* Keep the SCSI XRI into the XRI management array */ 4949 phba->sli4_hba.scsi_xri_max = 4950 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4951 phba->sli4_hba.scsi_xri_cnt = 0; 4952 phba->sli4_hba.lpfc_scsi_psb_array = 4953 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4954 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4955 4956 if (!phba->sli4_hba.lpfc_scsi_psb_array) { 4957 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4958 "2563 Failed to allocate memory for SCSI " 4959 "XRI management array of size %d.\n", 4960 phba->sli4_hba.scsi_xri_max); 4961 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4962 return -ENOMEM; 4963 } 4964 4965 for (i = 0; i < els_xri_cnt; i++) { 4966 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); 4967 if (sglq_entry == NULL) { 4968 printk(KERN_ERR "%s: only allocated %d sgls of " 4969 "expected %d count. Unloading driver.\n", 4970 __func__, i, els_xri_cnt); 4971 goto out_free_mem; 4972 } 4973 4974 sglq_entry->buff_type = GEN_BUFF_TYPE; 4975 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4976 if (sglq_entry->virt == NULL) { 4977 kfree(sglq_entry); 4978 printk(KERN_ERR "%s: failed to allocate mbuf.\n" 4979 "Unloading driver.\n", __func__); 4980 goto out_free_mem; 4981 } 4982 sglq_entry->sgl = sglq_entry->virt; 4983 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 4984 4985 /* The list order is used by later block SGL registraton */ 4986 spin_lock_irq(&phba->hbalock); 4987 sglq_entry->state = SGL_FREED; 4988 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4989 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4990 phba->sli4_hba.total_sglq_bufs++; 4991 spin_unlock_irq(&phba->hbalock); 4992 } 4993 return 0; 4994 4995 out_free_mem: 4996 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4997 lpfc_free_sgl_list(phba); 4998 return -ENOMEM; 4999 } 5000 5001 /** 5002 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 5003 * @phba: pointer to lpfc hba data structure. 5004 * 5005 * This routine is invoked to post rpi header templates to the 5006 * port for those SLI4 ports that do not support extents. This routine 5007 * posts a PAGE_SIZE memory region to the port to hold up to 5008 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 5009 * and should be called only when interrupts are disabled. 5010 * 5011 * Return codes 5012 * 0 - successful 5013 * -ERROR - otherwise. 5014 **/ 5015 int 5016 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5017 { 5018 int rc = 0; 5019 struct lpfc_rpi_hdr *rpi_hdr; 5020 5021 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 5022 /* 5023 * If the SLI4 port supports extents, posting the rpi header isn't 5024 * required. Set the expected maximum count and let the actual value 5025 * get set when extents are fully allocated. 5026 */ 5027 if (!phba->sli4_hba.rpi_hdrs_in_use) { 5028 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5029 return rc; 5030 } 5031 if (phba->sli4_hba.extents_in_use) 5032 return -EIO; 5033 5034 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5035 if (!rpi_hdr) { 5036 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5037 "0391 Error during rpi post operation\n"); 5038 lpfc_sli4_remove_rpis(phba); 5039 rc = -ENODEV; 5040 } 5041 5042 return rc; 5043 } 5044 5045 /** 5046 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 5047 * @phba: pointer to lpfc hba data structure. 5048 * 5049 * This routine is invoked to allocate a single 4KB memory region to 5050 * support rpis and stores them in the phba. This single region 5051 * provides support for up to 64 rpis. The region is used globally 5052 * by the device. 5053 * 5054 * Returns: 5055 * A valid rpi hdr on success. 5056 * A NULL pointer on any failure. 5057 **/ 5058 struct lpfc_rpi_hdr * 5059 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 5060 { 5061 uint16_t rpi_limit, curr_rpi_range; 5062 struct lpfc_dmabuf *dmabuf; 5063 struct lpfc_rpi_hdr *rpi_hdr; 5064 uint32_t rpi_count; 5065 5066 /* 5067 * If the SLI4 port supports extents, posting the rpi header isn't 5068 * required. Set the expected maximum count and let the actual value 5069 * get set when extents are fully allocated. 5070 */ 5071 if (!phba->sli4_hba.rpi_hdrs_in_use) 5072 return NULL; 5073 if (phba->sli4_hba.extents_in_use) 5074 return NULL; 5075 5076 /* The limit on the logical index is just the max_rpi count. */ 5077 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5078 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5079 5080 spin_lock_irq(&phba->hbalock); 5081 /* 5082 * Establish the starting RPI in this header block. The starting 5083 * rpi is normalized to a zero base because the physical rpi is 5084 * port based. 5085 */ 5086 curr_rpi_range = phba->sli4_hba.next_rpi - 5087 phba->sli4_hba.max_cfg_param.rpi_base; 5088 spin_unlock_irq(&phba->hbalock); 5089 5090 /* 5091 * The port has a limited number of rpis. The increment here 5092 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 5093 * and to allow the full max_rpi range per port. 5094 */ 5095 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 5096 rpi_count = rpi_limit - curr_rpi_range; 5097 else 5098 rpi_count = LPFC_RPI_HDR_COUNT; 5099 5100 if (!rpi_count) 5101 return NULL; 5102 /* 5103 * First allocate the protocol header region for the port. The 5104 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5105 */ 5106 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5107 if (!dmabuf) 5108 return NULL; 5109 5110 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5111 LPFC_HDR_TEMPLATE_SIZE, 5112 &dmabuf->phys, 5113 GFP_KERNEL); 5114 if (!dmabuf->virt) { 5115 rpi_hdr = NULL; 5116 goto err_free_dmabuf; 5117 } 5118 5119 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 5120 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 5121 rpi_hdr = NULL; 5122 goto err_free_coherent; 5123 } 5124 5125 /* Save the rpi header data for cleanup later. */ 5126 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 5127 if (!rpi_hdr) 5128 goto err_free_coherent; 5129 5130 rpi_hdr->dmabuf = dmabuf; 5131 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5132 rpi_hdr->page_count = 1; 5133 spin_lock_irq(&phba->hbalock); 5134 5135 /* The rpi_hdr stores the logical index only. */ 5136 rpi_hdr->start_rpi = curr_rpi_range; 5137 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5138 5139 /* 5140 * The next_rpi stores the next logical module-64 rpi value used 5141 * to post physical rpis in subsequent rpi postings. 5142 */ 5143 phba->sli4_hba.next_rpi += rpi_count; 5144 spin_unlock_irq(&phba->hbalock); 5145 return rpi_hdr; 5146 5147 err_free_coherent: 5148 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 5149 dmabuf->virt, dmabuf->phys); 5150 err_free_dmabuf: 5151 kfree(dmabuf); 5152 return NULL; 5153 } 5154 5155 /** 5156 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 5157 * @phba: pointer to lpfc hba data structure. 5158 * 5159 * This routine is invoked to remove all memory resources allocated 5160 * to support rpis for SLI4 ports not supporting extents. This routine 5161 * presumes the caller has released all rpis consumed by fabric or port 5162 * logins and is prepared to have the header pages removed. 5163 **/ 5164 void 5165 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5166 { 5167 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5168 5169 if (!phba->sli4_hba.rpi_hdrs_in_use) 5170 goto exit; 5171 5172 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5173 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5174 list_del(&rpi_hdr->list); 5175 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 5176 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 5177 kfree(rpi_hdr->dmabuf); 5178 kfree(rpi_hdr); 5179 } 5180 exit: 5181 /* There are no rpis available to the port now. */ 5182 phba->sli4_hba.next_rpi = 0; 5183 } 5184 5185 /** 5186 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 5187 * @pdev: pointer to pci device data structure. 5188 * 5189 * This routine is invoked to allocate the driver hba data structure for an 5190 * HBA device. If the allocation is successful, the phba reference to the 5191 * PCI device data structure is set. 5192 * 5193 * Return codes 5194 * pointer to @phba - successful 5195 * NULL - error 5196 **/ 5197 static struct lpfc_hba * 5198 lpfc_hba_alloc(struct pci_dev *pdev) 5199 { 5200 struct lpfc_hba *phba; 5201 5202 /* Allocate memory for HBA structure */ 5203 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 5204 if (!phba) { 5205 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 5206 return NULL; 5207 } 5208 5209 /* Set reference to PCI device in HBA structure */ 5210 phba->pcidev = pdev; 5211 5212 /* Assign an unused board number */ 5213 phba->brd_no = lpfc_get_instance(); 5214 if (phba->brd_no < 0) { 5215 kfree(phba); 5216 return NULL; 5217 } 5218 5219 spin_lock_init(&phba->ct_ev_lock); 5220 INIT_LIST_HEAD(&phba->ct_ev_waiters); 5221 5222 return phba; 5223 } 5224 5225 /** 5226 * lpfc_hba_free - Free driver hba data structure with a device. 5227 * @phba: pointer to lpfc hba data structure. 5228 * 5229 * This routine is invoked to free the driver hba data structure with an 5230 * HBA device. 5231 **/ 5232 static void 5233 lpfc_hba_free(struct lpfc_hba *phba) 5234 { 5235 /* Release the driver assigned board number */ 5236 idr_remove(&lpfc_hba_index, phba->brd_no); 5237 5238 kfree(phba); 5239 return; 5240 } 5241 5242 /** 5243 * lpfc_create_shost - Create hba physical port with associated scsi host. 5244 * @phba: pointer to lpfc hba data structure. 5245 * 5246 * This routine is invoked to create HBA physical port and associate a SCSI 5247 * host with it. 5248 * 5249 * Return codes 5250 * 0 - successful 5251 * other values - error 5252 **/ 5253 static int 5254 lpfc_create_shost(struct lpfc_hba *phba) 5255 { 5256 struct lpfc_vport *vport; 5257 struct Scsi_Host *shost; 5258 5259 /* Initialize HBA FC structure */ 5260 phba->fc_edtov = FF_DEF_EDTOV; 5261 phba->fc_ratov = FF_DEF_RATOV; 5262 phba->fc_altov = FF_DEF_ALTOV; 5263 phba->fc_arbtov = FF_DEF_ARBTOV; 5264 5265 atomic_set(&phba->sdev_cnt, 0); 5266 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 5267 if (!vport) 5268 return -ENODEV; 5269 5270 shost = lpfc_shost_from_vport(vport); 5271 phba->pport = vport; 5272 lpfc_debugfs_initialize(vport); 5273 /* Put reference to SCSI host to driver's device private data */ 5274 pci_set_drvdata(phba->pcidev, shost); 5275 5276 return 0; 5277 } 5278 5279 /** 5280 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 5281 * @phba: pointer to lpfc hba data structure. 5282 * 5283 * This routine is invoked to destroy HBA physical port and the associated 5284 * SCSI host. 5285 **/ 5286 static void 5287 lpfc_destroy_shost(struct lpfc_hba *phba) 5288 { 5289 struct lpfc_vport *vport = phba->pport; 5290 5291 /* Destroy physical port that associated with the SCSI host */ 5292 destroy_port(vport); 5293 5294 return; 5295 } 5296 5297 /** 5298 * lpfc_setup_bg - Setup Block guard structures and debug areas. 5299 * @phba: pointer to lpfc hba data structure. 5300 * @shost: the shost to be used to detect Block guard settings. 5301 * 5302 * This routine sets up the local Block guard protocol settings for @shost. 5303 * This routine also allocates memory for debugging bg buffers. 5304 **/ 5305 static void 5306 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5307 { 5308 int pagecnt = 10; 5309 if (lpfc_prot_mask && lpfc_prot_guard) { 5310 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5311 "1478 Registering BlockGuard with the " 5312 "SCSI layer\n"); 5313 scsi_host_set_prot(shost, lpfc_prot_mask); 5314 scsi_host_set_guard(shost, lpfc_prot_guard); 5315 } 5316 if (!_dump_buf_data) { 5317 while (pagecnt) { 5318 spin_lock_init(&_dump_buf_lock); 5319 _dump_buf_data = 5320 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5321 if (_dump_buf_data) { 5322 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5323 "9043 BLKGRD: allocated %d pages for " 5324 "_dump_buf_data at 0x%p\n", 5325 (1 << pagecnt), _dump_buf_data); 5326 _dump_buf_data_order = pagecnt; 5327 memset(_dump_buf_data, 0, 5328 ((1 << PAGE_SHIFT) << pagecnt)); 5329 break; 5330 } else 5331 --pagecnt; 5332 } 5333 if (!_dump_buf_data_order) 5334 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5335 "9044 BLKGRD: ERROR unable to allocate " 5336 "memory for hexdump\n"); 5337 } else 5338 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5339 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 5340 "\n", _dump_buf_data); 5341 if (!_dump_buf_dif) { 5342 while (pagecnt) { 5343 _dump_buf_dif = 5344 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5345 if (_dump_buf_dif) { 5346 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5347 "9046 BLKGRD: allocated %d pages for " 5348 "_dump_buf_dif at 0x%p\n", 5349 (1 << pagecnt), _dump_buf_dif); 5350 _dump_buf_dif_order = pagecnt; 5351 memset(_dump_buf_dif, 0, 5352 ((1 << PAGE_SHIFT) << pagecnt)); 5353 break; 5354 } else 5355 --pagecnt; 5356 } 5357 if (!_dump_buf_dif_order) 5358 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5359 "9047 BLKGRD: ERROR unable to allocate " 5360 "memory for hexdump\n"); 5361 } else 5362 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5363 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 5364 _dump_buf_dif); 5365 } 5366 5367 /** 5368 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5369 * @phba: pointer to lpfc hba data structure. 5370 * 5371 * This routine is invoked to perform all the necessary post initialization 5372 * setup for the device. 5373 **/ 5374 static void 5375 lpfc_post_init_setup(struct lpfc_hba *phba) 5376 { 5377 struct Scsi_Host *shost; 5378 struct lpfc_adapter_event_header adapter_event; 5379 5380 /* Get the default values for Model Name and Description */ 5381 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5382 5383 /* 5384 * hba setup may have changed the hba_queue_depth so we need to 5385 * adjust the value of can_queue. 5386 */ 5387 shost = pci_get_drvdata(phba->pcidev); 5388 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5389 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5390 lpfc_setup_bg(phba, shost); 5391 5392 lpfc_host_attrib_init(shost); 5393 5394 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5395 spin_lock_irq(shost->host_lock); 5396 lpfc_poll_start_timer(phba); 5397 spin_unlock_irq(shost->host_lock); 5398 } 5399 5400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5401 "0428 Perform SCSI scan\n"); 5402 /* Send board arrival event to upper layer */ 5403 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5404 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5405 fc_host_post_vendor_event(shost, fc_get_event_number(), 5406 sizeof(adapter_event), 5407 (char *) &adapter_event, 5408 LPFC_NL_VENDOR_ID); 5409 return; 5410 } 5411 5412 /** 5413 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 5414 * @phba: pointer to lpfc hba data structure. 5415 * 5416 * This routine is invoked to set up the PCI device memory space for device 5417 * with SLI-3 interface spec. 5418 * 5419 * Return codes 5420 * 0 - successful 5421 * other values - error 5422 **/ 5423 static int 5424 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5425 { 5426 struct pci_dev *pdev; 5427 unsigned long bar0map_len, bar2map_len; 5428 int i, hbq_count; 5429 void *ptr; 5430 int error = -ENODEV; 5431 5432 /* Obtain PCI device reference */ 5433 if (!phba->pcidev) 5434 return error; 5435 else 5436 pdev = phba->pcidev; 5437 5438 /* Set the device DMA mask size */ 5439 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5440 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5441 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5442 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5443 return error; 5444 } 5445 } 5446 5447 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5448 * required by each mapping. 5449 */ 5450 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5451 bar0map_len = pci_resource_len(pdev, 0); 5452 5453 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5454 bar2map_len = pci_resource_len(pdev, 2); 5455 5456 /* Map HBA SLIM to a kernel virtual address. */ 5457 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5458 if (!phba->slim_memmap_p) { 5459 dev_printk(KERN_ERR, &pdev->dev, 5460 "ioremap failed for SLIM memory.\n"); 5461 goto out; 5462 } 5463 5464 /* Map HBA Control Registers to a kernel virtual address. */ 5465 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5466 if (!phba->ctrl_regs_memmap_p) { 5467 dev_printk(KERN_ERR, &pdev->dev, 5468 "ioremap failed for HBA control registers.\n"); 5469 goto out_iounmap_slim; 5470 } 5471 5472 /* Allocate memory for SLI-2 structures */ 5473 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5474 SLI2_SLIM_SIZE, 5475 &phba->slim2p.phys, 5476 GFP_KERNEL); 5477 if (!phba->slim2p.virt) 5478 goto out_iounmap; 5479 5480 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5481 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5482 phba->mbox_ext = (phba->slim2p.virt + 5483 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5484 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5485 phba->IOCBs = (phba->slim2p.virt + 5486 offsetof(struct lpfc_sli2_slim, IOCBs)); 5487 5488 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5489 lpfc_sli_hbq_size(), 5490 &phba->hbqslimp.phys, 5491 GFP_KERNEL); 5492 if (!phba->hbqslimp.virt) 5493 goto out_free_slim; 5494 5495 hbq_count = lpfc_sli_hbq_count(); 5496 ptr = phba->hbqslimp.virt; 5497 for (i = 0; i < hbq_count; ++i) { 5498 phba->hbqs[i].hbq_virt = ptr; 5499 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5500 ptr += (lpfc_hbq_defs[i]->entry_count * 5501 sizeof(struct lpfc_hbq_entry)); 5502 } 5503 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5504 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5505 5506 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5507 5508 INIT_LIST_HEAD(&phba->rb_pend_list); 5509 5510 phba->MBslimaddr = phba->slim_memmap_p; 5511 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5512 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5513 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5514 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5515 5516 return 0; 5517 5518 out_free_slim: 5519 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5520 phba->slim2p.virt, phba->slim2p.phys); 5521 out_iounmap: 5522 iounmap(phba->ctrl_regs_memmap_p); 5523 out_iounmap_slim: 5524 iounmap(phba->slim_memmap_p); 5525 out: 5526 return error; 5527 } 5528 5529 /** 5530 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5531 * @phba: pointer to lpfc hba data structure. 5532 * 5533 * This routine is invoked to unset the PCI device memory space for device 5534 * with SLI-3 interface spec. 5535 **/ 5536 static void 5537 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5538 { 5539 struct pci_dev *pdev; 5540 5541 /* Obtain PCI device reference */ 5542 if (!phba->pcidev) 5543 return; 5544 else 5545 pdev = phba->pcidev; 5546 5547 /* Free coherent DMA memory allocated */ 5548 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5549 phba->hbqslimp.virt, phba->hbqslimp.phys); 5550 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5551 phba->slim2p.virt, phba->slim2p.phys); 5552 5553 /* I/O memory unmap */ 5554 iounmap(phba->ctrl_regs_memmap_p); 5555 iounmap(phba->slim_memmap_p); 5556 5557 return; 5558 } 5559 5560 /** 5561 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5562 * @phba: pointer to lpfc hba data structure. 5563 * 5564 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5565 * done and check status. 5566 * 5567 * Return 0 if successful, otherwise -ENODEV. 5568 **/ 5569 int 5570 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5571 { 5572 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 5573 struct lpfc_register reg_data; 5574 int i, port_error = 0; 5575 uint32_t if_type; 5576 5577 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 5578 memset(®_data, 0, sizeof(reg_data)); 5579 if (!phba->sli4_hba.PSMPHRregaddr) 5580 return -ENODEV; 5581 5582 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5583 for (i = 0; i < 3000; i++) { 5584 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 5585 &portsmphr_reg.word0) || 5586 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 5587 /* Port has a fatal POST error, break out */ 5588 port_error = -ENODEV; 5589 break; 5590 } 5591 if (LPFC_POST_STAGE_PORT_READY == 5592 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 5593 break; 5594 msleep(10); 5595 } 5596 5597 /* 5598 * If there was a port error during POST, then don't proceed with 5599 * other register reads as the data may not be valid. Just exit. 5600 */ 5601 if (port_error) { 5602 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5603 "1408 Port Failed POST - portsmphr=0x%x, " 5604 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 5605 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 5606 portsmphr_reg.word0, 5607 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 5608 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 5609 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 5610 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 5611 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 5612 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 5613 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 5614 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 5615 } else { 5616 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5617 "2534 Device Info: SLIFamily=0x%x, " 5618 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 5619 "SLIHint_2=0x%x, FT=0x%x\n", 5620 bf_get(lpfc_sli_intf_sli_family, 5621 &phba->sli4_hba.sli_intf), 5622 bf_get(lpfc_sli_intf_slirev, 5623 &phba->sli4_hba.sli_intf), 5624 bf_get(lpfc_sli_intf_if_type, 5625 &phba->sli4_hba.sli_intf), 5626 bf_get(lpfc_sli_intf_sli_hint1, 5627 &phba->sli4_hba.sli_intf), 5628 bf_get(lpfc_sli_intf_sli_hint2, 5629 &phba->sli4_hba.sli_intf), 5630 bf_get(lpfc_sli_intf_func_type, 5631 &phba->sli4_hba.sli_intf)); 5632 /* 5633 * Check for other Port errors during the initialization 5634 * process. Fail the load if the port did not come up 5635 * correctly. 5636 */ 5637 if_type = bf_get(lpfc_sli_intf_if_type, 5638 &phba->sli4_hba.sli_intf); 5639 switch (if_type) { 5640 case LPFC_SLI_INTF_IF_TYPE_0: 5641 phba->sli4_hba.ue_mask_lo = 5642 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 5643 phba->sli4_hba.ue_mask_hi = 5644 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 5645 uerrlo_reg.word0 = 5646 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 5647 uerrhi_reg.word0 = 5648 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 5649 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5650 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5651 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5652 "1422 Unrecoverable Error " 5653 "Detected during POST " 5654 "uerr_lo_reg=0x%x, " 5655 "uerr_hi_reg=0x%x, " 5656 "ue_mask_lo_reg=0x%x, " 5657 "ue_mask_hi_reg=0x%x\n", 5658 uerrlo_reg.word0, 5659 uerrhi_reg.word0, 5660 phba->sli4_hba.ue_mask_lo, 5661 phba->sli4_hba.ue_mask_hi); 5662 port_error = -ENODEV; 5663 } 5664 break; 5665 case LPFC_SLI_INTF_IF_TYPE_2: 5666 /* Final checks. The port status should be clean. */ 5667 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 5668 ®_data.word0) || 5669 (bf_get(lpfc_sliport_status_err, ®_data) && 5670 !bf_get(lpfc_sliport_status_rn, ®_data))) { 5671 phba->work_status[0] = 5672 readl(phba->sli4_hba.u.if_type2. 5673 ERR1regaddr); 5674 phba->work_status[1] = 5675 readl(phba->sli4_hba.u.if_type2. 5676 ERR2regaddr); 5677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5678 "2888 Port Error Detected " 5679 "during POST: " 5680 "port status reg 0x%x, " 5681 "port_smphr reg 0x%x, " 5682 "error 1=0x%x, error 2=0x%x\n", 5683 reg_data.word0, 5684 portsmphr_reg.word0, 5685 phba->work_status[0], 5686 phba->work_status[1]); 5687 port_error = -ENODEV; 5688 } 5689 break; 5690 case LPFC_SLI_INTF_IF_TYPE_1: 5691 default: 5692 break; 5693 } 5694 } 5695 return port_error; 5696 } 5697 5698 /** 5699 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5700 * @phba: pointer to lpfc hba data structure. 5701 * @if_type: The SLI4 interface type getting configured. 5702 * 5703 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5704 * memory map. 5705 **/ 5706 static void 5707 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 5708 { 5709 switch (if_type) { 5710 case LPFC_SLI_INTF_IF_TYPE_0: 5711 phba->sli4_hba.u.if_type0.UERRLOregaddr = 5712 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 5713 phba->sli4_hba.u.if_type0.UERRHIregaddr = 5714 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 5715 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 5716 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 5717 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 5718 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 5719 phba->sli4_hba.SLIINTFregaddr = 5720 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5721 break; 5722 case LPFC_SLI_INTF_IF_TYPE_2: 5723 phba->sli4_hba.u.if_type2.ERR1regaddr = 5724 phba->sli4_hba.conf_regs_memmap_p + 5725 LPFC_CTL_PORT_ER1_OFFSET; 5726 phba->sli4_hba.u.if_type2.ERR2regaddr = 5727 phba->sli4_hba.conf_regs_memmap_p + 5728 LPFC_CTL_PORT_ER2_OFFSET; 5729 phba->sli4_hba.u.if_type2.CTRLregaddr = 5730 phba->sli4_hba.conf_regs_memmap_p + 5731 LPFC_CTL_PORT_CTL_OFFSET; 5732 phba->sli4_hba.u.if_type2.STATUSregaddr = 5733 phba->sli4_hba.conf_regs_memmap_p + 5734 LPFC_CTL_PORT_STA_OFFSET; 5735 phba->sli4_hba.SLIINTFregaddr = 5736 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5737 phba->sli4_hba.PSMPHRregaddr = 5738 phba->sli4_hba.conf_regs_memmap_p + 5739 LPFC_CTL_PORT_SEM_OFFSET; 5740 phba->sli4_hba.RQDBregaddr = 5741 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5742 phba->sli4_hba.WQDBregaddr = 5743 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; 5744 phba->sli4_hba.EQCQDBregaddr = 5745 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 5746 phba->sli4_hba.MQDBregaddr = 5747 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 5748 phba->sli4_hba.BMBXregaddr = 5749 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 5750 break; 5751 case LPFC_SLI_INTF_IF_TYPE_1: 5752 default: 5753 dev_printk(KERN_ERR, &phba->pcidev->dev, 5754 "FATAL - unsupported SLI4 interface type - %d\n", 5755 if_type); 5756 break; 5757 } 5758 } 5759 5760 /** 5761 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5762 * @phba: pointer to lpfc hba data structure. 5763 * 5764 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5765 * memory map. 5766 **/ 5767 static void 5768 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5769 { 5770 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5771 LPFC_SLIPORT_IF0_SMPHR; 5772 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5773 LPFC_HST_ISR0; 5774 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5775 LPFC_HST_IMR0; 5776 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5777 LPFC_HST_ISCR0; 5778 } 5779 5780 /** 5781 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 5782 * @phba: pointer to lpfc hba data structure. 5783 * @vf: virtual function number 5784 * 5785 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 5786 * based on the given viftual function number, @vf. 5787 * 5788 * Return 0 if successful, otherwise -ENODEV. 5789 **/ 5790 static int 5791 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 5792 { 5793 if (vf > LPFC_VIR_FUNC_MAX) 5794 return -ENODEV; 5795 5796 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5797 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 5798 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5799 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 5800 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5801 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 5802 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5803 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 5804 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5805 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 5806 return 0; 5807 } 5808 5809 /** 5810 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 5811 * @phba: pointer to lpfc hba data structure. 5812 * 5813 * This routine is invoked to create the bootstrap mailbox 5814 * region consistent with the SLI-4 interface spec. This 5815 * routine allocates all memory necessary to communicate 5816 * mailbox commands to the port and sets up all alignment 5817 * needs. No locks are expected to be held when calling 5818 * this routine. 5819 * 5820 * Return codes 5821 * 0 - successful 5822 * -ENOMEM - could not allocated memory. 5823 **/ 5824 static int 5825 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5826 { 5827 uint32_t bmbx_size; 5828 struct lpfc_dmabuf *dmabuf; 5829 struct dma_address *dma_address; 5830 uint32_t pa_addr; 5831 uint64_t phys_addr; 5832 5833 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5834 if (!dmabuf) 5835 return -ENOMEM; 5836 5837 /* 5838 * The bootstrap mailbox region is comprised of 2 parts 5839 * plus an alignment restriction of 16 bytes. 5840 */ 5841 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 5842 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5843 bmbx_size, 5844 &dmabuf->phys, 5845 GFP_KERNEL); 5846 if (!dmabuf->virt) { 5847 kfree(dmabuf); 5848 return -ENOMEM; 5849 } 5850 memset(dmabuf->virt, 0, bmbx_size); 5851 5852 /* 5853 * Initialize the bootstrap mailbox pointers now so that the register 5854 * operations are simple later. The mailbox dma address is required 5855 * to be 16-byte aligned. Also align the virtual memory as each 5856 * maibox is copied into the bmbx mailbox region before issuing the 5857 * command to the port. 5858 */ 5859 phba->sli4_hba.bmbx.dmabuf = dmabuf; 5860 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 5861 5862 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 5863 LPFC_ALIGN_16_BYTE); 5864 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 5865 LPFC_ALIGN_16_BYTE); 5866 5867 /* 5868 * Set the high and low physical addresses now. The SLI4 alignment 5869 * requirement is 16 bytes and the mailbox is posted to the port 5870 * as two 30-bit addresses. The other data is a bit marking whether 5871 * the 30-bit address is the high or low address. 5872 * Upcast bmbx aphys to 64bits so shift instruction compiles 5873 * clean on 32 bit machines. 5874 */ 5875 dma_address = &phba->sli4_hba.bmbx.dma_address; 5876 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 5877 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 5878 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 5879 LPFC_BMBX_BIT1_ADDR_HI); 5880 5881 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 5882 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 5883 LPFC_BMBX_BIT1_ADDR_LO); 5884 return 0; 5885 } 5886 5887 /** 5888 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 5889 * @phba: pointer to lpfc hba data structure. 5890 * 5891 * This routine is invoked to teardown the bootstrap mailbox 5892 * region and release all host resources. This routine requires 5893 * the caller to ensure all mailbox commands recovered, no 5894 * additional mailbox comands are sent, and interrupts are disabled 5895 * before calling this routine. 5896 * 5897 **/ 5898 static void 5899 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 5900 { 5901 dma_free_coherent(&phba->pcidev->dev, 5902 phba->sli4_hba.bmbx.bmbx_size, 5903 phba->sli4_hba.bmbx.dmabuf->virt, 5904 phba->sli4_hba.bmbx.dmabuf->phys); 5905 5906 kfree(phba->sli4_hba.bmbx.dmabuf); 5907 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 5908 } 5909 5910 /** 5911 * lpfc_sli4_read_config - Get the config parameters. 5912 * @phba: pointer to lpfc hba data structure. 5913 * 5914 * This routine is invoked to read the configuration parameters from the HBA. 5915 * The configuration parameters are used to set the base and maximum values 5916 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 5917 * allocation for the port. 5918 * 5919 * Return codes 5920 * 0 - successful 5921 * -ENOMEM - No available memory 5922 * -EIO - The mailbox failed to complete successfully. 5923 **/ 5924 static int 5925 lpfc_sli4_read_config(struct lpfc_hba *phba) 5926 { 5927 LPFC_MBOXQ_t *pmb; 5928 struct lpfc_mbx_read_config *rd_config; 5929 union lpfc_sli4_cfg_shdr *shdr; 5930 uint32_t shdr_status, shdr_add_status; 5931 struct lpfc_mbx_get_func_cfg *get_func_cfg; 5932 struct lpfc_rsrc_desc_fcfcoe *desc; 5933 uint32_t desc_count; 5934 int length, i, rc = 0; 5935 5936 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5937 if (!pmb) { 5938 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5939 "2011 Unable to allocate memory for issuing " 5940 "SLI_CONFIG_SPECIAL mailbox command\n"); 5941 return -ENOMEM; 5942 } 5943 5944 lpfc_read_config(phba, pmb); 5945 5946 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5947 if (rc != MBX_SUCCESS) { 5948 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5949 "2012 Mailbox failed , mbxCmd x%x " 5950 "READ_CONFIG, mbxStatus x%x\n", 5951 bf_get(lpfc_mqe_command, &pmb->u.mqe), 5952 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 5953 rc = -EIO; 5954 } else { 5955 rd_config = &pmb->u.mqe.un.rd_config; 5956 phba->sli4_hba.extents_in_use = 5957 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 5958 phba->sli4_hba.max_cfg_param.max_xri = 5959 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5960 phba->sli4_hba.max_cfg_param.xri_base = 5961 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 5962 phba->sli4_hba.max_cfg_param.max_vpi = 5963 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 5964 phba->sli4_hba.max_cfg_param.vpi_base = 5965 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 5966 phba->sli4_hba.max_cfg_param.max_rpi = 5967 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 5968 phba->sli4_hba.max_cfg_param.rpi_base = 5969 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 5970 phba->sli4_hba.max_cfg_param.max_vfi = 5971 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 5972 phba->sli4_hba.max_cfg_param.vfi_base = 5973 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5974 phba->sli4_hba.max_cfg_param.max_fcfi = 5975 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5976 phba->sli4_hba.max_cfg_param.max_eq = 5977 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5978 phba->sli4_hba.max_cfg_param.max_rq = 5979 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 5980 phba->sli4_hba.max_cfg_param.max_wq = 5981 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 5982 phba->sli4_hba.max_cfg_param.max_cq = 5983 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 5984 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 5985 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 5986 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5987 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5988 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5989 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 5990 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5991 phba->max_vports = phba->max_vpi; 5992 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5993 "2003 cfg params Extents? %d " 5994 "XRI(B:%d M:%d), " 5995 "VPI(B:%d M:%d) " 5996 "VFI(B:%d M:%d) " 5997 "RPI(B:%d M:%d) " 5998 "FCFI(Count:%d)\n", 5999 phba->sli4_hba.extents_in_use, 6000 phba->sli4_hba.max_cfg_param.xri_base, 6001 phba->sli4_hba.max_cfg_param.max_xri, 6002 phba->sli4_hba.max_cfg_param.vpi_base, 6003 phba->sli4_hba.max_cfg_param.max_vpi, 6004 phba->sli4_hba.max_cfg_param.vfi_base, 6005 phba->sli4_hba.max_cfg_param.max_vfi, 6006 phba->sli4_hba.max_cfg_param.rpi_base, 6007 phba->sli4_hba.max_cfg_param.max_rpi, 6008 phba->sli4_hba.max_cfg_param.max_fcfi); 6009 } 6010 6011 if (rc) 6012 goto read_cfg_out; 6013 6014 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 6015 if (phba->cfg_hba_queue_depth > 6016 (phba->sli4_hba.max_cfg_param.max_xri - 6017 lpfc_sli4_get_els_iocb_cnt(phba))) 6018 phba->cfg_hba_queue_depth = 6019 phba->sli4_hba.max_cfg_param.max_xri - 6020 lpfc_sli4_get_els_iocb_cnt(phba); 6021 6022 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 6023 LPFC_SLI_INTF_IF_TYPE_2) 6024 goto read_cfg_out; 6025 6026 /* get the pf# and vf# for SLI4 if_type 2 port */ 6027 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 6028 sizeof(struct lpfc_sli4_cfg_mhdr)); 6029 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 6030 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 6031 length, LPFC_SLI4_MBX_EMBED); 6032 6033 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6034 shdr = (union lpfc_sli4_cfg_shdr *) 6035 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 6036 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6037 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6038 if (rc || shdr_status || shdr_add_status) { 6039 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6040 "3026 Mailbox failed , mbxCmd x%x " 6041 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 6042 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6043 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6044 rc = -EIO; 6045 goto read_cfg_out; 6046 } 6047 6048 /* search for fc_fcoe resrouce descriptor */ 6049 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 6050 desc_count = get_func_cfg->func_cfg.rsrc_desc_count; 6051 6052 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 6053 desc = (struct lpfc_rsrc_desc_fcfcoe *) 6054 &get_func_cfg->func_cfg.desc[i]; 6055 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 6056 bf_get(lpfc_rsrc_desc_pcie_type, desc)) { 6057 phba->sli4_hba.iov.pf_number = 6058 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 6059 phba->sli4_hba.iov.vf_number = 6060 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 6061 break; 6062 } 6063 } 6064 6065 if (i < LPFC_RSRC_DESC_MAX_NUM) 6066 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6067 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 6068 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 6069 phba->sli4_hba.iov.vf_number); 6070 else { 6071 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6072 "3028 GET_FUNCTION_CONFIG: failed to find " 6073 "Resrouce Descriptor:x%x\n", 6074 LPFC_RSRC_DESC_TYPE_FCFCOE); 6075 rc = -EIO; 6076 } 6077 6078 read_cfg_out: 6079 mempool_free(pmb, phba->mbox_mem_pool); 6080 return rc; 6081 } 6082 6083 /** 6084 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 6085 * @phba: pointer to lpfc hba data structure. 6086 * 6087 * This routine is invoked to setup the port-side endian order when 6088 * the port if_type is 0. This routine has no function for other 6089 * if_types. 6090 * 6091 * Return codes 6092 * 0 - successful 6093 * -ENOMEM - No available memory 6094 * -EIO - The mailbox failed to complete successfully. 6095 **/ 6096 static int 6097 lpfc_setup_endian_order(struct lpfc_hba *phba) 6098 { 6099 LPFC_MBOXQ_t *mboxq; 6100 uint32_t if_type, rc = 0; 6101 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 6102 HOST_ENDIAN_HIGH_WORD1}; 6103 6104 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6105 switch (if_type) { 6106 case LPFC_SLI_INTF_IF_TYPE_0: 6107 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6108 GFP_KERNEL); 6109 if (!mboxq) { 6110 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6111 "0492 Unable to allocate memory for " 6112 "issuing SLI_CONFIG_SPECIAL mailbox " 6113 "command\n"); 6114 return -ENOMEM; 6115 } 6116 6117 /* 6118 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 6119 * two words to contain special data values and no other data. 6120 */ 6121 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 6122 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 6123 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6124 if (rc != MBX_SUCCESS) { 6125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6126 "0493 SLI_CONFIG_SPECIAL mailbox " 6127 "failed with status x%x\n", 6128 rc); 6129 rc = -EIO; 6130 } 6131 mempool_free(mboxq, phba->mbox_mem_pool); 6132 break; 6133 case LPFC_SLI_INTF_IF_TYPE_2: 6134 case LPFC_SLI_INTF_IF_TYPE_1: 6135 default: 6136 break; 6137 } 6138 return rc; 6139 } 6140 6141 /** 6142 * lpfc_sli4_queue_create - Create all the SLI4 queues 6143 * @phba: pointer to lpfc hba data structure. 6144 * 6145 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 6146 * operation. For each SLI4 queue type, the parameters such as queue entry 6147 * count (queue depth) shall be taken from the module parameter. For now, 6148 * we just use some constant number as place holder. 6149 * 6150 * Return codes 6151 * 0 - successful 6152 * -ENOMEM - No available memory 6153 * -EIO - The mailbox failed to complete successfully. 6154 **/ 6155 static int 6156 lpfc_sli4_queue_create(struct lpfc_hba *phba) 6157 { 6158 struct lpfc_queue *qdesc; 6159 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6160 int cfg_fcp_wq_count; 6161 int cfg_fcp_eq_count; 6162 6163 /* 6164 * Sanity check for confiugred queue parameters against the run-time 6165 * device parameters 6166 */ 6167 6168 /* Sanity check on FCP fast-path WQ parameters */ 6169 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 6170 if (cfg_fcp_wq_count > 6171 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 6172 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 6173 LPFC_SP_WQN_DEF; 6174 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 6175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6176 "2581 Not enough WQs (%d) from " 6177 "the pci function for supporting " 6178 "FCP WQs (%d)\n", 6179 phba->sli4_hba.max_cfg_param.max_wq, 6180 phba->cfg_fcp_wq_count); 6181 goto out_error; 6182 } 6183 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6184 "2582 Not enough WQs (%d) from the pci " 6185 "function for supporting the requested " 6186 "FCP WQs (%d), the actual FCP WQs can " 6187 "be supported: %d\n", 6188 phba->sli4_hba.max_cfg_param.max_wq, 6189 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 6190 } 6191 /* The actual number of FCP work queues adopted */ 6192 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 6193 6194 /* Sanity check on FCP fast-path EQ parameters */ 6195 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 6196 if (cfg_fcp_eq_count > 6197 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 6198 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 6199 LPFC_SP_EQN_DEF; 6200 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 6201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6202 "2574 Not enough EQs (%d) from the " 6203 "pci function for supporting FCP " 6204 "EQs (%d)\n", 6205 phba->sli4_hba.max_cfg_param.max_eq, 6206 phba->cfg_fcp_eq_count); 6207 goto out_error; 6208 } 6209 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6210 "2575 Not enough EQs (%d) from the pci " 6211 "function for supporting the requested " 6212 "FCP EQs (%d), the actual FCP EQs can " 6213 "be supported: %d\n", 6214 phba->sli4_hba.max_cfg_param.max_eq, 6215 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 6216 } 6217 /* It does not make sense to have more EQs than WQs */ 6218 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 6219 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6220 "2593 The FCP EQ count(%d) cannot be greater " 6221 "than the FCP WQ count(%d), limiting the " 6222 "FCP EQ count to %d\n", cfg_fcp_eq_count, 6223 phba->cfg_fcp_wq_count, 6224 phba->cfg_fcp_wq_count); 6225 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 6226 } 6227 /* The actual number of FCP event queues adopted */ 6228 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 6229 /* The overall number of event queues used */ 6230 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6231 6232 /* 6233 * Create Event Queues (EQs) 6234 */ 6235 6236 /* Get EQ depth from module parameter, fake the default for now */ 6237 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6238 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 6239 6240 /* Create slow path event queue */ 6241 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6242 phba->sli4_hba.eq_ecount); 6243 if (!qdesc) { 6244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6245 "0496 Failed allocate slow-path EQ\n"); 6246 goto out_error; 6247 } 6248 phba->sli4_hba.sp_eq = qdesc; 6249 6250 /* Create fast-path FCP Event Queue(s) */ 6251 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 6252 phba->cfg_fcp_eq_count), GFP_KERNEL); 6253 if (!phba->sli4_hba.fp_eq) { 6254 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6255 "2576 Failed allocate memory for fast-path " 6256 "EQ record array\n"); 6257 goto out_free_sp_eq; 6258 } 6259 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6260 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6261 phba->sli4_hba.eq_ecount); 6262 if (!qdesc) { 6263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6264 "0497 Failed allocate fast-path EQ\n"); 6265 goto out_free_fp_eq; 6266 } 6267 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 6268 } 6269 6270 /* 6271 * Create Complete Queues (CQs) 6272 */ 6273 6274 /* Get CQ depth from module parameter, fake the default for now */ 6275 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 6276 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 6277 6278 /* Create slow-path Mailbox Command Complete Queue */ 6279 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6280 phba->sli4_hba.cq_ecount); 6281 if (!qdesc) { 6282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6283 "0500 Failed allocate slow-path mailbox CQ\n"); 6284 goto out_free_fp_eq; 6285 } 6286 phba->sli4_hba.mbx_cq = qdesc; 6287 6288 /* Create slow-path ELS Complete Queue */ 6289 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6290 phba->sli4_hba.cq_ecount); 6291 if (!qdesc) { 6292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6293 "0501 Failed allocate slow-path ELS CQ\n"); 6294 goto out_free_mbx_cq; 6295 } 6296 phba->sli4_hba.els_cq = qdesc; 6297 6298 6299 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 6300 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6301 phba->cfg_fcp_eq_count), GFP_KERNEL); 6302 if (!phba->sli4_hba.fcp_cq) { 6303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6304 "2577 Failed allocate memory for fast-path " 6305 "CQ record array\n"); 6306 goto out_free_els_cq; 6307 } 6308 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6309 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6310 phba->sli4_hba.cq_ecount); 6311 if (!qdesc) { 6312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6313 "0499 Failed allocate fast-path FCP " 6314 "CQ (%d)\n", fcp_cqidx); 6315 goto out_free_fcp_cq; 6316 } 6317 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 6318 } 6319 6320 /* Create Mailbox Command Queue */ 6321 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6322 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6323 6324 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6325 phba->sli4_hba.mq_ecount); 6326 if (!qdesc) { 6327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6328 "0505 Failed allocate slow-path MQ\n"); 6329 goto out_free_fcp_cq; 6330 } 6331 phba->sli4_hba.mbx_wq = qdesc; 6332 6333 /* 6334 * Create all the Work Queues (WQs) 6335 */ 6336 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6337 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6338 6339 /* Create slow-path ELS Work Queue */ 6340 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6341 phba->sli4_hba.wq_ecount); 6342 if (!qdesc) { 6343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6344 "0504 Failed allocate slow-path ELS WQ\n"); 6345 goto out_free_mbx_wq; 6346 } 6347 phba->sli4_hba.els_wq = qdesc; 6348 6349 /* Create fast-path FCP Work Queue(s) */ 6350 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6351 phba->cfg_fcp_wq_count), GFP_KERNEL); 6352 if (!phba->sli4_hba.fcp_wq) { 6353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6354 "2578 Failed allocate memory for fast-path " 6355 "WQ record array\n"); 6356 goto out_free_els_wq; 6357 } 6358 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6359 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6360 phba->sli4_hba.wq_ecount); 6361 if (!qdesc) { 6362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6363 "0503 Failed allocate fast-path FCP " 6364 "WQ (%d)\n", fcp_wqidx); 6365 goto out_free_fcp_wq; 6366 } 6367 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 6368 } 6369 6370 /* 6371 * Create Receive Queue (RQ) 6372 */ 6373 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 6374 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 6375 6376 /* Create Receive Queue for header */ 6377 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6378 phba->sli4_hba.rq_ecount); 6379 if (!qdesc) { 6380 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6381 "0506 Failed allocate receive HRQ\n"); 6382 goto out_free_fcp_wq; 6383 } 6384 phba->sli4_hba.hdr_rq = qdesc; 6385 6386 /* Create Receive Queue for data */ 6387 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6388 phba->sli4_hba.rq_ecount); 6389 if (!qdesc) { 6390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6391 "0507 Failed allocate receive DRQ\n"); 6392 goto out_free_hdr_rq; 6393 } 6394 phba->sli4_hba.dat_rq = qdesc; 6395 6396 return 0; 6397 6398 out_free_hdr_rq: 6399 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6400 phba->sli4_hba.hdr_rq = NULL; 6401 out_free_fcp_wq: 6402 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 6403 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 6404 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 6405 } 6406 kfree(phba->sli4_hba.fcp_wq); 6407 out_free_els_wq: 6408 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6409 phba->sli4_hba.els_wq = NULL; 6410 out_free_mbx_wq: 6411 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6412 phba->sli4_hba.mbx_wq = NULL; 6413 out_free_fcp_cq: 6414 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 6415 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 6416 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 6417 } 6418 kfree(phba->sli4_hba.fcp_cq); 6419 out_free_els_cq: 6420 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6421 phba->sli4_hba.els_cq = NULL; 6422 out_free_mbx_cq: 6423 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6424 phba->sli4_hba.mbx_cq = NULL; 6425 out_free_fp_eq: 6426 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 6427 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 6428 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 6429 } 6430 kfree(phba->sli4_hba.fp_eq); 6431 out_free_sp_eq: 6432 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6433 phba->sli4_hba.sp_eq = NULL; 6434 out_error: 6435 return -ENOMEM; 6436 } 6437 6438 /** 6439 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 6440 * @phba: pointer to lpfc hba data structure. 6441 * 6442 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 6443 * operation. 6444 * 6445 * Return codes 6446 * 0 - successful 6447 * -ENOMEM - No available memory 6448 * -EIO - The mailbox failed to complete successfully. 6449 **/ 6450 static void 6451 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6452 { 6453 int fcp_qidx; 6454 6455 /* Release mailbox command work queue */ 6456 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6457 phba->sli4_hba.mbx_wq = NULL; 6458 6459 /* Release ELS work queue */ 6460 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6461 phba->sli4_hba.els_wq = NULL; 6462 6463 /* Release FCP work queue */ 6464 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6465 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6466 kfree(phba->sli4_hba.fcp_wq); 6467 phba->sli4_hba.fcp_wq = NULL; 6468 6469 /* Release unsolicited receive queue */ 6470 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6471 phba->sli4_hba.hdr_rq = NULL; 6472 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6473 phba->sli4_hba.dat_rq = NULL; 6474 6475 /* Release ELS complete queue */ 6476 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6477 phba->sli4_hba.els_cq = NULL; 6478 6479 /* Release mailbox command complete queue */ 6480 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6481 phba->sli4_hba.mbx_cq = NULL; 6482 6483 /* Release FCP response complete queue */ 6484 fcp_qidx = 0; 6485 do 6486 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6487 while (++fcp_qidx < phba->cfg_fcp_eq_count); 6488 kfree(phba->sli4_hba.fcp_cq); 6489 phba->sli4_hba.fcp_cq = NULL; 6490 6491 /* Release fast-path event queue */ 6492 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6493 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6494 kfree(phba->sli4_hba.fp_eq); 6495 phba->sli4_hba.fp_eq = NULL; 6496 6497 /* Release slow-path event queue */ 6498 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6499 phba->sli4_hba.sp_eq = NULL; 6500 6501 return; 6502 } 6503 6504 /** 6505 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 6506 * @phba: pointer to lpfc hba data structure. 6507 * 6508 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 6509 * operation. 6510 * 6511 * Return codes 6512 * 0 - successful 6513 * -ENOMEM - No available memory 6514 * -EIO - The mailbox failed to complete successfully. 6515 **/ 6516 int 6517 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6518 { 6519 int rc = -ENOMEM; 6520 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6521 int fcp_cq_index = 0; 6522 6523 /* 6524 * Set up Event Queues (EQs) 6525 */ 6526 6527 /* Set up slow-path event queue */ 6528 if (!phba->sli4_hba.sp_eq) { 6529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6530 "0520 Slow-path EQ not allocated\n"); 6531 goto out_error; 6532 } 6533 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6534 LPFC_SP_DEF_IMAX); 6535 if (rc) { 6536 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6537 "0521 Failed setup of slow-path EQ: " 6538 "rc = 0x%x\n", rc); 6539 goto out_error; 6540 } 6541 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6542 "2583 Slow-path EQ setup: queue-id=%d\n", 6543 phba->sli4_hba.sp_eq->queue_id); 6544 6545 /* Set up fast-path event queue */ 6546 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6547 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6549 "0522 Fast-path EQ (%d) not " 6550 "allocated\n", fcp_eqidx); 6551 goto out_destroy_fp_eq; 6552 } 6553 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6554 phba->cfg_fcp_imax); 6555 if (rc) { 6556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6557 "0523 Failed setup of fast-path EQ " 6558 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6559 goto out_destroy_fp_eq; 6560 } 6561 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6562 "2584 Fast-path EQ setup: " 6563 "queue[%d]-id=%d\n", fcp_eqidx, 6564 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6565 } 6566 6567 /* 6568 * Set up Complete Queues (CQs) 6569 */ 6570 6571 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6572 if (!phba->sli4_hba.mbx_cq) { 6573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6574 "0528 Mailbox CQ not allocated\n"); 6575 goto out_destroy_fp_eq; 6576 } 6577 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6578 LPFC_MCQ, LPFC_MBOX); 6579 if (rc) { 6580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6581 "0529 Failed setup of slow-path mailbox CQ: " 6582 "rc = 0x%x\n", rc); 6583 goto out_destroy_fp_eq; 6584 } 6585 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6586 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6587 phba->sli4_hba.mbx_cq->queue_id, 6588 phba->sli4_hba.sp_eq->queue_id); 6589 6590 /* Set up slow-path ELS Complete Queue */ 6591 if (!phba->sli4_hba.els_cq) { 6592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6593 "0530 ELS CQ not allocated\n"); 6594 goto out_destroy_mbx_cq; 6595 } 6596 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6597 LPFC_WCQ, LPFC_ELS); 6598 if (rc) { 6599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6600 "0531 Failed setup of slow-path ELS CQ: " 6601 "rc = 0x%x\n", rc); 6602 goto out_destroy_mbx_cq; 6603 } 6604 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6605 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6606 phba->sli4_hba.els_cq->queue_id, 6607 phba->sli4_hba.sp_eq->queue_id); 6608 6609 /* Set up fast-path FCP Response Complete Queue */ 6610 fcp_cqidx = 0; 6611 do { 6612 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6614 "0526 Fast-path FCP CQ (%d) not " 6615 "allocated\n", fcp_cqidx); 6616 goto out_destroy_fcp_cq; 6617 } 6618 if (phba->cfg_fcp_eq_count) 6619 rc = lpfc_cq_create(phba, 6620 phba->sli4_hba.fcp_cq[fcp_cqidx], 6621 phba->sli4_hba.fp_eq[fcp_cqidx], 6622 LPFC_WCQ, LPFC_FCP); 6623 else 6624 rc = lpfc_cq_create(phba, 6625 phba->sli4_hba.fcp_cq[fcp_cqidx], 6626 phba->sli4_hba.sp_eq, 6627 LPFC_WCQ, LPFC_FCP); 6628 if (rc) { 6629 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6630 "0527 Failed setup of fast-path FCP " 6631 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 6632 goto out_destroy_fcp_cq; 6633 } 6634 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6635 "2588 FCP CQ setup: cq[%d]-id=%d, " 6636 "parent %seq[%d]-id=%d\n", 6637 fcp_cqidx, 6638 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6639 (phba->cfg_fcp_eq_count) ? "" : "sp_", 6640 fcp_cqidx, 6641 (phba->cfg_fcp_eq_count) ? 6642 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id : 6643 phba->sli4_hba.sp_eq->queue_id); 6644 } while (++fcp_cqidx < phba->cfg_fcp_eq_count); 6645 6646 /* 6647 * Set up all the Work Queues (WQs) 6648 */ 6649 6650 /* Set up Mailbox Command Queue */ 6651 if (!phba->sli4_hba.mbx_wq) { 6652 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6653 "0538 Slow-path MQ not allocated\n"); 6654 goto out_destroy_fcp_cq; 6655 } 6656 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6657 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6658 if (rc) { 6659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6660 "0539 Failed setup of slow-path MQ: " 6661 "rc = 0x%x\n", rc); 6662 goto out_destroy_fcp_cq; 6663 } 6664 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6665 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6666 phba->sli4_hba.mbx_wq->queue_id, 6667 phba->sli4_hba.mbx_cq->queue_id); 6668 6669 /* Set up slow-path ELS Work Queue */ 6670 if (!phba->sli4_hba.els_wq) { 6671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6672 "0536 Slow-path ELS WQ not allocated\n"); 6673 goto out_destroy_mbx_wq; 6674 } 6675 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6676 phba->sli4_hba.els_cq, LPFC_ELS); 6677 if (rc) { 6678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6679 "0537 Failed setup of slow-path ELS WQ: " 6680 "rc = 0x%x\n", rc); 6681 goto out_destroy_mbx_wq; 6682 } 6683 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6684 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6685 phba->sli4_hba.els_wq->queue_id, 6686 phba->sli4_hba.els_cq->queue_id); 6687 6688 /* Set up fast-path FCP Work Queue */ 6689 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6690 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6691 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6692 "0534 Fast-path FCP WQ (%d) not " 6693 "allocated\n", fcp_wqidx); 6694 goto out_destroy_fcp_wq; 6695 } 6696 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6697 phba->sli4_hba.fcp_cq[fcp_cq_index], 6698 LPFC_FCP); 6699 if (rc) { 6700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6701 "0535 Failed setup of fast-path FCP " 6702 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6703 goto out_destroy_fcp_wq; 6704 } 6705 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6706 "2591 FCP WQ setup: wq[%d]-id=%d, " 6707 "parent cq[%d]-id=%d\n", 6708 fcp_wqidx, 6709 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6710 fcp_cq_index, 6711 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6712 /* Round robin FCP Work Queue's Completion Queue assignment */ 6713 if (phba->cfg_fcp_eq_count) 6714 fcp_cq_index = ((fcp_cq_index + 1) % 6715 phba->cfg_fcp_eq_count); 6716 } 6717 6718 /* 6719 * Create Receive Queue (RQ) 6720 */ 6721 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6723 "0540 Receive Queue not allocated\n"); 6724 goto out_destroy_fcp_wq; 6725 } 6726 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6727 phba->sli4_hba.els_cq, LPFC_USOL); 6728 if (rc) { 6729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6730 "0541 Failed setup of Receive Queue: " 6731 "rc = 0x%x\n", rc); 6732 goto out_destroy_fcp_wq; 6733 } 6734 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6735 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6736 "parent cq-id=%d\n", 6737 phba->sli4_hba.hdr_rq->queue_id, 6738 phba->sli4_hba.dat_rq->queue_id, 6739 phba->sli4_hba.els_cq->queue_id); 6740 return 0; 6741 6742 out_destroy_fcp_wq: 6743 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 6744 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6745 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6746 out_destroy_mbx_wq: 6747 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6748 out_destroy_fcp_cq: 6749 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6750 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6751 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6752 out_destroy_mbx_cq: 6753 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6754 out_destroy_fp_eq: 6755 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6756 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6757 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6758 out_error: 6759 return rc; 6760 } 6761 6762 /** 6763 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 6764 * @phba: pointer to lpfc hba data structure. 6765 * 6766 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 6767 * operation. 6768 * 6769 * Return codes 6770 * 0 - successful 6771 * -ENOMEM - No available memory 6772 * -EIO - The mailbox failed to complete successfully. 6773 **/ 6774 void 6775 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6776 { 6777 int fcp_qidx; 6778 6779 /* Unset mailbox command work queue */ 6780 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6781 /* Unset ELS work queue */ 6782 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6783 /* Unset unsolicited receive queue */ 6784 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6785 /* Unset FCP work queue */ 6786 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6787 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6788 /* Unset mailbox command complete queue */ 6789 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6790 /* Unset ELS complete queue */ 6791 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6792 /* Unset FCP response complete queue */ 6793 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6794 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6795 /* Unset fast-path event queue */ 6796 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6797 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6798 /* Unset slow-path event queue */ 6799 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6800 } 6801 6802 /** 6803 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6804 * @phba: pointer to lpfc hba data structure. 6805 * 6806 * This routine is invoked to allocate and set up a pool of completion queue 6807 * events. The body of the completion queue event is a completion queue entry 6808 * CQE. For now, this pool is used for the interrupt service routine to queue 6809 * the following HBA completion queue events for the worker thread to process: 6810 * - Mailbox asynchronous events 6811 * - Receive queue completion unsolicited events 6812 * Later, this can be used for all the slow-path events. 6813 * 6814 * Return codes 6815 * 0 - successful 6816 * -ENOMEM - No available memory 6817 **/ 6818 static int 6819 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6820 { 6821 struct lpfc_cq_event *cq_event; 6822 int i; 6823 6824 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6825 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6826 if (!cq_event) 6827 goto out_pool_create_fail; 6828 list_add_tail(&cq_event->list, 6829 &phba->sli4_hba.sp_cqe_event_pool); 6830 } 6831 return 0; 6832 6833 out_pool_create_fail: 6834 lpfc_sli4_cq_event_pool_destroy(phba); 6835 return -ENOMEM; 6836 } 6837 6838 /** 6839 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6840 * @phba: pointer to lpfc hba data structure. 6841 * 6842 * This routine is invoked to free the pool of completion queue events at 6843 * driver unload time. Note that, it is the responsibility of the driver 6844 * cleanup routine to free all the outstanding completion-queue events 6845 * allocated from this pool back into the pool before invoking this routine 6846 * to destroy the pool. 6847 **/ 6848 static void 6849 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6850 { 6851 struct lpfc_cq_event *cq_event, *next_cq_event; 6852 6853 list_for_each_entry_safe(cq_event, next_cq_event, 6854 &phba->sli4_hba.sp_cqe_event_pool, list) { 6855 list_del(&cq_event->list); 6856 kfree(cq_event); 6857 } 6858 } 6859 6860 /** 6861 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6862 * @phba: pointer to lpfc hba data structure. 6863 * 6864 * This routine is the lock free version of the API invoked to allocate a 6865 * completion-queue event from the free pool. 6866 * 6867 * Return: Pointer to the newly allocated completion-queue event if successful 6868 * NULL otherwise. 6869 **/ 6870 struct lpfc_cq_event * 6871 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6872 { 6873 struct lpfc_cq_event *cq_event = NULL; 6874 6875 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6876 struct lpfc_cq_event, list); 6877 return cq_event; 6878 } 6879 6880 /** 6881 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6882 * @phba: pointer to lpfc hba data structure. 6883 * 6884 * This routine is the lock version of the API invoked to allocate a 6885 * completion-queue event from the free pool. 6886 * 6887 * Return: Pointer to the newly allocated completion-queue event if successful 6888 * NULL otherwise. 6889 **/ 6890 struct lpfc_cq_event * 6891 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6892 { 6893 struct lpfc_cq_event *cq_event; 6894 unsigned long iflags; 6895 6896 spin_lock_irqsave(&phba->hbalock, iflags); 6897 cq_event = __lpfc_sli4_cq_event_alloc(phba); 6898 spin_unlock_irqrestore(&phba->hbalock, iflags); 6899 return cq_event; 6900 } 6901 6902 /** 6903 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6904 * @phba: pointer to lpfc hba data structure. 6905 * @cq_event: pointer to the completion queue event to be freed. 6906 * 6907 * This routine is the lock free version of the API invoked to release a 6908 * completion-queue event back into the free pool. 6909 **/ 6910 void 6911 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6912 struct lpfc_cq_event *cq_event) 6913 { 6914 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6915 } 6916 6917 /** 6918 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6919 * @phba: pointer to lpfc hba data structure. 6920 * @cq_event: pointer to the completion queue event to be freed. 6921 * 6922 * This routine is the lock version of the API invoked to release a 6923 * completion-queue event back into the free pool. 6924 **/ 6925 void 6926 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6927 struct lpfc_cq_event *cq_event) 6928 { 6929 unsigned long iflags; 6930 spin_lock_irqsave(&phba->hbalock, iflags); 6931 __lpfc_sli4_cq_event_release(phba, cq_event); 6932 spin_unlock_irqrestore(&phba->hbalock, iflags); 6933 } 6934 6935 /** 6936 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 6937 * @phba: pointer to lpfc hba data structure. 6938 * 6939 * This routine is to free all the pending completion-queue events to the 6940 * back into the free pool for device reset. 6941 **/ 6942 static void 6943 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 6944 { 6945 LIST_HEAD(cqelist); 6946 struct lpfc_cq_event *cqe; 6947 unsigned long iflags; 6948 6949 /* Retrieve all the pending WCQEs from pending WCQE lists */ 6950 spin_lock_irqsave(&phba->hbalock, iflags); 6951 /* Pending FCP XRI abort events */ 6952 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 6953 &cqelist); 6954 /* Pending ELS XRI abort events */ 6955 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 6956 &cqelist); 6957 /* Pending asynnc events */ 6958 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 6959 &cqelist); 6960 spin_unlock_irqrestore(&phba->hbalock, iflags); 6961 6962 while (!list_empty(&cqelist)) { 6963 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 6964 lpfc_sli4_cq_event_release(phba, cqe); 6965 } 6966 } 6967 6968 /** 6969 * lpfc_pci_function_reset - Reset pci function. 6970 * @phba: pointer to lpfc hba data structure. 6971 * 6972 * This routine is invoked to request a PCI function reset. It will destroys 6973 * all resources assigned to the PCI function which originates this request. 6974 * 6975 * Return codes 6976 * 0 - successful 6977 * -ENOMEM - No available memory 6978 * -EIO - The mailbox failed to complete successfully. 6979 **/ 6980 int 6981 lpfc_pci_function_reset(struct lpfc_hba *phba) 6982 { 6983 LPFC_MBOXQ_t *mboxq; 6984 uint32_t rc = 0, if_type; 6985 uint32_t shdr_status, shdr_add_status; 6986 uint32_t rdy_chk, num_resets = 0, reset_again = 0; 6987 union lpfc_sli4_cfg_shdr *shdr; 6988 struct lpfc_register reg_data; 6989 6990 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 6991 switch (if_type) { 6992 case LPFC_SLI_INTF_IF_TYPE_0: 6993 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 6994 GFP_KERNEL); 6995 if (!mboxq) { 6996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6997 "0494 Unable to allocate memory for " 6998 "issuing SLI_FUNCTION_RESET mailbox " 6999 "command\n"); 7000 return -ENOMEM; 7001 } 7002 7003 /* Setup PCI function reset mailbox-ioctl command */ 7004 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7005 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 7006 LPFC_SLI4_MBX_EMBED); 7007 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7008 shdr = (union lpfc_sli4_cfg_shdr *) 7009 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7010 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7011 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 7012 &shdr->response); 7013 if (rc != MBX_TIMEOUT) 7014 mempool_free(mboxq, phba->mbox_mem_pool); 7015 if (shdr_status || shdr_add_status || rc) { 7016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7017 "0495 SLI_FUNCTION_RESET mailbox " 7018 "failed with status x%x add_status x%x," 7019 " mbx status x%x\n", 7020 shdr_status, shdr_add_status, rc); 7021 rc = -ENXIO; 7022 } 7023 break; 7024 case LPFC_SLI_INTF_IF_TYPE_2: 7025 for (num_resets = 0; 7026 num_resets < MAX_IF_TYPE_2_RESETS; 7027 num_resets++) { 7028 reg_data.word0 = 0; 7029 bf_set(lpfc_sliport_ctrl_end, ®_data, 7030 LPFC_SLIPORT_LITTLE_ENDIAN); 7031 bf_set(lpfc_sliport_ctrl_ip, ®_data, 7032 LPFC_SLIPORT_INIT_PORT); 7033 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 7034 CTRLregaddr); 7035 7036 /* 7037 * Poll the Port Status Register and wait for RDY for 7038 * up to 10 seconds. If the port doesn't respond, treat 7039 * it as an error. If the port responds with RN, start 7040 * the loop again. 7041 */ 7042 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { 7043 if (lpfc_readl(phba->sli4_hba.u.if_type2. 7044 STATUSregaddr, ®_data.word0)) { 7045 rc = -ENODEV; 7046 break; 7047 } 7048 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 7049 break; 7050 if (bf_get(lpfc_sliport_status_rn, ®_data)) { 7051 reset_again++; 7052 break; 7053 } 7054 msleep(10); 7055 } 7056 7057 /* 7058 * If the port responds to the init request with 7059 * reset needed, delay for a bit and restart the loop. 7060 */ 7061 if (reset_again) { 7062 msleep(10); 7063 reset_again = 0; 7064 continue; 7065 } 7066 7067 /* Detect any port errors. */ 7068 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 7069 ®_data.word0)) { 7070 rc = -ENODEV; 7071 break; 7072 } 7073 if ((bf_get(lpfc_sliport_status_err, ®_data)) || 7074 (rdy_chk >= 1000)) { 7075 phba->work_status[0] = readl( 7076 phba->sli4_hba.u.if_type2.ERR1regaddr); 7077 phba->work_status[1] = readl( 7078 phba->sli4_hba.u.if_type2.ERR2regaddr); 7079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7080 "2890 Port Error Detected " 7081 "during Port Reset: " 7082 "port status reg 0x%x, " 7083 "error 1=0x%x, error 2=0x%x\n", 7084 reg_data.word0, 7085 phba->work_status[0], 7086 phba->work_status[1]); 7087 rc = -ENODEV; 7088 } 7089 7090 /* 7091 * Terminate the outer loop provided the Port indicated 7092 * ready within 10 seconds. 7093 */ 7094 if (rdy_chk < 1000) 7095 break; 7096 } 7097 /* delay driver action following IF_TYPE_2 function reset */ 7098 msleep(100); 7099 break; 7100 case LPFC_SLI_INTF_IF_TYPE_1: 7101 default: 7102 break; 7103 } 7104 7105 /* Catch the not-ready port failure after a port reset. */ 7106 if (num_resets >= MAX_IF_TYPE_2_RESETS) 7107 rc = -ENODEV; 7108 7109 return rc; 7110 } 7111 7112 /** 7113 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 7114 * @phba: pointer to lpfc hba data structure. 7115 * @cnt: number of nop mailbox commands to send. 7116 * 7117 * This routine is invoked to send a number @cnt of NOP mailbox command and 7118 * wait for each command to complete. 7119 * 7120 * Return: the number of NOP mailbox command completed. 7121 **/ 7122 static int 7123 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 7124 { 7125 LPFC_MBOXQ_t *mboxq; 7126 int length, cmdsent; 7127 uint32_t mbox_tmo; 7128 uint32_t rc = 0; 7129 uint32_t shdr_status, shdr_add_status; 7130 union lpfc_sli4_cfg_shdr *shdr; 7131 7132 if (cnt == 0) { 7133 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7134 "2518 Requested to send 0 NOP mailbox cmd\n"); 7135 return cnt; 7136 } 7137 7138 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7139 if (!mboxq) { 7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7141 "2519 Unable to allocate memory for issuing " 7142 "NOP mailbox command\n"); 7143 return 0; 7144 } 7145 7146 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 7147 length = (sizeof(struct lpfc_mbx_nop) - 7148 sizeof(struct lpfc_sli4_cfg_mhdr)); 7149 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7150 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 7151 7152 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 7153 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 7154 if (!phba->sli4_hba.intr_enable) 7155 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7156 else 7157 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7158 if (rc == MBX_TIMEOUT) 7159 break; 7160 /* Check return status */ 7161 shdr = (union lpfc_sli4_cfg_shdr *) 7162 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7163 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7164 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 7165 &shdr->response); 7166 if (shdr_status || shdr_add_status || rc) { 7167 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7168 "2520 NOP mailbox command failed " 7169 "status x%x add_status x%x mbx " 7170 "status x%x\n", shdr_status, 7171 shdr_add_status, rc); 7172 break; 7173 } 7174 } 7175 7176 if (rc != MBX_TIMEOUT) 7177 mempool_free(mboxq, phba->mbox_mem_pool); 7178 7179 return cmdsent; 7180 } 7181 7182 /** 7183 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 7184 * @phba: pointer to lpfc hba data structure. 7185 * 7186 * This routine is invoked to set up the PCI device memory space for device 7187 * with SLI-4 interface spec. 7188 * 7189 * Return codes 7190 * 0 - successful 7191 * other values - error 7192 **/ 7193 static int 7194 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 7195 { 7196 struct pci_dev *pdev; 7197 unsigned long bar0map_len, bar1map_len, bar2map_len; 7198 int error = -ENODEV; 7199 uint32_t if_type; 7200 7201 /* Obtain PCI device reference */ 7202 if (!phba->pcidev) 7203 return error; 7204 else 7205 pdev = phba->pcidev; 7206 7207 /* Set the device DMA mask size */ 7208 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 7209 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 7210 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 7211 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 7212 return error; 7213 } 7214 } 7215 7216 /* 7217 * The BARs and register set definitions and offset locations are 7218 * dependent on the if_type. 7219 */ 7220 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 7221 &phba->sli4_hba.sli_intf.word0)) { 7222 return error; 7223 } 7224 7225 /* There is no SLI3 failback for SLI4 devices. */ 7226 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 7227 LPFC_SLI_INTF_VALID) { 7228 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7229 "2894 SLI_INTF reg contents invalid " 7230 "sli_intf reg 0x%x\n", 7231 phba->sli4_hba.sli_intf.word0); 7232 return error; 7233 } 7234 7235 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7236 /* 7237 * Get the bus address of SLI4 device Bar regions and the 7238 * number of bytes required by each mapping. The mapping of the 7239 * particular PCI BARs regions is dependent on the type of 7240 * SLI4 device. 7241 */ 7242 if (pci_resource_start(pdev, 0)) { 7243 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7244 bar0map_len = pci_resource_len(pdev, 0); 7245 7246 /* 7247 * Map SLI4 PCI Config Space Register base to a kernel virtual 7248 * addr 7249 */ 7250 phba->sli4_hba.conf_regs_memmap_p = 7251 ioremap(phba->pci_bar0_map, bar0map_len); 7252 if (!phba->sli4_hba.conf_regs_memmap_p) { 7253 dev_printk(KERN_ERR, &pdev->dev, 7254 "ioremap failed for SLI4 PCI config " 7255 "registers.\n"); 7256 goto out; 7257 } 7258 /* Set up BAR0 PCI config space register memory map */ 7259 lpfc_sli4_bar0_register_memmap(phba, if_type); 7260 } else { 7261 phba->pci_bar0_map = pci_resource_start(pdev, 1); 7262 bar0map_len = pci_resource_len(pdev, 1); 7263 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7264 dev_printk(KERN_ERR, &pdev->dev, 7265 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 7266 goto out; 7267 } 7268 phba->sli4_hba.conf_regs_memmap_p = 7269 ioremap(phba->pci_bar0_map, bar0map_len); 7270 if (!phba->sli4_hba.conf_regs_memmap_p) { 7271 dev_printk(KERN_ERR, &pdev->dev, 7272 "ioremap failed for SLI4 PCI config " 7273 "registers.\n"); 7274 goto out; 7275 } 7276 lpfc_sli4_bar0_register_memmap(phba, if_type); 7277 } 7278 7279 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7280 (pci_resource_start(pdev, 2))) { 7281 /* 7282 * Map SLI4 if type 0 HBA Control Register base to a kernel 7283 * virtual address and setup the registers. 7284 */ 7285 phba->pci_bar1_map = pci_resource_start(pdev, 2); 7286 bar1map_len = pci_resource_len(pdev, 2); 7287 phba->sli4_hba.ctrl_regs_memmap_p = 7288 ioremap(phba->pci_bar1_map, bar1map_len); 7289 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 7290 dev_printk(KERN_ERR, &pdev->dev, 7291 "ioremap failed for SLI4 HBA control registers.\n"); 7292 goto out_iounmap_conf; 7293 } 7294 lpfc_sli4_bar1_register_memmap(phba); 7295 } 7296 7297 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7298 (pci_resource_start(pdev, 4))) { 7299 /* 7300 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7301 * virtual address and setup the registers. 7302 */ 7303 phba->pci_bar2_map = pci_resource_start(pdev, 4); 7304 bar2map_len = pci_resource_len(pdev, 4); 7305 phba->sli4_hba.drbl_regs_memmap_p = 7306 ioremap(phba->pci_bar2_map, bar2map_len); 7307 if (!phba->sli4_hba.drbl_regs_memmap_p) { 7308 dev_printk(KERN_ERR, &pdev->dev, 7309 "ioremap failed for SLI4 HBA doorbell registers.\n"); 7310 goto out_iounmap_ctrl; 7311 } 7312 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 7313 if (error) 7314 goto out_iounmap_all; 7315 } 7316 7317 return 0; 7318 7319 out_iounmap_all: 7320 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7321 out_iounmap_ctrl: 7322 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7323 out_iounmap_conf: 7324 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7325 out: 7326 return error; 7327 } 7328 7329 /** 7330 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 7331 * @phba: pointer to lpfc hba data structure. 7332 * 7333 * This routine is invoked to unset the PCI device memory space for device 7334 * with SLI-4 interface spec. 7335 **/ 7336 static void 7337 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 7338 { 7339 struct pci_dev *pdev; 7340 7341 /* Obtain PCI device reference */ 7342 if (!phba->pcidev) 7343 return; 7344 else 7345 pdev = phba->pcidev; 7346 7347 /* Free coherent DMA memory allocated */ 7348 7349 /* Unmap I/O memory space */ 7350 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7351 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7352 iounmap(phba->sli4_hba.conf_regs_memmap_p); 7353 7354 return; 7355 } 7356 7357 /** 7358 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 7359 * @phba: pointer to lpfc hba data structure. 7360 * 7361 * This routine is invoked to enable the MSI-X interrupt vectors to device 7362 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 7363 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 7364 * invoked, enables either all or nothing, depending on the current 7365 * availability of PCI vector resources. The device driver is responsible 7366 * for calling the individual request_irq() to register each MSI-X vector 7367 * with a interrupt handler, which is done in this function. Note that 7368 * later when device is unloading, the driver should always call free_irq() 7369 * on all MSI-X vectors it has done request_irq() on before calling 7370 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 7371 * will be left with MSI-X enabled and leaks its vectors. 7372 * 7373 * Return codes 7374 * 0 - successful 7375 * other values - error 7376 **/ 7377 static int 7378 lpfc_sli_enable_msix(struct lpfc_hba *phba) 7379 { 7380 int rc, i; 7381 LPFC_MBOXQ_t *pmb; 7382 7383 /* Set up MSI-X multi-message vectors */ 7384 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7385 phba->msix_entries[i].entry = i; 7386 7387 /* Configure MSI-X capability structure */ 7388 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 7389 ARRAY_SIZE(phba->msix_entries)); 7390 if (rc) { 7391 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7392 "0420 PCI enable MSI-X failed (%d)\n", rc); 7393 goto msi_fail_out; 7394 } 7395 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7396 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7397 "0477 MSI-X entry[%d]: vector=x%x " 7398 "message=%d\n", i, 7399 phba->msix_entries[i].vector, 7400 phba->msix_entries[i].entry); 7401 /* 7402 * Assign MSI-X vectors to interrupt handlers 7403 */ 7404 7405 /* vector-0 is associated to slow-path handler */ 7406 rc = request_irq(phba->msix_entries[0].vector, 7407 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 7408 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7409 if (rc) { 7410 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7411 "0421 MSI-X slow-path request_irq failed " 7412 "(%d)\n", rc); 7413 goto msi_fail_out; 7414 } 7415 7416 /* vector-1 is associated to fast-path handler */ 7417 rc = request_irq(phba->msix_entries[1].vector, 7418 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 7419 LPFC_FP_DRIVER_HANDLER_NAME, phba); 7420 7421 if (rc) { 7422 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7423 "0429 MSI-X fast-path request_irq failed " 7424 "(%d)\n", rc); 7425 goto irq_fail_out; 7426 } 7427 7428 /* 7429 * Configure HBA MSI-X attention conditions to messages 7430 */ 7431 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7432 7433 if (!pmb) { 7434 rc = -ENOMEM; 7435 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7436 "0474 Unable to allocate memory for issuing " 7437 "MBOX_CONFIG_MSI command\n"); 7438 goto mem_fail_out; 7439 } 7440 rc = lpfc_config_msi(phba, pmb); 7441 if (rc) 7442 goto mbx_fail_out; 7443 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7444 if (rc != MBX_SUCCESS) { 7445 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 7446 "0351 Config MSI mailbox command failed, " 7447 "mbxCmd x%x, mbxStatus x%x\n", 7448 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 7449 goto mbx_fail_out; 7450 } 7451 7452 /* Free memory allocated for mailbox command */ 7453 mempool_free(pmb, phba->mbox_mem_pool); 7454 return rc; 7455 7456 mbx_fail_out: 7457 /* Free memory allocated for mailbox command */ 7458 mempool_free(pmb, phba->mbox_mem_pool); 7459 7460 mem_fail_out: 7461 /* free the irq already requested */ 7462 free_irq(phba->msix_entries[1].vector, phba); 7463 7464 irq_fail_out: 7465 /* free the irq already requested */ 7466 free_irq(phba->msix_entries[0].vector, phba); 7467 7468 msi_fail_out: 7469 /* Unconfigure MSI-X capability structure */ 7470 pci_disable_msix(phba->pcidev); 7471 return rc; 7472 } 7473 7474 /** 7475 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 7476 * @phba: pointer to lpfc hba data structure. 7477 * 7478 * This routine is invoked to release the MSI-X vectors and then disable the 7479 * MSI-X interrupt mode to device with SLI-3 interface spec. 7480 **/ 7481 static void 7482 lpfc_sli_disable_msix(struct lpfc_hba *phba) 7483 { 7484 int i; 7485 7486 /* Free up MSI-X multi-message vectors */ 7487 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7488 free_irq(phba->msix_entries[i].vector, phba); 7489 /* Disable MSI-X */ 7490 pci_disable_msix(phba->pcidev); 7491 7492 return; 7493 } 7494 7495 /** 7496 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 7497 * @phba: pointer to lpfc hba data structure. 7498 * 7499 * This routine is invoked to enable the MSI interrupt mode to device with 7500 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 7501 * enable the MSI vector. The device driver is responsible for calling the 7502 * request_irq() to register MSI vector with a interrupt the handler, which 7503 * is done in this function. 7504 * 7505 * Return codes 7506 * 0 - successful 7507 * other values - error 7508 */ 7509 static int 7510 lpfc_sli_enable_msi(struct lpfc_hba *phba) 7511 { 7512 int rc; 7513 7514 rc = pci_enable_msi(phba->pcidev); 7515 if (!rc) 7516 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7517 "0462 PCI enable MSI mode success.\n"); 7518 else { 7519 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7520 "0471 PCI enable MSI mode failed (%d)\n", rc); 7521 return rc; 7522 } 7523 7524 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 7525 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7526 if (rc) { 7527 pci_disable_msi(phba->pcidev); 7528 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7529 "0478 MSI request_irq failed (%d)\n", rc); 7530 } 7531 return rc; 7532 } 7533 7534 /** 7535 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 7536 * @phba: pointer to lpfc hba data structure. 7537 * 7538 * This routine is invoked to disable the MSI interrupt mode to device with 7539 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has 7540 * done request_irq() on before calling pci_disable_msi(). Failure to do so 7541 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 7542 * its vector. 7543 */ 7544 static void 7545 lpfc_sli_disable_msi(struct lpfc_hba *phba) 7546 { 7547 free_irq(phba->pcidev->irq, phba); 7548 pci_disable_msi(phba->pcidev); 7549 return; 7550 } 7551 7552 /** 7553 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 7554 * @phba: pointer to lpfc hba data structure. 7555 * 7556 * This routine is invoked to enable device interrupt and associate driver's 7557 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 7558 * spec. Depends on the interrupt mode configured to the driver, the driver 7559 * will try to fallback from the configured interrupt mode to an interrupt 7560 * mode which is supported by the platform, kernel, and device in the order 7561 * of: 7562 * MSI-X -> MSI -> IRQ. 7563 * 7564 * Return codes 7565 * 0 - successful 7566 * other values - error 7567 **/ 7568 static uint32_t 7569 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 7570 { 7571 uint32_t intr_mode = LPFC_INTR_ERROR; 7572 int retval; 7573 7574 if (cfg_mode == 2) { 7575 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 7576 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 7577 if (!retval) { 7578 /* Now, try to enable MSI-X interrupt mode */ 7579 retval = lpfc_sli_enable_msix(phba); 7580 if (!retval) { 7581 /* Indicate initialization to MSI-X mode */ 7582 phba->intr_type = MSIX; 7583 intr_mode = 2; 7584 } 7585 } 7586 } 7587 7588 /* Fallback to MSI if MSI-X initialization failed */ 7589 if (cfg_mode >= 1 && phba->intr_type == NONE) { 7590 retval = lpfc_sli_enable_msi(phba); 7591 if (!retval) { 7592 /* Indicate initialization to MSI mode */ 7593 phba->intr_type = MSI; 7594 intr_mode = 1; 7595 } 7596 } 7597 7598 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 7599 if (phba->intr_type == NONE) { 7600 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 7601 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7602 if (!retval) { 7603 /* Indicate initialization to INTx mode */ 7604 phba->intr_type = INTx; 7605 intr_mode = 0; 7606 } 7607 } 7608 return intr_mode; 7609 } 7610 7611 /** 7612 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 7613 * @phba: pointer to lpfc hba data structure. 7614 * 7615 * This routine is invoked to disable device interrupt and disassociate the 7616 * driver's interrupt handler(s) from interrupt vector(s) to device with 7617 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 7618 * release the interrupt vector(s) for the message signaled interrupt. 7619 **/ 7620 static void 7621 lpfc_sli_disable_intr(struct lpfc_hba *phba) 7622 { 7623 /* Disable the currently initialized interrupt mode */ 7624 if (phba->intr_type == MSIX) 7625 lpfc_sli_disable_msix(phba); 7626 else if (phba->intr_type == MSI) 7627 lpfc_sli_disable_msi(phba); 7628 else if (phba->intr_type == INTx) 7629 free_irq(phba->pcidev->irq, phba); 7630 7631 /* Reset interrupt management states */ 7632 phba->intr_type = NONE; 7633 phba->sli.slistat.sli_intr = 0; 7634 7635 return; 7636 } 7637 7638 /** 7639 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 7640 * @phba: pointer to lpfc hba data structure. 7641 * 7642 * This routine is invoked to enable the MSI-X interrupt vectors to device 7643 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 7644 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 7645 * enables either all or nothing, depending on the current availability of 7646 * PCI vector resources. The device driver is responsible for calling the 7647 * individual request_irq() to register each MSI-X vector with a interrupt 7648 * handler, which is done in this function. Note that later when device is 7649 * unloading, the driver should always call free_irq() on all MSI-X vectors 7650 * it has done request_irq() on before calling pci_disable_msix(). Failure 7651 * to do so results in a BUG_ON() and a device will be left with MSI-X 7652 * enabled and leaks its vectors. 7653 * 7654 * Return codes 7655 * 0 - successful 7656 * other values - error 7657 **/ 7658 static int 7659 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 7660 { 7661 int vectors, rc, index; 7662 7663 /* Set up MSI-X multi-message vectors */ 7664 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 7665 phba->sli4_hba.msix_entries[index].entry = index; 7666 7667 /* Configure MSI-X capability structure */ 7668 vectors = phba->sli4_hba.cfg_eqn; 7669 enable_msix_vectors: 7670 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 7671 vectors); 7672 if (rc > 1) { 7673 vectors = rc; 7674 goto enable_msix_vectors; 7675 } else if (rc) { 7676 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7677 "0484 PCI enable MSI-X failed (%d)\n", rc); 7678 goto msi_fail_out; 7679 } 7680 7681 /* Log MSI-X vector assignment */ 7682 for (index = 0; index < vectors; index++) 7683 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7684 "0489 MSI-X entry[%d]: vector=x%x " 7685 "message=%d\n", index, 7686 phba->sli4_hba.msix_entries[index].vector, 7687 phba->sli4_hba.msix_entries[index].entry); 7688 /* 7689 * Assign MSI-X vectors to interrupt handlers 7690 */ 7691 if (vectors > 1) 7692 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 7693 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 7694 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7695 else 7696 /* All Interrupts need to be handled by one EQ */ 7697 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 7698 &lpfc_sli4_intr_handler, IRQF_SHARED, 7699 LPFC_DRIVER_NAME, phba); 7700 if (rc) { 7701 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7702 "0485 MSI-X slow-path request_irq failed " 7703 "(%d)\n", rc); 7704 goto msi_fail_out; 7705 } 7706 7707 /* The rest of the vector(s) are associated to fast-path handler(s) */ 7708 for (index = 1; index < vectors; index++) { 7709 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 7710 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 7711 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 7712 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 7713 LPFC_FP_DRIVER_HANDLER_NAME, 7714 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7715 if (rc) { 7716 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7717 "0486 MSI-X fast-path (%d) " 7718 "request_irq failed (%d)\n", index, rc); 7719 goto cfg_fail_out; 7720 } 7721 } 7722 phba->sli4_hba.msix_vec_nr = vectors; 7723 7724 return rc; 7725 7726 cfg_fail_out: 7727 /* free the irq already requested */ 7728 for (--index; index >= 1; index--) 7729 free_irq(phba->sli4_hba.msix_entries[index - 1].vector, 7730 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7731 7732 /* free the irq already requested */ 7733 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7734 7735 msi_fail_out: 7736 /* Unconfigure MSI-X capability structure */ 7737 pci_disable_msix(phba->pcidev); 7738 return rc; 7739 } 7740 7741 /** 7742 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 7743 * @phba: pointer to lpfc hba data structure. 7744 * 7745 * This routine is invoked to release the MSI-X vectors and then disable the 7746 * MSI-X interrupt mode to device with SLI-4 interface spec. 7747 **/ 7748 static void 7749 lpfc_sli4_disable_msix(struct lpfc_hba *phba) 7750 { 7751 int index; 7752 7753 /* Free up MSI-X multi-message vectors */ 7754 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7755 7756 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++) 7757 free_irq(phba->sli4_hba.msix_entries[index].vector, 7758 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7759 7760 /* Disable MSI-X */ 7761 pci_disable_msix(phba->pcidev); 7762 7763 return; 7764 } 7765 7766 /** 7767 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 7768 * @phba: pointer to lpfc hba data structure. 7769 * 7770 * This routine is invoked to enable the MSI interrupt mode to device with 7771 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 7772 * to enable the MSI vector. The device driver is responsible for calling 7773 * the request_irq() to register MSI vector with a interrupt the handler, 7774 * which is done in this function. 7775 * 7776 * Return codes 7777 * 0 - successful 7778 * other values - error 7779 **/ 7780 static int 7781 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 7782 { 7783 int rc, index; 7784 7785 rc = pci_enable_msi(phba->pcidev); 7786 if (!rc) 7787 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7788 "0487 PCI enable MSI mode success.\n"); 7789 else { 7790 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7791 "0488 PCI enable MSI mode failed (%d)\n", rc); 7792 return rc; 7793 } 7794 7795 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7796 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7797 if (rc) { 7798 pci_disable_msi(phba->pcidev); 7799 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7800 "0490 MSI request_irq failed (%d)\n", rc); 7801 return rc; 7802 } 7803 7804 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 7805 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7806 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7807 } 7808 7809 return 0; 7810 } 7811 7812 /** 7813 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 7814 * @phba: pointer to lpfc hba data structure. 7815 * 7816 * This routine is invoked to disable the MSI interrupt mode to device with 7817 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has 7818 * done request_irq() on before calling pci_disable_msi(). Failure to do so 7819 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 7820 * its vector. 7821 **/ 7822 static void 7823 lpfc_sli4_disable_msi(struct lpfc_hba *phba) 7824 { 7825 free_irq(phba->pcidev->irq, phba); 7826 pci_disable_msi(phba->pcidev); 7827 return; 7828 } 7829 7830 /** 7831 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 7832 * @phba: pointer to lpfc hba data structure. 7833 * 7834 * This routine is invoked to enable device interrupt and associate driver's 7835 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 7836 * interface spec. Depends on the interrupt mode configured to the driver, 7837 * the driver will try to fallback from the configured interrupt mode to an 7838 * interrupt mode which is supported by the platform, kernel, and device in 7839 * the order of: 7840 * MSI-X -> MSI -> IRQ. 7841 * 7842 * Return codes 7843 * 0 - successful 7844 * other values - error 7845 **/ 7846 static uint32_t 7847 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 7848 { 7849 uint32_t intr_mode = LPFC_INTR_ERROR; 7850 int retval, index; 7851 7852 if (cfg_mode == 2) { 7853 /* Preparation before conf_msi mbox cmd */ 7854 retval = 0; 7855 if (!retval) { 7856 /* Now, try to enable MSI-X interrupt mode */ 7857 retval = lpfc_sli4_enable_msix(phba); 7858 if (!retval) { 7859 /* Indicate initialization to MSI-X mode */ 7860 phba->intr_type = MSIX; 7861 intr_mode = 2; 7862 } 7863 } 7864 } 7865 7866 /* Fallback to MSI if MSI-X initialization failed */ 7867 if (cfg_mode >= 1 && phba->intr_type == NONE) { 7868 retval = lpfc_sli4_enable_msi(phba); 7869 if (!retval) { 7870 /* Indicate initialization to MSI mode */ 7871 phba->intr_type = MSI; 7872 intr_mode = 1; 7873 } 7874 } 7875 7876 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 7877 if (phba->intr_type == NONE) { 7878 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7879 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7880 if (!retval) { 7881 /* Indicate initialization to INTx mode */ 7882 phba->intr_type = INTx; 7883 intr_mode = 0; 7884 for (index = 0; index < phba->cfg_fcp_eq_count; 7885 index++) { 7886 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7887 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7888 } 7889 } 7890 } 7891 return intr_mode; 7892 } 7893 7894 /** 7895 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 7896 * @phba: pointer to lpfc hba data structure. 7897 * 7898 * This routine is invoked to disable device interrupt and disassociate 7899 * the driver's interrupt handler(s) from interrupt vector(s) to device 7900 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 7901 * will release the interrupt vector(s) for the message signaled interrupt. 7902 **/ 7903 static void 7904 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 7905 { 7906 /* Disable the currently initialized interrupt mode */ 7907 if (phba->intr_type == MSIX) 7908 lpfc_sli4_disable_msix(phba); 7909 else if (phba->intr_type == MSI) 7910 lpfc_sli4_disable_msi(phba); 7911 else if (phba->intr_type == INTx) 7912 free_irq(phba->pcidev->irq, phba); 7913 7914 /* Reset interrupt management states */ 7915 phba->intr_type = NONE; 7916 phba->sli.slistat.sli_intr = 0; 7917 7918 return; 7919 } 7920 7921 /** 7922 * lpfc_unset_hba - Unset SLI3 hba device initialization 7923 * @phba: pointer to lpfc hba data structure. 7924 * 7925 * This routine is invoked to unset the HBA device initialization steps to 7926 * a device with SLI-3 interface spec. 7927 **/ 7928 static void 7929 lpfc_unset_hba(struct lpfc_hba *phba) 7930 { 7931 struct lpfc_vport *vport = phba->pport; 7932 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7933 7934 spin_lock_irq(shost->host_lock); 7935 vport->load_flag |= FC_UNLOADING; 7936 spin_unlock_irq(shost->host_lock); 7937 7938 lpfc_stop_hba_timers(phba); 7939 7940 phba->pport->work_port_events = 0; 7941 7942 lpfc_sli_hba_down(phba); 7943 7944 lpfc_sli_brdrestart(phba); 7945 7946 lpfc_sli_disable_intr(phba); 7947 7948 return; 7949 } 7950 7951 /** 7952 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 7953 * @phba: pointer to lpfc hba data structure. 7954 * 7955 * This routine is invoked to unset the HBA device initialization steps to 7956 * a device with SLI-4 interface spec. 7957 **/ 7958 static void 7959 lpfc_sli4_unset_hba(struct lpfc_hba *phba) 7960 { 7961 struct lpfc_vport *vport = phba->pport; 7962 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7963 7964 spin_lock_irq(shost->host_lock); 7965 vport->load_flag |= FC_UNLOADING; 7966 spin_unlock_irq(shost->host_lock); 7967 7968 phba->pport->work_port_events = 0; 7969 7970 /* Stop the SLI4 device port */ 7971 lpfc_stop_port(phba); 7972 7973 lpfc_sli4_disable_intr(phba); 7974 7975 /* Reset SLI4 HBA FCoE function */ 7976 lpfc_pci_function_reset(phba); 7977 7978 return; 7979 } 7980 7981 /** 7982 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 7983 * @phba: Pointer to HBA context object. 7984 * 7985 * This function is called in the SLI4 code path to wait for completion 7986 * of device's XRIs exchange busy. It will check the XRI exchange busy 7987 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 7988 * that, it will check the XRI exchange busy on outstanding FCP and ELS 7989 * I/Os every 30 seconds, log error message, and wait forever. Only when 7990 * all XRI exchange busy complete, the driver unload shall proceed with 7991 * invoking the function reset ioctl mailbox command to the CNA and the 7992 * the rest of the driver unload resource release. 7993 **/ 7994 static void 7995 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 7996 { 7997 int wait_time = 0; 7998 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 7999 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8000 8001 while (!fcp_xri_cmpl || !els_xri_cmpl) { 8002 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 8003 if (!fcp_xri_cmpl) 8004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8005 "2877 FCP XRI exchange busy " 8006 "wait time: %d seconds.\n", 8007 wait_time/1000); 8008 if (!els_xri_cmpl) 8009 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8010 "2878 ELS XRI exchange busy " 8011 "wait time: %d seconds.\n", 8012 wait_time/1000); 8013 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 8014 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 8015 } else { 8016 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 8017 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 8018 } 8019 fcp_xri_cmpl = 8020 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 8021 els_xri_cmpl = 8022 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 8023 } 8024 } 8025 8026 /** 8027 * lpfc_sli4_hba_unset - Unset the fcoe hba 8028 * @phba: Pointer to HBA context object. 8029 * 8030 * This function is called in the SLI4 code path to reset the HBA's FCoE 8031 * function. The caller is not required to hold any lock. This routine 8032 * issues PCI function reset mailbox command to reset the FCoE function. 8033 * At the end of the function, it calls lpfc_hba_down_post function to 8034 * free any pending commands. 8035 **/ 8036 static void 8037 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 8038 { 8039 int wait_cnt = 0; 8040 LPFC_MBOXQ_t *mboxq; 8041 struct pci_dev *pdev = phba->pcidev; 8042 8043 lpfc_stop_hba_timers(phba); 8044 phba->sli4_hba.intr_enable = 0; 8045 8046 /* 8047 * Gracefully wait out the potential current outstanding asynchronous 8048 * mailbox command. 8049 */ 8050 8051 /* First, block any pending async mailbox command from posted */ 8052 spin_lock_irq(&phba->hbalock); 8053 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8054 spin_unlock_irq(&phba->hbalock); 8055 /* Now, trying to wait it out if we can */ 8056 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8057 msleep(10); 8058 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 8059 break; 8060 } 8061 /* Forcefully release the outstanding mailbox command if timed out */ 8062 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8063 spin_lock_irq(&phba->hbalock); 8064 mboxq = phba->sli.mbox_active; 8065 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8066 __lpfc_mbox_cmpl_put(phba, mboxq); 8067 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8068 phba->sli.mbox_active = NULL; 8069 spin_unlock_irq(&phba->hbalock); 8070 } 8071 8072 /* Abort all iocbs associated with the hba */ 8073 lpfc_sli_hba_iocb_abort(phba); 8074 8075 /* Wait for completion of device XRI exchange busy */ 8076 lpfc_sli4_xri_exchange_busy_wait(phba); 8077 8078 /* Disable PCI subsystem interrupt */ 8079 lpfc_sli4_disable_intr(phba); 8080 8081 /* Disable SR-IOV if enabled */ 8082 if (phba->cfg_sriov_nr_virtfn) 8083 pci_disable_sriov(pdev); 8084 8085 /* Stop kthread signal shall trigger work_done one more time */ 8086 kthread_stop(phba->worker_thread); 8087 8088 /* Reset SLI4 HBA FCoE function */ 8089 lpfc_pci_function_reset(phba); 8090 8091 /* Stop the SLI4 device port */ 8092 phba->pport->work_port_events = 0; 8093 } 8094 8095 /** 8096 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 8097 * @phba: Pointer to HBA context object. 8098 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 8099 * 8100 * This function is called in the SLI4 code path to read the port's 8101 * sli4 capabilities. 8102 * 8103 * This function may be be called from any context that can block-wait 8104 * for the completion. The expectation is that this routine is called 8105 * typically from probe_one or from the online routine. 8106 **/ 8107 int 8108 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8109 { 8110 int rc; 8111 struct lpfc_mqe *mqe; 8112 struct lpfc_pc_sli4_params *sli4_params; 8113 uint32_t mbox_tmo; 8114 8115 rc = 0; 8116 mqe = &mboxq->u.mqe; 8117 8118 /* Read the port's SLI4 Parameters port capabilities */ 8119 lpfc_pc_sli4_params(mboxq); 8120 if (!phba->sli4_hba.intr_enable) 8121 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8122 else { 8123 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); 8124 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 8125 } 8126 8127 if (unlikely(rc)) 8128 return 1; 8129 8130 sli4_params = &phba->sli4_hba.pc_sli4_params; 8131 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 8132 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 8133 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 8134 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 8135 &mqe->un.sli4_params); 8136 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 8137 &mqe->un.sli4_params); 8138 sli4_params->proto_types = mqe->un.sli4_params.word3; 8139 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 8140 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 8141 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 8142 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 8143 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 8144 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 8145 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 8146 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 8147 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 8148 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 8149 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 8150 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 8151 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 8152 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 8153 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 8154 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 8155 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 8156 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 8157 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 8158 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 8159 8160 /* Make sure that sge_supp_len can be handled by the driver */ 8161 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 8162 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 8163 8164 return rc; 8165 } 8166 8167 /** 8168 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 8169 * @phba: Pointer to HBA context object. 8170 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 8171 * 8172 * This function is called in the SLI4 code path to read the port's 8173 * sli4 capabilities. 8174 * 8175 * This function may be be called from any context that can block-wait 8176 * for the completion. The expectation is that this routine is called 8177 * typically from probe_one or from the online routine. 8178 **/ 8179 int 8180 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8181 { 8182 int rc; 8183 struct lpfc_mqe *mqe = &mboxq->u.mqe; 8184 struct lpfc_pc_sli4_params *sli4_params; 8185 int length; 8186 struct lpfc_sli4_parameters *mbx_sli4_parameters; 8187 8188 /* 8189 * By default, the driver assumes the SLI4 port requires RPI 8190 * header postings. The SLI4_PARAM response will correct this 8191 * assumption. 8192 */ 8193 phba->sli4_hba.rpi_hdrs_in_use = 1; 8194 8195 /* Read the port's SLI4 Config Parameters */ 8196 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 8197 sizeof(struct lpfc_sli4_cfg_mhdr)); 8198 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8199 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 8200 length, LPFC_SLI4_MBX_EMBED); 8201 if (!phba->sli4_hba.intr_enable) 8202 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8203 else 8204 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, 8205 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG)); 8206 if (unlikely(rc)) 8207 return rc; 8208 sli4_params = &phba->sli4_hba.pc_sli4_params; 8209 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 8210 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 8211 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 8212 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 8213 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 8214 mbx_sli4_parameters); 8215 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 8216 mbx_sli4_parameters); 8217 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 8218 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 8219 else 8220 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 8221 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 8222 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 8223 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 8224 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 8225 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 8226 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 8227 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 8228 mbx_sli4_parameters); 8229 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 8230 mbx_sli4_parameters); 8231 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 8232 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 8233 8234 /* Make sure that sge_supp_len can be handled by the driver */ 8235 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 8236 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 8237 8238 return 0; 8239 } 8240 8241 /** 8242 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 8243 * @pdev: pointer to PCI device 8244 * @pid: pointer to PCI device identifier 8245 * 8246 * This routine is to be called to attach a device with SLI-3 interface spec 8247 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 8248 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 8249 * information of the device and driver to see if the driver state that it can 8250 * support this kind of device. If the match is successful, the driver core 8251 * invokes this routine. If this routine determines it can claim the HBA, it 8252 * does all the initialization that it needs to do to handle the HBA properly. 8253 * 8254 * Return code 8255 * 0 - driver can claim the device 8256 * negative value - driver can not claim the device 8257 **/ 8258 static int __devinit 8259 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 8260 { 8261 struct lpfc_hba *phba; 8262 struct lpfc_vport *vport = NULL; 8263 struct Scsi_Host *shost = NULL; 8264 int error; 8265 uint32_t cfg_mode, intr_mode; 8266 8267 /* Allocate memory for HBA structure */ 8268 phba = lpfc_hba_alloc(pdev); 8269 if (!phba) 8270 return -ENOMEM; 8271 8272 /* Perform generic PCI device enabling operation */ 8273 error = lpfc_enable_pci_dev(phba); 8274 if (error) { 8275 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8276 "1401 Failed to enable pci device.\n"); 8277 goto out_free_phba; 8278 } 8279 8280 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 8281 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 8282 if (error) 8283 goto out_disable_pci_dev; 8284 8285 /* Set up SLI-3 specific device PCI memory space */ 8286 error = lpfc_sli_pci_mem_setup(phba); 8287 if (error) { 8288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8289 "1402 Failed to set up pci memory space.\n"); 8290 goto out_disable_pci_dev; 8291 } 8292 8293 /* Set up phase-1 common device driver resources */ 8294 error = lpfc_setup_driver_resource_phase1(phba); 8295 if (error) { 8296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8297 "1403 Failed to set up driver resource.\n"); 8298 goto out_unset_pci_mem_s3; 8299 } 8300 8301 /* Set up SLI-3 specific device driver resources */ 8302 error = lpfc_sli_driver_resource_setup(phba); 8303 if (error) { 8304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8305 "1404 Failed to set up driver resource.\n"); 8306 goto out_unset_pci_mem_s3; 8307 } 8308 8309 /* Initialize and populate the iocb list per host */ 8310 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 8311 if (error) { 8312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8313 "1405 Failed to initialize iocb list.\n"); 8314 goto out_unset_driver_resource_s3; 8315 } 8316 8317 /* Set up common device driver resources */ 8318 error = lpfc_setup_driver_resource_phase2(phba); 8319 if (error) { 8320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8321 "1406 Failed to set up driver resource.\n"); 8322 goto out_free_iocb_list; 8323 } 8324 8325 /* Create SCSI host to the physical port */ 8326 error = lpfc_create_shost(phba); 8327 if (error) { 8328 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8329 "1407 Failed to create scsi host.\n"); 8330 goto out_unset_driver_resource; 8331 } 8332 8333 /* Configure sysfs attributes */ 8334 vport = phba->pport; 8335 error = lpfc_alloc_sysfs_attr(vport); 8336 if (error) { 8337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8338 "1476 Failed to allocate sysfs attr\n"); 8339 goto out_destroy_shost; 8340 } 8341 8342 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8343 /* Now, trying to enable interrupt and bring up the device */ 8344 cfg_mode = phba->cfg_use_msi; 8345 while (true) { 8346 /* Put device to a known state before enabling interrupt */ 8347 lpfc_stop_port(phba); 8348 /* Configure and enable interrupt */ 8349 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 8350 if (intr_mode == LPFC_INTR_ERROR) { 8351 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8352 "0431 Failed to enable interrupt.\n"); 8353 error = -ENODEV; 8354 goto out_free_sysfs_attr; 8355 } 8356 /* SLI-3 HBA setup */ 8357 if (lpfc_sli_hba_setup(phba)) { 8358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8359 "1477 Failed to set up hba\n"); 8360 error = -ENODEV; 8361 goto out_remove_device; 8362 } 8363 8364 /* Wait 50ms for the interrupts of previous mailbox commands */ 8365 msleep(50); 8366 /* Check active interrupts on message signaled interrupts */ 8367 if (intr_mode == 0 || 8368 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 8369 /* Log the current active interrupt mode */ 8370 phba->intr_mode = intr_mode; 8371 lpfc_log_intr_mode(phba, intr_mode); 8372 break; 8373 } else { 8374 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8375 "0447 Configure interrupt mode (%d) " 8376 "failed active interrupt test.\n", 8377 intr_mode); 8378 /* Disable the current interrupt mode */ 8379 lpfc_sli_disable_intr(phba); 8380 /* Try next level of interrupt mode */ 8381 cfg_mode = --intr_mode; 8382 } 8383 } 8384 8385 /* Perform post initialization setup */ 8386 lpfc_post_init_setup(phba); 8387 8388 /* Check if there are static vports to be created. */ 8389 lpfc_create_static_vport(phba); 8390 8391 return 0; 8392 8393 out_remove_device: 8394 lpfc_unset_hba(phba); 8395 out_free_sysfs_attr: 8396 lpfc_free_sysfs_attr(vport); 8397 out_destroy_shost: 8398 lpfc_destroy_shost(phba); 8399 out_unset_driver_resource: 8400 lpfc_unset_driver_resource_phase2(phba); 8401 out_free_iocb_list: 8402 lpfc_free_iocb_list(phba); 8403 out_unset_driver_resource_s3: 8404 lpfc_sli_driver_resource_unset(phba); 8405 out_unset_pci_mem_s3: 8406 lpfc_sli_pci_mem_unset(phba); 8407 out_disable_pci_dev: 8408 lpfc_disable_pci_dev(phba); 8409 if (shost) 8410 scsi_host_put(shost); 8411 out_free_phba: 8412 lpfc_hba_free(phba); 8413 return error; 8414 } 8415 8416 /** 8417 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 8418 * @pdev: pointer to PCI device 8419 * 8420 * This routine is to be called to disattach a device with SLI-3 interface 8421 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 8422 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8423 * device to be removed from the PCI subsystem properly. 8424 **/ 8425 static void __devexit 8426 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 8427 { 8428 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8429 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8430 struct lpfc_vport **vports; 8431 struct lpfc_hba *phba = vport->phba; 8432 int i; 8433 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 8434 8435 spin_lock_irq(&phba->hbalock); 8436 vport->load_flag |= FC_UNLOADING; 8437 spin_unlock_irq(&phba->hbalock); 8438 8439 lpfc_free_sysfs_attr(vport); 8440 8441 /* Release all the vports against this physical port */ 8442 vports = lpfc_create_vport_work_array(phba); 8443 if (vports != NULL) 8444 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8445 fc_vport_terminate(vports[i]->fc_vport); 8446 lpfc_destroy_vport_work_array(phba, vports); 8447 8448 /* Remove FC host and then SCSI host with the physical port */ 8449 fc_remove_host(shost); 8450 scsi_remove_host(shost); 8451 lpfc_cleanup(vport); 8452 8453 /* 8454 * Bring down the SLI Layer. This step disable all interrupts, 8455 * clears the rings, discards all mailbox commands, and resets 8456 * the HBA. 8457 */ 8458 8459 /* HBA interrupt will be disabled after this call */ 8460 lpfc_sli_hba_down(phba); 8461 /* Stop kthread signal shall trigger work_done one more time */ 8462 kthread_stop(phba->worker_thread); 8463 /* Final cleanup of txcmplq and reset the HBA */ 8464 lpfc_sli_brdrestart(phba); 8465 8466 lpfc_stop_hba_timers(phba); 8467 spin_lock_irq(&phba->hbalock); 8468 list_del_init(&vport->listentry); 8469 spin_unlock_irq(&phba->hbalock); 8470 8471 lpfc_debugfs_terminate(vport); 8472 8473 /* Disable SR-IOV if enabled */ 8474 if (phba->cfg_sriov_nr_virtfn) 8475 pci_disable_sriov(pdev); 8476 8477 /* Disable interrupt */ 8478 lpfc_sli_disable_intr(phba); 8479 8480 pci_set_drvdata(pdev, NULL); 8481 scsi_host_put(shost); 8482 8483 /* 8484 * Call scsi_free before mem_free since scsi bufs are released to their 8485 * corresponding pools here. 8486 */ 8487 lpfc_scsi_free(phba); 8488 lpfc_mem_free_all(phba); 8489 8490 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 8491 phba->hbqslimp.virt, phba->hbqslimp.phys); 8492 8493 /* Free resources associated with SLI2 interface */ 8494 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 8495 phba->slim2p.virt, phba->slim2p.phys); 8496 8497 /* unmap adapter SLIM and Control Registers */ 8498 iounmap(phba->ctrl_regs_memmap_p); 8499 iounmap(phba->slim_memmap_p); 8500 8501 lpfc_hba_free(phba); 8502 8503 pci_release_selected_regions(pdev, bars); 8504 pci_disable_device(pdev); 8505 } 8506 8507 /** 8508 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 8509 * @pdev: pointer to PCI device 8510 * @msg: power management message 8511 * 8512 * This routine is to be called from the kernel's PCI subsystem to support 8513 * system Power Management (PM) to device with SLI-3 interface spec. When 8514 * PM invokes this method, it quiesces the device by stopping the driver's 8515 * worker thread for the device, turning off device's interrupt and DMA, 8516 * and bring the device offline. Note that as the driver implements the 8517 * minimum PM requirements to a power-aware driver's PM support for the 8518 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8519 * to the suspend() method call will be treated as SUSPEND and the driver will 8520 * fully reinitialize its device during resume() method call, the driver will 8521 * set device to PCI_D3hot state in PCI config space instead of setting it 8522 * according to the @msg provided by the PM. 8523 * 8524 * Return code 8525 * 0 - driver suspended the device 8526 * Error otherwise 8527 **/ 8528 static int 8529 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 8530 { 8531 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8532 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8533 8534 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8535 "0473 PCI device Power Management suspend.\n"); 8536 8537 /* Bring down the device */ 8538 lpfc_offline_prep(phba); 8539 lpfc_offline(phba); 8540 kthread_stop(phba->worker_thread); 8541 8542 /* Disable interrupt from device */ 8543 lpfc_sli_disable_intr(phba); 8544 8545 /* Save device state to PCI config space */ 8546 pci_save_state(pdev); 8547 pci_set_power_state(pdev, PCI_D3hot); 8548 8549 return 0; 8550 } 8551 8552 /** 8553 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 8554 * @pdev: pointer to PCI device 8555 * 8556 * This routine is to be called from the kernel's PCI subsystem to support 8557 * system Power Management (PM) to device with SLI-3 interface spec. When PM 8558 * invokes this method, it restores the device's PCI config space state and 8559 * fully reinitializes the device and brings it online. Note that as the 8560 * driver implements the minimum PM requirements to a power-aware driver's 8561 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 8562 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 8563 * driver will fully reinitialize its device during resume() method call, 8564 * the device will be set to PCI_D0 directly in PCI config space before 8565 * restoring the state. 8566 * 8567 * Return code 8568 * 0 - driver suspended the device 8569 * Error otherwise 8570 **/ 8571 static int 8572 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 8573 { 8574 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8575 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8576 uint32_t intr_mode; 8577 int error; 8578 8579 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8580 "0452 PCI device Power Management resume.\n"); 8581 8582 /* Restore device state from PCI config space */ 8583 pci_set_power_state(pdev, PCI_D0); 8584 pci_restore_state(pdev); 8585 8586 /* 8587 * As the new kernel behavior of pci_restore_state() API call clears 8588 * device saved_state flag, need to save the restored state again. 8589 */ 8590 pci_save_state(pdev); 8591 8592 if (pdev->is_busmaster) 8593 pci_set_master(pdev); 8594 8595 /* Startup the kernel thread for this host adapter. */ 8596 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8597 "lpfc_worker_%d", phba->brd_no); 8598 if (IS_ERR(phba->worker_thread)) { 8599 error = PTR_ERR(phba->worker_thread); 8600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8601 "0434 PM resume failed to start worker " 8602 "thread: error=x%x.\n", error); 8603 return error; 8604 } 8605 8606 /* Configure and enable interrupt */ 8607 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8608 if (intr_mode == LPFC_INTR_ERROR) { 8609 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8610 "0430 PM resume Failed to enable interrupt\n"); 8611 return -EIO; 8612 } else 8613 phba->intr_mode = intr_mode; 8614 8615 /* Restart HBA and bring it online */ 8616 lpfc_sli_brdrestart(phba); 8617 lpfc_online(phba); 8618 8619 /* Log the current active interrupt mode */ 8620 lpfc_log_intr_mode(phba, phba->intr_mode); 8621 8622 return 0; 8623 } 8624 8625 /** 8626 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 8627 * @phba: pointer to lpfc hba data structure. 8628 * 8629 * This routine is called to prepare the SLI3 device for PCI slot recover. It 8630 * aborts all the outstanding SCSI I/Os to the pci device. 8631 **/ 8632 static void 8633 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 8634 { 8635 struct lpfc_sli *psli = &phba->sli; 8636 struct lpfc_sli_ring *pring; 8637 8638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8639 "2723 PCI channel I/O abort preparing for recovery\n"); 8640 8641 /* 8642 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 8643 * and let the SCSI mid-layer to retry them to recover. 8644 */ 8645 pring = &psli->ring[psli->fcp_ring]; 8646 lpfc_sli_abort_iocb_ring(phba, pring); 8647 } 8648 8649 /** 8650 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 8651 * @phba: pointer to lpfc hba data structure. 8652 * 8653 * This routine is called to prepare the SLI3 device for PCI slot reset. It 8654 * disables the device interrupt and pci device, and aborts the internal FCP 8655 * pending I/Os. 8656 **/ 8657 static void 8658 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 8659 { 8660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8661 "2710 PCI channel disable preparing for reset\n"); 8662 8663 /* Block any management I/Os to the device */ 8664 lpfc_block_mgmt_io(phba); 8665 8666 /* Block all SCSI devices' I/Os on the host */ 8667 lpfc_scsi_dev_block(phba); 8668 8669 /* stop all timers */ 8670 lpfc_stop_hba_timers(phba); 8671 8672 /* Disable interrupt and pci device */ 8673 lpfc_sli_disable_intr(phba); 8674 pci_disable_device(phba->pcidev); 8675 8676 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8677 lpfc_sli_flush_fcp_rings(phba); 8678 } 8679 8680 /** 8681 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 8682 * @phba: pointer to lpfc hba data structure. 8683 * 8684 * This routine is called to prepare the SLI3 device for PCI slot permanently 8685 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8686 * pending I/Os. 8687 **/ 8688 static void 8689 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8690 { 8691 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8692 "2711 PCI channel permanent disable for failure\n"); 8693 /* Block all SCSI devices' I/Os on the host */ 8694 lpfc_scsi_dev_block(phba); 8695 8696 /* stop all timers */ 8697 lpfc_stop_hba_timers(phba); 8698 8699 /* Clean up all driver's outstanding SCSI I/Os */ 8700 lpfc_sli_flush_fcp_rings(phba); 8701 } 8702 8703 /** 8704 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 8705 * @pdev: pointer to PCI device. 8706 * @state: the current PCI connection state. 8707 * 8708 * This routine is called from the PCI subsystem for I/O error handling to 8709 * device with SLI-3 interface spec. This function is called by the PCI 8710 * subsystem after a PCI bus error affecting this device has been detected. 8711 * When this function is invoked, it will need to stop all the I/Os and 8712 * interrupt(s) to the device. Once that is done, it will return 8713 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 8714 * as desired. 8715 * 8716 * Return codes 8717 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 8718 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8719 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8720 **/ 8721 static pci_ers_result_t 8722 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 8723 { 8724 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8725 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8726 8727 switch (state) { 8728 case pci_channel_io_normal: 8729 /* Non-fatal error, prepare for recovery */ 8730 lpfc_sli_prep_dev_for_recover(phba); 8731 return PCI_ERS_RESULT_CAN_RECOVER; 8732 case pci_channel_io_frozen: 8733 /* Fatal error, prepare for slot reset */ 8734 lpfc_sli_prep_dev_for_reset(phba); 8735 return PCI_ERS_RESULT_NEED_RESET; 8736 case pci_channel_io_perm_failure: 8737 /* Permanent failure, prepare for device down */ 8738 lpfc_sli_prep_dev_for_perm_failure(phba); 8739 return PCI_ERS_RESULT_DISCONNECT; 8740 default: 8741 /* Unknown state, prepare and request slot reset */ 8742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8743 "0472 Unknown PCI error state: x%x\n", state); 8744 lpfc_sli_prep_dev_for_reset(phba); 8745 return PCI_ERS_RESULT_NEED_RESET; 8746 } 8747 } 8748 8749 /** 8750 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 8751 * @pdev: pointer to PCI device. 8752 * 8753 * This routine is called from the PCI subsystem for error handling to 8754 * device with SLI-3 interface spec. This is called after PCI bus has been 8755 * reset to restart the PCI card from scratch, as if from a cold-boot. 8756 * During the PCI subsystem error recovery, after driver returns 8757 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8758 * recovery and then call this routine before calling the .resume method 8759 * to recover the device. This function will initialize the HBA device, 8760 * enable the interrupt, but it will just put the HBA to offline state 8761 * without passing any I/O traffic. 8762 * 8763 * Return codes 8764 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8765 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8766 */ 8767 static pci_ers_result_t 8768 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 8769 { 8770 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8771 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8772 struct lpfc_sli *psli = &phba->sli; 8773 uint32_t intr_mode; 8774 8775 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8776 if (pci_enable_device_mem(pdev)) { 8777 printk(KERN_ERR "lpfc: Cannot re-enable " 8778 "PCI device after reset.\n"); 8779 return PCI_ERS_RESULT_DISCONNECT; 8780 } 8781 8782 pci_restore_state(pdev); 8783 8784 /* 8785 * As the new kernel behavior of pci_restore_state() API call clears 8786 * device saved_state flag, need to save the restored state again. 8787 */ 8788 pci_save_state(pdev); 8789 8790 if (pdev->is_busmaster) 8791 pci_set_master(pdev); 8792 8793 spin_lock_irq(&phba->hbalock); 8794 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8795 spin_unlock_irq(&phba->hbalock); 8796 8797 /* Configure and enable interrupt */ 8798 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8799 if (intr_mode == LPFC_INTR_ERROR) { 8800 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8801 "0427 Cannot re-enable interrupt after " 8802 "slot reset.\n"); 8803 return PCI_ERS_RESULT_DISCONNECT; 8804 } else 8805 phba->intr_mode = intr_mode; 8806 8807 /* Take device offline, it will perform cleanup */ 8808 lpfc_offline_prep(phba); 8809 lpfc_offline(phba); 8810 lpfc_sli_brdrestart(phba); 8811 8812 /* Log the current active interrupt mode */ 8813 lpfc_log_intr_mode(phba, phba->intr_mode); 8814 8815 return PCI_ERS_RESULT_RECOVERED; 8816 } 8817 8818 /** 8819 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 8820 * @pdev: pointer to PCI device 8821 * 8822 * This routine is called from the PCI subsystem for error handling to device 8823 * with SLI-3 interface spec. It is called when kernel error recovery tells 8824 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8825 * error recovery. After this call, traffic can start to flow from this device 8826 * again. 8827 */ 8828 static void 8829 lpfc_io_resume_s3(struct pci_dev *pdev) 8830 { 8831 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8832 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8833 8834 /* Bring device online, it will be no-op for non-fatal error resume */ 8835 lpfc_online(phba); 8836 8837 /* Clean up Advanced Error Reporting (AER) if needed */ 8838 if (phba->hba_flag & HBA_AER_ENABLED) 8839 pci_cleanup_aer_uncorrect_error_status(pdev); 8840 } 8841 8842 /** 8843 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 8844 * @phba: pointer to lpfc hba data structure. 8845 * 8846 * returns the number of ELS/CT IOCBs to reserve 8847 **/ 8848 int 8849 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 8850 { 8851 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 8852 8853 if (phba->sli_rev == LPFC_SLI_REV4) { 8854 if (max_xri <= 100) 8855 return 10; 8856 else if (max_xri <= 256) 8857 return 25; 8858 else if (max_xri <= 512) 8859 return 50; 8860 else if (max_xri <= 1024) 8861 return 100; 8862 else 8863 return 150; 8864 } else 8865 return 0; 8866 } 8867 8868 /** 8869 * lpfc_write_firmware - attempt to write a firmware image to the port 8870 * @phba: pointer to lpfc hba data structure. 8871 * @fw: pointer to firmware image returned from request_firmware. 8872 * 8873 * returns the number of bytes written if write is successful. 8874 * returns a negative error value if there were errors. 8875 * returns 0 if firmware matches currently active firmware on port. 8876 **/ 8877 int 8878 lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) 8879 { 8880 char fwrev[32]; 8881 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; 8882 struct list_head dma_buffer_list; 8883 int i, rc = 0; 8884 struct lpfc_dmabuf *dmabuf, *next; 8885 uint32_t offset = 0, temp_offset = 0; 8886 8887 INIT_LIST_HEAD(&dma_buffer_list); 8888 if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) || 8889 (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) || 8890 (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || 8891 (image->size != fw->size)) { 8892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8893 "3022 Invalid FW image found. " 8894 "Magic:%d Type:%x ID:%x\n", 8895 image->magic_number, 8896 bf_get(lpfc_grp_hdr_file_type, image), 8897 bf_get(lpfc_grp_hdr_id, image)); 8898 return -EINVAL; 8899 } 8900 lpfc_decode_firmware_rev(phba, fwrev, 1); 8901 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 8902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8903 "3023 Updating Firmware. Current Version:%s " 8904 "New Version:%s\n", 8905 fwrev, image->revision); 8906 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 8907 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 8908 GFP_KERNEL); 8909 if (!dmabuf) { 8910 rc = -ENOMEM; 8911 goto out; 8912 } 8913 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 8914 SLI4_PAGE_SIZE, 8915 &dmabuf->phys, 8916 GFP_KERNEL); 8917 if (!dmabuf->virt) { 8918 kfree(dmabuf); 8919 rc = -ENOMEM; 8920 goto out; 8921 } 8922 list_add_tail(&dmabuf->list, &dma_buffer_list); 8923 } 8924 while (offset < fw->size) { 8925 temp_offset = offset; 8926 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 8927 if (offset + SLI4_PAGE_SIZE > fw->size) { 8928 temp_offset += fw->size - offset; 8929 memcpy(dmabuf->virt, 8930 fw->data + temp_offset, 8931 fw->size - offset); 8932 break; 8933 } 8934 memcpy(dmabuf->virt, fw->data + temp_offset, 8935 SLI4_PAGE_SIZE); 8936 temp_offset += SLI4_PAGE_SIZE; 8937 } 8938 rc = lpfc_wr_object(phba, &dma_buffer_list, 8939 (fw->size - offset), &offset); 8940 if (rc) { 8941 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8942 "3024 Firmware update failed. " 8943 "%d\n", rc); 8944 goto out; 8945 } 8946 } 8947 rc = offset; 8948 } 8949 out: 8950 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 8951 list_del(&dmabuf->list); 8952 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 8953 dmabuf->virt, dmabuf->phys); 8954 kfree(dmabuf); 8955 } 8956 return rc; 8957 } 8958 8959 /** 8960 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8961 * @pdev: pointer to PCI device 8962 * @pid: pointer to PCI device identifier 8963 * 8964 * This routine is called from the kernel's PCI subsystem to device with 8965 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8966 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 8967 * information of the device and driver to see if the driver state that it 8968 * can support this kind of device. If the match is successful, the driver 8969 * core invokes this routine. If this routine determines it can claim the HBA, 8970 * it does all the initialization that it needs to do to handle the HBA 8971 * properly. 8972 * 8973 * Return code 8974 * 0 - driver can claim the device 8975 * negative value - driver can not claim the device 8976 **/ 8977 static int __devinit 8978 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 8979 { 8980 struct lpfc_hba *phba; 8981 struct lpfc_vport *vport = NULL; 8982 struct Scsi_Host *shost = NULL; 8983 int error; 8984 uint32_t cfg_mode, intr_mode; 8985 int mcnt; 8986 int adjusted_fcp_eq_count; 8987 int fcp_qidx; 8988 const struct firmware *fw; 8989 uint8_t file_name[16]; 8990 8991 /* Allocate memory for HBA structure */ 8992 phba = lpfc_hba_alloc(pdev); 8993 if (!phba) 8994 return -ENOMEM; 8995 8996 /* Perform generic PCI device enabling operation */ 8997 error = lpfc_enable_pci_dev(phba); 8998 if (error) { 8999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9000 "1409 Failed to enable pci device.\n"); 9001 goto out_free_phba; 9002 } 9003 9004 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 9005 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 9006 if (error) 9007 goto out_disable_pci_dev; 9008 9009 /* Set up SLI-4 specific device PCI memory space */ 9010 error = lpfc_sli4_pci_mem_setup(phba); 9011 if (error) { 9012 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9013 "1410 Failed to set up pci memory space.\n"); 9014 goto out_disable_pci_dev; 9015 } 9016 9017 /* Set up phase-1 common device driver resources */ 9018 error = lpfc_setup_driver_resource_phase1(phba); 9019 if (error) { 9020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9021 "1411 Failed to set up driver resource.\n"); 9022 goto out_unset_pci_mem_s4; 9023 } 9024 9025 /* Set up SLI-4 Specific device driver resources */ 9026 error = lpfc_sli4_driver_resource_setup(phba); 9027 if (error) { 9028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9029 "1412 Failed to set up driver resource.\n"); 9030 goto out_unset_pci_mem_s4; 9031 } 9032 9033 /* Initialize and populate the iocb list per host */ 9034 9035 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9036 "2821 initialize iocb list %d.\n", 9037 phba->cfg_iocb_cnt*1024); 9038 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 9039 9040 if (error) { 9041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9042 "1413 Failed to initialize iocb list.\n"); 9043 goto out_unset_driver_resource_s4; 9044 } 9045 9046 INIT_LIST_HEAD(&phba->active_rrq_list); 9047 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 9048 9049 /* Set up common device driver resources */ 9050 error = lpfc_setup_driver_resource_phase2(phba); 9051 if (error) { 9052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9053 "1414 Failed to set up driver resource.\n"); 9054 goto out_free_iocb_list; 9055 } 9056 9057 /* Create SCSI host to the physical port */ 9058 error = lpfc_create_shost(phba); 9059 if (error) { 9060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9061 "1415 Failed to create scsi host.\n"); 9062 goto out_unset_driver_resource; 9063 } 9064 9065 /* Configure sysfs attributes */ 9066 vport = phba->pport; 9067 error = lpfc_alloc_sysfs_attr(vport); 9068 if (error) { 9069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9070 "1416 Failed to allocate sysfs attr\n"); 9071 goto out_destroy_shost; 9072 } 9073 9074 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9075 /* Now, trying to enable interrupt and bring up the device */ 9076 cfg_mode = phba->cfg_use_msi; 9077 while (true) { 9078 /* Put device to a known state before enabling interrupt */ 9079 lpfc_stop_port(phba); 9080 /* Configure and enable interrupt */ 9081 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 9082 if (intr_mode == LPFC_INTR_ERROR) { 9083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9084 "0426 Failed to enable interrupt.\n"); 9085 error = -ENODEV; 9086 goto out_free_sysfs_attr; 9087 } 9088 /* Default to single EQ for non-MSI-X */ 9089 if (phba->intr_type != MSIX) 9090 adjusted_fcp_eq_count = 0; 9091 else if (phba->sli4_hba.msix_vec_nr < 9092 phba->cfg_fcp_eq_count + 1) 9093 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9094 else 9095 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count; 9096 /* Free unused EQs */ 9097 for (fcp_qidx = adjusted_fcp_eq_count; 9098 fcp_qidx < phba->cfg_fcp_eq_count; 9099 fcp_qidx++) { 9100 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 9101 /* do not delete the first fcp_cq */ 9102 if (fcp_qidx) 9103 lpfc_sli4_queue_free( 9104 phba->sli4_hba.fcp_cq[fcp_qidx]); 9105 } 9106 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count; 9107 /* Set up SLI-4 HBA */ 9108 if (lpfc_sli4_hba_setup(phba)) { 9109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9110 "1421 Failed to set up hba\n"); 9111 error = -ENODEV; 9112 goto out_disable_intr; 9113 } 9114 9115 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 9116 if (intr_mode != 0) 9117 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 9118 LPFC_ACT_INTR_CNT); 9119 9120 /* Check active interrupts received only for MSI/MSI-X */ 9121 if (intr_mode == 0 || 9122 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 9123 /* Log the current active interrupt mode */ 9124 phba->intr_mode = intr_mode; 9125 lpfc_log_intr_mode(phba, intr_mode); 9126 break; 9127 } 9128 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9129 "0451 Configure interrupt mode (%d) " 9130 "failed active interrupt test.\n", 9131 intr_mode); 9132 /* Unset the previous SLI-4 HBA setup. */ 9133 /* 9134 * TODO: Is this operation compatible with IF TYPE 2 9135 * devices? All port state is deleted and cleared. 9136 */ 9137 lpfc_sli4_unset_hba(phba); 9138 /* Try next level of interrupt mode */ 9139 cfg_mode = --intr_mode; 9140 } 9141 9142 /* Perform post initialization setup */ 9143 lpfc_post_init_setup(phba); 9144 9145 /* check for firmware upgrade or downgrade */ 9146 snprintf(file_name, 16, "%s.grp", phba->ModelName); 9147 error = request_firmware(&fw, file_name, &phba->pcidev->dev); 9148 if (!error) { 9149 lpfc_write_firmware(phba, fw); 9150 release_firmware(fw); 9151 } 9152 9153 /* Check if there are static vports to be created. */ 9154 lpfc_create_static_vport(phba); 9155 return 0; 9156 9157 out_disable_intr: 9158 lpfc_sli4_disable_intr(phba); 9159 out_free_sysfs_attr: 9160 lpfc_free_sysfs_attr(vport); 9161 out_destroy_shost: 9162 lpfc_destroy_shost(phba); 9163 out_unset_driver_resource: 9164 lpfc_unset_driver_resource_phase2(phba); 9165 out_free_iocb_list: 9166 lpfc_free_iocb_list(phba); 9167 out_unset_driver_resource_s4: 9168 lpfc_sli4_driver_resource_unset(phba); 9169 out_unset_pci_mem_s4: 9170 lpfc_sli4_pci_mem_unset(phba); 9171 out_disable_pci_dev: 9172 lpfc_disable_pci_dev(phba); 9173 if (shost) 9174 scsi_host_put(shost); 9175 out_free_phba: 9176 lpfc_hba_free(phba); 9177 return error; 9178 } 9179 9180 /** 9181 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 9182 * @pdev: pointer to PCI device 9183 * 9184 * This routine is called from the kernel's PCI subsystem to device with 9185 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 9186 * removed from PCI bus, it performs all the necessary cleanup for the HBA 9187 * device to be removed from the PCI subsystem properly. 9188 **/ 9189 static void __devexit 9190 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 9191 { 9192 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9193 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 9194 struct lpfc_vport **vports; 9195 struct lpfc_hba *phba = vport->phba; 9196 int i; 9197 9198 /* Mark the device unloading flag */ 9199 spin_lock_irq(&phba->hbalock); 9200 vport->load_flag |= FC_UNLOADING; 9201 spin_unlock_irq(&phba->hbalock); 9202 9203 /* Free the HBA sysfs attributes */ 9204 lpfc_free_sysfs_attr(vport); 9205 9206 /* Release all the vports against this physical port */ 9207 vports = lpfc_create_vport_work_array(phba); 9208 if (vports != NULL) 9209 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 9210 fc_vport_terminate(vports[i]->fc_vport); 9211 lpfc_destroy_vport_work_array(phba, vports); 9212 9213 /* Remove FC host and then SCSI host with the physical port */ 9214 fc_remove_host(shost); 9215 scsi_remove_host(shost); 9216 9217 /* Perform cleanup on the physical port */ 9218 lpfc_cleanup(vport); 9219 9220 /* 9221 * Bring down the SLI Layer. This step disables all interrupts, 9222 * clears the rings, discards all mailbox commands, and resets 9223 * the HBA FCoE function. 9224 */ 9225 lpfc_debugfs_terminate(vport); 9226 lpfc_sli4_hba_unset(phba); 9227 9228 spin_lock_irq(&phba->hbalock); 9229 list_del_init(&vport->listentry); 9230 spin_unlock_irq(&phba->hbalock); 9231 9232 /* Perform scsi free before driver resource_unset since scsi 9233 * buffers are released to their corresponding pools here. 9234 */ 9235 lpfc_scsi_free(phba); 9236 lpfc_sli4_driver_resource_unset(phba); 9237 9238 /* Unmap adapter Control and Doorbell registers */ 9239 lpfc_sli4_pci_mem_unset(phba); 9240 9241 /* Release PCI resources and disable device's PCI function */ 9242 scsi_host_put(shost); 9243 lpfc_disable_pci_dev(phba); 9244 9245 /* Finally, free the driver's device data structure */ 9246 lpfc_hba_free(phba); 9247 9248 return; 9249 } 9250 9251 /** 9252 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 9253 * @pdev: pointer to PCI device 9254 * @msg: power management message 9255 * 9256 * This routine is called from the kernel's PCI subsystem to support system 9257 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 9258 * this method, it quiesces the device by stopping the driver's worker 9259 * thread for the device, turning off device's interrupt and DMA, and bring 9260 * the device offline. Note that as the driver implements the minimum PM 9261 * requirements to a power-aware driver's PM support for suspend/resume -- all 9262 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 9263 * method call will be treated as SUSPEND and the driver will fully 9264 * reinitialize its device during resume() method call, the driver will set 9265 * device to PCI_D3hot state in PCI config space instead of setting it 9266 * according to the @msg provided by the PM. 9267 * 9268 * Return code 9269 * 0 - driver suspended the device 9270 * Error otherwise 9271 **/ 9272 static int 9273 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 9274 { 9275 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9276 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9277 9278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9279 "2843 PCI device Power Management suspend.\n"); 9280 9281 /* Bring down the device */ 9282 lpfc_offline_prep(phba); 9283 lpfc_offline(phba); 9284 kthread_stop(phba->worker_thread); 9285 9286 /* Disable interrupt from device */ 9287 lpfc_sli4_disable_intr(phba); 9288 9289 /* Save device state to PCI config space */ 9290 pci_save_state(pdev); 9291 pci_set_power_state(pdev, PCI_D3hot); 9292 9293 return 0; 9294 } 9295 9296 /** 9297 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 9298 * @pdev: pointer to PCI device 9299 * 9300 * This routine is called from the kernel's PCI subsystem to support system 9301 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 9302 * this method, it restores the device's PCI config space state and fully 9303 * reinitializes the device and brings it online. Note that as the driver 9304 * implements the minimum PM requirements to a power-aware driver's PM for 9305 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 9306 * to the suspend() method call will be treated as SUSPEND and the driver 9307 * will fully reinitialize its device during resume() method call, the device 9308 * will be set to PCI_D0 directly in PCI config space before restoring the 9309 * state. 9310 * 9311 * Return code 9312 * 0 - driver suspended the device 9313 * Error otherwise 9314 **/ 9315 static int 9316 lpfc_pci_resume_one_s4(struct pci_dev *pdev) 9317 { 9318 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9319 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9320 uint32_t intr_mode; 9321 int error; 9322 9323 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9324 "0292 PCI device Power Management resume.\n"); 9325 9326 /* Restore device state from PCI config space */ 9327 pci_set_power_state(pdev, PCI_D0); 9328 pci_restore_state(pdev); 9329 9330 /* 9331 * As the new kernel behavior of pci_restore_state() API call clears 9332 * device saved_state flag, need to save the restored state again. 9333 */ 9334 pci_save_state(pdev); 9335 9336 if (pdev->is_busmaster) 9337 pci_set_master(pdev); 9338 9339 /* Startup the kernel thread for this host adapter. */ 9340 phba->worker_thread = kthread_run(lpfc_do_work, phba, 9341 "lpfc_worker_%d", phba->brd_no); 9342 if (IS_ERR(phba->worker_thread)) { 9343 error = PTR_ERR(phba->worker_thread); 9344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9345 "0293 PM resume failed to start worker " 9346 "thread: error=x%x.\n", error); 9347 return error; 9348 } 9349 9350 /* Configure and enable interrupt */ 9351 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 9352 if (intr_mode == LPFC_INTR_ERROR) { 9353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9354 "0294 PM resume Failed to enable interrupt\n"); 9355 return -EIO; 9356 } else 9357 phba->intr_mode = intr_mode; 9358 9359 /* Restart HBA and bring it online */ 9360 lpfc_sli_brdrestart(phba); 9361 lpfc_online(phba); 9362 9363 /* Log the current active interrupt mode */ 9364 lpfc_log_intr_mode(phba, phba->intr_mode); 9365 9366 return 0; 9367 } 9368 9369 /** 9370 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 9371 * @phba: pointer to lpfc hba data structure. 9372 * 9373 * This routine is called to prepare the SLI4 device for PCI slot recover. It 9374 * aborts all the outstanding SCSI I/Os to the pci device. 9375 **/ 9376 static void 9377 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 9378 { 9379 struct lpfc_sli *psli = &phba->sli; 9380 struct lpfc_sli_ring *pring; 9381 9382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9383 "2828 PCI channel I/O abort preparing for recovery\n"); 9384 /* 9385 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 9386 * and let the SCSI mid-layer to retry them to recover. 9387 */ 9388 pring = &psli->ring[psli->fcp_ring]; 9389 lpfc_sli_abort_iocb_ring(phba, pring); 9390 } 9391 9392 /** 9393 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 9394 * @phba: pointer to lpfc hba data structure. 9395 * 9396 * This routine is called to prepare the SLI4 device for PCI slot reset. It 9397 * disables the device interrupt and pci device, and aborts the internal FCP 9398 * pending I/Os. 9399 **/ 9400 static void 9401 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 9402 { 9403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9404 "2826 PCI channel disable preparing for reset\n"); 9405 9406 /* Block any management I/Os to the device */ 9407 lpfc_block_mgmt_io(phba); 9408 9409 /* Block all SCSI devices' I/Os on the host */ 9410 lpfc_scsi_dev_block(phba); 9411 9412 /* stop all timers */ 9413 lpfc_stop_hba_timers(phba); 9414 9415 /* Disable interrupt and pci device */ 9416 lpfc_sli4_disable_intr(phba); 9417 pci_disable_device(phba->pcidev); 9418 9419 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 9420 lpfc_sli_flush_fcp_rings(phba); 9421 } 9422 9423 /** 9424 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 9425 * @phba: pointer to lpfc hba data structure. 9426 * 9427 * This routine is called to prepare the SLI4 device for PCI slot permanently 9428 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 9429 * pending I/Os. 9430 **/ 9431 static void 9432 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 9433 { 9434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9435 "2827 PCI channel permanent disable for failure\n"); 9436 9437 /* Block all SCSI devices' I/Os on the host */ 9438 lpfc_scsi_dev_block(phba); 9439 9440 /* stop all timers */ 9441 lpfc_stop_hba_timers(phba); 9442 9443 /* Clean up all driver's outstanding SCSI I/Os */ 9444 lpfc_sli_flush_fcp_rings(phba); 9445 } 9446 9447 /** 9448 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 9449 * @pdev: pointer to PCI device. 9450 * @state: the current PCI connection state. 9451 * 9452 * This routine is called from the PCI subsystem for error handling to device 9453 * with SLI-4 interface spec. This function is called by the PCI subsystem 9454 * after a PCI bus error affecting this device has been detected. When this 9455 * function is invoked, it will need to stop all the I/Os and interrupt(s) 9456 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 9457 * for the PCI subsystem to perform proper recovery as desired. 9458 * 9459 * Return codes 9460 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9461 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9462 **/ 9463 static pci_ers_result_t 9464 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 9465 { 9466 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9467 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9468 9469 switch (state) { 9470 case pci_channel_io_normal: 9471 /* Non-fatal error, prepare for recovery */ 9472 lpfc_sli4_prep_dev_for_recover(phba); 9473 return PCI_ERS_RESULT_CAN_RECOVER; 9474 case pci_channel_io_frozen: 9475 /* Fatal error, prepare for slot reset */ 9476 lpfc_sli4_prep_dev_for_reset(phba); 9477 return PCI_ERS_RESULT_NEED_RESET; 9478 case pci_channel_io_perm_failure: 9479 /* Permanent failure, prepare for device down */ 9480 lpfc_sli4_prep_dev_for_perm_failure(phba); 9481 return PCI_ERS_RESULT_DISCONNECT; 9482 default: 9483 /* Unknown state, prepare and request slot reset */ 9484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9485 "2825 Unknown PCI error state: x%x\n", state); 9486 lpfc_sli4_prep_dev_for_reset(phba); 9487 return PCI_ERS_RESULT_NEED_RESET; 9488 } 9489 } 9490 9491 /** 9492 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 9493 * @pdev: pointer to PCI device. 9494 * 9495 * This routine is called from the PCI subsystem for error handling to device 9496 * with SLI-4 interface spec. It is called after PCI bus has been reset to 9497 * restart the PCI card from scratch, as if from a cold-boot. During the 9498 * PCI subsystem error recovery, after the driver returns 9499 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 9500 * recovery and then call this routine before calling the .resume method to 9501 * recover the device. This function will initialize the HBA device, enable 9502 * the interrupt, but it will just put the HBA to offline state without 9503 * passing any I/O traffic. 9504 * 9505 * Return codes 9506 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9507 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9508 */ 9509 static pci_ers_result_t 9510 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 9511 { 9512 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9513 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9514 struct lpfc_sli *psli = &phba->sli; 9515 uint32_t intr_mode; 9516 9517 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 9518 if (pci_enable_device_mem(pdev)) { 9519 printk(KERN_ERR "lpfc: Cannot re-enable " 9520 "PCI device after reset.\n"); 9521 return PCI_ERS_RESULT_DISCONNECT; 9522 } 9523 9524 pci_restore_state(pdev); 9525 9526 /* 9527 * As the new kernel behavior of pci_restore_state() API call clears 9528 * device saved_state flag, need to save the restored state again. 9529 */ 9530 pci_save_state(pdev); 9531 9532 if (pdev->is_busmaster) 9533 pci_set_master(pdev); 9534 9535 spin_lock_irq(&phba->hbalock); 9536 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 9537 spin_unlock_irq(&phba->hbalock); 9538 9539 /* Configure and enable interrupt */ 9540 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 9541 if (intr_mode == LPFC_INTR_ERROR) { 9542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9543 "2824 Cannot re-enable interrupt after " 9544 "slot reset.\n"); 9545 return PCI_ERS_RESULT_DISCONNECT; 9546 } else 9547 phba->intr_mode = intr_mode; 9548 9549 /* Log the current active interrupt mode */ 9550 lpfc_log_intr_mode(phba, phba->intr_mode); 9551 9552 return PCI_ERS_RESULT_RECOVERED; 9553 } 9554 9555 /** 9556 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 9557 * @pdev: pointer to PCI device 9558 * 9559 * This routine is called from the PCI subsystem for error handling to device 9560 * with SLI-4 interface spec. It is called when kernel error recovery tells 9561 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 9562 * error recovery. After this call, traffic can start to flow from this device 9563 * again. 9564 **/ 9565 static void 9566 lpfc_io_resume_s4(struct pci_dev *pdev) 9567 { 9568 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9569 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9570 9571 /* 9572 * In case of slot reset, as function reset is performed through 9573 * mailbox command which needs DMA to be enabled, this operation 9574 * has to be moved to the io resume phase. Taking device offline 9575 * will perform the necessary cleanup. 9576 */ 9577 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 9578 /* Perform device reset */ 9579 lpfc_offline_prep(phba); 9580 lpfc_offline(phba); 9581 lpfc_sli_brdrestart(phba); 9582 /* Bring the device back online */ 9583 lpfc_online(phba); 9584 } 9585 9586 /* Clean up Advanced Error Reporting (AER) if needed */ 9587 if (phba->hba_flag & HBA_AER_ENABLED) 9588 pci_cleanup_aer_uncorrect_error_status(pdev); 9589 } 9590 9591 /** 9592 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 9593 * @pdev: pointer to PCI device 9594 * @pid: pointer to PCI device identifier 9595 * 9596 * This routine is to be registered to the kernel's PCI subsystem. When an 9597 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 9598 * at PCI device-specific information of the device and driver to see if the 9599 * driver state that it can support this kind of device. If the match is 9600 * successful, the driver core invokes this routine. This routine dispatches 9601 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 9602 * do all the initialization that it needs to do to handle the HBA device 9603 * properly. 9604 * 9605 * Return code 9606 * 0 - driver can claim the device 9607 * negative value - driver can not claim the device 9608 **/ 9609 static int __devinit 9610 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 9611 { 9612 int rc; 9613 struct lpfc_sli_intf intf; 9614 9615 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 9616 return -ENODEV; 9617 9618 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 9619 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 9620 rc = lpfc_pci_probe_one_s4(pdev, pid); 9621 else 9622 rc = lpfc_pci_probe_one_s3(pdev, pid); 9623 9624 return rc; 9625 } 9626 9627 /** 9628 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 9629 * @pdev: pointer to PCI device 9630 * 9631 * This routine is to be registered to the kernel's PCI subsystem. When an 9632 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 9633 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 9634 * remove routine, which will perform all the necessary cleanup for the 9635 * device to be removed from the PCI subsystem properly. 9636 **/ 9637 static void __devexit 9638 lpfc_pci_remove_one(struct pci_dev *pdev) 9639 { 9640 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9641 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9642 9643 switch (phba->pci_dev_grp) { 9644 case LPFC_PCI_DEV_LP: 9645 lpfc_pci_remove_one_s3(pdev); 9646 break; 9647 case LPFC_PCI_DEV_OC: 9648 lpfc_pci_remove_one_s4(pdev); 9649 break; 9650 default: 9651 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9652 "1424 Invalid PCI device group: 0x%x\n", 9653 phba->pci_dev_grp); 9654 break; 9655 } 9656 return; 9657 } 9658 9659 /** 9660 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 9661 * @pdev: pointer to PCI device 9662 * @msg: power management message 9663 * 9664 * This routine is to be registered to the kernel's PCI subsystem to support 9665 * system Power Management (PM). When PM invokes this method, it dispatches 9666 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 9667 * suspend the device. 9668 * 9669 * Return code 9670 * 0 - driver suspended the device 9671 * Error otherwise 9672 **/ 9673 static int 9674 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 9675 { 9676 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9677 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9678 int rc = -ENODEV; 9679 9680 switch (phba->pci_dev_grp) { 9681 case LPFC_PCI_DEV_LP: 9682 rc = lpfc_pci_suspend_one_s3(pdev, msg); 9683 break; 9684 case LPFC_PCI_DEV_OC: 9685 rc = lpfc_pci_suspend_one_s4(pdev, msg); 9686 break; 9687 default: 9688 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9689 "1425 Invalid PCI device group: 0x%x\n", 9690 phba->pci_dev_grp); 9691 break; 9692 } 9693 return rc; 9694 } 9695 9696 /** 9697 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 9698 * @pdev: pointer to PCI device 9699 * 9700 * This routine is to be registered to the kernel's PCI subsystem to support 9701 * system Power Management (PM). When PM invokes this method, it dispatches 9702 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 9703 * resume the device. 9704 * 9705 * Return code 9706 * 0 - driver suspended the device 9707 * Error otherwise 9708 **/ 9709 static int 9710 lpfc_pci_resume_one(struct pci_dev *pdev) 9711 { 9712 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9713 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9714 int rc = -ENODEV; 9715 9716 switch (phba->pci_dev_grp) { 9717 case LPFC_PCI_DEV_LP: 9718 rc = lpfc_pci_resume_one_s3(pdev); 9719 break; 9720 case LPFC_PCI_DEV_OC: 9721 rc = lpfc_pci_resume_one_s4(pdev); 9722 break; 9723 default: 9724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9725 "1426 Invalid PCI device group: 0x%x\n", 9726 phba->pci_dev_grp); 9727 break; 9728 } 9729 return rc; 9730 } 9731 9732 /** 9733 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 9734 * @pdev: pointer to PCI device. 9735 * @state: the current PCI connection state. 9736 * 9737 * This routine is registered to the PCI subsystem for error handling. This 9738 * function is called by the PCI subsystem after a PCI bus error affecting 9739 * this device has been detected. When this routine is invoked, it dispatches 9740 * the action to the proper SLI-3 or SLI-4 device error detected handling 9741 * routine, which will perform the proper error detected operation. 9742 * 9743 * Return codes 9744 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9745 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9746 **/ 9747 static pci_ers_result_t 9748 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9749 { 9750 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9751 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9752 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9753 9754 switch (phba->pci_dev_grp) { 9755 case LPFC_PCI_DEV_LP: 9756 rc = lpfc_io_error_detected_s3(pdev, state); 9757 break; 9758 case LPFC_PCI_DEV_OC: 9759 rc = lpfc_io_error_detected_s4(pdev, state); 9760 break; 9761 default: 9762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9763 "1427 Invalid PCI device group: 0x%x\n", 9764 phba->pci_dev_grp); 9765 break; 9766 } 9767 return rc; 9768 } 9769 9770 /** 9771 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 9772 * @pdev: pointer to PCI device. 9773 * 9774 * This routine is registered to the PCI subsystem for error handling. This 9775 * function is called after PCI bus has been reset to restart the PCI card 9776 * from scratch, as if from a cold-boot. When this routine is invoked, it 9777 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 9778 * routine, which will perform the proper device reset. 9779 * 9780 * Return codes 9781 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 9782 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9783 **/ 9784 static pci_ers_result_t 9785 lpfc_io_slot_reset(struct pci_dev *pdev) 9786 { 9787 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9788 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9789 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 9790 9791 switch (phba->pci_dev_grp) { 9792 case LPFC_PCI_DEV_LP: 9793 rc = lpfc_io_slot_reset_s3(pdev); 9794 break; 9795 case LPFC_PCI_DEV_OC: 9796 rc = lpfc_io_slot_reset_s4(pdev); 9797 break; 9798 default: 9799 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9800 "1428 Invalid PCI device group: 0x%x\n", 9801 phba->pci_dev_grp); 9802 break; 9803 } 9804 return rc; 9805 } 9806 9807 /** 9808 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 9809 * @pdev: pointer to PCI device 9810 * 9811 * This routine is registered to the PCI subsystem for error handling. It 9812 * is called when kernel error recovery tells the lpfc driver that it is 9813 * OK to resume normal PCI operation after PCI bus error recovery. When 9814 * this routine is invoked, it dispatches the action to the proper SLI-3 9815 * or SLI-4 device io_resume routine, which will resume the device operation. 9816 **/ 9817 static void 9818 lpfc_io_resume(struct pci_dev *pdev) 9819 { 9820 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9821 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9822 9823 switch (phba->pci_dev_grp) { 9824 case LPFC_PCI_DEV_LP: 9825 lpfc_io_resume_s3(pdev); 9826 break; 9827 case LPFC_PCI_DEV_OC: 9828 lpfc_io_resume_s4(pdev); 9829 break; 9830 default: 9831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9832 "1429 Invalid PCI device group: 0x%x\n", 9833 phba->pci_dev_grp); 9834 break; 9835 } 9836 return; 9837 } 9838 9839 static struct pci_device_id lpfc_id_table[] = { 9840 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 9841 PCI_ANY_ID, PCI_ANY_ID, }, 9842 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 9843 PCI_ANY_ID, PCI_ANY_ID, }, 9844 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 9845 PCI_ANY_ID, PCI_ANY_ID, }, 9846 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 9847 PCI_ANY_ID, PCI_ANY_ID, }, 9848 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 9849 PCI_ANY_ID, PCI_ANY_ID, }, 9850 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 9851 PCI_ANY_ID, PCI_ANY_ID, }, 9852 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 9853 PCI_ANY_ID, PCI_ANY_ID, }, 9854 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 9855 PCI_ANY_ID, PCI_ANY_ID, }, 9856 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 9857 PCI_ANY_ID, PCI_ANY_ID, }, 9858 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 9859 PCI_ANY_ID, PCI_ANY_ID, }, 9860 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 9861 PCI_ANY_ID, PCI_ANY_ID, }, 9862 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 9863 PCI_ANY_ID, PCI_ANY_ID, }, 9864 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 9865 PCI_ANY_ID, PCI_ANY_ID, }, 9866 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 9867 PCI_ANY_ID, PCI_ANY_ID, }, 9868 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 9869 PCI_ANY_ID, PCI_ANY_ID, }, 9870 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 9871 PCI_ANY_ID, PCI_ANY_ID, }, 9872 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 9873 PCI_ANY_ID, PCI_ANY_ID, }, 9874 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 9875 PCI_ANY_ID, PCI_ANY_ID, }, 9876 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 9877 PCI_ANY_ID, PCI_ANY_ID, }, 9878 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 9879 PCI_ANY_ID, PCI_ANY_ID, }, 9880 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 9881 PCI_ANY_ID, PCI_ANY_ID, }, 9882 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 9883 PCI_ANY_ID, PCI_ANY_ID, }, 9884 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 9885 PCI_ANY_ID, PCI_ANY_ID, }, 9886 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 9887 PCI_ANY_ID, PCI_ANY_ID, }, 9888 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 9889 PCI_ANY_ID, PCI_ANY_ID, }, 9890 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 9891 PCI_ANY_ID, PCI_ANY_ID, }, 9892 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 9893 PCI_ANY_ID, PCI_ANY_ID, }, 9894 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 9895 PCI_ANY_ID, PCI_ANY_ID, }, 9896 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 9897 PCI_ANY_ID, PCI_ANY_ID, }, 9898 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 9899 PCI_ANY_ID, PCI_ANY_ID, }, 9900 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 9901 PCI_ANY_ID, PCI_ANY_ID, }, 9902 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 9903 PCI_ANY_ID, PCI_ANY_ID, }, 9904 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 9905 PCI_ANY_ID, PCI_ANY_ID, }, 9906 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 9907 PCI_ANY_ID, PCI_ANY_ID, }, 9908 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 9909 PCI_ANY_ID, PCI_ANY_ID, }, 9910 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 9911 PCI_ANY_ID, PCI_ANY_ID, }, 9912 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 9913 PCI_ANY_ID, PCI_ANY_ID, }, 9914 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 9915 PCI_ANY_ID, PCI_ANY_ID, }, 9916 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 9917 PCI_ANY_ID, PCI_ANY_ID, }, 9918 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 9919 PCI_ANY_ID, PCI_ANY_ID, }, 9920 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, 9921 PCI_ANY_ID, PCI_ANY_ID, }, 9922 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, 9923 PCI_ANY_ID, PCI_ANY_ID, }, 9924 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, 9925 PCI_ANY_ID, PCI_ANY_ID, }, 9926 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, 9927 PCI_ANY_ID, PCI_ANY_ID, }, 9928 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, 9929 PCI_ANY_ID, PCI_ANY_ID, }, 9930 { 0 } 9931 }; 9932 9933 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 9934 9935 static struct pci_error_handlers lpfc_err_handler = { 9936 .error_detected = lpfc_io_error_detected, 9937 .slot_reset = lpfc_io_slot_reset, 9938 .resume = lpfc_io_resume, 9939 }; 9940 9941 static struct pci_driver lpfc_driver = { 9942 .name = LPFC_DRIVER_NAME, 9943 .id_table = lpfc_id_table, 9944 .probe = lpfc_pci_probe_one, 9945 .remove = __devexit_p(lpfc_pci_remove_one), 9946 .suspend = lpfc_pci_suspend_one, 9947 .resume = lpfc_pci_resume_one, 9948 .err_handler = &lpfc_err_handler, 9949 }; 9950 9951 /** 9952 * lpfc_init - lpfc module initialization routine 9953 * 9954 * This routine is to be invoked when the lpfc module is loaded into the 9955 * kernel. The special kernel macro module_init() is used to indicate the 9956 * role of this routine to the kernel as lpfc module entry point. 9957 * 9958 * Return codes 9959 * 0 - successful 9960 * -ENOMEM - FC attach transport failed 9961 * all others - failed 9962 */ 9963 static int __init 9964 lpfc_init(void) 9965 { 9966 int error = 0; 9967 9968 printk(LPFC_MODULE_DESC "\n"); 9969 printk(LPFC_COPYRIGHT "\n"); 9970 9971 if (lpfc_enable_npiv) { 9972 lpfc_transport_functions.vport_create = lpfc_vport_create; 9973 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 9974 } 9975 lpfc_transport_template = 9976 fc_attach_transport(&lpfc_transport_functions); 9977 if (lpfc_transport_template == NULL) 9978 return -ENOMEM; 9979 if (lpfc_enable_npiv) { 9980 lpfc_vport_transport_template = 9981 fc_attach_transport(&lpfc_vport_transport_functions); 9982 if (lpfc_vport_transport_template == NULL) { 9983 fc_release_transport(lpfc_transport_template); 9984 return -ENOMEM; 9985 } 9986 } 9987 error = pci_register_driver(&lpfc_driver); 9988 if (error) { 9989 fc_release_transport(lpfc_transport_template); 9990 if (lpfc_enable_npiv) 9991 fc_release_transport(lpfc_vport_transport_template); 9992 } 9993 9994 return error; 9995 } 9996 9997 /** 9998 * lpfc_exit - lpfc module removal routine 9999 * 10000 * This routine is invoked when the lpfc module is removed from the kernel. 10001 * The special kernel macro module_exit() is used to indicate the role of 10002 * this routine to the kernel as lpfc module exit point. 10003 */ 10004 static void __exit 10005 lpfc_exit(void) 10006 { 10007 pci_unregister_driver(&lpfc_driver); 10008 fc_release_transport(lpfc_transport_template); 10009 if (lpfc_enable_npiv) 10010 fc_release_transport(lpfc_vport_transport_template); 10011 if (_dump_buf_data) { 10012 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 10013 "_dump_buf_data at 0x%p\n", 10014 (1L << _dump_buf_data_order), _dump_buf_data); 10015 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 10016 } 10017 10018 if (_dump_buf_dif) { 10019 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 10020 "_dump_buf_dif at 0x%p\n", 10021 (1L << _dump_buf_dif_order), _dump_buf_dif); 10022 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 10023 } 10024 } 10025 10026 module_init(lpfc_init); 10027 module_exit(lpfc_exit); 10028 MODULE_LICENSE("GPL"); 10029 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 10030 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 10031 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 10032