/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *	0 - success.
 *	-ERESTART - requests the SLI layer to reset the HBA and try again.
 *	Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			/* Convert the license key to big-endian words once */
			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* DUMP_MEM may return zero when finished, or we may have hit
		 * a mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}
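
/*
 * Note on precedence in lpfc_update_vport_wwn(): a configured soft WWNN or
 * WWPN always wins. It is first written into the service parameters and,
 * because the soft flag is set, then copied into fc_nodename/fc_portname.
 * For example, with cfg_soft_wwpn set and fc_portname already non-zero, the
 * soft value still replaces both the service-parameter and vport copies of
 * the port name.
 */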

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
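
	/*
	 * The loop above renders the low 6 bytes of the WWNN as 12 lowercase
	 * hex digits, one nibble per character ('0'-'9', then 'a'-'f' for
	 * nibble values 10-15). For example, IEEE bytes 00:90:fa:12:34:56
	 * would yield the fallback serial number "0090fa123456".
	 */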

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
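
	/*
	 * The readl() back of HCregaddr above is not a stray read: PCI
	 * writes are posted, so reading the register back forces the new
	 * interrupt-enable mask out to the adapter before we proceed.
	 */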

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
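
	/*
	 * For example, a user-configured LPFC_USER_LINK_SPEED_16G on a board
	 * whose link-media type bits (phba->lmt) lack LMT_16Gb is caught
	 * above, logged as message 1302, and downgraded to
	 * LPFC_USER_LINK_SPEED_AUTO before the INIT_LINK mailbox command is
	 * built.
	 */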
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after the HBA is
 * reset when bringing down the SLI Layer.
 *
 * Return codes
 *	void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *	void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *	void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_lock_irq(&pring->ring_lock);
		else
			spin_lock_irq(&phba->hbalock);
		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;

		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_unlock_irq(&pring->ring_lock);
		else
			spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
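
/*
 * The SLI4 variant below has more to do than its SLI3 counterpart: besides
 * the common posted-buffer and txcmplq cleanup, it must move any aborted
 * ELS SGLs back onto lpfc_sgl_list and recycle aborted SCSI buffers onto
 * the put list, because SLI4 keeps these resources on driver-owned lists.
 */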

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	pring = &psli->ring[LPFC_ELS_RING];

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and
					 * scsi_buf_list
					 */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	spin_lock(&pring->ring_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&pring->ring_lock);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			 &aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	lpfc_sli4_free_sp_events(phba);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event will be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler, and the HBA timeout event bit will
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event will be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler, and the RRQ timeout event bit will
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver will set up the heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions are detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expires with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}
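
/*
 * Heartbeat protocol summary: lpfc_hb_timeout_handler() below issues the
 * heart-beat mailbox command and re-arms hb_tmofunc for LPFC_HB_MBOX_TIMEOUT
 * seconds with hb_outstanding set. When the command completes, the handler
 * above clears hb_outstanding, and the next timeout issues a fresh
 * heart-beat after LPFC_HB_MBOX_INTERVAL seconds. A timeout that fires with
 * the heart-beat still outstanding is logged (message 0459) and the command
 * is given one more LPFC_HB_MBOX_TIMEOUT period to complete.
 */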

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
 * timer for the next timeout period. If a heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and the timer set properly. Otherwise, if
 * there has been a heart-beat mailbox command outstanding, the HBA will be
 * put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
		       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
		       jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
							LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
							     MBX_NOWAIT);

				if (retval != MBX_BUSY &&
				    retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
						     phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
							LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"2857 Last completion time not "
						"updated in %d ms\n",
						jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If the heartbeat timeout fired with hb_outstanding
			 * set, we need to give the heart-beat mailbox command
			 * a chance to complete or time out.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * The firmware stops when it triggers erratt. That could cause I/Os
	 * to be dropped by the firmware. Error out the iocbs (I/Os) on the
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * the link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/* Post an internal board error event to the management application */
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * The firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware. Error
		 * out the iocbs (I/Os) on the txcmplq and let the SCSI layer
		 * retry them after re-establishing the link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: enable the "Reset Needed" recovery log message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	/*
	 * On error status condition, the driver needs to wait for port
	 * ready before performing reset.
	 */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);
	if (!rc) {
		/* need reset: attempt for port recovery */
		if (en_rn_msg)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
		lpfc_offline_prep(phba, mbx_action);
		lpfc_offline(phba);
		/* release interrupt for possible resource change */
		lpfc_sli4_disable_intr(phba);
		lpfc_sli_brdrestart(phba);
		/* request and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3175 Failed to enable interrupt\n");
			return -EIO;
		} else {
			phba->intr_mode = intr_mode;
		}
		rc = lpfc_online(phba);
		if (rc == 0)
			lpfc_unblock_mgmt_io(phba);
	}
	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
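		/*
		 * Decode the ERR1/ERR2 register pair to tell deliberate
		 * resets apart from genuine faults: a firmware update, a
		 * forced debug dump, and function provisioning each bring
		 * the port down on purpose. Only the firmware-update case
		 * suppresses the "Reset Needed" recovery message below
		 * (en_rn_msg).
		 */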
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			   reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine, invoked through the API jump table function pointer in the
 * lpfc_hba struct.
 *
 * Return codes
 *	None.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
1768 **/ 1769 void 1770 lpfc_handle_latt(struct lpfc_hba *phba) 1771 { 1772 struct lpfc_vport *vport = phba->pport; 1773 struct lpfc_sli *psli = &phba->sli; 1774 LPFC_MBOXQ_t *pmb; 1775 volatile uint32_t control; 1776 struct lpfc_dmabuf *mp; 1777 int rc = 0; 1778 1779 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1780 if (!pmb) { 1781 rc = 1; 1782 goto lpfc_handle_latt_err_exit; 1783 } 1784 1785 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1786 if (!mp) { 1787 rc = 2; 1788 goto lpfc_handle_latt_free_pmb; 1789 } 1790 1791 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1792 if (!mp->virt) { 1793 rc = 3; 1794 goto lpfc_handle_latt_free_mp; 1795 } 1796 1797 /* Cleanup any outstanding ELS commands */ 1798 lpfc_els_flush_all_cmd(phba); 1799 1800 psli->slistat.link_event++; 1801 lpfc_read_topology(phba, pmb, mp); 1802 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 1803 pmb->vport = vport; 1804 /* Block ELS IOCBs until we have processed this mbox command */ 1805 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1806 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1807 if (rc == MBX_NOT_FINISHED) { 1808 rc = 4; 1809 goto lpfc_handle_latt_free_mbuf; 1810 } 1811 1812 /* Clear Link Attention in HA REG */ 1813 spin_lock_irq(&phba->hbalock); 1814 writel(HA_LATT, phba->HAregaddr); 1815 readl(phba->HAregaddr); /* flush */ 1816 spin_unlock_irq(&phba->hbalock); 1817 1818 return; 1819 1820 lpfc_handle_latt_free_mbuf: 1821 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1822 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1823 lpfc_handle_latt_free_mp: 1824 kfree(mp); 1825 lpfc_handle_latt_free_pmb: 1826 mempool_free(pmb, phba->mbox_mem_pool); 1827 lpfc_handle_latt_err_exit: 1828 /* Enable Link attention interrupts */ 1829 spin_lock_irq(&phba->hbalock); 1830 psli->sli_flag |= LPFC_PROCESS_LA; 1831 control = readl(phba->HCregaddr); 1832 control |= HC_LAINT_ENA; 1833 writel(control, phba->HCregaddr); 1834 readl(phba->HCregaddr); /* flush */ 1835 1836 /* Clear Link Attention in HA REG */ 1837 writel(HA_LATT, phba->HAregaddr); 1838 readl(phba->HAregaddr); /* flush */ 1839 spin_unlock_irq(&phba->hbalock); 1840 lpfc_linkdown(phba); 1841 phba->link_state = LPFC_HBA_ERROR; 1842 1843 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1844 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1845 1846 return; 1847 } 1848 1849 /** 1850 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1851 * @phba: pointer to lpfc hba data structure. 1852 * @vpd: pointer to the vital product data. 1853 * @len: length of the vital product data in bytes. 1854 * 1855 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1856 * an array of characters. In this routine, the ModelName, ProgramType, and 1857 * ModelDesc, etc. fields of the phba data structure will be populated. 
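* The VPD is a sequence of tagged descriptors: tags 0x82, 0x91 and unknown
* tags are skipped, tag 0x90 carries the read-only fields parsed here
* (SN, V1-V4), and tag 0x78 terminates the data.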
1858 * 1859 * Return codes 1860 * 0 - pointer to the VPD passed in is NULL 1861 * 1 - success 1862 **/ 1863 int 1864 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1865 { 1866 uint8_t lenlo, lenhi; 1867 int Length; 1868 int i, j; 1869 int finished = 0; 1870 int index = 0; 1871 1872 if (!vpd) 1873 return 0; 1874 1875 /* Vital Product */ 1876 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1877 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1878 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1879 (uint32_t) vpd[3]); 1880 while (!finished && (index < (len - 4))) { 1881 switch (vpd[index]) { 1882 case 0x82: 1883 case 0x91: 1884 index += 1; 1885 lenlo = vpd[index]; 1886 index += 1; 1887 lenhi = vpd[index]; 1888 index += 1; 1889 i = ((((unsigned short)lenhi) << 8) + lenlo); 1890 index += i; 1891 break; 1892 case 0x90: 1893 index += 1; 1894 lenlo = vpd[index]; 1895 index += 1; 1896 lenhi = vpd[index]; 1897 index += 1; 1898 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1899 if (Length > len - index) 1900 Length = len - index; 1901 while (Length > 0) { 1902 /* Look for Serial Number */ 1903 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1904 index += 2; 1905 i = vpd[index]; 1906 index += 1; 1907 j = 0; 1908 Length -= (3+i); 1909 while(i--) { 1910 phba->SerialNumber[j++] = vpd[index++]; 1911 if (j == 31) 1912 break; 1913 } 1914 phba->SerialNumber[j] = 0; 1915 continue; 1916 } 1917 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1918 phba->vpd_flag |= VPD_MODEL_DESC; 1919 index += 2; 1920 i = vpd[index]; 1921 index += 1; 1922 j = 0; 1923 Length -= (3+i); 1924 while(i--) { 1925 phba->ModelDesc[j++] = vpd[index++]; 1926 if (j == 255) 1927 break; 1928 } 1929 phba->ModelDesc[j] = 0; 1930 continue; 1931 } 1932 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1933 phba->vpd_flag |= VPD_MODEL_NAME; 1934 index += 2; 1935 i = vpd[index]; 1936 index += 1; 1937 j = 0; 1938 Length -= (3+i); 1939 while(i--) { 1940 phba->ModelName[j++] = vpd[index++]; 1941 if (j == 79) 1942 break; 1943 } 1944 phba->ModelName[j] = 0; 1945 continue; 1946 } 1947 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1948 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1949 index += 2; 1950 i = vpd[index]; 1951 index += 1; 1952 j = 0; 1953 Length -= (3+i); 1954 while(i--) { 1955 phba->ProgramType[j++] = vpd[index++]; 1956 if (j == 255) 1957 break; 1958 } 1959 phba->ProgramType[j] = 0; 1960 continue; 1961 } 1962 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1963 phba->vpd_flag |= VPD_PORT; 1964 index += 2; 1965 i = vpd[index]; 1966 index += 1; 1967 j = 0; 1968 Length -= (3+i); 1969 while(i--) { 1970 if ((phba->sli_rev == LPFC_SLI_REV4) && 1971 (phba->sli4_hba.pport_name_sta == 1972 LPFC_SLI4_PPNAME_GET)) { 1973 j++; 1974 index++; 1975 } else 1976 phba->Port[j++] = vpd[index++]; 1977 if (j == 19) 1978 break; 1979 } 1980 if ((phba->sli_rev != LPFC_SLI_REV4) || 1981 (phba->sli4_hba.pport_name_sta == 1982 LPFC_SLI4_PPNAME_NON)) 1983 phba->Port[j] = 0; 1984 continue; 1985 } 1986 else { 1987 index += 2; 1988 i = vpd[index]; 1989 index += 1; 1990 index += i; 1991 Length -= (3 + i); 1992 } 1993 } 1994 finished = 0; 1995 break; 1996 case 0x78: 1997 finished = 1; 1998 break; 1999 default: 2000 index ++; 2001 break; 2002 } 2003 } 2004 2005 return(1); 2006 } 2007 2008 /** 2009 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2010 * @phba: pointer to lpfc hba data structure. 2011 * @mdp: pointer to the data structure to hold the derived model name. 
2012 * @descp: pointer to the data structure to hold the derived description. 2013 * 2014 * This routine retrieves HBA's description based on its registered PCI device 2015 * ID. The @descp passed into this function points to an array of 256 chars. It 2016 * shall be returned with the model name, maximum speed, and the host bus type. 2017 * The @mdp passed into this function points to an array of 80 chars. When the 2018 * function returns, the @mdp will be filled with the model name. 2019 **/ 2020 static void 2021 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2022 { 2023 lpfc_vpd_t *vp; 2024 uint16_t dev_id = phba->pcidev->device; 2025 int max_speed; 2026 int GE = 0; 2027 int oneConnect = 0; /* default is not a oneConnect */ 2028 struct { 2029 char *name; 2030 char *bus; 2031 char *function; 2032 } m = {"<Unknown>", "", ""}; 2033 2034 if (mdp && mdp[0] != '\0' 2035 && descp && descp[0] != '\0') 2036 return; 2037 2038 if (phba->lmt & LMT_16Gb) 2039 max_speed = 16; 2040 else if (phba->lmt & LMT_10Gb) 2041 max_speed = 10; 2042 else if (phba->lmt & LMT_8Gb) 2043 max_speed = 8; 2044 else if (phba->lmt & LMT_4Gb) 2045 max_speed = 4; 2046 else if (phba->lmt & LMT_2Gb) 2047 max_speed = 2; 2048 else if (phba->lmt & LMT_1Gb) 2049 max_speed = 1; 2050 else 2051 max_speed = 0; 2052 2053 vp = &phba->vpd; 2054 2055 switch (dev_id) { 2056 case PCI_DEVICE_ID_FIREFLY: 2057 m = (typeof(m)){"LP6000", "PCI", 2058 "Obsolete, Unsupported Fibre Channel Adapter"}; 2059 break; 2060 case PCI_DEVICE_ID_SUPERFLY: 2061 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2062 m = (typeof(m)){"LP7000", "PCI", ""}; 2063 else 2064 m = (typeof(m)){"LP7000E", "PCI", ""}; 2065 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2066 break; 2067 case PCI_DEVICE_ID_DRAGONFLY: 2068 m = (typeof(m)){"LP8000", "PCI", 2069 "Obsolete, Unsupported Fibre Channel Adapter"}; 2070 break; 2071 case PCI_DEVICE_ID_CENTAUR: 2072 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2073 m = (typeof(m)){"LP9002", "PCI", ""}; 2074 else 2075 m = (typeof(m)){"LP9000", "PCI", ""}; 2076 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2077 break; 2078 case PCI_DEVICE_ID_RFLY: 2079 m = (typeof(m)){"LP952", "PCI", 2080 "Obsolete, Unsupported Fibre Channel Adapter"}; 2081 break; 2082 case PCI_DEVICE_ID_PEGASUS: 2083 m = (typeof(m)){"LP9802", "PCI-X", 2084 "Obsolete, Unsupported Fibre Channel Adapter"}; 2085 break; 2086 case PCI_DEVICE_ID_THOR: 2087 m = (typeof(m)){"LP10000", "PCI-X", 2088 "Obsolete, Unsupported Fibre Channel Adapter"}; 2089 break; 2090 case PCI_DEVICE_ID_VIPER: 2091 m = (typeof(m)){"LPX1000", "PCI-X", 2092 "Obsolete, Unsupported Fibre Channel Adapter"}; 2093 break; 2094 case PCI_DEVICE_ID_PFLY: 2095 m = (typeof(m)){"LP982", "PCI-X", 2096 "Obsolete, Unsupported Fibre Channel Adapter"}; 2097 break; 2098 case PCI_DEVICE_ID_TFLY: 2099 m = (typeof(m)){"LP1050", "PCI-X", 2100 "Obsolete, Unsupported Fibre Channel Adapter"}; 2101 break; 2102 case PCI_DEVICE_ID_HELIOS: 2103 m = (typeof(m)){"LP11000", "PCI-X2", 2104 "Obsolete, Unsupported Fibre Channel Adapter"}; 2105 break; 2106 case PCI_DEVICE_ID_HELIOS_SCSP: 2107 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2108 "Obsolete, Unsupported Fibre Channel Adapter"}; 2109 break; 2110 case PCI_DEVICE_ID_HELIOS_DCSP: 2111 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2112 "Obsolete, Unsupported Fibre Channel Adapter"}; 2113 break; 2114 case PCI_DEVICE_ID_NEPTUNE: 2115 m = (typeof(m)){"LPe1000", "PCIe", 2116 "Obsolete, Unsupported Fibre Channel Adapter"}; 2117 
break; 2118 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2119 m = (typeof(m)){"LPe1000-SP", "PCIe", 2120 "Obsolete, Unsupported Fibre Channel Adapter"}; 2121 break; 2122 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2123 m = (typeof(m)){"LPe1002-SP", "PCIe", 2124 "Obsolete, Unsupported Fibre Channel Adapter"}; 2125 break; 2126 case PCI_DEVICE_ID_BMID: 2127 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2128 break; 2129 case PCI_DEVICE_ID_BSMB: 2130 m = (typeof(m)){"LP111", "PCI-X2", 2131 "Obsolete, Unsupported Fibre Channel Adapter"}; 2132 break; 2133 case PCI_DEVICE_ID_ZEPHYR: 2134 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2135 break; 2136 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2137 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2138 break; 2139 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2140 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2141 GE = 1; 2142 break; 2143 case PCI_DEVICE_ID_ZMID: 2144 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2145 break; 2146 case PCI_DEVICE_ID_ZSMB: 2147 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2148 break; 2149 case PCI_DEVICE_ID_LP101: 2150 m = (typeof(m)){"LP101", "PCI-X", 2151 "Obsolete, Unsupported Fibre Channel Adapter"}; 2152 break; 2153 case PCI_DEVICE_ID_LP10000S: 2154 m = (typeof(m)){"LP10000-S", "PCI", 2155 "Obsolete, Unsupported Fibre Channel Adapter"}; 2156 break; 2157 case PCI_DEVICE_ID_LP11000S: 2158 m = (typeof(m)){"LP11000-S", "PCI-X2", 2159 "Obsolete, Unsupported Fibre Channel Adapter"}; 2160 break; 2161 case PCI_DEVICE_ID_LPE11000S: 2162 m = (typeof(m)){"LPe11000-S", "PCIe", 2163 "Obsolete, Unsupported Fibre Channel Adapter"}; 2164 break; 2165 case PCI_DEVICE_ID_SAT: 2166 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2167 break; 2168 case PCI_DEVICE_ID_SAT_MID: 2169 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2170 break; 2171 case PCI_DEVICE_ID_SAT_SMB: 2172 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2173 break; 2174 case PCI_DEVICE_ID_SAT_DCSP: 2175 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2176 break; 2177 case PCI_DEVICE_ID_SAT_SCSP: 2178 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2179 break; 2180 case PCI_DEVICE_ID_SAT_S: 2181 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2182 break; 2183 case PCI_DEVICE_ID_HORNET: 2184 m = (typeof(m)){"LP21000", "PCIe", 2185 "Obsolete, Unsupported FCoE Adapter"}; 2186 GE = 1; 2187 break; 2188 case PCI_DEVICE_ID_PROTEUS_VF: 2189 m = (typeof(m)){"LPev12000", "PCIe IOV", 2190 "Obsolete, Unsupported Fibre Channel Adapter"}; 2191 break; 2192 case PCI_DEVICE_ID_PROTEUS_PF: 2193 m = (typeof(m)){"LPev12000", "PCIe IOV", 2194 "Obsolete, Unsupported Fibre Channel Adapter"}; 2195 break; 2196 case PCI_DEVICE_ID_PROTEUS_S: 2197 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2198 "Obsolete, Unsupported Fibre Channel Adapter"}; 2199 break; 2200 case PCI_DEVICE_ID_TIGERSHARK: 2201 oneConnect = 1; 2202 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2203 break; 2204 case PCI_DEVICE_ID_TOMCAT: 2205 oneConnect = 1; 2206 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2207 break; 2208 case PCI_DEVICE_ID_FALCON: 2209 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2210 "EmulexSecure Fibre"}; 2211 break; 2212 case PCI_DEVICE_ID_BALIUS: 2213 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2214 "Obsolete, Unsupported Fibre Channel Adapter"}; 2215 break; 2216 case PCI_DEVICE_ID_LANCER_FC: 2217 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2218 break; 2219 case 
PCI_DEVICE_ID_LANCER_FC_VF: 2220 m = (typeof(m)){"LPe16000", "PCIe", 2221 "Obsolete, Unsupported Fibre Channel Adapter"}; 2222 break; 2223 case PCI_DEVICE_ID_LANCER_FCOE: 2224 oneConnect = 1; 2225 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2226 break; 2227 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2228 oneConnect = 1; 2229 m = (typeof(m)){"OCe15100", "PCIe", 2230 "Obsolete, Unsupported FCoE"}; 2231 break; 2232 case PCI_DEVICE_ID_SKYHAWK: 2233 case PCI_DEVICE_ID_SKYHAWK_VF: 2234 oneConnect = 1; 2235 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2236 break; 2237 default: 2238 m = (typeof(m)){"Unknown", "", ""}; 2239 break; 2240 } 2241 2242 if (mdp && mdp[0] == '\0') 2243 snprintf(mdp, 79,"%s", m.name); 2244 /* 2245 * oneConnect hba requires special processing, they are all initiators 2246 * and we put the port number on the end 2247 */ 2248 if (descp && descp[0] == '\0') { 2249 if (oneConnect) 2250 snprintf(descp, 255, 2251 "Emulex OneConnect %s, %s Initiator %s", 2252 m.name, m.function, 2253 phba->Port); 2254 else if (max_speed == 0) 2255 snprintf(descp, 255, 2256 "Emulex %s %s %s ", 2257 m.name, m.bus, m.function); 2258 else 2259 snprintf(descp, 255, 2260 "Emulex %s %d%s %s %s", 2261 m.name, max_speed, (GE) ? "GE" : "Gb", 2262 m.bus, m.function); 2263 } 2264 } 2265 2266 /** 2267 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2268 * @phba: pointer to lpfc hba data structure. 2269 * @pring: pointer to a IOCB ring. 2270 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2271 * 2272 * This routine posts a given number of IOCBs with the associated DMA buffer 2273 * descriptors specified by the cnt argument to the given IOCB ring. 2274 * 2275 * Return codes 2276 * The number of IOCBs NOT able to be posted to the IOCB ring. 
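* Note: up to two DMA buffers are carried per QUE_RING_BUF64_CN IOCB, so
* roughly cnt/2 IOCBs are issued per call.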
2277 **/ 2278 int 2279 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2280 { 2281 IOCB_t *icmd; 2282 struct lpfc_iocbq *iocb; 2283 struct lpfc_dmabuf *mp1, *mp2; 2284 2285 cnt += pring->missbufcnt; 2286 2287 /* While there are buffers to post */ 2288 while (cnt > 0) { 2289 /* Allocate buffer for command iocb */ 2290 iocb = lpfc_sli_get_iocbq(phba); 2291 if (iocb == NULL) { 2292 pring->missbufcnt = cnt; 2293 return cnt; 2294 } 2295 icmd = &iocb->iocb; 2296 2297 /* 2 buffers can be posted per command */ 2298 /* Allocate buffer to post */ 2299 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2300 if (mp1) 2301 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2302 if (!mp1 || !mp1->virt) { 2303 kfree(mp1); 2304 lpfc_sli_release_iocbq(phba, iocb); 2305 pring->missbufcnt = cnt; 2306 return cnt; 2307 } 2308 2309 INIT_LIST_HEAD(&mp1->list); 2310 /* Allocate buffer to post */ 2311 if (cnt > 1) { 2312 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2313 if (mp2) 2314 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2315 &mp2->phys); 2316 if (!mp2 || !mp2->virt) { 2317 kfree(mp2); 2318 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2319 kfree(mp1); 2320 lpfc_sli_release_iocbq(phba, iocb); 2321 pring->missbufcnt = cnt; 2322 return cnt; 2323 } 2324 2325 INIT_LIST_HEAD(&mp2->list); 2326 } else { 2327 mp2 = NULL; 2328 } 2329 2330 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2331 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2332 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2333 icmd->ulpBdeCount = 1; 2334 cnt--; 2335 if (mp2) { 2336 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2337 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2338 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2339 cnt--; 2340 icmd->ulpBdeCount = 2; 2341 } 2342 2343 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2344 icmd->ulpLe = 1; 2345 2346 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2347 IOCB_ERROR) { 2348 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2349 kfree(mp1); 2350 cnt++; 2351 if (mp2) { 2352 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2353 kfree(mp2); 2354 cnt++; 2355 } 2356 lpfc_sli_release_iocbq(phba, iocb); 2357 pring->missbufcnt = cnt; 2358 return cnt; 2359 } 2360 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2361 if (mp2) 2362 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2363 } 2364 pring->missbufcnt = 0; 2365 return 0; 2366 } 2367 2368 /** 2369 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2370 * @phba: pointer to lpfc hba data structure. 2371 * 2372 * This routine posts initial receive IOCB buffers to the ELS ring. The 2373 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2374 * set to 64 IOCBs. 2375 * 2376 * Return codes 2377 * 0 - success (currently always success) 2378 **/ 2379 static int 2380 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2381 { 2382 struct lpfc_sli *psli = &phba->sli; 2383 2384 /* Ring 0, ELS / CT buffers */ 2385 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2386 /* Ring 2 - FCP no buffers needed */ 2387 2388 return 0; 2389 } 2390 2391 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2392 2393 /** 2394 * lpfc_sha_init - Set up initial array of hash table entries 2395 * @HashResultPointer: pointer to an array as hash table. 2396 * 2397 * This routine sets up the initial values to the array of hash table entries 2398 * for the LC HBAs. 
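* The values written below are the standard SHA-1 initialization constants
* (H0..H4). The S(N, V) macro above is a 32-bit rotate-left; for example,
* S(1, 0x80000000) evaluates to 0x00000001.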
2399 **/
2400 static void
2401 lpfc_sha_init(uint32_t * HashResultPointer)
2402 {
2403 HashResultPointer[0] = 0x67452301;
2404 HashResultPointer[1] = 0xEFCDAB89;
2405 HashResultPointer[2] = 0x98BADCFE;
2406 HashResultPointer[3] = 0x10325476;
2407 HashResultPointer[4] = 0xC3D2E1F0;
2408 }
2409 
2410 /**
2411 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2412 * @HashResultPointer: pointer to an initial/result hash table.
2413 * @HashWorkingPointer: pointer to a working hash table.
2414 *
2415 * This routine iterates an initial hash table pointed to by @HashResultPointer
2416 * with the values from the working hash table pointed to by @HashWorkingPointer.
2417 * The results are put back into the initial hash table and returned through
2418 * the @HashResultPointer as the result hash table.
2419 **/
2420 static void
2421 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2422 {
2423 int t;
2424 uint32_t TEMP;
2425 uint32_t A, B, C, D, E;
2426 t = 16;
2427 do {
2428 HashWorkingPointer[t] =
2429 S(1,
2430 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
2432 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2433 } while (++t <= 79);
2434 t = 0;
2435 A = HashResultPointer[0];
2436 B = HashResultPointer[1];
2437 C = HashResultPointer[2];
2438 D = HashResultPointer[3];
2439 E = HashResultPointer[4];
2440 
2441 do {
2442 if (t < 20) {
2443 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2444 } else if (t < 40) {
2445 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2446 } else if (t < 60) {
2447 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2448 } else {
2449 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2450 }
2451 TEMP += S(5, A) + E + HashWorkingPointer[t];
2452 E = D;
2453 D = C;
2454 C = S(30, B);
2455 B = A;
2456 A = TEMP;
2457 } while (++t <= 79);
2458 
2459 HashResultPointer[0] += A;
2460 HashResultPointer[1] += B;
2461 HashResultPointer[2] += C;
2462 HashResultPointer[3] += D;
2463 HashResultPointer[4] += E;
2464 
2465 }
2466 
2467 /**
2468 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2469 * @RandomChallenge: pointer to the entry of host challenge random number array.
2470 * @HashWorking: pointer to the entry of the working hash array.
2471 *
2472 * This routine calculates the working hash array referred to by @HashWorking
2473 * from the challenge random numbers associated with the host, referred to by
2474 * @RandomChallenge. The result is put into the entry of the working hash
2475 * array and returned by reference through @HashWorking.
2476 **/
2477 static void
2478 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2479 {
2480 *HashWorking = (*RandomChallenge ^ *HashWorking);
2481 }
2482 
2483 /**
2484 * lpfc_hba_init - Perform special handling for LC HBA initialization
2485 * @phba: pointer to lpfc hba data structure.
2486 * @hbainit: pointer to an array of unsigned 32-bit integers.
2487 *
2488 * This routine performs the special handling for LC HBA initialization.
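* As a rough sketch of the flow implemented below: the 80-entry working
* array is seeded from the WWNN (entries 0, 1, 78 and 79), XOR-ed with the
* first seven RandomData challenge words, and a single SHA-1-style
* compression of that block then produces the five hbainit words.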
2489 **/
2490 void
2491 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2492 {
2493 int t;
2494 uint32_t *HashWorking;
2495 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2496 
2497 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2498 if (!HashWorking)
2499 return;
2500 
2501 HashWorking[0] = HashWorking[78] = *pwwnn++;
2502 HashWorking[1] = HashWorking[79] = *pwwnn;
2503 
2504 for (t = 0; t < 7; t++)
2505 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2506 
2507 lpfc_sha_init(hbainit);
2508 lpfc_sha_iterate(hbainit, HashWorking);
2509 kfree(HashWorking);
2510 }
2511 
2512 /**
2513 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2514 * @vport: pointer to a virtual N_Port data structure.
2515 *
2516 * This routine performs the necessary cleanups before deleting the @vport.
2517 * It invokes the discovery state machine to perform necessary state
2518 * transitions and to release the ndlps associated with the @vport. Note,
2519 * the physical port is treated as @vport 0.
2520 **/
2521 void
2522 lpfc_cleanup(struct lpfc_vport *vport)
2523 {
2524 struct lpfc_hba *phba = vport->phba;
2525 struct lpfc_nodelist *ndlp, *next_ndlp;
2526 int i = 0;
2527 
2528 if (phba->link_state > LPFC_LINK_DOWN)
2529 lpfc_port_link_failure(vport);
2530 
2531 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2532 if (!NLP_CHK_NODE_ACT(ndlp)) {
2533 ndlp = lpfc_enable_node(vport, ndlp,
2534 NLP_STE_UNUSED_NODE);
2535 if (!ndlp)
2536 continue;
2537 spin_lock_irq(&phba->ndlp_lock);
2538 NLP_SET_FREE_REQ(ndlp);
2539 spin_unlock_irq(&phba->ndlp_lock);
2540 /* Trigger the release of the ndlp memory */
2541 lpfc_nlp_put(ndlp);
2542 continue;
2543 }
2544 spin_lock_irq(&phba->ndlp_lock);
2545 if (NLP_CHK_FREE_REQ(ndlp)) {
2546 /* The ndlp should not already be in memory free mode */
2547 spin_unlock_irq(&phba->ndlp_lock);
2548 continue;
2549 } else
2550 /* Indicate request for freeing ndlp memory */
2551 NLP_SET_FREE_REQ(ndlp);
2552 spin_unlock_irq(&phba->ndlp_lock);
2553 
2554 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2555 ndlp->nlp_DID == Fabric_DID) {
2556 /* Just free up ndlp with Fabric_DID for vports */
2557 lpfc_nlp_put(ndlp);
2558 continue;
2559 }
2560 
2561 /* Take care of nodes in the unused state before the state
2562 * machine takes action.
2563 */
2564 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2565 lpfc_nlp_put(ndlp);
2566 continue;
2567 }
2568 
2569 if (ndlp->nlp_type & NLP_FABRIC)
2570 lpfc_disc_state_machine(vport, ndlp, NULL,
2571 NLP_EVT_DEVICE_RECOVERY);
2572 
2573 lpfc_disc_state_machine(vport, ndlp, NULL,
2574 NLP_EVT_DEVICE_RM);
2575 }
2576 
2577 /* At this point, ALL ndlp's should be gone
2578 * because of the previous NLP_EVT_DEVICE_RM.
2579 * Let's wait for this to happen, if needed.
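* The loop below polls in 10ms steps and gives up after roughly 30
* seconds (3000 iterations), dumping the remaining nodes for diagnosis.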
2580 */
2581 while (!list_empty(&vport->fc_nodes)) {
2582 if (i++ > 3000) {
2583 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2584 "0233 Nodelist not empty\n");
2585 list_for_each_entry_safe(ndlp, next_ndlp,
2586 &vport->fc_nodes, nlp_listp) {
2587 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2588 LOG_NODE,
2589 "0282 did:x%x ndlp:x%p "
2590 "usgmap:x%x refcnt:%d\n",
2591 ndlp->nlp_DID, (void *)ndlp,
2592 ndlp->nlp_usg_map,
2593 atomic_read(
2594 &ndlp->kref.refcount));
2595 }
2596 break;
2597 }
2598 
2599 /* Wait for any activity on ndlps to settle */
2600 msleep(10);
2601 }
2602 lpfc_cleanup_vports_rrqs(vport, NULL);
2603 }
2604 
2605 /**
2606 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2607 * @vport: pointer to a virtual N_Port data structure.
2608 *
2609 * This routine stops all the timers associated with a @vport. This function
2610 * is invoked before disabling or deleting a @vport. Note that the physical
2611 * port is treated as @vport 0.
2612 **/
2613 void
2614 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2615 {
2616 del_timer_sync(&vport->els_tmofunc);
2617 del_timer_sync(&vport->fc_fdmitmo);
2618 del_timer_sync(&vport->delayed_disc_tmo);
2619 lpfc_can_disctmo(vport);
2620 return;
2621 }
2622 
2623 /**
2624 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2625 * @phba: pointer to lpfc hba data structure.
2626 *
2627 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2628 * caller of this routine should already hold the host lock.
2629 **/
2630 void
2631 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2632 {
2633 /* Clear pending FCF rediscovery wait flag */
2634 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2635 
2636 /* Now, try to stop the timer */
2637 del_timer(&phba->fcf.redisc_wait);
2638 }
2639 
2640 /**
2641 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2642 * @phba: pointer to lpfc hba data structure.
2643 *
2644 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2645 * checks whether the FCF rediscovery wait timer is pending with the host
2646 * lock held before proceeding with disabling the timer and clearing the
2647 * wait timer pending flag.
2648 **/
2649 void
2650 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2651 {
2652 spin_lock_irq(&phba->hbalock);
2653 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2654 /* FCF rediscovery timer already fired or stopped */
2655 spin_unlock_irq(&phba->hbalock);
2656 return;
2657 }
2658 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2659 /* Clear failover in progress flags */
2660 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2661 spin_unlock_irq(&phba->hbalock);
2662 }
2663 
2664 /**
2665 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2666 * @phba: pointer to lpfc hba data structure.
2667 *
2668 * This routine stops all the timers associated with an HBA. This function is
2669 * invoked before either putting an HBA offline or unloading the driver.
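* Device-group specific timers (the FCP poll timer on LightPulse/SLI3
* devices, the FCF rediscovery wait timer on OneConnect/SLI4 devices) are
* stopped as well.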
2670 **/
2671 void
2672 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2673 {
2674 lpfc_stop_vport_timers(phba->pport);
2675 del_timer_sync(&phba->sli.mbox_tmo);
2676 del_timer_sync(&phba->fabric_block_timer);
2677 del_timer_sync(&phba->eratt_poll);
2678 del_timer_sync(&phba->hb_tmofunc);
2679 if (phba->sli_rev == LPFC_SLI_REV4) {
2680 del_timer_sync(&phba->rrq_tmr);
2681 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2682 }
2683 phba->hb_outstanding = 0;
2684 
2685 switch (phba->pci_dev_grp) {
2686 case LPFC_PCI_DEV_LP:
2687 /* Stop any LightPulse device specific driver timers */
2688 del_timer_sync(&phba->fcp_poll_timer);
2689 break;
2690 case LPFC_PCI_DEV_OC:
2691 /* Stop any OneConnect device specific driver timers */
2692 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2693 break;
2694 default:
2695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2696 "0297 Invalid device group (x%x)\n",
2697 phba->pci_dev_grp);
2698 break;
2699 }
2700 return;
2701 }
2702 
2703 /**
2704 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2705 * @phba: pointer to lpfc hba data structure.
* @mbx_action: flag for the mailbox shutdown action.
2706 *
2707 * This routine marks a HBA's management interface as blocked. Once the HBA's
2708 * management interface is marked as blocked, all user space access to
2709 * the HBA, whether from the sysfs interface or the libdfc interface, will
2710 * be blocked. The HBA is set to block the management interface when the
2711 * driver prepares the HBA interface for online or offline.
2712 **/
2713 static void
2714 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2715 {
2716 unsigned long iflag;
2717 uint8_t actcmd = MBX_HEARTBEAT;
2718 unsigned long timeout;
2719 
2720 spin_lock_irqsave(&phba->hbalock, iflag);
2721 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2722 spin_unlock_irqrestore(&phba->hbalock, iflag);
2723 if (mbx_action == LPFC_MBX_NO_WAIT)
2724 return;
2725 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2726 spin_lock_irqsave(&phba->hbalock, iflag);
2727 if (phba->sli.mbox_active) {
2728 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2729 /* Determine how long we might wait for the active mailbox
2730 * command to be gracefully completed by firmware.
2731 */
2732 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2733 phba->sli.mbox_active) * 1000) + jiffies;
2734 }
2735 spin_unlock_irqrestore(&phba->hbalock, iflag);
2736 
2737 /* Wait for the outstanding mailbox command to complete */
2738 while (phba->sli.mbox_active) {
2739 /* Check active mailbox complete status every 2ms */
2740 msleep(2);
2741 if (time_after(jiffies, timeout)) {
2742 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2743 "2813 Mgmt IO is Blocked %x "
2744 "- mbox cmd %x still active\n",
2745 phba->sli.sli_flag, actcmd);
2746 break;
2747 }
2748 }
2749 }
2750 
2751 /**
2752 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2753 * @phba: pointer to lpfc hba data structure.
2754 *
2755 * Allocate RPIs for all active remote nodes. This is needed whenever
2756 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2757 * is to fix up the temporary rpi assignments.
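* Vports that are unloading are skipped; every active ndlp on the
* remaining vports is assigned a newly allocated RPI.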
2758 **/ 2759 void 2760 lpfc_sli4_node_prep(struct lpfc_hba *phba) 2761 { 2762 struct lpfc_nodelist *ndlp, *next_ndlp; 2763 struct lpfc_vport **vports; 2764 int i; 2765 2766 if (phba->sli_rev != LPFC_SLI_REV4) 2767 return; 2768 2769 vports = lpfc_create_vport_work_array(phba); 2770 if (vports != NULL) { 2771 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2772 if (vports[i]->load_flag & FC_UNLOADING) 2773 continue; 2774 2775 list_for_each_entry_safe(ndlp, next_ndlp, 2776 &vports[i]->fc_nodes, 2777 nlp_listp) { 2778 if (NLP_CHK_NODE_ACT(ndlp)) { 2779 ndlp->nlp_rpi = 2780 lpfc_sli4_alloc_rpi(phba); 2781 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 2782 LOG_NODE, 2783 "0009 rpi:%x DID:%x " 2784 "flg:%x map:%x %p\n", 2785 ndlp->nlp_rpi, 2786 ndlp->nlp_DID, 2787 ndlp->nlp_flag, 2788 ndlp->nlp_usg_map, 2789 ndlp); 2790 } 2791 } 2792 } 2793 } 2794 lpfc_destroy_vport_work_array(phba, vports); 2795 } 2796 2797 /** 2798 * lpfc_online - Initialize and bring a HBA online 2799 * @phba: pointer to lpfc hba data structure. 2800 * 2801 * This routine initializes the HBA and brings a HBA online. During this 2802 * process, the management interface is blocked to prevent user space access 2803 * to the HBA interfering with the driver initialization. 2804 * 2805 * Return codes 2806 * 0 - successful 2807 * 1 - failed 2808 **/ 2809 int 2810 lpfc_online(struct lpfc_hba *phba) 2811 { 2812 struct lpfc_vport *vport; 2813 struct lpfc_vport **vports; 2814 int i; 2815 bool vpis_cleared = false; 2816 2817 if (!phba) 2818 return 0; 2819 vport = phba->pport; 2820 2821 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2822 return 0; 2823 2824 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2825 "0458 Bring Adapter online\n"); 2826 2827 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 2828 2829 if (!lpfc_sli_queue_setup(phba)) { 2830 lpfc_unblock_mgmt_io(phba); 2831 return 1; 2832 } 2833 2834 if (phba->sli_rev == LPFC_SLI_REV4) { 2835 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2836 lpfc_unblock_mgmt_io(phba); 2837 return 1; 2838 } 2839 spin_lock_irq(&phba->hbalock); 2840 if (!phba->sli4_hba.max_cfg_param.vpi_used) 2841 vpis_cleared = true; 2842 spin_unlock_irq(&phba->hbalock); 2843 } else { 2844 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2845 lpfc_unblock_mgmt_io(phba); 2846 return 1; 2847 } 2848 } 2849 2850 vports = lpfc_create_vport_work_array(phba); 2851 if (vports != NULL) 2852 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2853 struct Scsi_Host *shost; 2854 shost = lpfc_shost_from_vport(vports[i]); 2855 spin_lock_irq(shost->host_lock); 2856 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2857 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2858 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2859 if (phba->sli_rev == LPFC_SLI_REV4) { 2860 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2861 if ((vpis_cleared) && 2862 (vports[i]->port_type != 2863 LPFC_PHYSICAL_PORT)) 2864 vports[i]->vpi = 0; 2865 } 2866 spin_unlock_irq(shost->host_lock); 2867 } 2868 lpfc_destroy_vport_work_array(phba, vports); 2869 2870 lpfc_unblock_mgmt_io(phba); 2871 return 0; 2872 } 2873 2874 /** 2875 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2876 * @phba: pointer to lpfc hba data structure. 2877 * 2878 * This routine marks a HBA's management interface as not blocked. Once the 2879 * HBA's management interface is marked as not blocked, all the user space 2880 * access to the HBA, whether they are from sysfs interface or libdfc 2881 * interface will be allowed. 
The HBA is set to block the management interface
2882 * when the driver prepares the HBA interface for online or offline and is
2883 * then set to unblock the management interface afterwards.
2884 **/
2885 void
2886 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2887 {
2888 unsigned long iflag;
2889 
2890 spin_lock_irqsave(&phba->hbalock, iflag);
2891 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2892 spin_unlock_irqrestore(&phba->hbalock, iflag);
2893 }
2894 
2895 /**
2896 * lpfc_offline_prep - Prepare a HBA to be brought offline
2897 * @phba: pointer to lpfc hba data structure.
* @mbx_action: flag for the mailbox shutdown action.
2898 *
2899 * This routine is invoked to prepare a HBA to be brought offline. It issues
2900 * an unreg_login to all the nodes on all vports and flushes the mailbox
2901 * queue to make it ready to be brought offline.
2902 **/
2903 void
2904 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
2905 {
2906 struct lpfc_vport *vport = phba->pport;
2907 struct lpfc_nodelist *ndlp, *next_ndlp;
2908 struct lpfc_vport **vports;
2909 struct Scsi_Host *shost;
2910 int i;
2911 
2912 if (vport->fc_flag & FC_OFFLINE_MODE)
2913 return;
2914 
2915 lpfc_block_mgmt_io(phba, mbx_action);
2916 
2917 lpfc_linkdown(phba);
2918 
2919 /* Issue an unreg_login to all nodes on all vports */
2920 vports = lpfc_create_vport_work_array(phba);
2921 if (vports != NULL) {
2922 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2923 if (vports[i]->load_flag & FC_UNLOADING)
2924 continue;
2925 shost = lpfc_shost_from_vport(vports[i]);
2926 spin_lock_irq(shost->host_lock);
2927 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2928 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2929 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2930 spin_unlock_irq(shost->host_lock);
2931 
2932 shost = lpfc_shost_from_vport(vports[i]);
2933 list_for_each_entry_safe(ndlp, next_ndlp,
2934 &vports[i]->fc_nodes,
2935 nlp_listp) {
2936 if (!NLP_CHK_NODE_ACT(ndlp))
2937 continue;
2938 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2939 continue;
2940 if (ndlp->nlp_type & NLP_FABRIC) {
2941 lpfc_disc_state_machine(vports[i], ndlp,
2942 NULL, NLP_EVT_DEVICE_RECOVERY);
2943 lpfc_disc_state_machine(vports[i], ndlp,
2944 NULL, NLP_EVT_DEVICE_RM);
2945 }
2946 spin_lock_irq(shost->host_lock);
2947 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2948 spin_unlock_irq(shost->host_lock);
2949 /*
2950 * Whenever an SLI4 port goes offline, free the
2951 * RPI. Get a new RPI when the adapter port
2952 * comes back online.
2953 */
2954 if (phba->sli_rev == LPFC_SLI_REV4) {
2955 lpfc_printf_vlog(ndlp->vport,
2956 KERN_INFO, LOG_NODE,
2957 "0011 lpfc_offline: "
2958 "ndlp:x%p did %x "
2959 "usgmap:x%x rpi:%x\n",
2960 ndlp, ndlp->nlp_DID,
2961 ndlp->nlp_usg_map,
2962 ndlp->nlp_rpi);
2963 
2964 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2965 }
2966 lpfc_unreg_rpi(vports[i], ndlp);
2967 }
2968 }
2969 }
2970 lpfc_destroy_vport_work_array(phba, vports);
2971 
2972 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
2973 }
2974 
2975 /**
2976 * lpfc_offline - Bring a HBA offline
2977 * @phba: pointer to lpfc hba data structure.
2978 *
2979 * This routine actually brings a HBA offline. It stops all the timers
2980 * associated with the HBA, brings down the SLI layer, and eventually
2981 * marks the HBA as in offline state for the upper layer protocol.
2982 **/
2983 void
2984 lpfc_offline(struct lpfc_hba *phba)
2985 {
2986 struct Scsi_Host *shost;
2987 struct lpfc_vport **vports;
2988 int i;
2989 
2990 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2991 return;
2992 
2993 /* stop port and all timers associated with this hba */
2994 lpfc_stop_port(phba);
2995 vports = lpfc_create_vport_work_array(phba);
2996 if (vports != NULL)
2997 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2998 lpfc_stop_vport_timers(vports[i]);
2999 lpfc_destroy_vport_work_array(phba, vports);
3000 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3001 "0460 Bring Adapter offline\n");
3002 /* Bring down the SLI Layer and cleanup. The HBA is offline
3003 now. */
3004 lpfc_sli_hba_down(phba);
3005 spin_lock_irq(&phba->hbalock);
3006 phba->work_ha = 0;
3007 spin_unlock_irq(&phba->hbalock);
3008 vports = lpfc_create_vport_work_array(phba);
3009 if (vports != NULL)
3010 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3011 shost = lpfc_shost_from_vport(vports[i]);
3012 spin_lock_irq(shost->host_lock);
3013 vports[i]->work_port_events = 0;
3014 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3015 spin_unlock_irq(shost->host_lock);
3016 }
3017 lpfc_destroy_vport_work_array(phba, vports);
3018 }
3019 
3020 /**
3021 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3022 * @phba: pointer to lpfc hba data structure.
3023 *
3024 * This routine is to free all the SCSI buffers and IOCBs from the driver
3025 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3026 * the internal resources before the device is removed from the system.
3027 **/
3028 static void
3029 lpfc_scsi_free(struct lpfc_hba *phba)
3030 {
3031 struct lpfc_scsi_buf *sb, *sb_next;
3032 struct lpfc_iocbq *io, *io_next;
3033 
3034 spin_lock_irq(&phba->hbalock);
3035 
3036 /* Release all the lpfc_scsi_bufs maintained by this host. */
3037 
3038 spin_lock(&phba->scsi_buf_list_put_lock);
3039 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3040 list) {
3041 list_del(&sb->list);
3042 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3043 sb->dma_handle);
3044 kfree(sb);
3045 phba->total_scsi_bufs--;
3046 }
3047 spin_unlock(&phba->scsi_buf_list_put_lock);
3048 
3049 spin_lock(&phba->scsi_buf_list_get_lock);
3050 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3051 list) {
3052 list_del(&sb->list);
3053 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3054 sb->dma_handle);
3055 kfree(sb);
3056 phba->total_scsi_bufs--;
3057 }
3058 spin_unlock(&phba->scsi_buf_list_get_lock);
3059 
3060 /* Release all the lpfc_iocbq entries maintained by this host. */
3061 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
3062 list_del(&io->list);
3063 kfree(io);
3064 phba->total_iocbq_bufs--;
3065 }
3066 
3067 spin_unlock_irq(&phba->hbalock);
3068 }
3069 
3070 /**
3071 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
3072 * @phba: pointer to lpfc hba data structure.
3073 *
3074 * This routine first calculates the sizes of the current els and allocated
3075 * scsi sgl lists, and then goes through all the sgls to update the physical
3076 * XRIs assigned due to port function reset. During port initialization, the
3077 * current els and allocated scsi sgl lists are empty.
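* Extra sgl entries are allocated or freed so the lists match the new
* counts before the XRIs are reassigned.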
3078 *
3079 * Return codes
3080 * 0 - successful (for now, it always returns 0)
3081 **/
3082 int
3083 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3084 {
3085 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3086 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
3087 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
3088 LIST_HEAD(els_sgl_list);
3089 LIST_HEAD(scsi_sgl_list);
3090 int rc;
3091 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3092 
3093 /*
3094 * update on pci function's els xri-sgl list
3095 */
3096 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3097 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3098 /* els xri-sgl expanded */
3099 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3100 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3101 "3157 ELS xri-sgl count increased from "
3102 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3103 els_xri_cnt);
3104 /* allocate the additional els sgls */
3105 for (i = 0; i < xri_cnt; i++) {
3106 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3107 GFP_KERNEL);
3108 if (sglq_entry == NULL) {
3109 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3110 "2562 Failure to allocate an "
3111 "ELS sgl entry:%d\n", i);
3112 rc = -ENOMEM;
3113 goto out_free_mem;
3114 }
3115 sglq_entry->buff_type = GEN_BUFF_TYPE;
3116 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3117 &sglq_entry->phys);
3118 if (sglq_entry->virt == NULL) {
3119 kfree(sglq_entry);
3120 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3121 "2563 Failure to allocate an "
3122 "ELS mbuf:%d\n", i);
3123 rc = -ENOMEM;
3124 goto out_free_mem;
3125 }
3126 sglq_entry->sgl = sglq_entry->virt;
3127 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3128 sglq_entry->state = SGL_FREED;
3129 list_add_tail(&sglq_entry->list, &els_sgl_list);
3130 }
3131 spin_lock_irq(&phba->hbalock);
3132 spin_lock(&pring->ring_lock);
3133 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3134 spin_unlock(&pring->ring_lock);
3135 spin_unlock_irq(&phba->hbalock);
3136 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3137 /* els xri-sgl shrunk */
3138 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3139 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3140 "3158 ELS xri-sgl count decreased from "
3141 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3142 els_xri_cnt);
3143 spin_lock_irq(&phba->hbalock);
3144 spin_lock(&pring->ring_lock);
3145 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
3146 spin_unlock(&pring->ring_lock);
3147 spin_unlock_irq(&phba->hbalock);
3148 /* release extra els sgls from list */
3149 for (i = 0; i < xri_cnt; i++) {
3150 list_remove_head(&els_sgl_list,
3151 sglq_entry, struct lpfc_sglq, list);
3152 if (sglq_entry) {
3153 lpfc_mbuf_free(phba, sglq_entry->virt,
3154 sglq_entry->phys);
3155 kfree(sglq_entry);
3156 }
3157 }
3158 spin_lock_irq(&phba->hbalock);
3159 spin_lock(&pring->ring_lock);
3160 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3161 spin_unlock(&pring->ring_lock);
3162 spin_unlock_irq(&phba->hbalock);
3163 } else
3164 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3165 "3163 ELS xri-sgl count unchanged: %d\n",
3166 els_xri_cnt);
3167 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3168 
3169 /* update xris to els sgls on the list */
3170 sglq_entry = NULL;
3171 sglq_entry_next = NULL;
3172 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3173 &phba->sli4_hba.lpfc_sgl_list, list) {
3174 lxri = lpfc_sli4_next_xritag(phba);
3175 if (lxri == NO_XRI) {
3176 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3177 "2400 Failed to allocate xri for "
3178 
"ELS sgl\n"); 3179 rc = -ENOMEM; 3180 goto out_free_mem; 3181 } 3182 sglq_entry->sli4_lxritag = lxri; 3183 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3184 } 3185 3186 /* 3187 * update on pci function's allocated scsi xri-sgl list 3188 */ 3189 phba->total_scsi_bufs = 0; 3190 3191 /* maximum number of xris available for scsi buffers */ 3192 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - 3193 els_xri_cnt; 3194 3195 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3196 "2401 Current allocated SCSI xri-sgl count:%d, " 3197 "maximum SCSI xri count:%d\n", 3198 phba->sli4_hba.scsi_xri_cnt, 3199 phba->sli4_hba.scsi_xri_max); 3200 3201 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3202 spin_lock(&phba->scsi_buf_list_put_lock); 3203 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); 3204 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); 3205 spin_unlock(&phba->scsi_buf_list_put_lock); 3206 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3207 3208 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3209 /* max scsi xri shrinked below the allocated scsi buffers */ 3210 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - 3211 phba->sli4_hba.scsi_xri_max; 3212 /* release the extra allocated scsi buffers */ 3213 for (i = 0; i < scsi_xri_cnt; i++) { 3214 list_remove_head(&scsi_sgl_list, psb, 3215 struct lpfc_scsi_buf, list); 3216 if (psb) { 3217 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 3218 psb->data, psb->dma_handle); 3219 kfree(psb); 3220 } 3221 } 3222 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3223 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3224 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3225 } 3226 3227 /* update xris associated to remaining allocated scsi buffers */ 3228 psb = NULL; 3229 psb_next = NULL; 3230 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { 3231 lxri = lpfc_sli4_next_xritag(phba); 3232 if (lxri == NO_XRI) { 3233 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3234 "2560 Failed to allocate xri for " 3235 "scsi buffer\n"); 3236 rc = -ENOMEM; 3237 goto out_free_mem; 3238 } 3239 psb->cur_iocbq.sli4_lxritag = lxri; 3240 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3241 } 3242 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3243 spin_lock(&phba->scsi_buf_list_put_lock); 3244 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); 3245 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 3246 spin_unlock(&phba->scsi_buf_list_put_lock); 3247 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3248 3249 return 0; 3250 3251 out_free_mem: 3252 lpfc_free_els_sgl_list(phba); 3253 lpfc_scsi_free(phba); 3254 return rc; 3255 } 3256 3257 /** 3258 * lpfc_create_port - Create an FC port 3259 * @phba: pointer to lpfc hba data structure. 3260 * @instance: a unique integer ID to this FC port. 3261 * @dev: pointer to the device data structure. 3262 * 3263 * This routine creates a FC port for the upper layer protocol. The FC port 3264 * can be created on top of either a physical port or a virtual port provided 3265 * by the HBA. This routine also allocates a SCSI host data structure (shost) 3266 * and associates the FC port created before adding the shost into the SCSI 3267 * layer. 3268 * 3269 * Return codes 3270 * @vport - pointer to the virtual N_Port data structure. 3271 * NULL - port create failed. 
3272 **/ 3273 struct lpfc_vport * 3274 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 3275 { 3276 struct lpfc_vport *vport; 3277 struct Scsi_Host *shost; 3278 int error = 0; 3279 3280 if (dev != &phba->pcidev->dev) { 3281 shost = scsi_host_alloc(&lpfc_vport_template, 3282 sizeof(struct lpfc_vport)); 3283 } else { 3284 if (phba->sli_rev == LPFC_SLI_REV4) 3285 shost = scsi_host_alloc(&lpfc_template, 3286 sizeof(struct lpfc_vport)); 3287 else 3288 shost = scsi_host_alloc(&lpfc_template_s3, 3289 sizeof(struct lpfc_vport)); 3290 } 3291 if (!shost) 3292 goto out; 3293 3294 vport = (struct lpfc_vport *) shost->hostdata; 3295 vport->phba = phba; 3296 vport->load_flag |= FC_LOADING; 3297 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3298 vport->fc_rscn_flush = 0; 3299 3300 lpfc_get_vport_cfgparam(vport); 3301 shost->unique_id = instance; 3302 shost->max_id = LPFC_MAX_TARGET; 3303 shost->max_lun = vport->cfg_max_luns; 3304 shost->this_id = -1; 3305 shost->max_cmd_len = 16; 3306 shost->nr_hw_queues = phba->cfg_fcp_io_channel; 3307 if (phba->sli_rev == LPFC_SLI_REV4) { 3308 shost->dma_boundary = 3309 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3310 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 3311 } 3312 3313 /* 3314 * Set initial can_queue value since 0 is no longer supported and 3315 * scsi_add_host will fail. This will be adjusted later based on the 3316 * max xri value determined in hba setup. 3317 */ 3318 shost->can_queue = phba->cfg_hba_queue_depth - 10; 3319 if (dev != &phba->pcidev->dev) { 3320 shost->transportt = lpfc_vport_transport_template; 3321 vport->port_type = LPFC_NPIV_PORT; 3322 } else { 3323 shost->transportt = lpfc_transport_template; 3324 vport->port_type = LPFC_PHYSICAL_PORT; 3325 } 3326 3327 /* Initialize all internally managed lists. */ 3328 INIT_LIST_HEAD(&vport->fc_nodes); 3329 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3330 spin_lock_init(&vport->work_port_lock); 3331 3332 init_timer(&vport->fc_disctmo); 3333 vport->fc_disctmo.function = lpfc_disc_timeout; 3334 vport->fc_disctmo.data = (unsigned long)vport; 3335 3336 init_timer(&vport->fc_fdmitmo); 3337 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 3338 vport->fc_fdmitmo.data = (unsigned long)vport; 3339 3340 init_timer(&vport->els_tmofunc); 3341 vport->els_tmofunc.function = lpfc_els_timeout; 3342 vport->els_tmofunc.data = (unsigned long)vport; 3343 3344 init_timer(&vport->delayed_disc_tmo); 3345 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 3346 vport->delayed_disc_tmo.data = (unsigned long)vport; 3347 3348 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3349 if (error) 3350 goto out_put_shost; 3351 3352 spin_lock_irq(&phba->hbalock); 3353 list_add_tail(&vport->listentry, &phba->port_list); 3354 spin_unlock_irq(&phba->hbalock); 3355 return vport; 3356 3357 out_put_shost: 3358 scsi_host_put(shost); 3359 out: 3360 return NULL; 3361 } 3362 3363 /** 3364 * destroy_port - destroy an FC port 3365 * @vport: pointer to an lpfc virtual N_Port data structure. 3366 * 3367 * This routine destroys a FC port from the upper layer protocol. All the 3368 * resources associated with the port are released. 
3369 **/
3370 void
3371 destroy_port(struct lpfc_vport *vport)
3372 {
3373 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3374 struct lpfc_hba *phba = vport->phba;
3375 
3376 lpfc_debugfs_terminate(vport);
3377 fc_remove_host(shost);
3378 scsi_remove_host(shost);
3379 
3380 spin_lock_irq(&phba->hbalock);
3381 list_del_init(&vport->listentry);
3382 spin_unlock_irq(&phba->hbalock);
3383 
3384 lpfc_cleanup(vport);
3385 return;
3386 }
3387 
3388 /**
3389 * lpfc_get_instance - Get a unique integer ID
3390 *
3391 * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
3392 * uses the kernel idr facility to perform the task.
3393 *
3394 * Return codes:
3395 * instance - a unique integer ID allocated as the new instance.
3396 * -1 - lpfc get instance failed.
3397 **/
3398 int
3399 lpfc_get_instance(void)
3400 {
3401 int ret;
3402 
3403 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3404 return ret < 0 ? -1 : ret;
3405 }
3406 
3407 /**
3408 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
3409 * @shost: pointer to SCSI host data structure.
3410 * @time: elapsed time of the scan in jiffies.
3411 *
3412 * This routine is called by the SCSI layer with a SCSI host to determine
3413 * whether the host scan is finished.
3414 *
3415 * Note: there is no scan_start function as adapter initialization will have
3416 * asynchronously kicked off the link initialization.
3417 *
3418 * Return codes
3419 * 0 - SCSI host scan is not over yet.
3420 * 1 - SCSI host scan is over.
3421 **/
3422 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3423 {
3424 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3425 struct lpfc_hba *phba = vport->phba;
3426 int stat = 0;
3427 
3428 spin_lock_irq(shost->host_lock);
3429 
3430 if (vport->load_flag & FC_UNLOADING) {
3431 stat = 1;
3432 goto finished;
3433 }
3434 if (time >= msecs_to_jiffies(30 * 1000)) {
3435 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3436 "0461 Scanning longer than 30 "
3437 "seconds. Continuing initialization\n");
3438 stat = 1;
3439 goto finished;
3440 }
3441 if (time >= msecs_to_jiffies(15 * 1000) &&
3442 phba->link_state <= LPFC_LINK_DOWN) {
3443 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3444 "0465 Link down longer than 15 "
3445 "seconds. Continuing initialization\n");
3446 stat = 1;
3447 goto finished;
3448 }
3449 
3450 if (vport->port_state != LPFC_VPORT_READY)
3451 goto finished;
3452 if (vport->num_disc_nodes || vport->fc_prli_sent)
3453 goto finished;
3454 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3455 goto finished;
3456 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3457 goto finished;
3458 
3459 stat = 1;
3460 
3461 finished:
3462 spin_unlock_irq(shost->host_lock);
3463 return stat;
3464 }
3465 
3466 /**
3467 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
3468 * @shost: pointer to SCSI host data structure.
3469 *
3470 * This routine initializes the attributes of a given SCSI host on an FC
3471 * port. The SCSI host can be either on top of a physical port or a virtual port.
3472 **/
3473 void lpfc_host_attrib_init(struct Scsi_Host *shost)
3474 {
3475 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3476 struct lpfc_hba *phba = vport->phba;
3477 /*
3478 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
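* The node and port names reported here are taken from the service
* parameters obtained during setup.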
3479 */ 3480 3481 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 3482 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3483 fc_host_supported_classes(shost) = FC_COS_CLASS3; 3484 3485 memset(fc_host_supported_fc4s(shost), 0, 3486 sizeof(fc_host_supported_fc4s(shost))); 3487 fc_host_supported_fc4s(shost)[2] = 1; 3488 fc_host_supported_fc4s(shost)[7] = 1; 3489 3490 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 3491 sizeof fc_host_symbolic_name(shost)); 3492 3493 fc_host_supported_speeds(shost) = 0; 3494 if (phba->lmt & LMT_16Gb) 3495 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 3496 if (phba->lmt & LMT_10Gb) 3497 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 3498 if (phba->lmt & LMT_8Gb) 3499 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 3500 if (phba->lmt & LMT_4Gb) 3501 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 3502 if (phba->lmt & LMT_2Gb) 3503 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 3504 if (phba->lmt & LMT_1Gb) 3505 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 3506 3507 fc_host_maxframe_size(shost) = 3508 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 3509 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 3510 3511 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 3512 3513 /* This value is also unchanging */ 3514 memset(fc_host_active_fc4s(shost), 0, 3515 sizeof(fc_host_active_fc4s(shost))); 3516 fc_host_active_fc4s(shost)[2] = 1; 3517 fc_host_active_fc4s(shost)[7] = 1; 3518 3519 fc_host_max_npiv_vports(shost) = phba->max_vpi; 3520 spin_lock_irq(shost->host_lock); 3521 vport->load_flag &= ~FC_LOADING; 3522 spin_unlock_irq(shost->host_lock); 3523 } 3524 3525 /** 3526 * lpfc_stop_port_s3 - Stop SLI3 device port 3527 * @phba: pointer to lpfc hba data structure. 3528 * 3529 * This routine is invoked to stop an SLI3 device port, it stops the device 3530 * from generating interrupts and stops the device driver's timers for the 3531 * device. 3532 **/ 3533 static void 3534 lpfc_stop_port_s3(struct lpfc_hba *phba) 3535 { 3536 /* Clear all interrupt enable conditions */ 3537 writel(0, phba->HCregaddr); 3538 readl(phba->HCregaddr); /* flush */ 3539 /* Clear all pending interrupts */ 3540 writel(0xffffffff, phba->HAregaddr); 3541 readl(phba->HAregaddr); /* flush */ 3542 3543 /* Reset some HBA SLI setup states */ 3544 lpfc_stop_hba_timers(phba); 3545 phba->pport->work_port_events = 0; 3546 } 3547 3548 /** 3549 * lpfc_stop_port_s4 - Stop SLI4 device port 3550 * @phba: pointer to lpfc hba data structure. 3551 * 3552 * This routine is invoked to stop an SLI4 device port, it stops the device 3553 * from generating interrupts and stops the device driver's timers for the 3554 * device. 3555 **/ 3556 static void 3557 lpfc_stop_port_s4(struct lpfc_hba *phba) 3558 { 3559 /* Reset some HBA SLI4 setup states */ 3560 lpfc_stop_hba_timers(phba); 3561 phba->pport->work_port_events = 0; 3562 phba->sli4_hba.intr_enable = 0; 3563 } 3564 3565 /** 3566 * lpfc_stop_port - Wrapper function for stopping hba port 3567 * @phba: Pointer to HBA context object. 3568 * 3569 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 3570 * the API jump table function pointer from the lpfc_hba struct. 3571 **/ 3572 void 3573 lpfc_stop_port(struct lpfc_hba *phba) 3574 { 3575 phba->lpfc_stop_port(phba); 3576 } 3577 3578 /** 3579 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 3580 * @phba: Pointer to hba for which this call is being executed. 
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: unsigned long holding the pointer to lpfc hba data structure.
 *
 * This routine is invoked when the FCF table rediscover wait timer has
 * timed out. If new FCF record(s) have been discovered during the wait
 * period, a new FCF event shall be added to the FCOE async event list,
 * and then the worker thread shall be woken up for processing from the
 * worker thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
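 *
 * For example, per the switch statement below, an ACQE reporting
 * LPFC_ASYNC_LINK_STATUS_LOGICAL_UP translates to LPFC_ATT_LINK_UP, while
 * a bare physical link up translates to LPFC_ATT_RESERVED so that the
 * event is ignored until the logical link comes up.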
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link speed and translate
 * it into the base driver's link-attention link speed coding.
 *
 * Return: Link-attention link speed in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
				struct lpfc_acqe_link *acqe_link)
{
	uint8_t link_speed;

	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
	case LPFC_ASYNC_LINK_SPEED_ZERO:
	case LPFC_ASYNC_LINK_SPEED_10MBPS:
	case LPFC_ASYNC_LINK_SPEED_100MBPS:
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	case LPFC_ASYNC_LINK_SPEED_1GBPS:
		link_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_ASYNC_LINK_SPEED_10GBPS:
		link_speed = LPFC_LINK_SPEED_10GHZ;
		break;
	case LPFC_ASYNC_LINK_SPEED_20GBPS:
	case LPFC_ASYNC_LINK_SPEED_25GBPS:
	case LPFC_ASYNC_LINK_SPEED_40GBPS:
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0483 Invalid link-attention link speed: x%x\n",
				bf_get(lpfc_acqe_link_speed, acqe_link));
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	}
	return link_speed;
}

/**
 * lpfc_sli_port_speed_get - Get sli3 fc port link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	switch (phba->fc_linkspeed) {
	case LPFC_LINK_SPEED_1GHZ:
		link_speed = 1000;
		break;
	case LPFC_LINK_SPEED_2GHZ:
		link_speed = 2000;
		break;
	case LPFC_LINK_SPEED_4GHZ:
		link_speed = 4000;
		break;
	case LPFC_LINK_SPEED_8GHZ:
		link_speed = 8000;
		break;
	case LPFC_LINK_SPEED_10GHZ:
		link_speed = 10000;
		break;
	case LPFC_LINK_SPEED_16GHZ:
		link_speed = 16000;
		break;
	default:
		link_speed = 0;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
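 *
 * For example, per the switch below, evt_code LPFC_TRAILER_CODE_FC with
 * speed_code LPFC_FC_LA_SPEED_16G parses to 16000 Mbps.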
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * a link speed value in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
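 *
 * In outline, the handler below allocates a mailbox and a DMA buffer,
 * builds a READ_TOPOLOGY command, and either issues it to the port
 * (FC mode) or fakes the completion locally (FCoE mode).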
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have finished processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2898 The lpfc_dmabuf allocation failed\n"); 4056 goto out_free_pmb; 4057 } 4058 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4059 if (!mp->virt) { 4060 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4061 "2899 The mbuf allocation failed\n"); 4062 goto out_free_dmabuf; 4063 } 4064 4065 /* Cleanup any outstanding ELS commands */ 4066 lpfc_els_flush_all_cmd(phba); 4067 4068 /* Block ELS IOCBs until we have done process link event */ 4069 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 4070 4071 /* Update link event statistics */ 4072 phba->sli.slistat.link_event++; 4073 4074 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4075 lpfc_read_topology(phba, pmb, mp); 4076 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4077 pmb->vport = phba->pport; 4078 4079 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4080 if (rc == MBX_NOT_FINISHED) 4081 goto out_free_dmabuf; 4082 return; 4083 4084 out_free_dmabuf: 4085 kfree(mp); 4086 out_free_pmb: 4087 mempool_free(pmb, phba->mbox_mem_pool); 4088 } 4089 4090 /** 4091 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 4092 * @phba: pointer to lpfc hba data structure. 4093 * @acqe_fc: pointer to the async SLI completion queue entry. 4094 * 4095 * This routine is to handle the SLI4 asynchronous SLI events. 4096 **/ 4097 static void 4098 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 4099 { 4100 char port_name; 4101 char message[128]; 4102 uint8_t status; 4103 uint8_t evt_type; 4104 struct temp_event temp_event_data; 4105 struct lpfc_acqe_misconfigured_event *misconfigured; 4106 struct Scsi_Host *shost; 4107 4108 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 4109 4110 /* Special case Lancer */ 4111 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 4112 LPFC_SLI_INTF_IF_TYPE_2) { 4113 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4114 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 4115 "x%08x SLI Event Type:%d\n", 4116 acqe_sli->event_data1, acqe_sli->event_data2, 4117 evt_type); 4118 return; 4119 } 4120 4121 port_name = phba->Port[0]; 4122 if (port_name == 0x00) 4123 port_name = '?'; /* get port name is empty */ 4124 4125 switch (evt_type) { 4126 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 4127 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4128 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 4129 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4130 4131 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4132 "3190 Over Temperature:%d Celsius- Port Name %c\n", 4133 acqe_sli->event_data1, port_name); 4134 4135 shost = lpfc_shost_from_vport(phba->pport); 4136 fc_host_post_vendor_event(shost, fc_get_event_number(), 4137 sizeof(temp_event_data), 4138 (char *)&temp_event_data, 4139 SCSI_NL_VID_TYPE_PCI 4140 | PCI_VENDOR_ID_EMULEX); 4141 break; 4142 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 4143 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4144 temp_event_data.event_code = LPFC_NORMAL_TEMP; 4145 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4146 4147 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4148 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 4149 acqe_sli->event_data1, port_name); 4150 4151 shost = lpfc_shost_from_vport(phba->pport); 4152 fc_host_post_vendor_event(shost, fc_get_event_number(), 4153 sizeof(temp_event_data), 4154 (char *)&temp_event_data, 4155 SCSI_NL_VID_TYPE_PCI 4156 | PCI_VENDOR_ID_EMULEX); 4157 break; 4158 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 4159 misconfigured = (struct lpfc_acqe_misconfigured_event *) 4160 
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3,
					&misconfigured->theEvent);
			break;
		default:
			status = ~LPFC_SLI_EVENT_STATUS_VALID;
			break;
		}

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			return; /* no message if the sfp is okay */
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Misconfigured Physical Port - "
				"Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				evt_type);
		break;
	}
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
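 *
 * Typical usage elsewhere in this file (see the CVL event handling below):
 *
 *   vport = lpfc_find_vport_by_vpid(phba, acqe_fip->index);
 *   ndlp = lpfc_sli4_perform_vport_cvl(vport);
 *   if (!ndlp)
 *           break;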
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
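 *
 * The event types dispatched below are: new/modified FCF, FCF table full,
 * FCF dead, and Clear Virtual Link (CVL).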
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery in case the current FCF is no
		 * longer valid, as we are not already in the middle of the
		 * FCF failover process.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL on all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request the port to rediscover
			 * the entire FCF table for a fast recovery from
			 * the possible case that the current FCF is no
			 * longer valid, if we are not already in the FCF
			 * failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, fail "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
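 *
 * For example, a raw speed value of 100 reported in the ACQE corresponds
 * to a logical link speed of 100 * 10 = 1000 Mbps.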
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process the FCF table
 * rediscovery pending completion event.
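 *
 * In outline, the handler below clears FCF_REDISC_EVT, arms fast failover
 * via FCF_REDISC_FOV, and rescans the FCF table from the first entry
 * (LPFC_FCOE_FCF_GET_FIRST).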
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently active interrupt mode of
 * the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
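 *
 * The body below follows the standard PCI bring-up sequence: enable the
 * device's memory BARs, request the selected regions, then set bus
 * mastering and save state for EEH recovery.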
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars = 0;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device, bars:x%x\n", bars);
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * any outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the SR-IOV extended capability of the attached PCI
 * physical function and returns the total number of virtual functions
 * (TotalVF) that the device supports, or 0 if the SR-IOV capability is
 * not present.
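 *
 * For reference, the config-space access performed in the body below:
 *
 *   pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
 *   pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);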
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the
 * pci_enable_sriov() API call is not considered an error condition for
 * most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successfully enabled sriov on this "
				"device with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it is attached to.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.ring)
		phba->sli.ring = (struct lpfc_sli_ring *)
			kzalloc(LPFC_SLI3_MAX_RING *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates with the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (i.e., there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
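		 *
		 * Illustrative arithmetic only (not a tuning recommendation):
		 * with cfg_sg_seg_cnt of 64, the pool buffer holds the FCP
		 * command and response plus (64 + 2) = 66 BDEs.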
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it is attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it is attached to.
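 *
 * In outline (per the body below): initialize timers, size the SGL and
 * DMA buffer pools, allocate driver memory, create the bootstrap mailbox,
 * set endian order, read the port configuration, and set up the queue and
 * interrupt vector mappings.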
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_vector_map_info *cpup;
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mboxq;
	int rc, i, hbq_count, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs;
	int fof_vectors = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Before proceeding, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;
	init_timer(&phba->rrq_tmr);
	phba->rrq_tmr.function = lpfc_rrq_timeout;
	phba->rrq_tmr.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;
	/* FCF rediscover timer */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
	 */
	if (!phba->sli.ring)
		phba->sli.ring = kzalloc(
			(LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a SGE for each.
		 * Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (i.e., there could be one for every block
		 * in the IO), we just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
		 * to post 1 page for the SGL.
		 */
	}

	/* Initialize the host templates with the updated values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	/* Initialize buffer queue management fields */
	hbq_count = lpfc_sli_hbq_count();
	for (i = 0; i < hbq_count; ++i)
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
	INIT_LIST_HEAD(&phba->rb_pend_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort scsi buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists.
*/ 5280 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 5281 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 5282 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 5283 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 5284 5285 /* Initialize the driver internal SLI layer lists. */ 5286 lpfc_sli_setup(phba); 5287 lpfc_sli_queue_setup(phba); 5288 5289 /* Allocate device driver memory */ 5290 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 5291 if (rc) 5292 return -ENOMEM; 5293 5294 /* IF Type 2 ports get initialized now. */ 5295 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5296 LPFC_SLI_INTF_IF_TYPE_2) { 5297 rc = lpfc_pci_function_reset(phba); 5298 if (unlikely(rc)) 5299 return -ENODEV; 5300 phba->temp_sensor_support = 1; 5301 } 5302 5303 /* Create the bootstrap mailbox command */ 5304 rc = lpfc_create_bootstrap_mbox(phba); 5305 if (unlikely(rc)) 5306 goto out_free_mem; 5307 5308 /* Set up the host's endian order with the device. */ 5309 rc = lpfc_setup_endian_order(phba); 5310 if (unlikely(rc)) 5311 goto out_free_bsmbx; 5312 5313 /* Set up the hba's configuration parameters. */ 5314 rc = lpfc_sli4_read_config(phba); 5315 if (unlikely(rc)) 5316 goto out_free_bsmbx; 5317 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 5318 if (unlikely(rc)) 5319 goto out_free_bsmbx; 5320 5321 /* IF Type 0 ports get initialized now. */ 5322 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5323 LPFC_SLI_INTF_IF_TYPE_0) { 5324 rc = lpfc_pci_function_reset(phba); 5325 if (unlikely(rc)) 5326 goto out_free_bsmbx; 5327 } 5328 5329 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 5330 GFP_KERNEL); 5331 if (!mboxq) { 5332 rc = -ENOMEM; 5333 goto out_free_bsmbx; 5334 } 5335 5336 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 5337 lpfc_supported_pages(mboxq); 5338 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5339 if (!rc) { 5340 mqe = &mboxq->u.mqe; 5341 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 5342 LPFC_MAX_SUPPORTED_PAGES); 5343 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 5344 switch (pn_page[i]) { 5345 case LPFC_SLI4_PARAMETERS: 5346 phba->sli4_hba.pc_sli4_params.supported = 1; 5347 break; 5348 default: 5349 break; 5350 } 5351 } 5352 /* Read the port's SLI4 Parameters capabilities if supported. */ 5353 if (phba->sli4_hba.pc_sli4_params.supported) 5354 rc = lpfc_pc_sli4_params_get(phba, mboxq); 5355 if (rc) { 5356 mempool_free(mboxq, phba->mbox_mem_pool); 5357 rc = -EIO; 5358 goto out_free_bsmbx; 5359 } 5360 } 5361 /* 5362 * Get sli4 parameters that override parameters from Port capabilities. 5363 * If this call fails, it isn't critical unless the SLI4 parameters come 5364 * back in conflict. 
5365 */
5366 rc = lpfc_get_sli4_parameters(phba, mboxq);
5367 if (rc) {
5368 if (phba->sli4_hba.extents_in_use &&
5369 phba->sli4_hba.rpi_hdrs_in_use) {
5370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5371 "2999 Unsupported SLI4 Parameters "
5372 "Extents and RPI headers enabled.\n");
mempool_free(mboxq, phba->mbox_mem_pool);
5373 goto out_free_bsmbx;
5374 }
5375 }
5376 mempool_free(mboxq, phba->mbox_mem_pool);
5377
5378 /* Verify OAS is supported */
5379 lpfc_sli4_oas_verify(phba);
5380 if (phba->cfg_fof)
5381 fof_vectors = 1;
5382
5383 /* Verify all the SLI4 queues */
5384 rc = lpfc_sli4_queue_verify(phba);
5385 if (rc)
5386 goto out_free_bsmbx;
5387
5388 /* Create driver internal CQE event pool */
5389 rc = lpfc_sli4_cq_event_pool_create(phba);
5390 if (rc)
5391 goto out_free_bsmbx;
5392
5393 /* Initialize sgl lists per host */
5394 lpfc_init_sgl_list(phba);
5395
5396 /* Allocate and initialize active sgl array */
5397 rc = lpfc_init_active_sgl_array(phba);
5398 if (rc) {
5399 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5400 "1430 Failed to initialize sgl list.\n");
5401 goto out_destroy_cq_event_pool;
5402 }
5403 rc = lpfc_sli4_init_rpi_hdrs(phba);
5404 if (rc) {
5405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5406 "1432 Failed to initialize rpi headers.\n");
5407 goto out_free_active_sgl;
5408 }
5409
5410 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
5411 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
5412 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5413 GFP_KERNEL);
5414 if (!phba->fcf.fcf_rr_bmask) {
5415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5416 "2759 Failed to allocate memory for FCF round "
5417 "robin failover bmask\n");
5418 rc = -ENOMEM;
5419 goto out_remove_rpi_hdrs;
5420 }
5421
5422 phba->sli4_hba.fcp_eq_hdl =
5423 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5424 (fof_vectors + phba->cfg_fcp_io_channel)),
5425 GFP_KERNEL);
5426 if (!phba->sli4_hba.fcp_eq_hdl) {
5427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5428 "2572 Failed to allocate memory for "
5429 "fast-path per-EQ handle array\n");
5430 rc = -ENOMEM;
5431 goto out_free_fcf_rr_bmask;
5432 }
5433
5434 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5435 (fof_vectors +
5436 phba->cfg_fcp_io_channel)), GFP_KERNEL);
5437 if (!phba->sli4_hba.msix_entries) {
5438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5439 "2573 Failed to allocate memory for msi-x "
5440 "interrupt vector entries\n");
5441 rc = -ENOMEM;
5442 goto out_free_fcp_eq_hdl;
5443 }
5444
5445 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5446 phba->sli4_hba.num_present_cpu),
5447 GFP_KERNEL);
5448 if (!phba->sli4_hba.cpu_map) {
5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5450 "3327 Failed to allocate memory for msi-x "
5451 "interrupt vector mapping\n");
5452 rc = -ENOMEM;
5453 goto out_free_msix;
5454 }
5455 if (lpfc_used_cpu == NULL) {
5456 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
5457 GFP_KERNEL);
5458 if (!lpfc_used_cpu) {
5459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5460 "3335 Failed to allocate memory for msi-x "
5461 "interrupt vector mapping\n");
5462 kfree(phba->sli4_hba.cpu_map);
5463 rc = -ENOMEM;
5464 goto out_free_msix;
5465 }
5466 for (i = 0; i < lpfc_present_cpu; i++)
5467 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
5468 }
5469
5470 /* Initialize io channels for round robin */
5471 cpup = phba->sli4_hba.cpu_map;
5472 rc = 0;
5473 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5474 cpup->channel_id = rc;
5475 rc++;
5476 if (rc >= phba->cfg_fcp_io_channel)
5477 rc = 0;
cpup++;
5478 }
5479
5480 /*
5481 * Enable SR-IOV virtual functions if supported and configured
5482 * through the module parameter.
5483 */
5484 if (phba->cfg_sriov_nr_virtfn > 0) {
5485 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5486 phba->cfg_sriov_nr_virtfn);
5487 if (rc) {
5488 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5489 "3020 Requested number of SR-IOV "
5490 "virtual functions (%d) is not "
5491 "supported\n",
5492 phba->cfg_sriov_nr_virtfn);
5493 phba->cfg_sriov_nr_virtfn = 0;
5494 }
5495 }
5496
5497 return 0;
5498
5499 out_free_msix:
5500 kfree(phba->sli4_hba.msix_entries);
5501 out_free_fcp_eq_hdl:
5502 kfree(phba->sli4_hba.fcp_eq_hdl);
5503 out_free_fcf_rr_bmask:
5504 kfree(phba->fcf.fcf_rr_bmask);
5505 out_remove_rpi_hdrs:
5506 lpfc_sli4_remove_rpi_hdrs(phba);
5507 out_free_active_sgl:
5508 lpfc_free_active_sgl(phba);
5509 out_destroy_cq_event_pool:
5510 lpfc_sli4_cq_event_pool_destroy(phba);
5511 out_free_bsmbx:
5512 lpfc_destroy_bootstrap_mbox(phba);
5513 out_free_mem:
5514 lpfc_mem_free(phba);
5515 return rc;
5516 }
5517
5518 /**
5519 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
5520 * @phba: pointer to lpfc hba data structure.
5521 *
5522 * This routine is invoked to unset the driver internal resources set up
5523 * specifically to support the SLI-4 HBA device it is attached to.
5524 **/
5525 static void
5526 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5527 {
5528 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5529
5530 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5531 kfree(phba->sli4_hba.cpu_map);
5532 phba->sli4_hba.num_present_cpu = 0;
5533 phba->sli4_hba.num_online_cpu = 0;
5534 phba->sli4_hba.curr_disp_cpu = 0;
5535
5536 /* Free memory allocated for msi-x interrupt vector entries */
5537 kfree(phba->sli4_hba.msix_entries);
5538
5539 /* Free memory allocated for fast-path work queue handles */
5540 kfree(phba->sli4_hba.fcp_eq_hdl);
5541
5542 /* Free the allocated rpi headers. */
5543 lpfc_sli4_remove_rpi_hdrs(phba);
5544 lpfc_sli4_remove_rpis(phba);
5545
5546 /* Free eligible FCF index bmask */
5547 kfree(phba->fcf.fcf_rr_bmask);
5548
5549 /* Free the ELS sgl list */
5550 lpfc_free_active_sgl(phba);
5551 lpfc_free_els_sgl_list(phba);
5552
5553 /* Free the completion queue EQ event pool */
5554 lpfc_sli4_cq_event_release_all(phba);
5555 lpfc_sli4_cq_event_pool_destroy(phba);
5556
5557 /* Release resource identifiers. */
5558 lpfc_sli4_dealloc_resource_identifiers(phba);
5559
5560 /* Free the bsmbx region. */
5561 lpfc_destroy_bootstrap_mbox(phba);
5562
5563 /* Free the SLI Layer memory with SLI4 HBAs */
5564 lpfc_mem_free_all(phba);
5565
5566 /* Free the current connect table */
5567 list_for_each_entry_safe(conn_entry, next_conn_entry,
5568 &phba->fcf_conn_rec_list, list) {
5569 list_del_init(&conn_entry->list);
5570 kfree(conn_entry);
5571 }
5572
5573 return;
5574 }
5575
5576 /**
5577 * lpfc_init_api_table_setup - Set up init api function jump table
5578 * @phba: The hba struct for which this call is being executed.
5579 * @dev_grp: The HBA PCI-Device group number.
5580 *
5581 * This routine sets up the device INIT interface API function jump table
5582 * in @phba struct.
5583 *
5584 * Returns: 0 - success, -ENODEV - failure.
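*
* As an illustrative sketch (not new driver logic): once this table is
* populated, SLI-revision neutral code can dispatch through the hooks
* without knowing the device group, e.g.
*
*	rc = phba->lpfc_hba_down_post(phba);
*
* which resolves to lpfc_hba_down_post_s3() for LPFC_PCI_DEV_LP (SLI-3)
* parts and lpfc_hba_down_post_s4() for LPFC_PCI_DEV_OC (SLI-4) parts.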
5585 **/
5586 int
5587 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5588 {
5589 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5590 phba->lpfc_hba_down_link = lpfc_hba_down_link;
5591 phba->lpfc_selective_reset = lpfc_selective_reset;
5592 switch (dev_grp) {
5593 case LPFC_PCI_DEV_LP:
5594 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5595 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5596 phba->lpfc_stop_port = lpfc_stop_port_s3;
5597 break;
5598 case LPFC_PCI_DEV_OC:
5599 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5600 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5601 phba->lpfc_stop_port = lpfc_stop_port_s4;
5602 break;
5603 default:
5604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5605 "1431 Invalid HBA PCI-device group: 0x%x\n",
5606 dev_grp);
5607 return -ENODEV;
5609 }
5610 return 0;
5611 }
5612
5613 /**
5614 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5615 * @phba: pointer to lpfc hba data structure.
5616 *
5617 * This routine is invoked to set up the driver internal resources before the
5618 * device specific resource setup to support the HBA device it is attached to.
5619 *
5620 * Return codes
5621 * 0 - successful
5622 * other values - error
5623 **/
5624 static int
5625 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5626 {
5627 /*
5628 * Driver resources common to all SLI revisions
5629 */
5630 atomic_set(&phba->fast_event_count, 0);
5631 spin_lock_init(&phba->hbalock);
5632
5633 /* Initialize ndlp management spinlock */
5634 spin_lock_init(&phba->ndlp_lock);
5635
5636 INIT_LIST_HEAD(&phba->port_list);
5637 INIT_LIST_HEAD(&phba->work_list);
5638 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5639
5640 /* Initialize the wait queue head for the kernel thread */
5641 init_waitqueue_head(&phba->work_waitq);
5642
5643 /* Initialize the scsi buffer list used by driver for scsi IO */
5644 spin_lock_init(&phba->scsi_buf_list_get_lock);
5645 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5646 spin_lock_init(&phba->scsi_buf_list_put_lock);
5647 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5648
5649 /* Initialize the fabric iocb list */
5650 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5651
5652 /* Initialize list to save ELS buffers */
5653 INIT_LIST_HEAD(&phba->elsbuf);
5654
5655 /* Initialize FCF connection rec list */
5656 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5657
5658 /* Initialize OAS configuration list */
5659 spin_lock_init(&phba->devicelock);
5660 INIT_LIST_HEAD(&phba->luns);
5661
5662 return 0;
5663 }
5664
5665 /**
5666 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5667 * @phba: pointer to lpfc hba data structure.
5668 *
5669 * This routine is invoked to set up the driver internal resources after the
5670 * device specific resource setup to support the HBA device it is attached to.
5671 *
5672 * Return codes
5673 * 0 - successful
5674 * other values - error
5675 **/
5676 static int
5677 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5678 {
5679 int error;
5680
5681 /* Startup the kernel thread for this host adapter. */
5682 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5683 "lpfc_worker_%d", phba->brd_no);
5684 if (IS_ERR(phba->worker_thread)) {
5685 error = PTR_ERR(phba->worker_thread);
5686 return error;
5687 }
5688
5689 return 0;
5690 }
5691
5692 /**
5693 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5694 * @phba: pointer to lpfc hba data structure.
5695 *
5696 * This routine is invoked to unset the driver internal resources set up after
5697 * the device specific resource setup for supporting the HBA device it
5698 * is attached to.
5699 **/
5700 static void
5701 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5702 {
5703 /* Stop kernel worker thread */
5704 kthread_stop(phba->worker_thread);
5705 }
5706
5707 /**
5708 * lpfc_free_iocb_list - Free iocb list.
5709 * @phba: pointer to lpfc hba data structure.
5710 *
5711 * This routine is invoked to free the driver's IOCB list and memory.
5712 **/
5713 static void
5714 lpfc_free_iocb_list(struct lpfc_hba *phba)
5715 {
5716 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5717
5718 spin_lock_irq(&phba->hbalock);
5719 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5720 &phba->lpfc_iocb_list, list) {
5721 list_del(&iocbq_entry->list);
5722 kfree(iocbq_entry);
5723 phba->total_iocbq_bufs--;
5724 }
5725 spin_unlock_irq(&phba->hbalock);
5726
5727 return;
5728 }
5729
5730 /**
5731 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5732 * @phba: pointer to lpfc hba data structure.
5733 *
5734 * This routine is invoked to allocate and initialize the driver's IOCB
5735 * list and set up the IOCB tag array accordingly.
5736 *
5737 * Return codes
5738 * 0 - successful
5739 * other values - error
5740 **/
5741 static int
5742 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5743 {
5744 struct lpfc_iocbq *iocbq_entry = NULL;
5745 uint16_t iotag;
5746 int i;
5747
5748 /* Initialize and populate the iocb list per host. */
5749 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5750 for (i = 0; i < iocb_count; i++) {
5751 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5752 if (iocbq_entry == NULL) {
5753 printk(KERN_ERR "%s: only allocated %d iocbs of "
5754 "expected %d count. Unloading driver.\n",
5755 __func__, i, iocb_count);
5756 goto out_free_iocbq;
5757 }
5758
5759 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5760 if (iotag == 0) {
5761 kfree(iocbq_entry);
5762 printk(KERN_ERR "%s: failed to allocate IOTAG. "
5763 "Unloading driver.\n", __func__);
5764 goto out_free_iocbq;
5765 }
5766 iocbq_entry->sli4_lxritag = NO_XRI;
5767 iocbq_entry->sli4_xritag = NO_XRI;
5768
5769 spin_lock_irq(&phba->hbalock);
5770 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5771 phba->total_iocbq_bufs++;
5772 spin_unlock_irq(&phba->hbalock);
5773 }
5774
5775 return 0;
5776
5777 out_free_iocbq:
5778 lpfc_free_iocb_list(phba);
5779
5780 return -ENOMEM;
5781 }
5782
5783 /**
5784 * lpfc_free_sgl_list - Free a given sgl list.
5785 * @phba: pointer to lpfc hba data structure.
5786 * @sglq_list: pointer to the head of sgl list.
5787 *
5788 * This routine is invoked to free a given sgl list and memory.
5789 **/
5790 void
5791 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5792 {
5793 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5794
5795 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5796 list_del(&sglq_entry->list);
5797 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5798 kfree(sglq_entry);
5799 }
5800 }
5801
5802 /**
5803 * lpfc_free_els_sgl_list - Free els sgl list.
5804 * @phba: pointer to lpfc hba data structure.
5805 *
5806 * This routine is invoked to free the driver's els sgl list and memory.
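*
* Note: the sgls are first spliced onto a local list under both hbalock
* and the ELS ring's ring_lock (see below), so the per-entry
* lpfc_mbuf_free() calls in lpfc_free_sgl_list() then run with no locks
* held.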
5807 **/
5808 static void
5809 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5810 {
5811 LIST_HEAD(sglq_list);
5812 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5813
5814 /* Retrieve all els sgls from driver list */
5815 spin_lock_irq(&phba->hbalock);
5816 spin_lock(&pring->ring_lock);
5817 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5818 spin_unlock(&pring->ring_lock);
5819 spin_unlock_irq(&phba->hbalock);
5820
5821 /* Now free the sgl list */
5822 lpfc_free_sgl_list(phba, &sglq_list);
5823 }
5824
5825 /**
5826 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5827 * @phba: pointer to lpfc hba data structure.
5828 *
5829 * This routine is invoked to allocate the driver's active sgl memory.
5830 * This array will hold the sglq_entry's for active IOs.
5831 **/
5832 static int
5833 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5834 {
5835 int size;
5836 size = sizeof(struct lpfc_sglq *);
5837 size *= phba->sli4_hba.max_cfg_param.max_xri;
5838
5839 phba->sli4_hba.lpfc_sglq_active_list =
5840 kzalloc(size, GFP_KERNEL);
5841 if (!phba->sli4_hba.lpfc_sglq_active_list)
5842 return -ENOMEM;
5843 return 0;
5844 }
5845
5846 /**
5847 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5848 * @phba: pointer to lpfc hba data structure.
5849 *
5850 * This routine is invoked to walk through the array of active sglq entries
5851 * and free all of the resources.
5852 * This is just a place holder for now.
5853 **/
5854 static void
5855 lpfc_free_active_sgl(struct lpfc_hba *phba)
5856 {
5857 kfree(phba->sli4_hba.lpfc_sglq_active_list);
5858 }
5859
5860 /**
5861 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5862 * @phba: pointer to lpfc hba data structure.
5863 *
5864 * This routine is invoked to allocate and initialize the driver's sgl
5865 * list and set up the sgl xritag tag array accordingly.
5866 *
5867 **/
5868 static void
5869 lpfc_init_sgl_list(struct lpfc_hba *phba)
5870 {
5871 /* Initialize and populate the sglq list per host/VF. */
5872 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5873 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5874
5875 /* els xri-sgl book keeping */
5876 phba->sli4_hba.els_xri_cnt = 0;
5877
5878 /* scsi xri-buffer book keeping */
5879 phba->sli4_hba.scsi_xri_cnt = 0;
5880 }
5881
5882 /**
5883 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5884 * @phba: pointer to lpfc hba data structure.
5885 *
5886 * This routine is invoked to post rpi header templates to the
5887 * port for those SLI4 ports that do not support extents. This routine
5888 * posts a PAGE_SIZE memory region to the port to hold up to
5889 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5890 * and should be called only when interrupts are disabled.
5891 *
5892 * Return codes
5893 * 0 - successful
5894 * -ERROR - otherwise.
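*
* As a worked example (assuming a 4KB PAGE_SIZE and 64-byte rpi context
* entries): each posting covers 4096 / 64 = 64 rpis, so a port with a
* hypothetical max_rpi of 256 would consume four such postings, each one
* advancing next_rpi by 64.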
5895 **/
5896 int
5897 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5898 {
5899 int rc = 0;
5900 struct lpfc_rpi_hdr *rpi_hdr;
5901
5902 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5903 if (!phba->sli4_hba.rpi_hdrs_in_use)
5904 return rc;
5905 if (phba->sli4_hba.extents_in_use)
5906 return -EIO;
5907
5908 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5909 if (!rpi_hdr) {
5910 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5911 "0391 Error during rpi post operation\n");
5912 lpfc_sli4_remove_rpis(phba);
5913 rc = -ENODEV;
5914 }
5915
5916 return rc;
5917 }
5918
5919 /**
5920 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5921 * @phba: pointer to lpfc hba data structure.
5922 *
5923 * This routine is invoked to allocate a single 4KB memory region to
5924 * support rpis and store it in the phba. This single region
5925 * provides support for up to 64 rpis. The region is used globally
5926 * by the device.
5927 *
5928 * Returns:
5929 * A valid rpi hdr on success.
5930 * A NULL pointer on any failure.
5931 **/
5932 struct lpfc_rpi_hdr *
5933 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5934 {
5935 uint16_t rpi_limit, curr_rpi_range;
5936 struct lpfc_dmabuf *dmabuf;
5937 struct lpfc_rpi_hdr *rpi_hdr;
5938 uint32_t rpi_count;
5939
5940 /*
5941 * If the SLI4 port supports extents, posting the rpi header isn't
5942 * required. Set the expected maximum count and let the actual value
5943 * get set when extents are fully allocated.
5944 */
5945 if (!phba->sli4_hba.rpi_hdrs_in_use)
5946 return NULL;
5947 if (phba->sli4_hba.extents_in_use)
5948 return NULL;
5949
5950 /* The limit on the logical index is just the max_rpi count. */
5951 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5952 phba->sli4_hba.max_cfg_param.max_rpi - 1;
5953
5954 spin_lock_irq(&phba->hbalock);
5955 /*
5956 * Establish the starting RPI in this header block. The starting
5957 * rpi is normalized to a zero base because the physical rpi is
5958 * port based.
5959 */
5960 curr_rpi_range = phba->sli4_hba.next_rpi;
5961 spin_unlock_irq(&phba->hbalock);
5962
5963 /*
5964 * The port has a limited number of rpis. The increment here
5965 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5966 * and to allow the full max_rpi range per port.
5967 */
5968 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5969 rpi_count = rpi_limit - curr_rpi_range;
5970 else
5971 rpi_count = LPFC_RPI_HDR_COUNT;
5972
5973 if (!rpi_count)
5974 return NULL;
5975 /*
5976 * First allocate the protocol header region for the port. The
5977 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5978 */
5979 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5980 if (!dmabuf)
5981 return NULL;
5982
5983 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
5984 LPFC_HDR_TEMPLATE_SIZE,
5985 &dmabuf->phys, GFP_KERNEL);
5986 if (!dmabuf->virt) {
5987 rpi_hdr = NULL;
5988 goto err_free_dmabuf;
5989 }
5990
5991 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5992 rpi_hdr = NULL;
5993 goto err_free_coherent;
5994 }
5995
5996 /* Save the rpi header data for cleanup later. */
5997 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5998 if (!rpi_hdr)
5999 goto err_free_coherent;
6000
6001 rpi_hdr->dmabuf = dmabuf;
6002 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
6003 rpi_hdr->page_count = 1;
6004 spin_lock_irq(&phba->hbalock);
6005
6006 /* The rpi_hdr stores the logical index only.
*/
6007 rpi_hdr->start_rpi = curr_rpi_range;
6008 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
6009
6010 /*
6011 * The next_rpi stores the next logical modulo-64 rpi value used
6012 * to post physical rpis in subsequent rpi postings.
6013 */
6014 phba->sli4_hba.next_rpi += rpi_count;
6015 spin_unlock_irq(&phba->hbalock);
6016 return rpi_hdr;
6017
6018 err_free_coherent:
6019 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
6020 dmabuf->virt, dmabuf->phys);
6021 err_free_dmabuf:
6022 kfree(dmabuf);
6023 return NULL;
6024 }
6025
6026 /**
6027 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
6028 * @phba: pointer to lpfc hba data structure.
6029 *
6030 * This routine is invoked to remove all memory resources allocated
6031 * to support rpis for SLI4 ports not supporting extents. This routine
6032 * presumes the caller has released all rpis consumed by fabric or port
6033 * logins and is prepared to have the header pages removed.
6034 **/
6035 void
6036 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
6037 {
6038 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
6039
6040 if (!phba->sli4_hba.rpi_hdrs_in_use)
6041 goto exit;
6042
6043 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
6044 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6045 list_del(&rpi_hdr->list);
6046 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
6047 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
6048 kfree(rpi_hdr->dmabuf);
6049 kfree(rpi_hdr);
6050 }
6051 exit:
6052 /* There are no rpis available to the port now. */
6053 phba->sli4_hba.next_rpi = 0;
6054 }
6055
6056 /**
6057 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
6058 * @pdev: pointer to pci device data structure.
6059 *
6060 * This routine is invoked to allocate the driver hba data structure for an
6061 * HBA device. If the allocation is successful, the phba reference to the
6062 * PCI device data structure is set.
6063 *
6064 * Return codes
6065 * pointer to @phba - successful
6066 * NULL - error
6067 **/
6068 static struct lpfc_hba *
6069 lpfc_hba_alloc(struct pci_dev *pdev)
6070 {
6071 struct lpfc_hba *phba;
6072
6073 /* Allocate memory for HBA structure */
6074 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
6075 if (!phba) {
6076 dev_err(&pdev->dev, "failed to allocate hba struct\n");
6077 return NULL;
6078 }
6079
6080 /* Set reference to PCI device in HBA structure */
6081 phba->pcidev = pdev;
6082
6083 /* Assign an unused board number */
6084 phba->brd_no = lpfc_get_instance();
6085 if (phba->brd_no < 0) {
6086 kfree(phba);
6087 return NULL;
6088 }
6089
6090 spin_lock_init(&phba->ct_ev_lock);
6091 INIT_LIST_HEAD(&phba->ct_ev_waiters);
6092
6093 return phba;
6094 }
6095
6096 /**
6097 * lpfc_hba_free - Free driver hba data structure with a device.
6098 * @phba: pointer to lpfc hba data structure.
6099 *
6100 * This routine is invoked to free the driver hba data structure with an
6101 * HBA device.
6102 **/
6103 static void
6104 lpfc_hba_free(struct lpfc_hba *phba)
6105 {
6106 /* Release the driver assigned board number */
6107 idr_remove(&lpfc_hba_index, phba->brd_no);
6108
6109 /* Free memory allocated with sli rings */
6110 kfree(phba->sli.ring);
6111 phba->sli.ring = NULL;
6112
6113 kfree(phba);
6114 return;
6115 }
6116
6117 /**
6118 * lpfc_create_shost - Create hba physical port with associated scsi host.
6119 * @phba: pointer to lpfc hba data structure.
6120 *
6121 * This routine is invoked to create HBA physical port and associate a SCSI
6122 * host with it.
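* The resulting Scsi_Host is stored as the PCI driver data, so later
* stages can recover it via pci_get_drvdata(phba->pcidev), as
* lpfc_post_init_setup() does below.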
6123 * 6124 * Return codes 6125 * 0 - successful 6126 * other values - error 6127 **/ 6128 static int 6129 lpfc_create_shost(struct lpfc_hba *phba) 6130 { 6131 struct lpfc_vport *vport; 6132 struct Scsi_Host *shost; 6133 6134 /* Initialize HBA FC structure */ 6135 phba->fc_edtov = FF_DEF_EDTOV; 6136 phba->fc_ratov = FF_DEF_RATOV; 6137 phba->fc_altov = FF_DEF_ALTOV; 6138 phba->fc_arbtov = FF_DEF_ARBTOV; 6139 6140 atomic_set(&phba->sdev_cnt, 0); 6141 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6142 if (!vport) 6143 return -ENODEV; 6144 6145 shost = lpfc_shost_from_vport(vport); 6146 phba->pport = vport; 6147 lpfc_debugfs_initialize(vport); 6148 /* Put reference to SCSI host to driver's device private data */ 6149 pci_set_drvdata(phba->pcidev, shost); 6150 6151 return 0; 6152 } 6153 6154 /** 6155 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 6156 * @phba: pointer to lpfc hba data structure. 6157 * 6158 * This routine is invoked to destroy HBA physical port and the associated 6159 * SCSI host. 6160 **/ 6161 static void 6162 lpfc_destroy_shost(struct lpfc_hba *phba) 6163 { 6164 struct lpfc_vport *vport = phba->pport; 6165 6166 /* Destroy physical port that associated with the SCSI host */ 6167 destroy_port(vport); 6168 6169 return; 6170 } 6171 6172 /** 6173 * lpfc_setup_bg - Setup Block guard structures and debug areas. 6174 * @phba: pointer to lpfc hba data structure. 6175 * @shost: the shost to be used to detect Block guard settings. 6176 * 6177 * This routine sets up the local Block guard protocol settings for @shost. 6178 * This routine also allocates memory for debugging bg buffers. 6179 **/ 6180 static void 6181 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 6182 { 6183 uint32_t old_mask; 6184 uint32_t old_guard; 6185 6186 int pagecnt = 10; 6187 if (lpfc_prot_mask && lpfc_prot_guard) { 6188 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6189 "1478 Registering BlockGuard with the " 6190 "SCSI layer\n"); 6191 6192 old_mask = lpfc_prot_mask; 6193 old_guard = lpfc_prot_guard; 6194 6195 /* Only allow supported values */ 6196 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 6197 SHOST_DIX_TYPE0_PROTECTION | 6198 SHOST_DIX_TYPE1_PROTECTION); 6199 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); 6200 6201 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 6202 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 6203 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 6204 6205 if (lpfc_prot_mask && lpfc_prot_guard) { 6206 if ((old_mask != lpfc_prot_mask) || 6207 (old_guard != lpfc_prot_guard)) 6208 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6209 "1475 Registering BlockGuard with the " 6210 "SCSI layer: mask %d guard %d\n", 6211 lpfc_prot_mask, lpfc_prot_guard); 6212 6213 scsi_host_set_prot(shost, lpfc_prot_mask); 6214 scsi_host_set_guard(shost, lpfc_prot_guard); 6215 } else 6216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6217 "1479 Not Registering BlockGuard with the SCSI " 6218 "layer, Bad protection parameters: %d %d\n", 6219 old_mask, old_guard); 6220 } 6221 6222 if (!_dump_buf_data) { 6223 while (pagecnt) { 6224 spin_lock_init(&_dump_buf_lock); 6225 _dump_buf_data = 6226 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6227 if (_dump_buf_data) { 6228 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6229 "9043 BLKGRD: allocated %d pages for " 6230 "_dump_buf_data at 0x%p\n", 6231 (1 << pagecnt), _dump_buf_data); 6232 _dump_buf_data_order = pagecnt; 6233 memset(_dump_buf_data, 0, 6234 ((1 << PAGE_SHIFT) << pagecnt)); 6235 
break; 6236 } else 6237 --pagecnt; 6238 } 6239 if (!_dump_buf_data_order) 6240 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6241 "9044 BLKGRD: ERROR unable to allocate " 6242 "memory for hexdump\n"); 6243 } else 6244 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6245 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 6246 "\n", _dump_buf_data); 6247 if (!_dump_buf_dif) { 6248 while (pagecnt) { 6249 _dump_buf_dif = 6250 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6251 if (_dump_buf_dif) { 6252 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6253 "9046 BLKGRD: allocated %d pages for " 6254 "_dump_buf_dif at 0x%p\n", 6255 (1 << pagecnt), _dump_buf_dif); 6256 _dump_buf_dif_order = pagecnt; 6257 memset(_dump_buf_dif, 0, 6258 ((1 << PAGE_SHIFT) << pagecnt)); 6259 break; 6260 } else 6261 --pagecnt; 6262 } 6263 if (!_dump_buf_dif_order) 6264 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6265 "9047 BLKGRD: ERROR unable to allocate " 6266 "memory for hexdump\n"); 6267 } else 6268 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6269 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 6270 _dump_buf_dif); 6271 } 6272 6273 /** 6274 * lpfc_post_init_setup - Perform necessary device post initialization setup. 6275 * @phba: pointer to lpfc hba data structure. 6276 * 6277 * This routine is invoked to perform all the necessary post initialization 6278 * setup for the device. 6279 **/ 6280 static void 6281 lpfc_post_init_setup(struct lpfc_hba *phba) 6282 { 6283 struct Scsi_Host *shost; 6284 struct lpfc_adapter_event_header adapter_event; 6285 6286 /* Get the default values for Model Name and Description */ 6287 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 6288 6289 /* 6290 * hba setup may have changed the hba_queue_depth so we need to 6291 * adjust the value of can_queue. 6292 */ 6293 shost = pci_get_drvdata(phba->pcidev); 6294 shost->can_queue = phba->cfg_hba_queue_depth - 10; 6295 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 6296 lpfc_setup_bg(phba, shost); 6297 6298 lpfc_host_attrib_init(shost); 6299 6300 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 6301 spin_lock_irq(shost->host_lock); 6302 lpfc_poll_start_timer(phba); 6303 spin_unlock_irq(shost->host_lock); 6304 } 6305 6306 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6307 "0428 Perform SCSI scan\n"); 6308 /* Send board arrival event to upper layer */ 6309 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 6310 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 6311 fc_host_post_vendor_event(shost, fc_get_event_number(), 6312 sizeof(adapter_event), 6313 (char *) &adapter_event, 6314 LPFC_NL_VENDOR_ID); 6315 return; 6316 } 6317 6318 /** 6319 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 6320 * @phba: pointer to lpfc hba data structure. 6321 * 6322 * This routine is invoked to set up the PCI device memory space for device 6323 * with SLI-3 interface spec. 
6324 *
6325 * Return codes
6326 * 0 - successful
6327 * other values - error
6328 **/
6329 static int
6330 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
6331 {
6332 struct pci_dev *pdev;
6333 unsigned long bar0map_len, bar2map_len;
6334 int i, hbq_count;
6335 void *ptr;
6336 int error = -ENODEV;
6337
6338 /* Obtain PCI device reference */
6339 if (!phba->pcidev)
6340 return error;
6341 else
6342 pdev = phba->pcidev;
6343
6344 /* Set the device DMA mask size */
6345 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6346 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6347 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6348 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6349 return error;
6350 }
6351 }
6352
6353 /* Get the bus address of Bar0 and Bar2 and the number of bytes
6354 * required by each mapping.
6355 */
6356 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6357 bar0map_len = pci_resource_len(pdev, 0);
6358
6359 phba->pci_bar2_map = pci_resource_start(pdev, 2);
6360 bar2map_len = pci_resource_len(pdev, 2);
6361
6362 /* Map HBA SLIM to a kernel virtual address. */
6363 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
6364 if (!phba->slim_memmap_p) {
6365 dev_printk(KERN_ERR, &pdev->dev,
6366 "ioremap failed for SLIM memory.\n");
6367 goto out;
6368 }
6369
6370 /* Map HBA Control Registers to a kernel virtual address. */
6371 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
6372 if (!phba->ctrl_regs_memmap_p) {
6373 dev_printk(KERN_ERR, &pdev->dev,
6374 "ioremap failed for HBA control registers.\n");
6375 goto out_iounmap_slim;
6376 }
6377
6378 /* Allocate memory for SLI-2 structures */
6379 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6380 &phba->slim2p.phys, GFP_KERNEL);
6381 if (!phba->slim2p.virt)
6382 goto out_iounmap;
6383
6384 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
6385 phba->mbox_ext = (phba->slim2p.virt +
6386 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
6387 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
6388 phba->IOCBs = (phba->slim2p.virt +
6389 offsetof(struct lpfc_sli2_slim, IOCBs));
6390
6391 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
6392 lpfc_sli_hbq_size(),
6393 &phba->hbqslimp.phys,
6394 GFP_KERNEL);
6395 if (!phba->hbqslimp.virt)
6396 goto out_free_slim;
6397
6398 hbq_count = lpfc_sli_hbq_count();
6399 ptr = phba->hbqslimp.virt;
6400 for (i = 0; i < hbq_count; ++i) {
6401 phba->hbqs[i].hbq_virt = ptr;
6402 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
6403 ptr += (lpfc_hbq_defs[i]->entry_count *
6404 sizeof(struct lpfc_hbq_entry));
6405 }
6406 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
6407 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
6408
6409 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
6410
6411 INIT_LIST_HEAD(&phba->rb_pend_list);
6412
6413 phba->MBslimaddr = phba->slim_memmap_p;
6414 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
6415 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
6416 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
6417 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
6418
6419 return 0;
6420
6421 out_free_slim:
6422 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6423 phba->slim2p.virt, phba->slim2p.phys);
6424 out_iounmap:
6425 iounmap(phba->ctrl_regs_memmap_p);
6426 out_iounmap_slim:
6427 iounmap(phba->slim_memmap_p);
6428 out:
6429 return error;
6430 }
6431
6432 /**
6433 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6434 * @phba: pointer to lpfc hba data structure.
6435 *
6436 * This routine is invoked to unset the PCI device memory space for device
6437 * with SLI-3 interface spec.
6438 **/
6439 static void
6440 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6441 {
6442 struct pci_dev *pdev;
6443
6444 /* Obtain PCI device reference */
6445 if (!phba->pcidev)
6446 return;
6447 else
6448 pdev = phba->pcidev;
6449
6450 /* Free coherent DMA memory allocated */
6451 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6452 phba->hbqslimp.virt, phba->hbqslimp.phys);
6453 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6454 phba->slim2p.virt, phba->slim2p.phys);
6455
6456 /* I/O memory unmap */
6457 iounmap(phba->ctrl_regs_memmap_p);
6458 iounmap(phba->slim_memmap_p);
6459
6460 return;
6461 }
6462
6463 /**
6464 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
6465 * @phba: pointer to lpfc hba data structure.
6466 *
6467 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
6468 * done and check status.
6469 *
6470 * Return 0 if successful, otherwise -ENODEV.
6471 **/
6472 int
6473 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6474 {
6475 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6476 struct lpfc_register reg_data;
6477 int i, port_error = 0;
6478 uint32_t if_type;
6479
6480 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6481 memset(&reg_data, 0, sizeof(reg_data));
6482 if (!phba->sli4_hba.PSMPHRregaddr)
6483 return -ENODEV;
6484
6485 /* Wait up to 30 seconds for the SLI Port POST done and ready */
6486 for (i = 0; i < 3000; i++) {
6487 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6488 &portsmphr_reg.word0) ||
6489 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6490 /* Port has a fatal POST error, break out */
6491 port_error = -ENODEV;
6492 break;
6493 }
6494 if (LPFC_POST_STAGE_PORT_READY ==
6495 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6496 break;
6497 msleep(10);
6498 }
6499
6500 /*
6501 * If there was a port error during POST, then don't proceed with
6502 * other register reads as the data may not be valid. Just exit.
6503 */
6504 if (port_error) {
6505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6506 "1408 Port Failed POST - portsmphr=0x%x, "
6507 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6508 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6509 portsmphr_reg.word0,
6510 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6511 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6512 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6513 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6514 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6515 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6516 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6517 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6518 } else {
6519 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6520 "2534 Device Info: SLIFamily=0x%x, "
6521 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6522 "SLIHint_2=0x%x, FT=0x%x\n",
6523 bf_get(lpfc_sli_intf_sli_family,
6524 &phba->sli4_hba.sli_intf),
6525 bf_get(lpfc_sli_intf_slirev,
6526 &phba->sli4_hba.sli_intf),
6527 bf_get(lpfc_sli_intf_if_type,
6528 &phba->sli4_hba.sli_intf),
6529 bf_get(lpfc_sli_intf_sli_hint1,
6530 &phba->sli4_hba.sli_intf),
6531 bf_get(lpfc_sli_intf_sli_hint2,
6532 &phba->sli4_hba.sli_intf),
6533 bf_get(lpfc_sli_intf_func_type,
6534 &phba->sli4_hba.sli_intf));
6535 /*
6536 * Check for other Port errors during the initialization
6537 * process. Fail the load if the port did not come up
6538 * correctly.
6539 */
6540 if_type = bf_get(lpfc_sli_intf_if_type,
6541 &phba->sli4_hba.sli_intf);
6542 switch (if_type) {
6543 case LPFC_SLI_INTF_IF_TYPE_0:
6544 phba->sli4_hba.ue_mask_lo =
6545 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6546 phba->sli4_hba.ue_mask_hi =
6547 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6548 uerrlo_reg.word0 =
6549 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6550 uerrhi_reg.word0 =
6551 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6552 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6553 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6555 "1422 Unrecoverable Error "
6556 "Detected during POST "
6557 "uerr_lo_reg=0x%x, "
6558 "uerr_hi_reg=0x%x, "
6559 "ue_mask_lo_reg=0x%x, "
6560 "ue_mask_hi_reg=0x%x\n",
6561 uerrlo_reg.word0,
6562 uerrhi_reg.word0,
6563 phba->sli4_hba.ue_mask_lo,
6564 phba->sli4_hba.ue_mask_hi);
6565 port_error = -ENODEV;
6566 }
6567 break;
6568 case LPFC_SLI_INTF_IF_TYPE_2:
6569 /* Final checks. The port status should be clean. */
6570 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6571 &reg_data.word0) ||
6572 (bf_get(lpfc_sliport_status_err, &reg_data) &&
6573 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6574 phba->work_status[0] =
6575 readl(phba->sli4_hba.u.if_type2.
6576 ERR1regaddr);
6577 phba->work_status[1] =
6578 readl(phba->sli4_hba.u.if_type2.
6579 ERR2regaddr);
6580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6581 "2888 Unrecoverable port error "
6582 "following POST: port status reg "
6583 "0x%x, port_smphr reg 0x%x, "
6584 "error 1=0x%x, error 2=0x%x\n",
6585 reg_data.word0,
6586 portsmphr_reg.word0,
6587 phba->work_status[0],
6588 phba->work_status[1]);
6589 port_error = -ENODEV;
6590 }
6591 break;
6592 case LPFC_SLI_INTF_IF_TYPE_1:
6593 default:
6594 break;
6595 }
6596 }
6597 return port_error;
6598 }
6599
6600 /**
6601 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
6602 * @phba: pointer to lpfc hba data structure.
6603 * @if_type: The SLI4 interface type getting configured.
6604 *
6605 * This routine is invoked to set up SLI4 BAR0 PCI config space register
6606 * memory map.
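*
* For instance, after this mapping an if_type 0 port's unrecoverable
* error status can be read with the pattern already used in
* lpfc_sli4_post_status_check() above (a minimal sketch):
*
*	word0 = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
*
* i.e. every register address is simply conf_regs_memmap_p plus the
* register's fixed offset.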
6607 **/
6608 static void
6609 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6610 {
6611 switch (if_type) {
6612 case LPFC_SLI_INTF_IF_TYPE_0:
6613 phba->sli4_hba.u.if_type0.UERRLOregaddr =
6614 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6615 phba->sli4_hba.u.if_type0.UERRHIregaddr =
6616 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6617 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6618 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6619 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6620 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6621 phba->sli4_hba.SLIINTFregaddr =
6622 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6623 break;
6624 case LPFC_SLI_INTF_IF_TYPE_2:
6625 phba->sli4_hba.u.if_type2.ERR1regaddr =
6626 phba->sli4_hba.conf_regs_memmap_p +
6627 LPFC_CTL_PORT_ER1_OFFSET;
6628 phba->sli4_hba.u.if_type2.ERR2regaddr =
6629 phba->sli4_hba.conf_regs_memmap_p +
6630 LPFC_CTL_PORT_ER2_OFFSET;
6631 phba->sli4_hba.u.if_type2.CTRLregaddr =
6632 phba->sli4_hba.conf_regs_memmap_p +
6633 LPFC_CTL_PORT_CTL_OFFSET;
6634 phba->sli4_hba.u.if_type2.STATUSregaddr =
6635 phba->sli4_hba.conf_regs_memmap_p +
6636 LPFC_CTL_PORT_STA_OFFSET;
6637 phba->sli4_hba.SLIINTFregaddr =
6638 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6639 phba->sli4_hba.PSMPHRregaddr =
6640 phba->sli4_hba.conf_regs_memmap_p +
6641 LPFC_CTL_PORT_SEM_OFFSET;
6642 phba->sli4_hba.RQDBregaddr =
6643 phba->sli4_hba.conf_regs_memmap_p +
6644 LPFC_ULP0_RQ_DOORBELL;
6645 phba->sli4_hba.WQDBregaddr =
6646 phba->sli4_hba.conf_regs_memmap_p +
6647 LPFC_ULP0_WQ_DOORBELL;
6648 phba->sli4_hba.EQCQDBregaddr =
6649 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6650 phba->sli4_hba.MQDBregaddr =
6651 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6652 phba->sli4_hba.BMBXregaddr =
6653 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6654 break;
6655 case LPFC_SLI_INTF_IF_TYPE_1:
6656 default:
6657 dev_printk(KERN_ERR, &phba->pcidev->dev,
6658 "FATAL - unsupported SLI4 interface type - %d\n",
6659 if_type);
6660 break;
6661 }
6662 }
6663
6664 /**
6665 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6666 * @phba: pointer to lpfc hba data structure.
6667 *
6668 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6669 * memory map.
6670 **/
6671 static void
6672 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6673 {
6674 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6675 LPFC_SLIPORT_IF0_SMPHR;
6676 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6677 LPFC_HST_ISR0;
6678 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6679 LPFC_HST_IMR0;
6680 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6681 LPFC_HST_ISCR0;
6682 }
6683
6684 /**
6685 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6686 * @phba: pointer to lpfc hba data structure.
6687 * @vf: virtual function number
6688 *
6689 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6690 * based on the given virtual function number, @vf.
6691 *
6692 * Return 0 if successful, otherwise -ENODEV.
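*
* Each VF owns one doorbell page; as a worked example with a hypothetical
* vf of 2, the RQ doorbell would land at
*
*	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL
*
* mirroring the computation in the function body below.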
6693 **/
6694 static int
6695 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6696 {
6697 if (vf > LPFC_VIR_FUNC_MAX)
6698 return -ENODEV;
6699
6700 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6701 vf * LPFC_VFR_PAGE_SIZE +
6702 LPFC_ULP0_RQ_DOORBELL);
6703 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6704 vf * LPFC_VFR_PAGE_SIZE +
6705 LPFC_ULP0_WQ_DOORBELL);
6706 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6707 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6708 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6709 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6710 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6711 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6712 return 0;
6713 }
6714
6715 /**
6716 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
6717 * @phba: pointer to lpfc hba data structure.
6718 *
6719 * This routine is invoked to create the bootstrap mailbox
6720 * region consistent with the SLI-4 interface spec. This
6721 * routine allocates all memory necessary to communicate
6722 * mailbox commands to the port and sets up all alignment
6723 * needs. No locks are expected to be held when calling
6724 * this routine.
6725 *
6726 * Return codes
6727 * 0 - successful
6728 * -ENOMEM - could not allocate memory.
6729 **/
6730 static int
6731 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6732 {
6733 uint32_t bmbx_size;
6734 struct lpfc_dmabuf *dmabuf;
6735 struct dma_address *dma_address;
6736 uint32_t pa_addr;
6737 uint64_t phys_addr;
6738
6739 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6740 if (!dmabuf)
6741 return -ENOMEM;
6742
6743 /*
6744 * The bootstrap mailbox region is comprised of 2 parts
6745 * plus an alignment restriction of 16 bytes.
6746 */
6747 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6748 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
6749 &dmabuf->phys, GFP_KERNEL);
6750 if (!dmabuf->virt) {
6751 kfree(dmabuf);
6752 return -ENOMEM;
6753 }
6754
6755 /*
6756 * Initialize the bootstrap mailbox pointers now so that the register
6757 * operations are simple later. The mailbox dma address is required
6758 * to be 16-byte aligned. Also align the virtual memory as each
6759 * mailbox is copied into the bmbx mailbox region before issuing the
6760 * command to the port.
6761 */
6762 phba->sli4_hba.bmbx.dmabuf = dmabuf;
6763 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6764
6765 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6766 LPFC_ALIGN_16_BYTE);
6767 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6768 LPFC_ALIGN_16_BYTE);
6769
6770 /*
6771 * Set the high and low physical addresses now. The SLI4 alignment
6772 * requirement is 16 bytes and the mailbox is posted to the port
6773 * as two 30-bit addresses. The other data is a bit marking whether
6774 * the 30-bit address is the high or low address.
6775 * Upcast bmbx aphys to 64bits so shift instruction compiles
6776 * clean on 32 bit machines.
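* In effect, physical address bits 33:4 form the low 30-bit word and
* bits 63:34 form the high word; each word is shifted left by 2 and
* tagged with the BIT1 hi/lo flag ORed into its low bits, so the port
* can tell which half it received.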
6777 */
6778 dma_address = &phba->sli4_hba.bmbx.dma_address;
6779 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6780 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6781 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6782 LPFC_BMBX_BIT1_ADDR_HI);
6783
6784 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6785 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6786 LPFC_BMBX_BIT1_ADDR_LO);
6787 return 0;
6788 }
6789
6790 /**
6791 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6792 * @phba: pointer to lpfc hba data structure.
6793 *
6794 * This routine is invoked to tear down the bootstrap mailbox
6795 * region and release all host resources. This routine requires
6796 * the caller to ensure all mailbox commands are recovered, no
6797 * additional mailbox commands are sent, and interrupts are disabled
6798 * before calling this routine.
6799 *
6800 **/
6801 static void
6802 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6803 {
6804 dma_free_coherent(&phba->pcidev->dev,
6805 phba->sli4_hba.bmbx.bmbx_size,
6806 phba->sli4_hba.bmbx.dmabuf->virt,
6807 phba->sli4_hba.bmbx.dmabuf->phys);
6808
6809 kfree(phba->sli4_hba.bmbx.dmabuf);
6810 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6811 }
6812
6813 /**
6814 * lpfc_sli4_read_config - Get the config parameters.
6815 * @phba: pointer to lpfc hba data structure.
6816 *
6817 * This routine is invoked to read the configuration parameters from the HBA.
6818 * The configuration parameters are used to set the base and maximum values
6819 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
6820 * allocation for the port.
6821 *
6822 * Return codes
6823 * 0 - successful
6824 * -ENOMEM - No available memory
6825 * -EIO - The mailbox failed to complete successfully.
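*
* For example, the VPI count returned by READ_CONFIG bounds NPIV here:
* max_vports is derived below as (max_vpi - 1) when max_vpi is non-zero,
* replacing the provisional LPFC_MAX_VPI default set earlier during
* resource setup.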
6826 **/
6827 int
6828 lpfc_sli4_read_config(struct lpfc_hba *phba)
6829 {
6830 LPFC_MBOXQ_t *pmb;
6831 struct lpfc_mbx_read_config *rd_config;
6832 union lpfc_sli4_cfg_shdr *shdr;
6833 uint32_t shdr_status, shdr_add_status;
6834 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6835 struct lpfc_rsrc_desc_fcfcoe *desc;
6836 char *pdesc_0;
6837 int length, i, rc = 0, rc2;
6838
6839 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6840 if (!pmb) {
6841 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6842 "2011 Unable to allocate memory for issuing "
6843 "SLI_CONFIG_SPECIAL mailbox command\n");
6844 return -ENOMEM;
6845 }
6846
6847 lpfc_read_config(phba, pmb);
6848
6849 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6850 if (rc != MBX_SUCCESS) {
6851 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6852 "2012 Mailbox failed, mbxCmd x%x "
6853 "READ_CONFIG, mbxStatus x%x\n",
6854 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6855 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6856 rc = -EIO;
6857 } else {
6858 rd_config = &pmb->u.mqe.un.rd_config;
6859 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6860 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6861 phba->sli4_hba.lnk_info.lnk_tp =
6862 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6863 phba->sli4_hba.lnk_info.lnk_no =
6864 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6865 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6866 "3081 lnk_type:%d, lnk_numb:%d\n",
6867 phba->sli4_hba.lnk_info.lnk_tp,
6868 phba->sli4_hba.lnk_info.lnk_no);
6869 } else
6870 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6871 "3082 Mailbox (x%x) returned ldv:x0\n",
6872 bf_get(lpfc_mqe_command, &pmb->u.mqe));
6873 phba->sli4_hba.extents_in_use =
6874 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6875 phba->sli4_hba.max_cfg_param.max_xri =
6876 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6877 phba->sli4_hba.max_cfg_param.xri_base =
6878 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6879 phba->sli4_hba.max_cfg_param.max_vpi =
6880 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6881 phba->sli4_hba.max_cfg_param.vpi_base =
6882 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6883 phba->sli4_hba.max_cfg_param.max_rpi =
6884 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6885 phba->sli4_hba.max_cfg_param.rpi_base =
6886 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6887 phba->sli4_hba.max_cfg_param.max_vfi =
6888 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6889 phba->sli4_hba.max_cfg_param.vfi_base =
6890 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6891 phba->sli4_hba.max_cfg_param.max_fcfi =
6892 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6893 phba->sli4_hba.max_cfg_param.max_eq =
6894 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6895 phba->sli4_hba.max_cfg_param.max_rq =
6896 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6897 phba->sli4_hba.max_cfg_param.max_wq =
6898 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6899 phba->sli4_hba.max_cfg_param.max_cq =
6900 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6901 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6902 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6903 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6904 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6905 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6906 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6907 phba->max_vports = phba->max_vpi;
6908 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6909 "2003 cfg params Extents? %d "
6910 "XRI(B:%d M:%d), "
6911 "VPI(B:%d M:%d) "
6912 "VFI(B:%d M:%d) "
6913 "RPI(B:%d M:%d) "
6914 "FCFI(Count:%d)\n",
6915 phba->sli4_hba.extents_in_use,
6916 phba->sli4_hba.max_cfg_param.xri_base,
6917 phba->sli4_hba.max_cfg_param.max_xri,
6918 phba->sli4_hba.max_cfg_param.vpi_base,
6919 phba->sli4_hba.max_cfg_param.max_vpi,
6920 phba->sli4_hba.max_cfg_param.vfi_base,
6921 phba->sli4_hba.max_cfg_param.max_vfi,
6922 phba->sli4_hba.max_cfg_param.rpi_base,
6923 phba->sli4_hba.max_cfg_param.max_rpi,
6924 phba->sli4_hba.max_cfg_param.max_fcfi);
6925 }
6926
6927 if (rc)
6928 goto read_cfg_out;
6929
6930 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
6931 length = phba->sli4_hba.max_cfg_param.max_xri -
6932 lpfc_sli4_get_els_iocb_cnt(phba);
6933 if (phba->cfg_hba_queue_depth > length) {
6934 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6935 "3361 HBA queue depth changed from %d to %d\n",
6936 phba->cfg_hba_queue_depth, length);
6937 phba->cfg_hba_queue_depth = length;
6938 }
6939
6940 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6941 LPFC_SLI_INTF_IF_TYPE_2)
6942 goto read_cfg_out;
6943
6944 /* get the pf# and vf# for SLI4 if_type 2 port */
6945 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6946 sizeof(struct lpfc_sli4_cfg_mhdr));
6947 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6948 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6949 length, LPFC_SLI4_MBX_EMBED);
6950
6951 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6952 shdr = (union lpfc_sli4_cfg_shdr *)
6953 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6954 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6955 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6956 if (rc2 || shdr_status || shdr_add_status) {
6957 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6958 "3026 Mailbox failed, mbxCmd x%x "
6959 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6960 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6961 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6962 goto read_cfg_out;
6963 }
6964
6965 /* search for fc_fcoe resource descriptor */
6966 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6967
6968 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6969 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
6970 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
6971 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
6972 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
6973 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
6974 goto read_cfg_out;
6975
6976 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6977 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
6978 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6979 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
6980 phba->sli4_hba.iov.pf_number =
6981 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6982 phba->sli4_hba.iov.vf_number =
6983 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6984 break;
6985 }
6986 }
6987
6988 if (i < LPFC_RSRC_DESC_MAX_NUM)
6989 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6990 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6991 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6992 phba->sli4_hba.iov.vf_number);
6993 else
6994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6995 "3028 GET_FUNCTION_CONFIG: failed to find "
6996 "Resource Descriptor:x%x\n",
6997 LPFC_RSRC_DESC_TYPE_FCFCOE);
6998
6999 read_cfg_out:
7000 mempool_free(pmb, phba->mbox_mem_pool);
7001 return rc;
7002 }
7003
7004 /**
7005 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
7006 * @phba: pointer to lpfc hba data structure.
7007 *
7008 * This routine is invoked to set up the port-side endian order when
7009 * the port if_type is 0. This routine has no function for other
7010 * if_types.
7011 *
7012 * Return codes
7013 * 0 - successful
7014 * -ENOMEM - No available memory
7015 * -EIO - The mailbox failed to complete successfully.
7016 **/
7017 static int
7018 lpfc_setup_endian_order(struct lpfc_hba *phba)
7019 {
7020 LPFC_MBOXQ_t *mboxq;
7021 uint32_t if_type, rc = 0;
7022 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
7023 HOST_ENDIAN_HIGH_WORD1};
7024
7025 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7026 switch (if_type) {
7027 case LPFC_SLI_INTF_IF_TYPE_0:
7028 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7029 GFP_KERNEL);
7030 if (!mboxq) {
7031 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7032 "0492 Unable to allocate memory for "
7033 "issuing SLI_CONFIG_SPECIAL mailbox "
7034 "command\n");
7035 return -ENOMEM;
7036 }
7037
7038 /*
7039 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
7040 * two words to contain special data values and no other data.
7041 */
7042 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
7043 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
7044 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7045 if (rc != MBX_SUCCESS) {
7046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7047 "0493 SLI_CONFIG_SPECIAL mailbox "
7048 "failed with status x%x\n",
7049 rc);
7050 rc = -EIO;
7051 }
7052 mempool_free(mboxq, phba->mbox_mem_pool);
7053 break;
7054 case LPFC_SLI_INTF_IF_TYPE_2:
7055 case LPFC_SLI_INTF_IF_TYPE_1:
7056 default:
7057 break;
7058 }
7059 return rc;
7060 }
7061
7062 /**
7063 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
7064 * @phba: pointer to lpfc hba data structure.
7065 *
7066 * This routine is invoked to check the user-settable queue counts for EQs and
7067 * CQs. After this routine is called, the counts will be set to valid values
7068 * that adhere to the constraints of the system's interrupt vectors and the
7069 * port's queue resources.
7070 *
7071 * Return codes
7072 * 0 - successful
7073 * -ENOMEM - No available memory
7074 **/
7075 static int
7076 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
7077 {
7078 int cfg_fcp_io_channel;
7079 uint32_t cpu;
7080 uint32_t i = 0;
7081 int fof_vectors = phba->cfg_fof ?
1 : 0;
7082
7083 /*
7084 * Sanity check for configured queue parameters against the run-time
7085 * device parameters
7086 */
7087
7088 /* Sanity check on HBA EQ parameters */
7089 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
7090
7091 /* It doesn't make sense to have more io channels than online CPUs */
7092 for_each_present_cpu(cpu) {
7093 if (cpu_online(cpu))
7094 i++;
7095 }
7096 phba->sli4_hba.num_online_cpu = i;
7097 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7098 phba->sli4_hba.curr_disp_cpu = 0;
7099
7100 if (i < cfg_fcp_io_channel) {
7101 lpfc_printf_log(phba,
7102 KERN_ERR, LOG_INIT,
7103 "3188 Reducing IO channels to match number of "
7104 "online CPUs: from %d to %d\n",
7105 cfg_fcp_io_channel, i);
7106 cfg_fcp_io_channel = i;
7107 }
7108
7109 if (cfg_fcp_io_channel + fof_vectors >
7110 phba->sli4_hba.max_cfg_param.max_eq) {
7111 if (phba->sli4_hba.max_cfg_param.max_eq <
7112 LPFC_FCP_IO_CHAN_MIN) {
7113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7114 "2574 Not enough EQs (%d) from the "
7115 "pci function for supporting FCP "
7116 "EQs (%d)\n",
7117 phba->sli4_hba.max_cfg_param.max_eq,
7118 phba->cfg_fcp_io_channel);
7119 goto out_error;
7120 }
7121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7122 "2575 Reducing IO channels to match number of "
7123 "available EQs: from %d to %d\n",
7124 cfg_fcp_io_channel,
7125 phba->sli4_hba.max_cfg_param.max_eq);
7126 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
7127 fof_vectors;
7128 }
7129
7130 /* The actual number of FCP event queues adopted */
7131 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
7132
7133 /* Get EQ depth from module parameter, fake the default for now */
7134 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7135 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
7136
7137 /* Get CQ depth from module parameter, fake the default for now */
7138 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7139 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
7140
7141 return 0;
7142 out_error:
7143 return -ENOMEM;
7144 }
7145
7146 /**
7147 * lpfc_sli4_queue_create - Create all the SLI4 queues
7148 * @phba: pointer to lpfc hba data structure.
7149 *
7150 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
7151 * operation. For each SLI4 queue type, the parameters such as queue entry
7152 * count (queue depth) shall be taken from the module parameter. For now,
7153 * we just use some constant number as a placeholder.
7154 *
7155 * Return codes
7156 * 0 - successful
7157 * -ENOMEM - No available memory
7158 * -EIO - The mailbox failed to complete successfully.
7159 **/
7160 int
7161 lpfc_sli4_queue_create(struct lpfc_hba *phba)
7162 {
7163 struct lpfc_queue *qdesc;
7164 int idx;
7165
7166 /*
7167 * Create HBA Record arrays.
7168 */ 7169 if (!phba->cfg_fcp_io_channel) 7170 return -ERANGE; 7171 7172 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 7173 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 7174 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 7175 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 7176 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 7177 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 7178 7179 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * 7180 phba->cfg_fcp_io_channel), GFP_KERNEL); 7181 if (!phba->sli4_hba.hba_eq) { 7182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7183 "2576 Failed allocate memory for " 7184 "fast-path EQ record array\n"); 7185 goto out_error; 7186 } 7187 7188 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 7189 phba->cfg_fcp_io_channel), GFP_KERNEL); 7190 if (!phba->sli4_hba.fcp_cq) { 7191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7192 "2577 Failed allocate memory for fast-path " 7193 "CQ record array\n"); 7194 goto out_error; 7195 } 7196 7197 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 7198 phba->cfg_fcp_io_channel), GFP_KERNEL); 7199 if (!phba->sli4_hba.fcp_wq) { 7200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7201 "2578 Failed allocate memory for fast-path " 7202 "WQ record array\n"); 7203 goto out_error; 7204 } 7205 7206 /* 7207 * Since the first EQ can have multiple CQs associated with it, 7208 * this array is used to quickly see if we have a FCP fast-path 7209 * CQ match. 7210 */ 7211 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * 7212 phba->cfg_fcp_io_channel), GFP_KERNEL); 7213 if (!phba->sli4_hba.fcp_cq_map) { 7214 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7215 "2545 Failed allocate memory for fast-path " 7216 "CQ map\n"); 7217 goto out_error; 7218 } 7219 7220 /* 7221 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies 7222 * how many EQs to create. 
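 *
 * Sizing sketch (illustration only; the channel count is a hypothetical
 * value, not taken from the code): with cfg_fcp_io_channel == 4, the loop
 * below allocates four EQs and, alongside them, four fast-path FCP CQs and
 * four FCP WQs, i.e. one EQ/CQ/WQ trio per io channel.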
7223 */ 7224 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7225 7226 /* Create EQs */ 7227 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 7228 phba->sli4_hba.eq_ecount); 7229 if (!qdesc) { 7230 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7231 "0497 Failed allocate EQ (%d)\n", idx); 7232 goto out_error; 7233 } 7234 phba->sli4_hba.hba_eq[idx] = qdesc; 7235 7236 /* Create Fast Path FCP CQs */ 7237 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7238 phba->sli4_hba.cq_ecount); 7239 if (!qdesc) { 7240 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7241 "0499 Failed allocate fast-path FCP " 7242 "CQ (%d)\n", idx); 7243 goto out_error; 7244 } 7245 phba->sli4_hba.fcp_cq[idx] = qdesc; 7246 7247 /* Create Fast Path FCP WQs */ 7248 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7249 phba->sli4_hba.wq_ecount); 7250 if (!qdesc) { 7251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7252 "0503 Failed allocate fast-path FCP " 7253 "WQ (%d)\n", idx); 7254 goto out_error; 7255 } 7256 phba->sli4_hba.fcp_wq[idx] = qdesc; 7257 } 7258 7259 7260 /* 7261 * Create Slow Path Completion Queues (CQs) 7262 */ 7263 7264 /* Create slow-path Mailbox Command Complete Queue */ 7265 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7266 phba->sli4_hba.cq_ecount); 7267 if (!qdesc) { 7268 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7269 "0500 Failed allocate slow-path mailbox CQ\n"); 7270 goto out_error; 7271 } 7272 phba->sli4_hba.mbx_cq = qdesc; 7273 7274 /* Create slow-path ELS Complete Queue */ 7275 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7276 phba->sli4_hba.cq_ecount); 7277 if (!qdesc) { 7278 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7279 "0501 Failed allocate slow-path ELS CQ\n"); 7280 goto out_error; 7281 } 7282 phba->sli4_hba.els_cq = qdesc; 7283 7284 7285 /* 7286 * Create Slow Path Work Queues (WQs) 7287 */ 7288 7289 /* Create Mailbox Command Queue */ 7290 7291 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 7292 phba->sli4_hba.mq_ecount); 7293 if (!qdesc) { 7294 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7295 "0505 Failed allocate slow-path MQ\n"); 7296 goto out_error; 7297 } 7298 phba->sli4_hba.mbx_wq = qdesc; 7299 7300 /* 7301 * Create ELS Work Queues 7302 */ 7303 7304 /* Create slow-path ELS Work Queue */ 7305 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7306 phba->sli4_hba.wq_ecount); 7307 if (!qdesc) { 7308 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7309 "0504 Failed allocate slow-path ELS WQ\n"); 7310 goto out_error; 7311 } 7312 phba->sli4_hba.els_wq = qdesc; 7313 7314 /* 7315 * Create Receive Queue (RQ) 7316 */ 7317 7318 /* Create Receive Queue for header */ 7319 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7320 phba->sli4_hba.rq_ecount); 7321 if (!qdesc) { 7322 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7323 "0506 Failed allocate receive HRQ\n"); 7324 goto out_error; 7325 } 7326 phba->sli4_hba.hdr_rq = qdesc; 7327 7328 /* Create Receive Queue for data */ 7329 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7330 phba->sli4_hba.rq_ecount); 7331 if (!qdesc) { 7332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7333 "0507 Failed allocate receive DRQ\n"); 7334 goto out_error; 7335 } 7336 phba->sli4_hba.dat_rq = qdesc; 7337 7338 /* Create the Queues needed for Flash Optimized Fabric operations */ 7339 if (phba->cfg_fof) 7340 lpfc_fof_queue_create(phba); 7341 return 0; 7342 7343 out_error: 7344 lpfc_sli4_queue_destroy(phba); 7345 return -ENOMEM; 7346 } 7347 7348 /** 7349 * lpfc_sli4_queue_destroy - 
Destroy all the SLI4 queues 7350 * @phba: pointer to lpfc hba data structure. 7351 * 7352 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 7353 * operation. 7354 * 7355 * Return codes 7356 * 0 - successful 7357 * -ENOMEM - No available memory 7358 * -EIO - The mailbox failed to complete successfully. 7359 **/ 7360 void 7361 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 7362 { 7363 int idx; 7364 7365 if (phba->cfg_fof) 7366 lpfc_fof_queue_destroy(phba); 7367 7368 if (phba->sli4_hba.hba_eq != NULL) { 7369 /* Release HBA event queue */ 7370 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7371 if (phba->sli4_hba.hba_eq[idx] != NULL) { 7372 lpfc_sli4_queue_free( 7373 phba->sli4_hba.hba_eq[idx]); 7374 phba->sli4_hba.hba_eq[idx] = NULL; 7375 } 7376 } 7377 kfree(phba->sli4_hba.hba_eq); 7378 phba->sli4_hba.hba_eq = NULL; 7379 } 7380 7381 if (phba->sli4_hba.fcp_cq != NULL) { 7382 /* Release FCP completion queue */ 7383 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7384 if (phba->sli4_hba.fcp_cq[idx] != NULL) { 7385 lpfc_sli4_queue_free( 7386 phba->sli4_hba.fcp_cq[idx]); 7387 phba->sli4_hba.fcp_cq[idx] = NULL; 7388 } 7389 } 7390 kfree(phba->sli4_hba.fcp_cq); 7391 phba->sli4_hba.fcp_cq = NULL; 7392 } 7393 7394 if (phba->sli4_hba.fcp_wq != NULL) { 7395 /* Release FCP work queue */ 7396 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7397 if (phba->sli4_hba.fcp_wq[idx] != NULL) { 7398 lpfc_sli4_queue_free( 7399 phba->sli4_hba.fcp_wq[idx]); 7400 phba->sli4_hba.fcp_wq[idx] = NULL; 7401 } 7402 } 7403 kfree(phba->sli4_hba.fcp_wq); 7404 phba->sli4_hba.fcp_wq = NULL; 7405 } 7406 7407 /* Release FCP CQ mapping array */ 7408 if (phba->sli4_hba.fcp_cq_map != NULL) { 7409 kfree(phba->sli4_hba.fcp_cq_map); 7410 phba->sli4_hba.fcp_cq_map = NULL; 7411 } 7412 7413 /* Release mailbox command work queue */ 7414 if (phba->sli4_hba.mbx_wq != NULL) { 7415 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 7416 phba->sli4_hba.mbx_wq = NULL; 7417 } 7418 7419 /* Release ELS work queue */ 7420 if (phba->sli4_hba.els_wq != NULL) { 7421 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 7422 phba->sli4_hba.els_wq = NULL; 7423 } 7424 7425 /* Release unsolicited receive queue */ 7426 if (phba->sli4_hba.hdr_rq != NULL) { 7427 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 7428 phba->sli4_hba.hdr_rq = NULL; 7429 } 7430 if (phba->sli4_hba.dat_rq != NULL) { 7431 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 7432 phba->sli4_hba.dat_rq = NULL; 7433 } 7434 7435 /* Release ELS complete queue */ 7436 if (phba->sli4_hba.els_cq != NULL) { 7437 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 7438 phba->sli4_hba.els_cq = NULL; 7439 } 7440 7441 /* Release mailbox command complete queue */ 7442 if (phba->sli4_hba.mbx_cq != NULL) { 7443 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 7444 phba->sli4_hba.mbx_cq = NULL; 7445 } 7446 7447 return; 7448 } 7449 7450 /** 7451 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 7452 * @phba: pointer to lpfc hba data structure. 7453 * 7454 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 7455 * operation. 7456 * 7457 * Return codes 7458 * 0 - successful 7459 * -ENOMEM - No available memory 7460 * -EIO - The mailbox failed to complete successfully. 
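 *
 * Typical bring-up ordering, as a sketch (all three routines live in this
 * file; error handling omitted for brevity):
 *
 *	lpfc_sli4_queue_verify(phba);
 *	lpfc_sli4_queue_create(phba);
 *	lpfc_sli4_queue_setup(phba);
 *
 * Teardown mirrors this through lpfc_sli4_queue_unset() and
 * lpfc_sli4_queue_destroy().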
7461 **/ 7462 int 7463 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 7464 { 7465 struct lpfc_sli *psli = &phba->sli; 7466 struct lpfc_sli_ring *pring; 7467 int rc = -ENOMEM; 7468 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 7469 int fcp_cq_index = 0; 7470 uint32_t shdr_status, shdr_add_status; 7471 union lpfc_sli4_cfg_shdr *shdr; 7472 LPFC_MBOXQ_t *mboxq; 7473 uint32_t length; 7474 7475 /* Check for dual-ULP support */ 7476 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7477 if (!mboxq) { 7478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7479 "3249 Unable to allocate memory for " 7480 "QUERY_FW_CFG mailbox command\n"); 7481 return -ENOMEM; 7482 } 7483 length = (sizeof(struct lpfc_mbx_query_fw_config) - 7484 sizeof(struct lpfc_sli4_cfg_mhdr)); 7485 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7486 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 7487 length, LPFC_SLI4_MBX_EMBED); 7488 7489 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7490 7491 shdr = (union lpfc_sli4_cfg_shdr *) 7492 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7493 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7494 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7495 if (shdr_status || shdr_add_status || rc) { 7496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7497 "3250 QUERY_FW_CFG mailbox failed with status " 7498 "x%x add_status x%x, mbx status x%x\n", 7499 shdr_status, shdr_add_status, rc); 7500 if (rc != MBX_TIMEOUT) 7501 mempool_free(mboxq, phba->mbox_mem_pool); 7502 rc = -ENXIO; 7503 goto out_error; 7504 } 7505 7506 phba->sli4_hba.fw_func_mode = 7507 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 7508 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 7509 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 7510 phba->sli4_hba.physical_port = 7511 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 7512 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7513 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 7514 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 7515 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 7516 7517 if (rc != MBX_TIMEOUT) 7518 mempool_free(mboxq, phba->mbox_mem_pool); 7519 7520 /* 7521 * Set up HBA Event Queues (EQs) 7522 */ 7523 7524 /* Set up HBA event queue */ 7525 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { 7526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7527 "3147 Fast-path EQs not allocated\n"); 7528 rc = -ENOMEM; 7529 goto out_error; 7530 } 7531 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 7532 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { 7533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7534 "0522 Fast-path EQ (%d) not " 7535 "allocated\n", fcp_eqidx); 7536 rc = -ENOMEM; 7537 goto out_destroy_hba_eq; 7538 } 7539 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], 7540 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); 7541 if (rc) { 7542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7543 "0523 Failed setup of fast-path EQ " 7544 "(%d), rc = 0x%x\n", fcp_eqidx, 7545 (uint32_t)rc); 7546 goto out_destroy_hba_eq; 7547 } 7548 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7549 "2584 HBA EQ setup: " 7550 "queue[%d]-id=%d\n", fcp_eqidx, 7551 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); 7552 } 7553 7554 /* Set up fast-path FCP Response Complete Queue */ 7555 if (!phba->sli4_hba.fcp_cq) { 7556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7557 "3148 Fast-path FCP CQ array not " 7558 "allocated\n"); 7559 rc = -ENOMEM; 7560 goto out_destroy_hba_eq; 7561 } 7562 7563 for (fcp_cqidx = 0; 
fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { 7564 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 7565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7566 "0526 Fast-path FCP CQ (%d) not " 7567 "allocated\n", fcp_cqidx); 7568 rc = -ENOMEM; 7569 goto out_destroy_fcp_cq; 7570 } 7571 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 7572 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); 7573 if (rc) { 7574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7575 "0527 Failed setup of fast-path FCP " 7576 "CQ (%d), rc = 0x%x\n", fcp_cqidx, 7577 (uint32_t)rc); 7578 goto out_destroy_fcp_cq; 7579 } 7580 7581 /* Setup fcp_cq_map for fast lookup */ 7582 phba->sli4_hba.fcp_cq_map[fcp_cqidx] = 7583 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; 7584 7585 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7586 "2588 FCP CQ setup: cq[%d]-id=%d, " 7587 "parent seq[%d]-id=%d\n", 7588 fcp_cqidx, 7589 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 7590 fcp_cqidx, 7591 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); 7592 } 7593 7594 /* Set up fast-path FCP Work Queue */ 7595 if (!phba->sli4_hba.fcp_wq) { 7596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7597 "3149 Fast-path FCP WQ array not " 7598 "allocated\n"); 7599 rc = -ENOMEM; 7600 goto out_destroy_fcp_cq; 7601 } 7602 7603 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { 7604 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7606 "0534 Fast-path FCP WQ (%d) not " 7607 "allocated\n", fcp_wqidx); 7608 rc = -ENOMEM; 7609 goto out_destroy_fcp_wq; 7610 } 7611 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7612 phba->sli4_hba.fcp_cq[fcp_wqidx], 7613 LPFC_FCP); 7614 if (rc) { 7615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7616 "0535 Failed setup of fast-path FCP " 7617 "WQ (%d), rc = 0x%x\n", fcp_wqidx, 7618 (uint32_t)rc); 7619 goto out_destroy_fcp_wq; 7620 } 7621 7622 /* Bind this WQ to the next FCP ring */ 7623 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; 7624 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7625 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; 7626 7627 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7628 "2591 FCP WQ setup: wq[%d]-id=%d, " 7629 "parent cq[%d]-id=%d\n", 7630 fcp_wqidx, 7631 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7632 fcp_cq_index, 7633 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); 7634 } 7635 /* 7636 * Set up Complete Queues (CQs) 7637 */ 7638 7639 /* Set up slow-path MBOX Complete Queue as the first CQ */ 7640 if (!phba->sli4_hba.mbx_cq) { 7641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7642 "0528 Mailbox CQ not allocated\n"); 7643 rc = -ENOMEM; 7644 goto out_destroy_fcp_wq; 7645 } 7646 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, 7647 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); 7648 if (rc) { 7649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7650 "0529 Failed setup of slow-path mailbox CQ: " 7651 "rc = 0x%x\n", (uint32_t)rc); 7652 goto out_destroy_fcp_wq; 7653 } 7654 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7655 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7656 phba->sli4_hba.mbx_cq->queue_id, 7657 phba->sli4_hba.hba_eq[0]->queue_id); 7658 7659 /* Set up slow-path ELS Complete Queue */ 7660 if (!phba->sli4_hba.els_cq) { 7661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7662 "0530 ELS CQ not allocated\n"); 7663 rc = -ENOMEM; 7664 goto out_destroy_mbx_cq; 7665 } 7666 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, 7667 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); 7668 if (rc) { 7669 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7670 
"0531 Failed setup of slow-path ELS CQ: " 7671 "rc = 0x%x\n", (uint32_t)rc); 7672 goto out_destroy_mbx_cq; 7673 } 7674 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7675 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7676 phba->sli4_hba.els_cq->queue_id, 7677 phba->sli4_hba.hba_eq[0]->queue_id); 7678 7679 /* 7680 * Set up all the Work Queues (WQs) 7681 */ 7682 7683 /* Set up Mailbox Command Queue */ 7684 if (!phba->sli4_hba.mbx_wq) { 7685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7686 "0538 Slow-path MQ not allocated\n"); 7687 rc = -ENOMEM; 7688 goto out_destroy_els_cq; 7689 } 7690 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7691 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7692 if (rc) { 7693 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7694 "0539 Failed setup of slow-path MQ: " 7695 "rc = 0x%x\n", rc); 7696 goto out_destroy_els_cq; 7697 } 7698 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7699 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7700 phba->sli4_hba.mbx_wq->queue_id, 7701 phba->sli4_hba.mbx_cq->queue_id); 7702 7703 /* Set up slow-path ELS Work Queue */ 7704 if (!phba->sli4_hba.els_wq) { 7705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7706 "0536 Slow-path ELS WQ not allocated\n"); 7707 rc = -ENOMEM; 7708 goto out_destroy_mbx_wq; 7709 } 7710 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 7711 phba->sli4_hba.els_cq, LPFC_ELS); 7712 if (rc) { 7713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7714 "0537 Failed setup of slow-path ELS WQ: " 7715 "rc = 0x%x\n", (uint32_t)rc); 7716 goto out_destroy_mbx_wq; 7717 } 7718 7719 /* Bind this WQ to the ELS ring */ 7720 pring = &psli->ring[LPFC_ELS_RING]; 7721 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; 7722 phba->sli4_hba.els_cq->pring = pring; 7723 7724 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7725 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7726 phba->sli4_hba.els_wq->queue_id, 7727 phba->sli4_hba.els_cq->queue_id); 7728 7729 /* 7730 * Create Receive Queue (RQ) 7731 */ 7732 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 7733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7734 "0540 Receive Queue not allocated\n"); 7735 rc = -ENOMEM; 7736 goto out_destroy_els_wq; 7737 } 7738 7739 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7740 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 7741 7742 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 7743 phba->sli4_hba.els_cq, LPFC_USOL); 7744 if (rc) { 7745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7746 "0541 Failed setup of Receive Queue: " 7747 "rc = 0x%x\n", (uint32_t)rc); 7748 goto out_destroy_fcp_wq; 7749 } 7750 7751 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7752 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 7753 "parent cq-id=%d\n", 7754 phba->sli4_hba.hdr_rq->queue_id, 7755 phba->sli4_hba.dat_rq->queue_id, 7756 phba->sli4_hba.els_cq->queue_id); 7757 7758 if (phba->cfg_fof) { 7759 rc = lpfc_fof_queue_setup(phba); 7760 if (rc) { 7761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7762 "0549 Failed setup of FOF Queues: " 7763 "rc = 0x%x\n", rc); 7764 goto out_destroy_els_rq; 7765 } 7766 } 7767 7768 /* 7769 * Configure EQ delay multipier for interrupt coalescing using 7770 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time. 
7771 */ 7772 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 7773 fcp_eqidx += LPFC_MAX_EQ_DELAY) 7774 lpfc_modify_fcp_eq_delay(phba, fcp_eqidx); 7775 return 0; 7776 7777 out_destroy_els_rq: 7778 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7779 out_destroy_els_wq: 7780 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7781 out_destroy_mbx_wq: 7782 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7783 out_destroy_els_cq: 7784 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7785 out_destroy_mbx_cq: 7786 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7787 out_destroy_fcp_wq: 7788 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7789 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7790 out_destroy_fcp_cq: 7791 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 7792 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 7793 out_destroy_hba_eq: 7794 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7795 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); 7796 out_error: 7797 return rc; 7798 } 7799 7800 /** 7801 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 7802 * @phba: pointer to lpfc hba data structure. 7803 * 7804 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 7805 * operation. 7806 * 7807 * Return codes 7808 * 0 - successful 7809 * -ENOMEM - No available memory 7810 * -EIO - The mailbox failed to complete successfully. 7811 **/ 7812 void 7813 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 7814 { 7815 int fcp_qidx; 7816 7817 /* Unset the queues created for Flash Optimized Fabric operations */ 7818 if (phba->cfg_fof) 7819 lpfc_fof_queue_destroy(phba); 7820 /* Unset mailbox command work queue */ 7821 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7822 /* Unset ELS work queue */ 7823 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7824 /* Unset unsolicited receive queue */ 7825 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7826 /* Unset FCP work queue */ 7827 if (phba->sli4_hba.fcp_wq) { 7828 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7829 fcp_qidx++) 7830 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7831 } 7832 /* Unset mailbox command complete queue */ 7833 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7834 /* Unset ELS complete queue */ 7835 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7836 /* Unset FCP response complete queue */ 7837 if (phba->sli4_hba.fcp_cq) { 7838 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7839 fcp_qidx++) 7840 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 7841 } 7842 /* Unset fast-path event queue */ 7843 if (phba->sli4_hba.hba_eq) { 7844 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7845 fcp_qidx++) 7846 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); 7847 } 7848 } 7849 7850 /** 7851 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 7852 * @phba: pointer to lpfc hba data structure. 7853 * 7854 * This routine is invoked to allocate and set up a pool of completion queue 7855 * events. The body of the completion queue event is a completion queue entry 7856 * CQE. For now, this pool is used for the interrupt service routine to queue 7857 * the following HBA completion queue events for the worker thread to process: 7858 * - Mailbox asynchronous events 7859 * - Receive queue completion unsolicited events 7860 * Later, this can be used for all the slow-path events. 
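 *
 * Sizing sketch: the create routine preallocates 4 * cq_ecount events;
 * if cq_ecount were 256 (a hypothetical LPFC_CQE_DEF_COUNT value, set in
 * lpfc_sli4_queue_verify()), 1024 events would sit in the free pool.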
7861 * 7862 * Return codes 7863 * 0 - successful 7864 * -ENOMEM - No available memory 7865 **/ 7866 static int 7867 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 7868 { 7869 struct lpfc_cq_event *cq_event; 7870 int i; 7871 7872 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 7873 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 7874 if (!cq_event) 7875 goto out_pool_create_fail; 7876 list_add_tail(&cq_event->list, 7877 &phba->sli4_hba.sp_cqe_event_pool); 7878 } 7879 return 0; 7880 7881 out_pool_create_fail: 7882 lpfc_sli4_cq_event_pool_destroy(phba); 7883 return -ENOMEM; 7884 } 7885 7886 /** 7887 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 7888 * @phba: pointer to lpfc hba data structure. 7889 * 7890 * This routine is invoked to free the pool of completion queue events at 7891 * driver unload time. Note that, it is the responsibility of the driver 7892 * cleanup routine to free all the outstanding completion-queue events 7893 * allocated from this pool back into the pool before invoking this routine 7894 * to destroy the pool. 7895 **/ 7896 static void 7897 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 7898 { 7899 struct lpfc_cq_event *cq_event, *next_cq_event; 7900 7901 list_for_each_entry_safe(cq_event, next_cq_event, 7902 &phba->sli4_hba.sp_cqe_event_pool, list) { 7903 list_del(&cq_event->list); 7904 kfree(cq_event); 7905 } 7906 } 7907 7908 /** 7909 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 7910 * @phba: pointer to lpfc hba data structure. 7911 * 7912 * This routine is the lock free version of the API invoked to allocate a 7913 * completion-queue event from the free pool. 7914 * 7915 * Return: Pointer to the newly allocated completion-queue event if successful 7916 * NULL otherwise. 7917 **/ 7918 struct lpfc_cq_event * 7919 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 7920 { 7921 struct lpfc_cq_event *cq_event = NULL; 7922 7923 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 7924 struct lpfc_cq_event, list); 7925 return cq_event; 7926 } 7927 7928 /** 7929 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 7930 * @phba: pointer to lpfc hba data structure. 7931 * 7932 * This routine is the lock version of the API invoked to allocate a 7933 * completion-queue event from the free pool. 7934 * 7935 * Return: Pointer to the newly allocated completion-queue event if successful 7936 * NULL otherwise. 7937 **/ 7938 struct lpfc_cq_event * 7939 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 7940 { 7941 struct lpfc_cq_event *cq_event; 7942 unsigned long iflags; 7943 7944 spin_lock_irqsave(&phba->hbalock, iflags); 7945 cq_event = __lpfc_sli4_cq_event_alloc(phba); 7946 spin_unlock_irqrestore(&phba->hbalock, iflags); 7947 return cq_event; 7948 } 7949 7950 /** 7951 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 7952 * @phba: pointer to lpfc hba data structure. 7953 * @cq_event: pointer to the completion queue event to be freed. 7954 * 7955 * This routine is the lock free version of the API invoked to release a 7956 * completion-queue event back into the free pool. 7957 **/ 7958 void 7959 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 7960 struct lpfc_cq_event *cq_event) 7961 { 7962 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 7963 } 7964 7965 /** 7966 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 7967 * @phba: pointer to lpfc hba data structure. 
7968 * @cq_event: pointer to the completion queue event to be freed.
7969 *
7970 * This routine is the lock version of the API invoked to release a
7971 * completion-queue event back into the free pool.
7972 **/
7973 void
7974 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7975 struct lpfc_cq_event *cq_event)
7976 {
7977 unsigned long iflags;
7978 spin_lock_irqsave(&phba->hbalock, iflags);
7979 __lpfc_sli4_cq_event_release(phba, cq_event);
7980 spin_unlock_irqrestore(&phba->hbalock, iflags);
7981 }
7982
7983 /**
7984 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
7985 * @phba: pointer to lpfc hba data structure.
7986 *
7987 * This routine is invoked to release all the pending completion-queue
7988 * events back into the free pool for a device reset.
7989 **/
7990 static void
7991 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7992 {
7993 LIST_HEAD(cqelist);
7994 struct lpfc_cq_event *cqe;
7995 unsigned long iflags;
7996
7997 /* Retrieve all the pending WCQEs from pending WCQE lists */
7998 spin_lock_irqsave(&phba->hbalock, iflags);
7999 /* Pending FCP XRI abort events */
8000 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8001 &cqelist);
8002 /* Pending ELS XRI abort events */
8003 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8004 &cqelist);
8005 /* Pending async events */
8006 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
8007 &cqelist);
8008 spin_unlock_irqrestore(&phba->hbalock, iflags);
8009
8010 while (!list_empty(&cqelist)) {
8011 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
8012 lpfc_sli4_cq_event_release(phba, cqe);
8013 }
8014 }
8015
8016 /**
8017 * lpfc_pci_function_reset - Reset the PCI function.
8018 * @phba: pointer to lpfc hba data structure.
8019 *
8020 * This routine is invoked to request a PCI function reset. It destroys
8021 * all resources assigned to the PCI function which originates this request.
8022 *
8023 * Return codes
8024 * 0 - successful
8025 * -ENOMEM - No available memory
8026 * -EIO - The mailbox failed to complete successfully.
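 *
 * Timing note: on if_type 2 ports the readiness poll below runs for up
 * to 1500 iterations with a 20 ms sleep each, i.e. 1500 * 20 ms = 30
 * seconds, which is where the "up to 30 seconds" comment in the code
 * comes from.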
8027 **/ 8028 int 8029 lpfc_pci_function_reset(struct lpfc_hba *phba) 8030 { 8031 LPFC_MBOXQ_t *mboxq; 8032 uint32_t rc = 0, if_type; 8033 uint32_t shdr_status, shdr_add_status; 8034 uint32_t rdy_chk; 8035 uint32_t port_reset = 0; 8036 union lpfc_sli4_cfg_shdr *shdr; 8037 struct lpfc_register reg_data; 8038 uint16_t devid; 8039 8040 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8041 switch (if_type) { 8042 case LPFC_SLI_INTF_IF_TYPE_0: 8043 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8044 GFP_KERNEL); 8045 if (!mboxq) { 8046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8047 "0494 Unable to allocate memory for " 8048 "issuing SLI_FUNCTION_RESET mailbox " 8049 "command\n"); 8050 return -ENOMEM; 8051 } 8052 8053 /* Setup PCI function reset mailbox-ioctl command */ 8054 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8055 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 8056 LPFC_SLI4_MBX_EMBED); 8057 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8058 shdr = (union lpfc_sli4_cfg_shdr *) 8059 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 8060 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8061 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 8062 &shdr->response); 8063 if (rc != MBX_TIMEOUT) 8064 mempool_free(mboxq, phba->mbox_mem_pool); 8065 if (shdr_status || shdr_add_status || rc) { 8066 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8067 "0495 SLI_FUNCTION_RESET mailbox " 8068 "failed with status x%x add_status x%x," 8069 " mbx status x%x\n", 8070 shdr_status, shdr_add_status, rc); 8071 rc = -ENXIO; 8072 } 8073 break; 8074 case LPFC_SLI_INTF_IF_TYPE_2: 8075 wait: 8076 /* 8077 * Poll the Port Status Register and wait for RDY for 8078 * up to 30 seconds. If the port doesn't respond, treat 8079 * it as an error. 8080 */ 8081 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 8082 if (lpfc_readl(phba->sli4_hba.u.if_type2. 8083 STATUSregaddr, ®_data.word0)) { 8084 rc = -ENODEV; 8085 goto out; 8086 } 8087 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 8088 break; 8089 msleep(20); 8090 } 8091 8092 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 8093 phba->work_status[0] = readl( 8094 phba->sli4_hba.u.if_type2.ERR1regaddr); 8095 phba->work_status[1] = readl( 8096 phba->sli4_hba.u.if_type2.ERR2regaddr); 8097 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8098 "2890 Port not ready, port status reg " 8099 "0x%x error 1=0x%x, error 2=0x%x\n", 8100 reg_data.word0, 8101 phba->work_status[0], 8102 phba->work_status[1]); 8103 rc = -ENODEV; 8104 goto out; 8105 } 8106 8107 if (!port_reset) { 8108 /* 8109 * Reset the port now 8110 */ 8111 reg_data.word0 = 0; 8112 bf_set(lpfc_sliport_ctrl_end, ®_data, 8113 LPFC_SLIPORT_LITTLE_ENDIAN); 8114 bf_set(lpfc_sliport_ctrl_ip, ®_data, 8115 LPFC_SLIPORT_INIT_PORT); 8116 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 8117 CTRLregaddr); 8118 /* flush */ 8119 pci_read_config_word(phba->pcidev, 8120 PCI_DEVICE_ID, &devid); 8121 8122 port_reset = 1; 8123 msleep(20); 8124 goto wait; 8125 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 8126 rc = -ENODEV; 8127 goto out; 8128 } 8129 break; 8130 8131 case LPFC_SLI_INTF_IF_TYPE_1: 8132 default: 8133 break; 8134 } 8135 8136 out: 8137 /* Catch the not-ready port failure after a port reset. */ 8138 if (rc) { 8139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8140 "3317 HBA not functional: IP Reset Failed " 8141 "try: echo fw_reset > board_mode\n"); 8142 rc = -ENODEV; 8143 } 8144 8145 return rc; 8146 } 8147 8148 /** 8149 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 
8150 * @phba: pointer to lpfc hba data structure. 8151 * 8152 * This routine is invoked to set up the PCI device memory space for device 8153 * with SLI-4 interface spec. 8154 * 8155 * Return codes 8156 * 0 - successful 8157 * other values - error 8158 **/ 8159 static int 8160 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 8161 { 8162 struct pci_dev *pdev; 8163 unsigned long bar0map_len, bar1map_len, bar2map_len; 8164 int error = -ENODEV; 8165 uint32_t if_type; 8166 8167 /* Obtain PCI device reference */ 8168 if (!phba->pcidev) 8169 return error; 8170 else 8171 pdev = phba->pcidev; 8172 8173 /* Set the device DMA mask size */ 8174 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 8175 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 8176 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 8177 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 8178 return error; 8179 } 8180 } 8181 8182 /* 8183 * The BARs and register set definitions and offset locations are 8184 * dependent on the if_type. 8185 */ 8186 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 8187 &phba->sli4_hba.sli_intf.word0)) { 8188 return error; 8189 } 8190 8191 /* There is no SLI3 failback for SLI4 devices. */ 8192 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 8193 LPFC_SLI_INTF_VALID) { 8194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8195 "2894 SLI_INTF reg contents invalid " 8196 "sli_intf reg 0x%x\n", 8197 phba->sli4_hba.sli_intf.word0); 8198 return error; 8199 } 8200 8201 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8202 /* 8203 * Get the bus address of SLI4 device Bar regions and the 8204 * number of bytes required by each mapping. The mapping of the 8205 * particular PCI BARs regions is dependent on the type of 8206 * SLI4 device. 8207 */ 8208 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 8209 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 8210 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 8211 8212 /* 8213 * Map SLI4 PCI Config Space Register base to a kernel virtual 8214 * addr 8215 */ 8216 phba->sli4_hba.conf_regs_memmap_p = 8217 ioremap(phba->pci_bar0_map, bar0map_len); 8218 if (!phba->sli4_hba.conf_regs_memmap_p) { 8219 dev_printk(KERN_ERR, &pdev->dev, 8220 "ioremap failed for SLI4 PCI config " 8221 "registers.\n"); 8222 goto out; 8223 } 8224 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 8225 /* Set up BAR0 PCI config space register memory map */ 8226 lpfc_sli4_bar0_register_memmap(phba, if_type); 8227 } else { 8228 phba->pci_bar0_map = pci_resource_start(pdev, 1); 8229 bar0map_len = pci_resource_len(pdev, 1); 8230 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8231 dev_printk(KERN_ERR, &pdev->dev, 8232 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 8233 goto out; 8234 } 8235 phba->sli4_hba.conf_regs_memmap_p = 8236 ioremap(phba->pci_bar0_map, bar0map_len); 8237 if (!phba->sli4_hba.conf_regs_memmap_p) { 8238 dev_printk(KERN_ERR, &pdev->dev, 8239 "ioremap failed for SLI4 PCI config " 8240 "registers.\n"); 8241 goto out; 8242 } 8243 lpfc_sli4_bar0_register_memmap(phba, if_type); 8244 } 8245 8246 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8247 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 8248 /* 8249 * Map SLI4 if type 0 HBA Control Register base to a kernel 8250 * virtual address and setup the registers. 
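 * (Recap of the if_type 0 BAR usage in this routine: BAR0 maps the SLI4
 * config registers, BAR2 the control registers, and BAR4 the doorbell
 * registers.)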
8251 */ 8252 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 8253 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 8254 phba->sli4_hba.ctrl_regs_memmap_p = 8255 ioremap(phba->pci_bar1_map, bar1map_len); 8256 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 8257 dev_printk(KERN_ERR, &pdev->dev, 8258 "ioremap failed for SLI4 HBA control registers.\n"); 8259 goto out_iounmap_conf; 8260 } 8261 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; 8262 lpfc_sli4_bar1_register_memmap(phba); 8263 } 8264 8265 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8266 (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 8267 /* 8268 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 8269 * virtual address and setup the registers. 8270 */ 8271 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 8272 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 8273 phba->sli4_hba.drbl_regs_memmap_p = 8274 ioremap(phba->pci_bar2_map, bar2map_len); 8275 if (!phba->sli4_hba.drbl_regs_memmap_p) { 8276 dev_printk(KERN_ERR, &pdev->dev, 8277 "ioremap failed for SLI4 HBA doorbell registers.\n"); 8278 goto out_iounmap_ctrl; 8279 } 8280 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 8281 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 8282 if (error) 8283 goto out_iounmap_all; 8284 } 8285 8286 return 0; 8287 8288 out_iounmap_all: 8289 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 8290 out_iounmap_ctrl: 8291 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 8292 out_iounmap_conf: 8293 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8294 out: 8295 return error; 8296 } 8297 8298 /** 8299 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 8300 * @phba: pointer to lpfc hba data structure. 8301 * 8302 * This routine is invoked to unset the PCI device memory space for device 8303 * with SLI-4 interface spec. 8304 **/ 8305 static void 8306 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 8307 { 8308 uint32_t if_type; 8309 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8310 8311 switch (if_type) { 8312 case LPFC_SLI_INTF_IF_TYPE_0: 8313 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 8314 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 8315 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8316 break; 8317 case LPFC_SLI_INTF_IF_TYPE_2: 8318 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8319 break; 8320 case LPFC_SLI_INTF_IF_TYPE_1: 8321 default: 8322 dev_printk(KERN_ERR, &phba->pcidev->dev, 8323 "FATAL - unsupported SLI4 interface type - %d\n", 8324 if_type); 8325 break; 8326 } 8327 } 8328 8329 /** 8330 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 8331 * @phba: pointer to lpfc hba data structure. 8332 * 8333 * This routine is invoked to enable the MSI-X interrupt vectors to device 8334 * with SLI-3 interface specs. The kernel function pci_enable_msix_exact() 8335 * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(), 8336 * once invoked, enables either all or nothing, depending on the current 8337 * availability of PCI vector resources. The device driver is responsible 8338 * for calling the individual request_irq() to register each MSI-X vector 8339 * with a interrupt handler, which is done in this function. Note that 8340 * later when device is unloading, the driver should always call free_irq() 8341 * on all MSI-X vectors it has done request_irq() on before calling 8342 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 8343 * will be left with MSI-X enabled and leaks its vectors. 
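 *
 * The matching teardown, as performed by lpfc_sli_disable_msix() below,
 * is in sketch:
 *
 *	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
 *		free_irq(phba->msix_entries[i].vector, phba);
 *	pci_disable_msix(phba->pcidev);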
8344 * 8345 * Return codes 8346 * 0 - successful 8347 * other values - error 8348 **/ 8349 static int 8350 lpfc_sli_enable_msix(struct lpfc_hba *phba) 8351 { 8352 int rc, i; 8353 LPFC_MBOXQ_t *pmb; 8354 8355 /* Set up MSI-X multi-message vectors */ 8356 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8357 phba->msix_entries[i].entry = i; 8358 8359 /* Configure MSI-X capability structure */ 8360 rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries, 8361 LPFC_MSIX_VECTORS); 8362 if (rc) { 8363 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8364 "0420 PCI enable MSI-X failed (%d)\n", rc); 8365 goto vec_fail_out; 8366 } 8367 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8368 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8369 "0477 MSI-X entry[%d]: vector=x%x " 8370 "message=%d\n", i, 8371 phba->msix_entries[i].vector, 8372 phba->msix_entries[i].entry); 8373 /* 8374 * Assign MSI-X vectors to interrupt handlers 8375 */ 8376 8377 /* vector-0 is associated to slow-path handler */ 8378 rc = request_irq(phba->msix_entries[0].vector, 8379 &lpfc_sli_sp_intr_handler, 0, 8380 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8381 if (rc) { 8382 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8383 "0421 MSI-X slow-path request_irq failed " 8384 "(%d)\n", rc); 8385 goto msi_fail_out; 8386 } 8387 8388 /* vector-1 is associated to fast-path handler */ 8389 rc = request_irq(phba->msix_entries[1].vector, 8390 &lpfc_sli_fp_intr_handler, 0, 8391 LPFC_FP_DRIVER_HANDLER_NAME, phba); 8392 8393 if (rc) { 8394 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8395 "0429 MSI-X fast-path request_irq failed " 8396 "(%d)\n", rc); 8397 goto irq_fail_out; 8398 } 8399 8400 /* 8401 * Configure HBA MSI-X attention conditions to messages 8402 */ 8403 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8404 8405 if (!pmb) { 8406 rc = -ENOMEM; 8407 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8408 "0474 Unable to allocate memory for issuing " 8409 "MBOX_CONFIG_MSI command\n"); 8410 goto mem_fail_out; 8411 } 8412 rc = lpfc_config_msi(phba, pmb); 8413 if (rc) 8414 goto mbx_fail_out; 8415 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8416 if (rc != MBX_SUCCESS) { 8417 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 8418 "0351 Config MSI mailbox command failed, " 8419 "mbxCmd x%x, mbxStatus x%x\n", 8420 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 8421 goto mbx_fail_out; 8422 } 8423 8424 /* Free memory allocated for mailbox command */ 8425 mempool_free(pmb, phba->mbox_mem_pool); 8426 return rc; 8427 8428 mbx_fail_out: 8429 /* Free memory allocated for mailbox command */ 8430 mempool_free(pmb, phba->mbox_mem_pool); 8431 8432 mem_fail_out: 8433 /* free the irq already requested */ 8434 free_irq(phba->msix_entries[1].vector, phba); 8435 8436 irq_fail_out: 8437 /* free the irq already requested */ 8438 free_irq(phba->msix_entries[0].vector, phba); 8439 8440 msi_fail_out: 8441 /* Unconfigure MSI-X capability structure */ 8442 pci_disable_msix(phba->pcidev); 8443 8444 vec_fail_out: 8445 return rc; 8446 } 8447 8448 /** 8449 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 8450 * @phba: pointer to lpfc hba data structure. 8451 * 8452 * This routine is invoked to release the MSI-X vectors and then disable the 8453 * MSI-X interrupt mode to device with SLI-3 interface spec. 
8454 **/
8455 static void
8456 lpfc_sli_disable_msix(struct lpfc_hba *phba)
8457 {
8458 int i;
8459
8460 /* Free up MSI-X multi-message vectors */
8461 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8462 free_irq(phba->msix_entries[i].vector, phba);
8463 /* Disable MSI-X */
8464 pci_disable_msix(phba->pcidev);
8465
8466 return;
8467 }
8468
8469 /**
8470 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
8471 * @phba: pointer to lpfc hba data structure.
8472 *
8473 * This routine is invoked to enable the MSI interrupt mode to device with
8474 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
8475 * enable the MSI vector. The device driver is responsible for calling
8476 * request_irq() to register the MSI vector with an interrupt handler, which
8477 * is done in this function.
8478 *
8479 * Return codes
8480 * 0 - successful
8481 * other values - error
8482 */
8483 static int
8484 lpfc_sli_enable_msi(struct lpfc_hba *phba)
8485 {
8486 int rc;
8487
8488 rc = pci_enable_msi(phba->pcidev);
8489 if (!rc)
8490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8491 "0462 PCI enable MSI mode success.\n");
8492 else {
8493 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8494 "0471 PCI enable MSI mode failed (%d)\n", rc);
8495 return rc;
8496 }
8497
8498 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8499 0, LPFC_DRIVER_NAME, phba);
8500 if (rc) {
8501 pci_disable_msi(phba->pcidev);
8502 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8503 "0478 MSI request_irq failed (%d)\n", rc);
8504 }
8505 return rc;
8506 }
8507
8508 /**
8509 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
8510 * @phba: pointer to lpfc hba data structure.
8511 *
8512 * This routine is invoked to disable the MSI interrupt mode to device with
8513 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
8514 * done request_irq() on before calling pci_disable_msi(). Failure to do so
8515 * results in a BUG_ON() and the device will be left with MSI enabled and
8516 * leak its vector.
8517 */
8518 static void
8519 lpfc_sli_disable_msi(struct lpfc_hba *phba)
8520 {
8521 free_irq(phba->pcidev->irq, phba);
8522 pci_disable_msi(phba->pcidev);
8523 return;
8524 }
8525
8526 /**
8527 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
8528 * @phba: pointer to lpfc hba data structure.
8529 *
8530 * This routine is invoked to enable the device interrupt and associate the
8531 * driver's interrupt handler(s) with interrupt vector(s) on a device with
8532 * the SLI-3 interface spec. Depending on the interrupt mode configured for
8533 * the driver, the driver will try to fall back from the configured interrupt
8534 * mode to an interrupt mode which is supported by the platform, kernel, and
8535 * device, in the order of:
8536 * MSI-X -> MSI -> IRQ.
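 *
 * The cfg_mode argument and the returned intr_mode share the encoding used
 * by the code below: 2 = MSI-X, 1 = MSI, 0 = INTx, with LPFC_INTR_ERROR
 * returned when no mode could be enabled. A hypothetical caller sketch:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;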
8537 * 8538 * Return codes 8539 * 0 - successful 8540 * other values - error 8541 **/ 8542 static uint32_t 8543 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 8544 { 8545 uint32_t intr_mode = LPFC_INTR_ERROR; 8546 int retval; 8547 8548 if (cfg_mode == 2) { 8549 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 8550 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 8551 if (!retval) { 8552 /* Now, try to enable MSI-X interrupt mode */ 8553 retval = lpfc_sli_enable_msix(phba); 8554 if (!retval) { 8555 /* Indicate initialization to MSI-X mode */ 8556 phba->intr_type = MSIX; 8557 intr_mode = 2; 8558 } 8559 } 8560 } 8561 8562 /* Fallback to MSI if MSI-X initialization failed */ 8563 if (cfg_mode >= 1 && phba->intr_type == NONE) { 8564 retval = lpfc_sli_enable_msi(phba); 8565 if (!retval) { 8566 /* Indicate initialization to MSI mode */ 8567 phba->intr_type = MSI; 8568 intr_mode = 1; 8569 } 8570 } 8571 8572 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 8573 if (phba->intr_type == NONE) { 8574 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8575 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 8576 if (!retval) { 8577 /* Indicate initialization to INTx mode */ 8578 phba->intr_type = INTx; 8579 intr_mode = 0; 8580 } 8581 } 8582 return intr_mode; 8583 } 8584 8585 /** 8586 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 8587 * @phba: pointer to lpfc hba data structure. 8588 * 8589 * This routine is invoked to disable device interrupt and disassociate the 8590 * driver's interrupt handler(s) from interrupt vector(s) to device with 8591 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 8592 * release the interrupt vector(s) for the message signaled interrupt. 8593 **/ 8594 static void 8595 lpfc_sli_disable_intr(struct lpfc_hba *phba) 8596 { 8597 /* Disable the currently initialized interrupt mode */ 8598 if (phba->intr_type == MSIX) 8599 lpfc_sli_disable_msix(phba); 8600 else if (phba->intr_type == MSI) 8601 lpfc_sli_disable_msi(phba); 8602 else if (phba->intr_type == INTx) 8603 free_irq(phba->pcidev->irq, phba); 8604 8605 /* Reset interrupt management states */ 8606 phba->intr_type = NONE; 8607 phba->sli.slistat.sli_intr = 0; 8608 8609 return; 8610 } 8611 8612 /** 8613 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id 8614 * @phba: pointer to lpfc hba data structure. 8615 * 8616 * Find next available CPU to use for IRQ to CPU affinity. 8617 */ 8618 static int 8619 lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) 8620 { 8621 struct lpfc_vector_map_info *cpup; 8622 int cpu; 8623 8624 cpup = phba->sli4_hba.cpu_map; 8625 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8626 /* CPU must be online */ 8627 if (cpu_online(cpu)) { 8628 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8629 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && 8630 (cpup->phys_id == phys_id)) { 8631 return cpu; 8632 } 8633 } 8634 cpup++; 8635 } 8636 8637 /* 8638 * If we get here, we have used ALL CPUs for the specific 8639 * phys_id. Now we need to clear out lpfc_used_cpu and start 8640 * reusing CPUs. 
8641 */ 8642 8643 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8644 if (lpfc_used_cpu[cpu] == phys_id) 8645 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; 8646 } 8647 8648 cpup = phba->sli4_hba.cpu_map; 8649 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8650 /* CPU must be online */ 8651 if (cpu_online(cpu)) { 8652 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8653 (cpup->phys_id == phys_id)) { 8654 return cpu; 8655 } 8656 } 8657 cpup++; 8658 } 8659 return LPFC_VECTOR_MAP_EMPTY; 8660 } 8661 8662 /** 8663 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors 8664 * @phba: pointer to lpfc hba data structure. 8665 * @vectors: number of HBA vectors 8666 * 8667 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector 8668 * affinization across multple physical CPUs (numa nodes). 8669 * In addition, this routine will assign an IO channel for each CPU 8670 * to use when issuing I/Os. 8671 */ 8672 static int 8673 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) 8674 { 8675 int i, idx, saved_chann, used_chann, cpu, phys_id; 8676 int max_phys_id, min_phys_id; 8677 int num_io_channel, first_cpu, chan; 8678 struct lpfc_vector_map_info *cpup; 8679 #ifdef CONFIG_X86 8680 struct cpuinfo_x86 *cpuinfo; 8681 #endif 8682 struct cpumask *mask; 8683 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; 8684 8685 /* If there is no mapping, just return */ 8686 if (!phba->cfg_fcp_cpu_map) 8687 return 1; 8688 8689 /* Init cpu_map array */ 8690 memset(phba->sli4_hba.cpu_map, 0xff, 8691 (sizeof(struct lpfc_vector_map_info) * 8692 phba->sli4_hba.num_present_cpu)); 8693 8694 max_phys_id = 0; 8695 min_phys_id = 0xff; 8696 phys_id = 0; 8697 num_io_channel = 0; 8698 first_cpu = LPFC_VECTOR_MAP_EMPTY; 8699 8700 /* Update CPU map with physical id and core id of each CPU */ 8701 cpup = phba->sli4_hba.cpu_map; 8702 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8703 #ifdef CONFIG_X86 8704 cpuinfo = &cpu_data(cpu); 8705 cpup->phys_id = cpuinfo->phys_proc_id; 8706 cpup->core_id = cpuinfo->cpu_core_id; 8707 #else 8708 /* No distinction between CPUs for other platforms */ 8709 cpup->phys_id = 0; 8710 cpup->core_id = 0; 8711 #endif 8712 8713 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8714 "3328 CPU physid %d coreid %d\n", 8715 cpup->phys_id, cpup->core_id); 8716 8717 if (cpup->phys_id > max_phys_id) 8718 max_phys_id = cpup->phys_id; 8719 if (cpup->phys_id < min_phys_id) 8720 min_phys_id = cpup->phys_id; 8721 cpup++; 8722 } 8723 8724 phys_id = min_phys_id; 8725 /* Now associate the HBA vectors with specific CPUs */ 8726 for (idx = 0; idx < vectors; idx++) { 8727 cpup = phba->sli4_hba.cpu_map; 8728 cpu = lpfc_find_next_cpu(phba, phys_id); 8729 if (cpu == LPFC_VECTOR_MAP_EMPTY) { 8730 8731 /* Try for all phys_id's */ 8732 for (i = 1; i < max_phys_id; i++) { 8733 phys_id++; 8734 if (phys_id > max_phys_id) 8735 phys_id = min_phys_id; 8736 cpu = lpfc_find_next_cpu(phba, phys_id); 8737 if (cpu == LPFC_VECTOR_MAP_EMPTY) 8738 continue; 8739 goto found; 8740 } 8741 8742 /* Use round robin for scheduling */ 8743 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; 8744 chan = 0; 8745 cpup = phba->sli4_hba.cpu_map; 8746 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 8747 cpup->channel_id = chan; 8748 cpup++; 8749 chan++; 8750 if (chan >= phba->cfg_fcp_io_channel) 8751 chan = 0; 8752 } 8753 8754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8755 "3329 Cannot set affinity:" 8756 "Error mapping vector %d (%d)\n", 8757 idx, vectors); 8758 return 0; 8759 } 8760 found: 8761 cpup += cpu; 8762 
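	/*
	 * cpup was reset to the base of the cpu_map array at the top of
	 * this loop, so after the "cpup += cpu" above it points at the
	 * map entry for the CPU just selected for this vector.
	 */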
if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8763 lpfc_used_cpu[cpu] = phys_id;
8764
8765 /* Associate vector with selected CPU */
8766 cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8767
8768 /* Associate IO channel with selected CPU */
8769 cpup->channel_id = idx;
8770 num_io_channel++;
8771
8772 if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8773 first_cpu = cpu;
8774
8775 /* Now affinitize to the selected CPU */
8776 mask = &cpup->maskbits;
8777 cpumask_clear(mask);
8778 cpumask_set_cpu(cpu, mask);
8779 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8780 vector, mask);
8781
8782 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8783 "3330 Set Affinity: CPU %d channel %d "
8784 "irq %d (%x)\n",
8785 cpu, cpup->channel_id,
8786 phba->sli4_hba.msix_entries[idx].vector, i);
8787
8788 /* Spread vector mapping across multiple physical CPU nodes */
8789 phys_id++;
8790 if (phys_id > max_phys_id)
8791 phys_id = min_phys_id;
8792 }
8793
8794 /*
8795 * Finally fill in the IO channel for any remaining CPUs.
8796 * At this point, all IO channels have been assigned to a specific
8797 * MSIx vector, mapped to a specific CPU.
8798 * Base the remaining IO channel assignments on the IO channels
8799 * already assigned to other CPUs on the same phys_id.
8800 */
8801 for (i = min_phys_id; i <= max_phys_id; i++) {
8802 /*
8803 * If there are no io channels already mapped to
8804 * this phys_id, just round-robin through the io_channels.
8805 * Set up chann[] for round-robin.
8806 */
8807 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8808 chann[idx] = idx;
8809
8810 saved_chann = 0;
8811 used_chann = 0;
8812
8813 /*
8814 * First build a list of IO channels already assigned
8815 * to this phys_id before reassigning the same IO
8816 * channels to the remaining CPUs.
8817 */
8818 cpup = phba->sli4_hba.cpu_map;
8819 cpu = first_cpu;
8820 cpup += cpu;
8821 for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8822 idx++) {
8823 if (cpup->phys_id == i) {
8824 /*
8825 * Save any IO channels that are
8826 * already mapped to this phys_id.
8827 				 */
8828 				if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8829 					chann[saved_chann] =
8830 						cpup->channel_id;
8831 					saved_chann++;
8832 					goto out;
8833 				}
8834
8835 				/* See if we are using round-robin */
8836 				if (saved_chann == 0)
8837 					saved_chann =
8838 						phba->cfg_fcp_io_channel;
8839
8840 				/* Associate next IO channel with CPU */
8841 				cpup->channel_id = chann[used_chann];
8842 				num_io_channel++;
8843 				used_chann++;
8844 				if (used_chann == saved_chann)
8845 					used_chann = 0;
8846
8847 				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8848 						"3331 Set IO_CHANN "
8849 						"CPU %d channel %d\n",
8850 						idx, cpup->channel_id);
8851 			}
8852 out:
8853 			cpu++;
8854 			if (cpu >= phba->sli4_hba.num_present_cpu) {
8855 				cpup = phba->sli4_hba.cpu_map;
8856 				cpu = 0;
8857 			} else {
8858 				cpup++;
8859 			}
8860 		}
8861 	}
8862
8863 	if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
8864 		cpup = phba->sli4_hba.cpu_map;
8865 		for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
8866 			if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
8867 				cpup->channel_id = 0;
8868 				num_io_channel++;
8869
8870 				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8871 						"3332 Assign IO_CHANN "
8872 						"CPU %d channel %d\n",
8873 						idx, cpup->channel_id);
8874 			}
8875 			cpup++;
8876 		}
8877 	}
8878
8879 	/* Sanity check */
8880 	if (num_io_channel != phba->sli4_hba.num_present_cpu)
8881 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8882 				"3333 Set affinity mismatch: "
8883 				"%d chann != %d cpus: %d vectors\n",
8884 				num_io_channel, phba->sli4_hba.num_present_cpu,
8885 				vectors);
8886
8887 	/* Enable using cpu affinity for scheduling */
8888 	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8889 	return 1;
8890 }
8891
8892
8893 /**
8894  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
8895  * @phba: pointer to lpfc hba data structure.
8896  *
8897  * This routine is invoked to enable the MSI-X interrupt vectors to device
8898  * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
8899  * is called to enable the MSI-X vectors. The device driver is responsible
8900  * for calling the individual request_irq() to register each MSI-X vector
8901  * with an interrupt handler, which is done in this function. Note that
8902  * later when device is unloading, the driver should always call free_irq()
8903  * on all MSI-X vectors it has done request_irq() on before calling
8904  * pci_disable_msix(). Failure to do so results in a BUG_ON() and the
8905  * device will be left with MSI-X enabled and will leak its vectors.
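 *
 * A minimal teardown sketch (illustrative only; it mirrors what
 * lpfc_sli4_disable_msix() below actually does):
 *
 *	for (index = 0; index < vectors; index++)
 *		free_irq(phba->sli4_hba.msix_entries[index].vector,
 *			 &phba->sli4_hba.fcp_eq_hdl[index]);
 *	pci_disable_msix(phba->pcidev);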
8906 * 8907 * Return codes 8908 * 0 - successful 8909 * other values - error 8910 **/ 8911 static int 8912 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 8913 { 8914 int vectors, rc, index; 8915 8916 /* Set up MSI-X multi-message vectors */ 8917 for (index = 0; index < phba->cfg_fcp_io_channel; index++) 8918 phba->sli4_hba.msix_entries[index].entry = index; 8919 8920 /* Configure MSI-X capability structure */ 8921 vectors = phba->cfg_fcp_io_channel; 8922 if (phba->cfg_fof) { 8923 phba->sli4_hba.msix_entries[index].entry = index; 8924 vectors++; 8925 } 8926 rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries, 8927 2, vectors); 8928 if (rc < 0) { 8929 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8930 "0484 PCI enable MSI-X failed (%d)\n", rc); 8931 goto vec_fail_out; 8932 } 8933 vectors = rc; 8934 8935 /* Log MSI-X vector assignment */ 8936 for (index = 0; index < vectors; index++) 8937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8938 "0489 MSI-X entry[%d]: vector=x%x " 8939 "message=%d\n", index, 8940 phba->sli4_hba.msix_entries[index].vector, 8941 phba->sli4_hba.msix_entries[index].entry); 8942 8943 /* Assign MSI-X vectors to interrupt handlers */ 8944 for (index = 0; index < vectors; index++) { 8945 memset(&phba->sli4_hba.handler_name[index], 0, 16); 8946 snprintf((char *)&phba->sli4_hba.handler_name[index], 8947 LPFC_SLI4_HANDLER_NAME_SZ, 8948 LPFC_DRIVER_HANDLER_NAME"%d", index); 8949 8950 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8951 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8952 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); 8953 if (phba->cfg_fof && (index == (vectors - 1))) 8954 rc = request_irq( 8955 phba->sli4_hba.msix_entries[index].vector, 8956 &lpfc_sli4_fof_intr_handler, 0, 8957 (char *)&phba->sli4_hba.handler_name[index], 8958 &phba->sli4_hba.fcp_eq_hdl[index]); 8959 else 8960 rc = request_irq( 8961 phba->sli4_hba.msix_entries[index].vector, 8962 &lpfc_sli4_hba_intr_handler, 0, 8963 (char *)&phba->sli4_hba.handler_name[index], 8964 &phba->sli4_hba.fcp_eq_hdl[index]); 8965 if (rc) { 8966 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8967 "0486 MSI-X fast-path (%d) " 8968 "request_irq failed (%d)\n", index, rc); 8969 goto cfg_fail_out; 8970 } 8971 } 8972 8973 if (phba->cfg_fof) 8974 vectors--; 8975 8976 if (vectors != phba->cfg_fcp_io_channel) { 8977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8978 "3238 Reducing IO channels to match number of " 8979 "MSI-X vectors, requested %d got %d\n", 8980 phba->cfg_fcp_io_channel, vectors); 8981 phba->cfg_fcp_io_channel = vectors; 8982 } 8983 8984 if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport))) 8985 lpfc_sli4_set_affinity(phba, vectors); 8986 return rc; 8987 8988 cfg_fail_out: 8989 /* free the irq already requested */ 8990 for (--index; index >= 0; index--) { 8991 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 8992 vector, NULL); 8993 free_irq(phba->sli4_hba.msix_entries[index].vector, 8994 &phba->sli4_hba.fcp_eq_hdl[index]); 8995 } 8996 8997 /* Unconfigure MSI-X capability structure */ 8998 pci_disable_msix(phba->pcidev); 8999 9000 vec_fail_out: 9001 return rc; 9002 } 9003 9004 /** 9005 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 9006 * @phba: pointer to lpfc hba data structure. 9007 * 9008 * This routine is invoked to release the MSI-X vectors and then disable the 9009 * MSI-X interrupt mode to device with SLI-4 interface spec. 
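 * This pairs with lpfc_sli4_enable_msix() above: every vector that was
 * registered there with request_irq() is released here with free_irq()
 * before pci_disable_msix() is called.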
9010  **/
9011 static void
9012 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
9013 {
9014 	int index;
9015
9016 	/* Free up MSI-X multi-message vectors */
9017 	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
9018 		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
9019 				      vector, NULL);
9020 		free_irq(phba->sli4_hba.msix_entries[index].vector,
9021 			 &phba->sli4_hba.fcp_eq_hdl[index]);
9022 	}
9023 	if (phba->cfg_fof) {
9024 		free_irq(phba->sli4_hba.msix_entries[index].vector,
9025 			 &phba->sli4_hba.fcp_eq_hdl[index]);
9026 	}
9027 	/* Disable MSI-X */
9028 	pci_disable_msix(phba->pcidev);
9029
9030 	return;
9031 }
9032
9033 /**
9034  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
9035  * @phba: pointer to lpfc hba data structure.
9036  *
9037  * This routine is invoked to enable the MSI interrupt mode to device with
9038  * SLI-4 interface spec. The kernel function pci_enable_msi() is called
9039  * to enable the MSI vector. The device driver is responsible for calling
9040  * the request_irq() to register the MSI vector with an interrupt handler,
9041  * which is done in this function.
9042  *
9043  * Return codes
9044  * 	0 - successful
9045  * 	other values - error
9046  **/
9047 static int
9048 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
9049 {
9050 	int rc, index;
9051
9052 	rc = pci_enable_msi(phba->pcidev);
9053 	if (!rc)
9054 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9055 				"0487 PCI enable MSI mode success.\n");
9056 	else {
9057 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9058 				"0488 PCI enable MSI mode failed (%d)\n", rc);
9059 		return rc;
9060 	}
9061
9062 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9063 			 0, LPFC_DRIVER_NAME, phba);
9064 	if (rc) {
9065 		pci_disable_msi(phba->pcidev);
9066 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9067 				"0490 MSI request_irq failed (%d)\n", rc);
9068 		return rc;
9069 	}
9070
9071 	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
9072 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9073 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9074 	}
9075
9076 	if (phba->cfg_fof) {
9077 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9078 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9079 	}
9080 	return 0;
9081 }
9082
9083 /**
9084  * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
9085  * @phba: pointer to lpfc hba data structure.
9086  *
9087  * This routine is invoked to disable the MSI interrupt mode to device with
9088  * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
9089  * has done request_irq() on before calling pci_disable_msi(). Failure to
9090  * do so results in a BUG_ON() and the device will be left with MSI enabled
9091  * and will leak its vector.
9092  **/
9093 static void
9094 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
9095 {
9096 	free_irq(phba->pcidev->irq, phba);
9097 	pci_disable_msi(phba->pcidev);
9098 	return;
9099 }
9100
9101 /**
9102  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
9103  * @phba: pointer to lpfc hba data structure.
9104  *
9105  * This routine is invoked to enable device interrupt and associate driver's
9106  * interrupt handler(s) to interrupt vector(s) to device with SLI-4
9107  * interface spec. Depending on the interrupt mode configured for the
9108  * driver, the driver will try to fall back from the configured interrupt
9109  * mode to an interrupt mode which is supported by the platform, kernel,
9110  * and device in the order of:
9111  * 	MSI-X -> MSI -> IRQ.
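 *
 * A hypothetical caller sketch (cfg_mode is the configured interrupt
 * mode: 2 = MSI-X, 1 = MSI, 0 = INTx):
 *
 *	intr_mode = lpfc_sli4_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;
 *	phba->intr_mode = intr_mode;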
9112  *
9113  * Return codes
9114  * 	0 - successful
9115  * 	other values - error
9116  **/
9117 static uint32_t
9118 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9119 {
9120 	uint32_t intr_mode = LPFC_INTR_ERROR;
9121 	int retval, index;
9122
9123 	if (cfg_mode == 2) {
9124 		/* Preparation before conf_msi mbox cmd */
9125 		retval = 0;
9126 		if (!retval) {
9127 			/* Now, try to enable MSI-X interrupt mode */
9128 			retval = lpfc_sli4_enable_msix(phba);
9129 			if (!retval) {
9130 				/* Indicate initialization to MSI-X mode */
9131 				phba->intr_type = MSIX;
9132 				intr_mode = 2;
9133 			}
9134 		}
9135 	}
9136
9137 	/* Fallback to MSI if MSI-X initialization failed */
9138 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
9139 		retval = lpfc_sli4_enable_msi(phba);
9140 		if (!retval) {
9141 			/* Indicate initialization to MSI mode */
9142 			phba->intr_type = MSI;
9143 			intr_mode = 1;
9144 		}
9145 	}
9146
9147 	/* Fallback to INTx if both MSI-X/MSI initialization failed */
9148 	if (phba->intr_type == NONE) {
9149 		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9150 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9151 		if (!retval) {
9152 			/* Indicate initialization to INTx mode */
9153 			phba->intr_type = INTx;
9154 			intr_mode = 0;
9155 			for (index = 0; index < phba->cfg_fcp_io_channel;
9156 			     index++) {
9157 				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9158 				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9159 				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
9160 					fcp_eq_in_use, 1);
9161 			}
9162 			if (phba->cfg_fof) {
9163 				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9164 				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9165 				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
9166 					fcp_eq_in_use, 1);
9167 			}
9168 		}
9169 	}
9170 	return intr_mode;
9171 }
9172
9173 /**
9174  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
9175  * @phba: pointer to lpfc hba data structure.
9176  *
9177  * This routine is invoked to disable device interrupt and disassociate
9178  * the driver's interrupt handler(s) from interrupt vector(s) to device
9179  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
9180  * will release the interrupt vector(s) for the message signaled interrupt.
9181  **/
9182 static void
9183 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
9184 {
9185 	/* Disable the currently initialized interrupt mode */
9186 	if (phba->intr_type == MSIX)
9187 		lpfc_sli4_disable_msix(phba);
9188 	else if (phba->intr_type == MSI)
9189 		lpfc_sli4_disable_msi(phba);
9190 	else if (phba->intr_type == INTx)
9191 		free_irq(phba->pcidev->irq, phba);
9192
9193 	/* Reset interrupt management states */
9194 	phba->intr_type = NONE;
9195 	phba->sli.slistat.sli_intr = 0;
9196
9197 	return;
9198 }
9199
9200 /**
9201  * lpfc_unset_hba - Unset SLI3 hba device initialization
9202  * @phba: pointer to lpfc hba data structure.
9203  *
9204  * This routine is invoked to unset the HBA device initialization steps to
9205  * a device with SLI-3 interface spec.
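 * Within this file it is only reached from the lpfc_pci_probe_one_s3()
 * error path (the out_remove_device label below).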
9206  **/
9207 static void
9208 lpfc_unset_hba(struct lpfc_hba *phba)
9209 {
9210 	struct lpfc_vport *vport = phba->pport;
9211 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9212
9213 	spin_lock_irq(shost->host_lock);
9214 	vport->load_flag |= FC_UNLOADING;
9215 	spin_unlock_irq(shost->host_lock);
9216
9217 	kfree(phba->vpi_bmask);
9218 	kfree(phba->vpi_ids);
9219
9220 	lpfc_stop_hba_timers(phba);
9221
9222 	phba->pport->work_port_events = 0;
9223
9224 	lpfc_sli_hba_down(phba);
9225
9226 	lpfc_sli_brdrestart(phba);
9227
9228 	lpfc_sli_disable_intr(phba);
9229
9230 	return;
9231 }
9232
9233 /**
9234  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
9235  * @phba: Pointer to HBA context object.
9236  *
9237  * This function is called in the SLI4 code path to wait for completion
9238  * of the device's XRI exchange busy conditions. It will check the XRI
9239  * exchange busy on outstanding FCP and ELS I/Os every 10ms for up to 10
9240  * seconds; after that, it will check the XRI exchange busy on outstanding
9241  * FCP and ELS I/Os every 30 seconds, log an error message, and wait
9242  * forever. Only when all XRI exchange busy conditions have completed will
9243  * the driver unload proceed with invoking the function reset ioctl mailbox
9244  * command to the CNA and the rest of the driver unload resource release.
9245  **/
9246 static void
9247 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
9248 {
9249 	int wait_time = 0;
9250 	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9251 	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9252
9253 	while (!fcp_xri_cmpl || !els_xri_cmpl) {
9254 		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
9255 			if (!fcp_xri_cmpl)
9256 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9257 						"2877 FCP XRI exchange busy "
9258 						"wait time: %d seconds.\n",
9259 						wait_time/1000);
9260 			if (!els_xri_cmpl)
9261 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9262 						"2878 ELS XRI exchange busy "
9263 						"wait time: %d seconds.\n",
9264 						wait_time/1000);
9265 			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
9266 			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
9267 		} else {
9268 			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
9269 			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
9270 		}
9271 		fcp_xri_cmpl =
9272 			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9273 		els_xri_cmpl =
9274 			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9275 	}
9276 }
9277
9278 /**
9279  * lpfc_sli4_hba_unset - Unset the fcoe hba
9280  * @phba: Pointer to HBA context object.
9281  *
9282  * This function is called in the SLI4 code path to reset the HBA's FCoE
9283  * function. The caller is not required to hold any lock. This routine
9284  * issues PCI function reset mailbox command to reset the FCoE function.
9285  * At the end of the function, it calls lpfc_hba_down_post function to
9286  * free any pending commands.
9287  **/
9288 static void
9289 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
9290 {
9291 	int wait_cnt = 0;
9292 	LPFC_MBOXQ_t *mboxq;
9293 	struct pci_dev *pdev = phba->pcidev;
9294
9295 	lpfc_stop_hba_timers(phba);
9296 	phba->sli4_hba.intr_enable = 0;
9297
9298 	/*
9299 	 * Gracefully wait out the potential current outstanding asynchronous
9300 	 * mailbox command.
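	 * The sequence below is: block new async mailbox posts, poll the
	 * active-mailbox flag every 10ms for up to LPFC_ACTIVE_MBOX_WAIT_CNT
	 * iterations, then forcefully complete whatever is still pending.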
9301 	 */
9302
9303 	/* First, block any pending async mailbox command from being posted */
9304 	spin_lock_irq(&phba->hbalock);
9305 	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9306 	spin_unlock_irq(&phba->hbalock);
9307 	/* Now, trying to wait it out if we can */
9308 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9309 		msleep(10);
9310 		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
9311 			break;
9312 	}
9313 	/* Forcefully release the outstanding mailbox command if timed out */
9314 	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9315 		spin_lock_irq(&phba->hbalock);
9316 		mboxq = phba->sli.mbox_active;
9317 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9318 		__lpfc_mbox_cmpl_put(phba, mboxq);
9319 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9320 		phba->sli.mbox_active = NULL;
9321 		spin_unlock_irq(&phba->hbalock);
9322 	}
9323
9324 	/* Abort all iocbs associated with the hba */
9325 	lpfc_sli_hba_iocb_abort(phba);
9326
9327 	/* Wait for completion of device XRI exchange busy */
9328 	lpfc_sli4_xri_exchange_busy_wait(phba);
9329
9330 	/* Disable PCI subsystem interrupt */
9331 	lpfc_sli4_disable_intr(phba);
9332
9333 	/* Disable SR-IOV if enabled */
9334 	if (phba->cfg_sriov_nr_virtfn)
9335 		pci_disable_sriov(pdev);
9336
9337 	/* Stop kthread signal shall trigger work_done one more time */
9338 	kthread_stop(phba->worker_thread);
9339
9340 	/* Reset SLI4 HBA FCoE function */
9341 	lpfc_pci_function_reset(phba);
9342 	lpfc_sli4_queue_destroy(phba);
9343
9344 	/* Stop the SLI4 device port */
9345 	phba->pport->work_port_events = 0;
9346 }
9347
9348 /**
9349  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
9350  * @phba: Pointer to HBA context object.
9351  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9352  *
9353  * This function is called in the SLI4 code path to read the port's
9354  * sli4 capabilities.
9355  *
9356  * This function may be called from any context that can block-wait
9357  * for the completion. The expectation is that this routine is called
9358  * typically from probe_one or from the online routine.
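 *
 * A hypothetical usage sketch (mailbox allocation as done elsewhere in
 * this file):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *	mempool_free(mboxq, phba->mbox_mem_pool);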
9359  **/
9360 int
9361 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9362 {
9363 	int rc;
9364 	struct lpfc_mqe *mqe;
9365 	struct lpfc_pc_sli4_params *sli4_params;
9366 	uint32_t mbox_tmo;
9367
9368 	rc = 0;
9369 	mqe = &mboxq->u.mqe;
9370
9371 	/* Read the port's SLI4 Parameters port capabilities */
9372 	lpfc_pc_sli4_params(mboxq);
9373 	if (!phba->sli4_hba.intr_enable)
9374 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9375 	else {
9376 		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9377 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9378 	}
9379
9380 	if (unlikely(rc))
9381 		return 1;
9382
9383 	sli4_params = &phba->sli4_hba.pc_sli4_params;
9384 	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
9385 	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
9386 	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
9387 	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
9388 					     &mqe->un.sli4_params);
9389 	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
9390 					     &mqe->un.sli4_params);
9391 	sli4_params->proto_types = mqe->un.sli4_params.word3;
9392 	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
9393 	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
9394 	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
9395 	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
9396 	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
9397 	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
9398 	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
9399 	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
9400 	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
9401 	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
9402 	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
9403 	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
9404 	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
9405 	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
9406 	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
9407 	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
9408 	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
9409 	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
9410 	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
9411 	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
9412
9413 	/* Make sure that sge_supp_len can be handled by the driver */
9414 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9415 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9416
9417 	return rc;
9418 }
9419
9420 /**
9421  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
9422  * @phba: Pointer to HBA context object.
9423  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9424  *
9425  * This function is called in the SLI4 code path to read the port's
9426  * sli4 capabilities.
9427  *
9428  * This function may be called from any context that can block-wait
9429  * for the completion. The expectation is that this routine is called
9430  * typically from probe_one or from the online routine.
9431  **/
9432 int
9433 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9434 {
9435 	int rc;
9436 	struct lpfc_mqe *mqe = &mboxq->u.mqe;
9437 	struct lpfc_pc_sli4_params *sli4_params;
9438 	uint32_t mbox_tmo;
9439 	int length;
9440 	struct lpfc_sli4_parameters *mbx_sli4_parameters;
9441
9442 	/*
9443 	 * By default, the driver assumes the SLI4 port requires RPI
9444 	 * header postings. The SLI4_PARAM response will correct this
9445 	 * assumption.
9446 	 */
9447 	phba->sli4_hba.rpi_hdrs_in_use = 1;
9448
9449 	/* Read the port's SLI4 Config Parameters */
9450 	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
9451 		  sizeof(struct lpfc_sli4_cfg_mhdr));
9452 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9453 			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
9454 			 length, LPFC_SLI4_MBX_EMBED);
9455 	if (!phba->sli4_hba.intr_enable)
9456 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9457 	else {
9458 		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9459 		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9460 	}
9461 	if (unlikely(rc))
9462 		return rc;
9463 	sli4_params = &phba->sli4_hba.pc_sli4_params;
9464 	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
9465 	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
9466 	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
9467 	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
9468 	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
9469 					     mbx_sli4_parameters);
9470 	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
9471 					     mbx_sli4_parameters);
9472 	if (bf_get(cfg_phwq, mbx_sli4_parameters))
9473 		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
9474 	else
9475 		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9476 	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9477 	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
9478 	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
9479 	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9480 	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9481 	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
9482 	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
9483 	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
9484 	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
9485 					    mbx_sli4_parameters);
9486 	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
9487 					   mbx_sli4_parameters);
9488 	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
9489 	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
9490
9491 	/* Make sure that sge_supp_len can be handled by the driver */
9492 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9493 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9494
9495 	return 0;
9496 }
9497
9498 /**
9499  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
9500  * @pdev: pointer to PCI device
9501  * @pid: pointer to PCI device identifier
9502  *
9503  * This routine is to be called to attach a device with SLI-3 interface spec
9504  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
9505  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
9506  * information of the device and driver to see if the driver states that it
9507  * can support this kind of device. If the match is successful, the driver core
9508  * invokes this routine. If this routine determines it can claim the HBA, it
9509  * does all the initialization that it needs to do to handle the HBA properly.
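 *
 * Illustrative wiring sketch only (in the full driver a common
 * lpfc_pci_probe_one() entry point dispatches to the SLI-3 or SLI-4
 * variant based on the device):
 *
 *	static struct pci_driver lpfc_driver = {
 *		.name  = LPFC_DRIVER_NAME,
 *		.probe = lpfc_pci_probe_one,
 *		...
 *	};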
9510 * 9511 * Return code 9512 * 0 - driver can claim the device 9513 * negative value - driver can not claim the device 9514 **/ 9515 static int 9516 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 9517 { 9518 struct lpfc_hba *phba; 9519 struct lpfc_vport *vport = NULL; 9520 struct Scsi_Host *shost = NULL; 9521 int error; 9522 uint32_t cfg_mode, intr_mode; 9523 9524 /* Allocate memory for HBA structure */ 9525 phba = lpfc_hba_alloc(pdev); 9526 if (!phba) 9527 return -ENOMEM; 9528 9529 /* Perform generic PCI device enabling operation */ 9530 error = lpfc_enable_pci_dev(phba); 9531 if (error) 9532 goto out_free_phba; 9533 9534 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 9535 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 9536 if (error) 9537 goto out_disable_pci_dev; 9538 9539 /* Set up SLI-3 specific device PCI memory space */ 9540 error = lpfc_sli_pci_mem_setup(phba); 9541 if (error) { 9542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9543 "1402 Failed to set up pci memory space.\n"); 9544 goto out_disable_pci_dev; 9545 } 9546 9547 /* Set up phase-1 common device driver resources */ 9548 error = lpfc_setup_driver_resource_phase1(phba); 9549 if (error) { 9550 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9551 "1403 Failed to set up driver resource.\n"); 9552 goto out_unset_pci_mem_s3; 9553 } 9554 9555 /* Set up SLI-3 specific device driver resources */ 9556 error = lpfc_sli_driver_resource_setup(phba); 9557 if (error) { 9558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9559 "1404 Failed to set up driver resource.\n"); 9560 goto out_unset_pci_mem_s3; 9561 } 9562 9563 /* Initialize and populate the iocb list per host */ 9564 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 9565 if (error) { 9566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9567 "1405 Failed to initialize iocb list.\n"); 9568 goto out_unset_driver_resource_s3; 9569 } 9570 9571 /* Set up common device driver resources */ 9572 error = lpfc_setup_driver_resource_phase2(phba); 9573 if (error) { 9574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9575 "1406 Failed to set up driver resource.\n"); 9576 goto out_free_iocb_list; 9577 } 9578 9579 /* Get the default values for Model Name and Description */ 9580 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9581 9582 /* Create SCSI host to the physical port */ 9583 error = lpfc_create_shost(phba); 9584 if (error) { 9585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9586 "1407 Failed to create scsi host.\n"); 9587 goto out_unset_driver_resource; 9588 } 9589 9590 /* Configure sysfs attributes */ 9591 vport = phba->pport; 9592 error = lpfc_alloc_sysfs_attr(vport); 9593 if (error) { 9594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9595 "1476 Failed to allocate sysfs attr\n"); 9596 goto out_destroy_shost; 9597 } 9598 9599 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 9600 /* Now, trying to enable interrupt and bring up the device */ 9601 cfg_mode = phba->cfg_use_msi; 9602 while (true) { 9603 /* Put device to a known state before enabling interrupt */ 9604 lpfc_stop_port(phba); 9605 /* Configure and enable interrupt */ 9606 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 9607 if (intr_mode == LPFC_INTR_ERROR) { 9608 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9609 "0431 Failed to enable interrupt.\n"); 9610 error = -ENODEV; 9611 goto out_free_sysfs_attr; 9612 } 9613 /* SLI-3 HBA setup */ 9614 if (lpfc_sli_hba_setup(phba)) { 9615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9616 "1477 Failed to set up hba\n"); 
9617 			error = -ENODEV;
9618 			goto out_remove_device;
9619 		}
9620
9621 		/* Wait 50ms for the interrupts of previous mailbox commands */
9622 		msleep(50);
9623 		/* Check active interrupts on message signaled interrupts */
9624 		if (intr_mode == 0 ||
9625 		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
9626 			/* Log the current active interrupt mode */
9627 			phba->intr_mode = intr_mode;
9628 			lpfc_log_intr_mode(phba, intr_mode);
9629 			break;
9630 		} else {
9631 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9632 					"0447 Configure interrupt mode (%d) "
9633 					"failed active interrupt test.\n",
9634 					intr_mode);
9635 			/* Disable the current interrupt mode */
9636 			lpfc_sli_disable_intr(phba);
9637 			/* Try next level of interrupt mode */
9638 			cfg_mode = --intr_mode;
9639 		}
9640 	}
9641
9642 	/* Perform post initialization setup */
9643 	lpfc_post_init_setup(phba);
9644
9645 	/* Check if there are static vports to be created. */
9646 	lpfc_create_static_vport(phba);
9647
9648 	return 0;
9649
9650 out_remove_device:
9651 	lpfc_unset_hba(phba);
9652 out_free_sysfs_attr:
9653 	lpfc_free_sysfs_attr(vport);
9654 out_destroy_shost:
9655 	lpfc_destroy_shost(phba);
9656 out_unset_driver_resource:
9657 	lpfc_unset_driver_resource_phase2(phba);
9658 out_free_iocb_list:
9659 	lpfc_free_iocb_list(phba);
9660 out_unset_driver_resource_s3:
9661 	lpfc_sli_driver_resource_unset(phba);
9662 out_unset_pci_mem_s3:
9663 	lpfc_sli_pci_mem_unset(phba);
9664 out_disable_pci_dev:
9665 	lpfc_disable_pci_dev(phba);
9666 	if (shost)
9667 		scsi_host_put(shost);
9668 out_free_phba:
9669 	lpfc_hba_free(phba);
9670 	return error;
9671 }
9672
9673 /**
9674  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
9675  * @pdev: pointer to PCI device
9676  *
9677  * This routine is to be called to detach a device with SLI-3 interface
9678  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
9679  * is removed from PCI bus, it performs all the necessary cleanup for the HBA
9680  * device to be removed from the PCI subsystem properly.
9681  **/
9682 static void
9683 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
9684 {
9685 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9686 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9687 	struct lpfc_vport **vports;
9688 	struct lpfc_hba *phba = vport->phba;
9689 	int i;
9690 	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
9691
9692 	spin_lock_irq(&phba->hbalock);
9693 	vport->load_flag |= FC_UNLOADING;
9694 	spin_unlock_irq(&phba->hbalock);
9695
9696 	lpfc_free_sysfs_attr(vport);
9697
9698 	/* Release all the vports against this physical port */
9699 	vports = lpfc_create_vport_work_array(phba);
9700 	if (vports != NULL)
9701 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9702 			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9703 				continue;
9704 			fc_vport_terminate(vports[i]->fc_vport);
9705 		}
9706 	lpfc_destroy_vport_work_array(phba, vports);
9707
9708 	/* Remove FC host and then SCSI host with the physical port */
9709 	fc_remove_host(shost);
9710 	scsi_remove_host(shost);
9711 	lpfc_cleanup(vport);
9712
9713 	/*
9714 	 * Bring down the SLI Layer. This step disables all interrupts,
9715 	 * clears the rings, discards all mailbox commands, and resets
9716 	 * the HBA.
9717 	 */
9718
9719 	/* HBA interrupt will be disabled after this call */
9720 	lpfc_sli_hba_down(phba);
9721 	/* Stop kthread signal shall trigger work_done one more time */
9722 	kthread_stop(phba->worker_thread);
9723 	/* Final cleanup of txcmplq and reset the HBA */
9724 	lpfc_sli_brdrestart(phba);
9725
9726 	kfree(phba->vpi_bmask);
9727 	kfree(phba->vpi_ids);
9728
9729 	lpfc_stop_hba_timers(phba);
9730 	spin_lock_irq(&phba->hbalock);
9731 	list_del_init(&vport->listentry);
9732 	spin_unlock_irq(&phba->hbalock);
9733
9734 	lpfc_debugfs_terminate(vport);
9735
9736 	/* Disable SR-IOV if enabled */
9737 	if (phba->cfg_sriov_nr_virtfn)
9738 		pci_disable_sriov(pdev);
9739
9740 	/* Disable interrupt */
9741 	lpfc_sli_disable_intr(phba);
9742
9743 	scsi_host_put(shost);
9744
9745 	/*
9746 	 * Call scsi_free before mem_free since scsi bufs are released to their
9747 	 * corresponding pools here.
9748 	 */
9749 	lpfc_scsi_free(phba);
9750 	lpfc_mem_free_all(phba);
9751
9752 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9753 			  phba->hbqslimp.virt, phba->hbqslimp.phys);
9754
9755 	/* Free resources associated with SLI2 interface */
9756 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9757 			  phba->slim2p.virt, phba->slim2p.phys);
9758
9759 	/* unmap adapter SLIM and Control Registers */
9760 	iounmap(phba->ctrl_regs_memmap_p);
9761 	iounmap(phba->slim_memmap_p);
9762
9763 	lpfc_hba_free(phba);
9764
9765 	pci_release_selected_regions(pdev, bars);
9766 	pci_disable_device(pdev);
9767 }
9768
9769 /**
9770  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
9771  * @pdev: pointer to PCI device
9772  * @msg: power management message
9773  *
9774  * This routine is to be called from the kernel's PCI subsystem to support
9775  * system Power Management (PM) to device with SLI-3 interface spec. When
9776  * PM invokes this method, it quiesces the device by stopping the driver's
9777  * worker thread for the device, turning off the device's interrupt and DMA,
9778  * and bringing the device offline. Note that as the driver implements the
9779  * minimum PM requirements to a power-aware driver's PM support for the
9780  * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
9781  * to the suspend() method call will be treated as SUSPEND and the driver will
9782  * fully reinitialize its device during the resume() method call, the driver
9783  * will set the device to PCI_D3hot state in PCI config space instead of
9784  * setting it according to the @msg provided by the PM.
9785  *
9786  * Return code
9787  * 	0 - driver suspended the device
9788  * 	Error otherwise
9789  **/
9790 static int
9791 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9792 {
9793 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9794 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9795
9796 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9797 			"0473 PCI device Power Management suspend.\n");
9798
9799 	/* Bring down the device */
9800 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9801 	lpfc_offline(phba);
9802 	kthread_stop(phba->worker_thread);
9803
9804 	/* Disable interrupt from device */
9805 	lpfc_sli_disable_intr(phba);
9806
9807 	/* Save device state to PCI config space */
9808 	pci_save_state(pdev);
9809 	pci_set_power_state(pdev, PCI_D3hot);
9810
9811 	return 0;
9812 }
9813
9814 /**
9815  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
9816  * @pdev: pointer to PCI device
9817  *
9818  * This routine is to be called from the kernel's PCI subsystem to support
9819  * system Power Management (PM) to device with SLI-3 interface spec. When PM
9820  * invokes this method, it restores the device's PCI config space state and
9821  * fully reinitializes the device and brings it online. Note that as the
9822  * driver implements the minimum PM requirements to a power-aware driver's
9823  * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
9824  * FREEZE) to the suspend() method call will be treated as SUSPEND and the
9825  * driver will fully reinitialize its device during the resume() method call,
9826  * the device will be set to PCI_D0 directly in PCI config space before
9827  * restoring the state.
9828  *
9829  * Return code
9830  * 	0 - driver resumed the device
9831  * 	Error otherwise
9832  **/
9833 static int
9834 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9835 {
9836 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9837 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9838 	uint32_t intr_mode;
9839 	int error;
9840
9841 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9842 			"0452 PCI device Power Management resume.\n");
9843
9844 	/* Restore device state from PCI config space */
9845 	pci_set_power_state(pdev, PCI_D0);
9846 	pci_restore_state(pdev);
9847
9848 	/*
9849 	 * As the new kernel behavior of pci_restore_state() API call clears
9850 	 * device saved_state flag, need to save the restored state again.
9851 	 */
9852 	pci_save_state(pdev);
9853
9854 	if (pdev->is_busmaster)
9855 		pci_set_master(pdev);
9856
9857 	/* Startup the kernel thread for this host adapter.
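	 * The worker thread was stopped in lpfc_pci_suspend_one_s3(); it
	 * must be restarted before interrupts are re-enabled so deferred
	 * work has a consumer again.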
	 */
9858 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
9859 					  "lpfc_worker_%d", phba->brd_no);
9860 	if (IS_ERR(phba->worker_thread)) {
9861 		error = PTR_ERR(phba->worker_thread);
9862 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9863 				"0434 PM resume failed to start worker "
9864 				"thread: error=x%x.\n", error);
9865 		return error;
9866 	}
9867
9868 	/* Configure and enable interrupt */
9869 	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9870 	if (intr_mode == LPFC_INTR_ERROR) {
9871 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9872 				"0430 PM resume Failed to enable interrupt\n");
9873 		return -EIO;
9874 	} else
9875 		phba->intr_mode = intr_mode;
9876
9877 	/* Restart HBA and bring it online */
9878 	lpfc_sli_brdrestart(phba);
9879 	lpfc_online(phba);
9880
9881 	/* Log the current active interrupt mode */
9882 	lpfc_log_intr_mode(phba, phba->intr_mode);
9883
9884 	return 0;
9885 }
9886
9887 /**
9888  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
9889  * @phba: pointer to lpfc hba data structure.
9890  *
9891  * This routine is called to prepare the SLI3 device for PCI slot recover. It
9892  * aborts all the outstanding SCSI I/Os to the pci device.
9893  **/
9894 static void
9895 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9896 {
9897 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9898 			"2723 PCI channel I/O abort preparing for recovery\n");
9899
9900 	/*
9901 	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
9902 	 * and let the SCSI mid-layer retry them to recover.
9903 	 */
9904 	lpfc_sli_abort_fcp_rings(phba);
9905 }
9906
9907 /**
9908  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
9909  * @phba: pointer to lpfc hba data structure.
9910  *
9911  * This routine is called to prepare the SLI3 device for PCI slot reset. It
9912  * disables the device interrupt and pci device, and aborts the internal FCP
9913  * pending I/Os.
9914  **/
9915 static void
9916 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9917 {
9918 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9919 			"2710 PCI channel disable preparing for reset\n");
9920
9921 	/* Block any management I/Os to the device */
9922 	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
9923
9924 	/* Block all SCSI devices' I/Os on the host */
9925 	lpfc_scsi_dev_block(phba);
9926
9927 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
9928 	lpfc_sli_flush_fcp_rings(phba);
9929
9930 	/* stop all timers */
9931 	lpfc_stop_hba_timers(phba);
9932
9933 	/* Disable interrupt and pci device */
9934 	lpfc_sli_disable_intr(phba);
9935 	pci_disable_device(phba->pcidev);
9936 }
9937
9938 /**
9939  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
9940  * @phba: pointer to lpfc hba data structure.
9941  *
9942  * This routine is called to prepare the SLI3 device for PCI slot permanently
9943  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
9944  * pending I/Os.
9945  **/
9946 static void
9947 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9948 {
9949 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9950 			"2711 PCI channel permanent disable for failure\n");
9951 	/* Block all SCSI devices' I/Os on the host */
9952 	lpfc_scsi_dev_block(phba);
9953
9954 	/* stop all timers */
9955 	lpfc_stop_hba_timers(phba);
9956
9957 	/* Clean up all driver's outstanding SCSI I/Os */
9958 	lpfc_sli_flush_fcp_rings(phba);
9959 }
9960
9961 /**
9962  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
9963  * @pdev: pointer to PCI device.
9964 * @state: the current PCI connection state. 9965 * 9966 * This routine is called from the PCI subsystem for I/O error handling to 9967 * device with SLI-3 interface spec. This function is called by the PCI 9968 * subsystem after a PCI bus error affecting this device has been detected. 9969 * When this function is invoked, it will need to stop all the I/Os and 9970 * interrupt(s) to the device. Once that is done, it will return 9971 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 9972 * as desired. 9973 * 9974 * Return codes 9975 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 9976 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 9977 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 9978 **/ 9979 static pci_ers_result_t 9980 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 9981 { 9982 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9983 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9984 9985 switch (state) { 9986 case pci_channel_io_normal: 9987 /* Non-fatal error, prepare for recovery */ 9988 lpfc_sli_prep_dev_for_recover(phba); 9989 return PCI_ERS_RESULT_CAN_RECOVER; 9990 case pci_channel_io_frozen: 9991 /* Fatal error, prepare for slot reset */ 9992 lpfc_sli_prep_dev_for_reset(phba); 9993 return PCI_ERS_RESULT_NEED_RESET; 9994 case pci_channel_io_perm_failure: 9995 /* Permanent failure, prepare for device down */ 9996 lpfc_sli_prep_dev_for_perm_failure(phba); 9997 return PCI_ERS_RESULT_DISCONNECT; 9998 default: 9999 /* Unknown state, prepare and request slot reset */ 10000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10001 "0472 Unknown PCI error state: x%x\n", state); 10002 lpfc_sli_prep_dev_for_reset(phba); 10003 return PCI_ERS_RESULT_NEED_RESET; 10004 } 10005 } 10006 10007 /** 10008 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 10009 * @pdev: pointer to PCI device. 10010 * 10011 * This routine is called from the PCI subsystem for error handling to 10012 * device with SLI-3 interface spec. This is called after PCI bus has been 10013 * reset to restart the PCI card from scratch, as if from a cold-boot. 10014 * During the PCI subsystem error recovery, after driver returns 10015 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 10016 * recovery and then call this routine before calling the .resume method 10017 * to recover the device. This function will initialize the HBA device, 10018 * enable the interrupt, but it will just put the HBA to offline state 10019 * without passing any I/O traffic. 10020 * 10021 * Return codes 10022 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 10023 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10024 */ 10025 static pci_ers_result_t 10026 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 10027 { 10028 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10029 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10030 struct lpfc_sli *psli = &phba->sli; 10031 uint32_t intr_mode; 10032 10033 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 10034 if (pci_enable_device_mem(pdev)) { 10035 printk(KERN_ERR "lpfc: Cannot re-enable " 10036 "PCI device after reset.\n"); 10037 return PCI_ERS_RESULT_DISCONNECT; 10038 } 10039 10040 pci_restore_state(pdev); 10041 10042 /* 10043 * As the new kernel behavior of pci_restore_state() API call clears 10044 * device saved_state flag, need to save the restored state again. 
10045 */ 10046 pci_save_state(pdev); 10047 10048 if (pdev->is_busmaster) 10049 pci_set_master(pdev); 10050 10051 spin_lock_irq(&phba->hbalock); 10052 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 10053 spin_unlock_irq(&phba->hbalock); 10054 10055 /* Configure and enable interrupt */ 10056 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10057 if (intr_mode == LPFC_INTR_ERROR) { 10058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10059 "0427 Cannot re-enable interrupt after " 10060 "slot reset.\n"); 10061 return PCI_ERS_RESULT_DISCONNECT; 10062 } else 10063 phba->intr_mode = intr_mode; 10064 10065 /* Take device offline, it will perform cleanup */ 10066 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10067 lpfc_offline(phba); 10068 lpfc_sli_brdrestart(phba); 10069 10070 /* Log the current active interrupt mode */ 10071 lpfc_log_intr_mode(phba, phba->intr_mode); 10072 10073 return PCI_ERS_RESULT_RECOVERED; 10074 } 10075 10076 /** 10077 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 10078 * @pdev: pointer to PCI device 10079 * 10080 * This routine is called from the PCI subsystem for error handling to device 10081 * with SLI-3 interface spec. It is called when kernel error recovery tells 10082 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 10083 * error recovery. After this call, traffic can start to flow from this device 10084 * again. 10085 */ 10086 static void 10087 lpfc_io_resume_s3(struct pci_dev *pdev) 10088 { 10089 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10090 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10091 10092 /* Bring device online, it will be no-op for non-fatal error resume */ 10093 lpfc_online(phba); 10094 10095 /* Clean up Advanced Error Reporting (AER) if needed */ 10096 if (phba->hba_flag & HBA_AER_ENABLED) 10097 pci_cleanup_aer_uncorrect_error_status(pdev); 10098 } 10099 10100 /** 10101 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 10102 * @phba: pointer to lpfc hba data structure. 10103 * 10104 * returns the number of ELS/CT IOCBs to reserve 10105 **/ 10106 int 10107 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 10108 { 10109 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 10110 10111 if (phba->sli_rev == LPFC_SLI_REV4) { 10112 if (max_xri <= 100) 10113 return 10; 10114 else if (max_xri <= 256) 10115 return 25; 10116 else if (max_xri <= 512) 10117 return 50; 10118 else if (max_xri <= 1024) 10119 return 100; 10120 else if (max_xri <= 1536) 10121 return 150; 10122 else if (max_xri <= 2048) 10123 return 200; 10124 else 10125 return 250; 10126 } else 10127 return 0; 10128 } 10129 10130 /** 10131 * lpfc_write_firmware - attempt to write a firmware image to the port 10132 * @fw: pointer to firmware image returned from request_firmware. 10133 * @phba: pointer to lpfc hba data structure. 
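 * (The second formal parameter is actually a void *context cookie; it
 * carries the lpfc hba pointer and is supplied back to us by
 * request_firmware_nowait(), for which this routine serves as the
 * completion callback -- see lpfc_sli4_request_firmware_update() below.)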
10134 * 10135 **/ 10136 static void 10137 lpfc_write_firmware(const struct firmware *fw, void *context) 10138 { 10139 struct lpfc_hba *phba = (struct lpfc_hba *)context; 10140 char fwrev[FW_REV_STR_SIZE]; 10141 struct lpfc_grp_hdr *image; 10142 struct list_head dma_buffer_list; 10143 int i, rc = 0; 10144 struct lpfc_dmabuf *dmabuf, *next; 10145 uint32_t offset = 0, temp_offset = 0; 10146 10147 /* It can be null in no-wait mode, sanity check */ 10148 if (!fw) { 10149 rc = -ENXIO; 10150 goto out; 10151 } 10152 image = (struct lpfc_grp_hdr *)fw->data; 10153 10154 INIT_LIST_HEAD(&dma_buffer_list); 10155 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 10156 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 10157 LPFC_FILE_TYPE_GROUP) || 10158 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || 10159 (be32_to_cpu(image->size) != fw->size)) { 10160 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10161 "3022 Invalid FW image found. " 10162 "Magic:%x Type:%x ID:%x\n", 10163 be32_to_cpu(image->magic_number), 10164 bf_get_be32(lpfc_grp_hdr_file_type, image), 10165 bf_get_be32(lpfc_grp_hdr_id, image)); 10166 rc = -EINVAL; 10167 goto release_out; 10168 } 10169 lpfc_decode_firmware_rev(phba, fwrev, 1); 10170 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 10171 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10172 "3023 Updating Firmware, Current Version:%s " 10173 "New Version:%s\n", 10174 fwrev, image->revision); 10175 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 10176 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 10177 GFP_KERNEL); 10178 if (!dmabuf) { 10179 rc = -ENOMEM; 10180 goto release_out; 10181 } 10182 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 10183 SLI4_PAGE_SIZE, 10184 &dmabuf->phys, 10185 GFP_KERNEL); 10186 if (!dmabuf->virt) { 10187 kfree(dmabuf); 10188 rc = -ENOMEM; 10189 goto release_out; 10190 } 10191 list_add_tail(&dmabuf->list, &dma_buffer_list); 10192 } 10193 while (offset < fw->size) { 10194 temp_offset = offset; 10195 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 10196 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 10197 memcpy(dmabuf->virt, 10198 fw->data + temp_offset, 10199 fw->size - temp_offset); 10200 temp_offset = fw->size; 10201 break; 10202 } 10203 memcpy(dmabuf->virt, fw->data + temp_offset, 10204 SLI4_PAGE_SIZE); 10205 temp_offset += SLI4_PAGE_SIZE; 10206 } 10207 rc = lpfc_wr_object(phba, &dma_buffer_list, 10208 (fw->size - offset), &offset); 10209 if (rc) 10210 goto release_out; 10211 } 10212 rc = offset; 10213 } 10214 10215 release_out: 10216 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 10217 list_del(&dmabuf->list); 10218 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 10219 dmabuf->virt, dmabuf->phys); 10220 kfree(dmabuf); 10221 } 10222 release_firmware(fw); 10223 out: 10224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10225 "3024 Firmware update done: %d.\n", rc); 10226 return; 10227 } 10228 10229 /** 10230 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 10231 * @phba: pointer to lpfc hba data structure. 10232 * 10233 * This routine is called to perform Linux generic firmware upgrade on device 10234 * that supports such feature. 
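 *
 * The fw_upgrade argument selects the mode: INT_FW_UPGRADE requests an
 * asynchronous (nowait) download, RUN_FW_UPGRADE a synchronous one. A
 * hypothetical caller sketch:
 *
 *	ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);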
10235  **/
10236 int
10237 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
10238 {
10239 	uint8_t file_name[ELX_MODEL_NAME_SIZE];
10240 	int ret;
10241 	const struct firmware *fw;
10242
10243 	/* Only supported on SLI4 interface type 2 for now */
10244 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
10245 	    LPFC_SLI_INTF_IF_TYPE_2)
10246 		return -EPERM;
10247
10248 	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
10249
10250 	if (fw_upgrade == INT_FW_UPGRADE) {
10251 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
10252 					file_name, &phba->pcidev->dev,
10253 					GFP_KERNEL, (void *)phba,
10254 					lpfc_write_firmware);
10255 	} else if (fw_upgrade == RUN_FW_UPGRADE) {
10256 		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
10257 		if (!ret)
10258 			lpfc_write_firmware(fw, (void *)phba);
10259 	} else {
10260 		ret = -EINVAL;
10261 	}
10262
10263 	return ret;
10264 }
10265
10266 /**
10267  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
10268  * @pdev: pointer to PCI device
10269  * @pid: pointer to PCI device identifier
10270  *
10271  * This routine is called from the kernel's PCI subsystem to device with
10272  * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
10273  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
10274  * information of the device and driver to see if the driver states that it
10275  * can support this kind of device. If the match is successful, the driver
10276  * core invokes this routine. If this routine determines it can claim the HBA,
10277  * it does all the initialization that it needs to do to handle the HBA
10278  * properly.
10279  *
10280  * Return code
10281  * 	0 - driver can claim the device
10282  * 	negative value - driver can not claim the device
10283  **/
10284 static int
10285 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
10286 {
10287 	struct lpfc_hba *phba;
10288 	struct lpfc_vport *vport = NULL;
10289 	struct Scsi_Host *shost = NULL;
10290 	int error, ret;
10291 	uint32_t cfg_mode, intr_mode;
10292 	int adjusted_fcp_io_channel;
10293
10294 	/* Allocate memory for HBA structure */
10295 	phba = lpfc_hba_alloc(pdev);
10296 	if (!phba)
10297 		return -ENOMEM;
10298
10299 	/* Perform generic PCI device enabling operation */
10300 	error = lpfc_enable_pci_dev(phba);
10301 	if (error)
10302 		goto out_free_phba;
10303
10304 	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
10305 	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
10306 	if (error)
10307 		goto out_disable_pci_dev;
10308
10309 	/* Set up SLI-4 specific device PCI memory space */
10310 	error = lpfc_sli4_pci_mem_setup(phba);
10311 	if (error) {
10312 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10313 				"1410 Failed to set up pci memory space.\n");
10314 		goto out_disable_pci_dev;
10315 	}
10316
10317 	/* Set up phase-1 common device driver resources */
10318 	error = lpfc_setup_driver_resource_phase1(phba);
10319 	if (error) {
10320 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10321 				"1411 Failed to set up driver resource.\n");
10322 		goto out_unset_pci_mem_s4;
10323 	}
10324
10325 	/* Set up SLI-4 Specific device driver resources */
10326 	error = lpfc_sli4_driver_resource_setup(phba);
10327 	if (error) {
10328 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10329 				"1412 Failed to set up driver resource.\n");
10330 		goto out_unset_pci_mem_s4;
10331 	}
10332
10333 	/* Initialize and populate the iocb list per host */
10334
10335 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10336 "2821 initialize iocb list %d.\n", 10337 phba->cfg_iocb_cnt*1024); 10338 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 10339 10340 if (error) { 10341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10342 "1413 Failed to initialize iocb list.\n"); 10343 goto out_unset_driver_resource_s4; 10344 } 10345 10346 INIT_LIST_HEAD(&phba->active_rrq_list); 10347 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 10348 10349 /* Set up common device driver resources */ 10350 error = lpfc_setup_driver_resource_phase2(phba); 10351 if (error) { 10352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10353 "1414 Failed to set up driver resource.\n"); 10354 goto out_free_iocb_list; 10355 } 10356 10357 /* Get the default values for Model Name and Description */ 10358 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10359 10360 /* Create SCSI host to the physical port */ 10361 error = lpfc_create_shost(phba); 10362 if (error) { 10363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10364 "1415 Failed to create scsi host.\n"); 10365 goto out_unset_driver_resource; 10366 } 10367 10368 /* Configure sysfs attributes */ 10369 vport = phba->pport; 10370 error = lpfc_alloc_sysfs_attr(vport); 10371 if (error) { 10372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10373 "1416 Failed to allocate sysfs attr\n"); 10374 goto out_destroy_shost; 10375 } 10376 10377 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10378 /* Now, trying to enable interrupt and bring up the device */ 10379 cfg_mode = phba->cfg_use_msi; 10380 10381 /* Put device to a known state before enabling interrupt */ 10382 lpfc_stop_port(phba); 10383 /* Configure and enable interrupt */ 10384 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 10385 if (intr_mode == LPFC_INTR_ERROR) { 10386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10387 "0426 Failed to enable interrupt.\n"); 10388 error = -ENODEV; 10389 goto out_free_sysfs_attr; 10390 } 10391 /* Default to single EQ for non-MSI-X */ 10392 if (phba->intr_type != MSIX) 10393 adjusted_fcp_io_channel = 1; 10394 else 10395 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 10396 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 10397 /* Set up SLI-4 HBA */ 10398 if (lpfc_sli4_hba_setup(phba)) { 10399 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10400 "1421 Failed to set up hba\n"); 10401 error = -ENODEV; 10402 goto out_disable_intr; 10403 } 10404 10405 /* Log the current active interrupt mode */ 10406 phba->intr_mode = intr_mode; 10407 lpfc_log_intr_mode(phba, intr_mode); 10408 10409 /* Perform post initialization setup */ 10410 lpfc_post_init_setup(phba); 10411 10412 /* check for firmware upgrade or downgrade */ 10413 if (phba->cfg_request_firmware_upgrade) 10414 ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 10415 10416 /* Check if there are static vports to be created. 
*/ 10417 lpfc_create_static_vport(phba); 10418 return 0; 10419 10420 out_disable_intr: 10421 lpfc_sli4_disable_intr(phba); 10422 out_free_sysfs_attr: 10423 lpfc_free_sysfs_attr(vport); 10424 out_destroy_shost: 10425 lpfc_destroy_shost(phba); 10426 out_unset_driver_resource: 10427 lpfc_unset_driver_resource_phase2(phba); 10428 out_free_iocb_list: 10429 lpfc_free_iocb_list(phba); 10430 out_unset_driver_resource_s4: 10431 lpfc_sli4_driver_resource_unset(phba); 10432 out_unset_pci_mem_s4: 10433 lpfc_sli4_pci_mem_unset(phba); 10434 out_disable_pci_dev: 10435 lpfc_disable_pci_dev(phba); 10436 if (shost) 10437 scsi_host_put(shost); 10438 out_free_phba: 10439 lpfc_hba_free(phba); 10440 return error; 10441 } 10442 10443 /** 10444 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 10445 * @pdev: pointer to PCI device 10446 * 10447 * This routine is called from the kernel's PCI subsystem to device with 10448 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 10449 * removed from PCI bus, it performs all the necessary cleanup for the HBA 10450 * device to be removed from the PCI subsystem properly. 10451 **/ 10452 static void 10453 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 10454 { 10455 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10456 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 10457 struct lpfc_vport **vports; 10458 struct lpfc_hba *phba = vport->phba; 10459 int i; 10460 10461 /* Mark the device unloading flag */ 10462 spin_lock_irq(&phba->hbalock); 10463 vport->load_flag |= FC_UNLOADING; 10464 spin_unlock_irq(&phba->hbalock); 10465 10466 /* Free the HBA sysfs attributes */ 10467 lpfc_free_sysfs_attr(vport); 10468 10469 /* Release all the vports against this physical port */ 10470 vports = lpfc_create_vport_work_array(phba); 10471 if (vports != NULL) 10472 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10473 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 10474 continue; 10475 fc_vport_terminate(vports[i]->fc_vport); 10476 } 10477 lpfc_destroy_vport_work_array(phba, vports); 10478 10479 /* Remove FC host and then SCSI host with the physical port */ 10480 fc_remove_host(shost); 10481 scsi_remove_host(shost); 10482 10483 /* Perform cleanup on the physical port */ 10484 lpfc_cleanup(vport); 10485 10486 /* 10487 * Bring down the SLI Layer. This step disables all interrupts, 10488 * clears the rings, discards all mailbox commands, and resets 10489 * the HBA FCoE function. 10490 */ 10491 lpfc_debugfs_terminate(vport); 10492 lpfc_sli4_hba_unset(phba); 10493 10494 spin_lock_irq(&phba->hbalock); 10495 list_del_init(&vport->listentry); 10496 spin_unlock_irq(&phba->hbalock); 10497 10498 /* Perform scsi free before driver resource_unset since scsi 10499 * buffers are released to their corresponding pools here. 
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to unregister a
 * device with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface
 * spec is removed from the PCI bus, it performs all the necessary cleanup
 * for the HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);

	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. The driver therefore sets the device to the PCI_D3hot state
 * in PCI config space instead of deriving the power state from @msg.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
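/*
 * Editor's summary of the suspend/resume pairing (a restatement of the two
 * routines, no new behavior):
 *
 *	suspend: offline the port -> stop worker thread -> disable
 *	         interrupts and destroy queues -> save PCI state ->
 *	         enter PCI_D3hot
 *	resume:  enter PCI_D0 and restore/re-save PCI state -> restart
 *	         worker thread -> re-enable interrupts -> restart HBA
 *	         and bring it online
 *
 * Everything torn down by lpfc_pci_suspend_one_s4() above is rebuilt by
 * lpfc_pci_resume_one_s4() below.
 */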
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the
 * driver implements only the minimum PM requirements for a power-aware
 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
 * the suspend() method are treated as SUSPEND, and the driver fully
 * reinitializes its device during the resume() method call. The device is
 * therefore set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for permanently
 * disabling the PCI slot. It blocks the SCSI transport layer traffic and
 * flushes the FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
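/*
 * Editor's summary: the three prep routines above are dispatched from
 * lpfc_io_error_detected_s4() below according to the PCI channel state
 * reported by the PCI core:
 *
 *	pci_channel_io_normal       -> prep_dev_for_recover      -> CAN_RECOVER
 *	pci_channel_io_frozen       -> prep_dev_for_reset        -> NEED_RESET
 *	pci_channel_io_perm_failure -> prep_dev_for_perm_failure -> DISCONNECT
 *
 * An unknown state is treated conservatively like a frozen channel and
 * requests a slot reset.
 */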
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called by the PCI subsystem after
 * a PCI bus error affecting this device has been detected. When this
 * function is invoked, it stops all the I/Os and interrupt(s) to the device.
 * Once that is done, it returns PCI_ERS_RESULT_NEED_RESET for the PCI
 * subsystem to perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable its interrupt, but it will just put the HBA in an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called when the kernel error
 * recovery tells the lpfc driver that it is ok to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
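/*
 * Editor's note: taken together, the three routines above implement the
 * PCI error-recovery (AER) state machine for an SLI-4 function. On a
 * recoverable bus error the PCI core drives them in this order:
 *
 *	lpfc_io_error_detected_s4()  quiesce I/O, request a slot reset
 *	lpfc_io_slot_reset_s4()      re-enable the PCI function, restore
 *	                             config space, re-enable interrupts
 *	lpfc_io_resume_s4()          reset/restart the HBA and resume I/O
 */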
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to
 * the proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
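/*
 * Editor's note (hypothetical alternative, not driver code): the
 * pci_dev_grp switch in lpfc_pci_remove_one() above repeats in every
 * dispatcher that follows. The same routing could be expressed once as a
 * small ops table indexed by device group, e.g.:
 *
 *	struct lpfc_pci_grp_ops {
 *		void (*remove)(struct pci_dev *pdev);
 *		int  (*suspend)(struct pci_dev *pdev, pm_message_t msg);
 *		int  (*resume)(struct pci_dev *pdev);
 *	};
 *
 *	static const struct lpfc_pci_grp_ops grp_ops[] = {
 *		[LPFC_PCI_DEV_LP] = { lpfc_pci_remove_one_s3,
 *				      lpfc_pci_suspend_one_s3,
 *				      lpfc_pci_resume_one_s3 },
 *		[LPFC_PCI_DEV_OC] = { lpfc_pci_remove_one_s4,
 *				      lpfc_pci_suspend_one_s4,
 *				      lpfc_pci_resume_one_s4 },
 *	};
 *
 * The explicit switch is kept below as the driver's established style.
 */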
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when the kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported by this adapter. If
 * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
 * Otherwise, the FOF flag is cleared and the pool created for OAS device
 * data is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{

	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}

	return;
}
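/*
 * Editor's note on the FOF (Flash Optimized Fabric) queue hierarchy set up
 * by the routines below:
 *
 *	fof EQ  -->  oas CQ  -->  oas WQ
 *
 * SLI-4 queues are created parent-first (the EQ before the CQ, the CQ
 * before the WQ) and torn down child-first, which is why
 * lpfc_fof_queue_setup() unwinds through out_oas_wq/out_oas_cq in reverse
 * order on error.
 */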
/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_fof) {

		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
	}

	return 0;

out_oas_wq:
	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return rc;

}

/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as a placeholder.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_fof) {

		/* Create OAS CQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;

	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the fof queues created for the
 * FC HBA operation.
 *
 * Return codes
 *      0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
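/*
 * Editor's note: MODULE_DEVICE_TABLE(pci, lpfc_id_table) above exports the
 * PCI ID list to userspace so udev/modprobe can autoload lpfc when a
 * matching adapter appears; for example, a Lancer FC function matches the
 * (PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC) entry. The lpfcmgmt misc
 * device registered in lpfc_init() below appears intended to give
 * management tools a stable /dev node for locating the driver.
 */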
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
 * 	all others - failed
 */
static int __init
lpfc_init(void)
{
	int cpu;
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = 0;
	for_each_present_cpu(cpu)
		lpfc_present_cpu++;

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
		       "_dump_buf_data at 0x%p\n",
		       (1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data,
			   _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
		       "_dump_buf_dif at 0x%p\n",
		       (1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);