1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_cmnd.h> 29 #include <scsi/scsi_device.h> 30 #include <scsi/scsi_host.h> 31 #include <scsi/scsi_transport_fc.h> 32 33 #include "lpfc_hw.h" 34 #include "lpfc_sli.h" 35 #include "lpfc_disc.h" 36 #include "lpfc_scsi.h" 37 #include "lpfc.h" 38 #include "lpfc_crtn.h" 39 #include "lpfc_logmsg.h" 40 #include "lpfc_compat.h" 41 42 /* 43 * Define macro to log: Mailbox command x%x cannot issue Data 44 * This allows multiple uses of lpfc_msgBlk0311 45 * w/o perturbing log msg utility. 46 */ 47 #define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \ 48 lpfc_printf_log(phba, \ 49 KERN_INFO, \ 50 LOG_MBOX | LOG_SLI, \ 51 "%d:0311 Mailbox command x%x cannot issue " \ 52 "Data: x%x x%x x%x\n", \ 53 phba->brd_no, \ 54 mb->mbxCommand, \ 55 phba->hba_state, \ 56 psli->sli_flag, \ 57 flag); 58 59 60 /* There are only four IOCB completion types. */ 61 typedef enum _lpfc_iocb_type { 62 LPFC_UNKNOWN_IOCB, 63 LPFC_UNSOL_IOCB, 64 LPFC_SOL_IOCB, 65 LPFC_ABORT_IOCB 66 } lpfc_iocb_type; 67 68 struct lpfc_iocbq * 69 lpfc_sli_get_iocbq(struct lpfc_hba * phba) 70 { 71 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 72 struct lpfc_iocbq * iocbq = NULL; 73 74 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 75 return iocbq; 76 } 77 78 void 79 lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) 80 { 81 size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb); 82 83 /* 84 * Clean all volatile data fields, preserve iotag and node struct. 85 */ 86 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 87 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 88 } 89 90 /* 91 * Translate the iocb command to an iocb command type used to decide the final 92 * disposition of each completed IOCB. 93 */ 94 static lpfc_iocb_type 95 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 96 { 97 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 98 99 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 100 return 0; 101 102 switch (iocb_cmnd) { 103 case CMD_XMIT_SEQUENCE_CR: 104 case CMD_XMIT_SEQUENCE_CX: 105 case CMD_XMIT_BCAST_CN: 106 case CMD_XMIT_BCAST_CX: 107 case CMD_ELS_REQUEST_CR: 108 case CMD_ELS_REQUEST_CX: 109 case CMD_CREATE_XRI_CR: 110 case CMD_CREATE_XRI_CX: 111 case CMD_GET_RPI_CN: 112 case CMD_XMIT_ELS_RSP_CX: 113 case CMD_GET_RPI_CR: 114 case CMD_FCP_IWRITE_CR: 115 case CMD_FCP_IWRITE_CX: 116 case CMD_FCP_IREAD_CR: 117 case CMD_FCP_IREAD_CX: 118 case CMD_FCP_ICMND_CR: 119 case CMD_FCP_ICMND_CX: 120 case CMD_ADAPTER_MSG: 121 case CMD_ADAPTER_DUMP: 122 case CMD_XMIT_SEQUENCE64_CR: 123 case CMD_XMIT_SEQUENCE64_CX: 124 case CMD_XMIT_BCAST64_CN: 125 case CMD_XMIT_BCAST64_CX: 126 case CMD_ELS_REQUEST64_CR: 127 case CMD_ELS_REQUEST64_CX: 128 case CMD_FCP_IWRITE64_CR: 129 case CMD_FCP_IWRITE64_CX: 130 case CMD_FCP_IREAD64_CR: 131 case CMD_FCP_IREAD64_CX: 132 case CMD_FCP_ICMND64_CR: 133 case CMD_FCP_ICMND64_CX: 134 case CMD_GEN_REQUEST64_CR: 135 case CMD_GEN_REQUEST64_CX: 136 case CMD_XMIT_ELS_RSP64_CX: 137 type = LPFC_SOL_IOCB; 138 break; 139 case CMD_ABORT_XRI_CN: 140 case CMD_ABORT_XRI_CX: 141 case CMD_CLOSE_XRI_CN: 142 case CMD_CLOSE_XRI_CX: 143 case CMD_XRI_ABORTED_CX: 144 case CMD_ABORT_MXRI64_CN: 145 type = LPFC_ABORT_IOCB; 146 break; 147 case CMD_RCV_SEQUENCE_CX: 148 case CMD_RCV_ELS_REQ_CX: 149 case CMD_RCV_SEQUENCE64_CX: 150 case CMD_RCV_ELS_REQ64_CX: 151 type = LPFC_UNSOL_IOCB; 152 break; 153 default: 154 type = LPFC_UNKNOWN_IOCB; 155 break; 156 } 157 158 return type; 159 } 160 161 static int 162 lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb) 163 { 164 struct lpfc_sli *psli = &phba->sli; 165 MAILBOX_t *pmbox = &pmb->mb; 166 int i, rc; 167 168 for (i = 0; i < psli->num_rings; i++) { 169 phba->hba_state = LPFC_INIT_MBX_CMDS; 170 lpfc_config_ring(phba, i, pmb); 171 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 172 if (rc != MBX_SUCCESS) { 173 lpfc_printf_log(phba, 174 KERN_ERR, 175 LOG_INIT, 176 "%d:0446 Adapter failed to init, " 177 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 178 "ring %d\n", 179 phba->brd_no, 180 pmbox->mbxCommand, 181 pmbox->mbxStatus, 182 i); 183 phba->hba_state = LPFC_HBA_ERROR; 184 return -ENXIO; 185 } 186 } 187 return 0; 188 } 189 190 static int 191 lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, 192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) 193 { 194 uint16_t iotag; 195 196 list_add_tail(&piocb->list, &pring->txcmplq); 197 pring->txcmplq_cnt++; 198 if (unlikely(pring->ringno == LPFC_ELS_RING)) 199 mod_timer(&phba->els_tmofunc, 200 jiffies + HZ * (phba->fc_ratov << 1)); 201 202 if (pring->fast_lookup) { 203 /* Setup fast lookup based on iotag for completion */ 204 iotag = piocb->iocb.ulpIoTag; 205 if (iotag && (iotag < pring->fast_iotag)) 206 *(pring->fast_lookup + iotag) = piocb; 207 else { 208 209 /* Cmd ring <ringno> put: iotag <iotag> greater then 210 configured max <fast_iotag> wd0 <icmd> */ 211 lpfc_printf_log(phba, 212 KERN_ERR, 213 LOG_SLI, 214 "%d:0316 Cmd ring %d put: iotag x%x " 215 "greater then configured max x%x " 216 "wd0 x%x\n", 217 phba->brd_no, 218 pring->ringno, iotag, 219 pring->fast_iotag, 220 *(((uint32_t *)(&piocb->iocb)) + 7)); 221 } 222 } 223 return (0); 224 } 225 226 static struct lpfc_iocbq * 227 lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 228 { 229 struct list_head *dlp; 230 struct lpfc_iocbq *cmd_iocb; 231 232 dlp = &pring->txq; 233 cmd_iocb = NULL; 234 list_remove_head((&pring->txq), cmd_iocb, 235 struct lpfc_iocbq, 236 list); 237 if (cmd_iocb) { 238 /* If the first ptr is not equal to the list header, 239 * deque the IOCBQ_t and return it. 240 */ 241 pring->txq_cnt--; 242 } 243 return (cmd_iocb); 244 } 245 246 static IOCB_t * 247 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 248 { 249 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 250 uint32_t max_cmd_idx = pring->numCiocb; 251 IOCB_t *iocb = NULL; 252 253 if ((pring->next_cmdidx == pring->cmdidx) && 254 (++pring->next_cmdidx >= max_cmd_idx)) 255 pring->next_cmdidx = 0; 256 257 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 258 259 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 260 261 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 262 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 263 "%d:0315 Ring %d issue: portCmdGet %d " 264 "is bigger then cmd ring %d\n", 265 phba->brd_no, pring->ringno, 266 pring->local_getidx, max_cmd_idx); 267 268 phba->hba_state = LPFC_HBA_ERROR; 269 /* 270 * All error attention handlers are posted to 271 * worker thread 272 */ 273 phba->work_ha |= HA_ERATT; 274 phba->work_hs = HS_FFER3; 275 if (phba->work_wait) 276 wake_up(phba->work_wait); 277 278 return NULL; 279 } 280 281 if (pring->local_getidx == pring->next_cmdidx) 282 return NULL; 283 } 284 285 iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx); 286 287 return iocb; 288 } 289 290 uint16_t 291 lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) 292 { 293 struct lpfc_iocbq ** new_arr; 294 struct lpfc_iocbq ** old_arr; 295 size_t new_len; 296 struct lpfc_sli *psli = &phba->sli; 297 uint16_t iotag; 298 299 spin_lock_irq(phba->host->host_lock); 300 iotag = psli->last_iotag; 301 if(++iotag < psli->iocbq_lookup_len) { 302 psli->last_iotag = iotag; 303 psli->iocbq_lookup[iotag] = iocbq; 304 spin_unlock_irq(phba->host->host_lock); 305 iocbq->iotag = iotag; 306 return iotag; 307 } 308 else if (psli->iocbq_lookup_len < (0xffff 309 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 310 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 311 spin_unlock_irq(phba->host->host_lock); 312 new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *), 313 GFP_KERNEL); 314 if (new_arr) { 315 memset((char *)new_arr, 0, 316 new_len * sizeof (struct lpfc_iocbq *)); 317 spin_lock_irq(phba->host->host_lock); 318 old_arr = psli->iocbq_lookup; 319 if (new_len <= psli->iocbq_lookup_len) { 320 /* highly unprobable case */ 321 kfree(new_arr); 322 iotag = psli->last_iotag; 323 if(++iotag < psli->iocbq_lookup_len) { 324 psli->last_iotag = iotag; 325 psli->iocbq_lookup[iotag] = iocbq; 326 spin_unlock_irq(phba->host->host_lock); 327 iocbq->iotag = iotag; 328 return iotag; 329 } 330 spin_unlock_irq(phba->host->host_lock); 331 return 0; 332 } 333 if (psli->iocbq_lookup) 334 memcpy(new_arr, old_arr, 335 ((psli->last_iotag + 1) * 336 sizeof (struct lpfc_iocbq *))); 337 psli->iocbq_lookup = new_arr; 338 psli->iocbq_lookup_len = new_len; 339 psli->last_iotag = iotag; 340 psli->iocbq_lookup[iotag] = iocbq; 341 spin_unlock_irq(phba->host->host_lock); 342 iocbq->iotag = iotag; 343 kfree(old_arr); 344 return iotag; 345 } 346 } 347 348 lpfc_printf_log(phba, KERN_ERR,LOG_SLI, 349 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", 350 phba->brd_no, psli->last_iotag); 351 352 return 0; 353 } 354 355 static void 356 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 357 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 358 { 359 /* 360 * Set up an iotag 361 */ 362 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 363 364 /* 365 * Issue iocb command to adapter 366 */ 367 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t)); 368 wmb(); 369 pring->stats.iocb_cmd++; 370 371 /* 372 * If there is no completion routine to call, we can release the 373 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 374 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 375 */ 376 if (nextiocb->iocb_cmpl) 377 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 378 else 379 lpfc_sli_release_iocbq(phba, nextiocb); 380 381 /* 382 * Let the HBA know what IOCB slot will be the next one the 383 * driver will put a command into. 384 */ 385 pring->cmdidx = pring->next_cmdidx; 386 writel(pring->cmdidx, phba->MBslimaddr 387 + (SLIMOFF + (pring->ringno * 2)) * 4); 388 } 389 390 static void 391 lpfc_sli_update_full_ring(struct lpfc_hba * phba, 392 struct lpfc_sli_ring *pring) 393 { 394 int ringno = pring->ringno; 395 396 pring->flag |= LPFC_CALL_RING_AVAILABLE; 397 398 wmb(); 399 400 /* 401 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 402 * The HBA will tell us when an IOCB entry is available. 403 */ 404 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 405 readl(phba->CAregaddr); /* flush */ 406 407 pring->stats.iocb_cmd_full++; 408 } 409 410 static void 411 lpfc_sli_update_ring(struct lpfc_hba * phba, 412 struct lpfc_sli_ring *pring) 413 { 414 int ringno = pring->ringno; 415 416 /* 417 * Tell the HBA that there is work to do in this ring. 418 */ 419 wmb(); 420 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 421 readl(phba->CAregaddr); /* flush */ 422 } 423 424 static void 425 lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 426 { 427 IOCB_t *iocb; 428 struct lpfc_iocbq *nextiocb; 429 430 /* 431 * Check to see if: 432 * (a) there is anything on the txq to send 433 * (b) link is up 434 * (c) link attention events can be processed (fcp ring only) 435 * (d) IOCB processing is not blocked by the outstanding mbox command. 436 */ 437 if (pring->txq_cnt && 438 (phba->hba_state > LPFC_LINK_DOWN) && 439 (pring->ringno != phba->sli.fcp_ring || 440 phba->sli.sli_flag & LPFC_PROCESS_LA) && 441 !(pring->flag & LPFC_STOP_IOCB_MBX)) { 442 443 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 444 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 445 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 446 447 if (iocb) 448 lpfc_sli_update_ring(phba, pring); 449 else 450 lpfc_sli_update_full_ring(phba, pring); 451 } 452 453 return; 454 } 455 456 /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */ 457 static void 458 lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno) 459 { 460 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno]; 461 462 /* If the ring is active, flag it */ 463 if (phba->sli.ring[ringno].cmdringaddr) { 464 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) { 465 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX; 466 /* 467 * Force update of the local copy of cmdGetInx 468 */ 469 phba->sli.ring[ringno].local_getidx 470 = le32_to_cpu(pgp->cmdGetInx); 471 spin_lock_irq(phba->host->host_lock); 472 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]); 473 spin_unlock_irq(phba->host->host_lock); 474 } 475 } 476 } 477 478 static int 479 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 480 { 481 uint8_t ret; 482 483 switch (mbxCommand) { 484 case MBX_LOAD_SM: 485 case MBX_READ_NV: 486 case MBX_WRITE_NV: 487 case MBX_RUN_BIU_DIAG: 488 case MBX_INIT_LINK: 489 case MBX_DOWN_LINK: 490 case MBX_CONFIG_LINK: 491 case MBX_CONFIG_RING: 492 case MBX_RESET_RING: 493 case MBX_READ_CONFIG: 494 case MBX_READ_RCONFIG: 495 case MBX_READ_SPARM: 496 case MBX_READ_STATUS: 497 case MBX_READ_RPI: 498 case MBX_READ_XRI: 499 case MBX_READ_REV: 500 case MBX_READ_LNK_STAT: 501 case MBX_REG_LOGIN: 502 case MBX_UNREG_LOGIN: 503 case MBX_READ_LA: 504 case MBX_CLEAR_LA: 505 case MBX_DUMP_MEMORY: 506 case MBX_DUMP_CONTEXT: 507 case MBX_RUN_DIAGS: 508 case MBX_RESTART: 509 case MBX_UPDATE_CFG: 510 case MBX_DOWN_LOAD: 511 case MBX_DEL_LD_ENTRY: 512 case MBX_RUN_PROGRAM: 513 case MBX_SET_MASK: 514 case MBX_SET_SLIM: 515 case MBX_UNREG_D_ID: 516 case MBX_KILL_BOARD: 517 case MBX_CONFIG_FARP: 518 case MBX_BEACON: 519 case MBX_LOAD_AREA: 520 case MBX_RUN_BIU_DIAG64: 521 case MBX_CONFIG_PORT: 522 case MBX_READ_SPARM64: 523 case MBX_READ_RPI64: 524 case MBX_REG_LOGIN64: 525 case MBX_READ_LA64: 526 case MBX_FLASH_WR_ULA: 527 case MBX_SET_DEBUG: 528 case MBX_LOAD_EXP_ROM: 529 ret = mbxCommand; 530 break; 531 default: 532 ret = MBX_SHUTDOWN; 533 break; 534 } 535 return (ret); 536 } 537 static void 538 lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 539 { 540 wait_queue_head_t *pdone_q; 541 542 /* 543 * If pdone_q is empty, the driver thread gave up waiting and 544 * continued running. 545 */ 546 pdone_q = (wait_queue_head_t *) pmboxq->context1; 547 if (pdone_q) 548 wake_up_interruptible(pdone_q); 549 return; 550 } 551 552 void 553 lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 554 { 555 struct lpfc_dmabuf *mp; 556 mp = (struct lpfc_dmabuf *) (pmb->context1); 557 if (mp) { 558 lpfc_mbuf_free(phba, mp->virt, mp->phys); 559 kfree(mp); 560 } 561 mempool_free( pmb, phba->mbox_mem_pool); 562 return; 563 } 564 565 int 566 lpfc_sli_handle_mb_event(struct lpfc_hba * phba) 567 { 568 MAILBOX_t *mbox; 569 MAILBOX_t *pmbox; 570 LPFC_MBOXQ_t *pmb; 571 struct lpfc_sli *psli; 572 int i, rc; 573 uint32_t process_next; 574 575 psli = &phba->sli; 576 /* We should only get here if we are in SLI2 mode */ 577 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) { 578 return (1); 579 } 580 581 phba->sli.slistat.mbox_event++; 582 583 /* Get a Mailbox buffer to setup mailbox commands for callback */ 584 if ((pmb = phba->sli.mbox_active)) { 585 pmbox = &pmb->mb; 586 mbox = &phba->slim2p->mbx; 587 588 /* First check out the status word */ 589 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t)); 590 591 /* Sanity check to ensure the host owns the mailbox */ 592 if (pmbox->mbxOwner != OWN_HOST) { 593 /* Lets try for a while */ 594 for (i = 0; i < 10240; i++) { 595 /* First copy command data */ 596 lpfc_sli_pcimem_bcopy(mbox, pmbox, 597 sizeof (uint32_t)); 598 if (pmbox->mbxOwner == OWN_HOST) 599 goto mbout; 600 } 601 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus 602 <status> */ 603 lpfc_printf_log(phba, 604 KERN_ERR, 605 LOG_MBOX | LOG_SLI, 606 "%d:0304 Stray Mailbox Interrupt " 607 "mbxCommand x%x mbxStatus x%x\n", 608 phba->brd_no, 609 pmbox->mbxCommand, 610 pmbox->mbxStatus); 611 612 spin_lock_irq(phba->host->host_lock); 613 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 614 spin_unlock_irq(phba->host->host_lock); 615 return (1); 616 } 617 618 mbout: 619 del_timer_sync(&phba->sli.mbox_tmo); 620 phba->work_hba_events &= ~WORKER_MBOX_TMO; 621 622 /* 623 * It is a fatal error if unknown mbox command completion. 624 */ 625 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 626 MBX_SHUTDOWN) { 627 628 /* Unknow mailbox command compl */ 629 lpfc_printf_log(phba, 630 KERN_ERR, 631 LOG_MBOX | LOG_SLI, 632 "%d:0323 Unknown Mailbox command %x Cmpl\n", 633 phba->brd_no, 634 pmbox->mbxCommand); 635 phba->hba_state = LPFC_HBA_ERROR; 636 phba->work_hs = HS_FFER3; 637 lpfc_handle_eratt(phba); 638 return (0); 639 } 640 641 phba->sli.mbox_active = NULL; 642 if (pmbox->mbxStatus) { 643 phba->sli.slistat.mbox_stat_err++; 644 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 645 /* Mbox cmd cmpl error - RETRYing */ 646 lpfc_printf_log(phba, 647 KERN_INFO, 648 LOG_MBOX | LOG_SLI, 649 "%d:0305 Mbox cmd cmpl error - " 650 "RETRYing Data: x%x x%x x%x x%x\n", 651 phba->brd_no, 652 pmbox->mbxCommand, 653 pmbox->mbxStatus, 654 pmbox->un.varWords[0], 655 phba->hba_state); 656 pmbox->mbxStatus = 0; 657 pmbox->mbxOwner = OWN_HOST; 658 spin_lock_irq(phba->host->host_lock); 659 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 660 spin_unlock_irq(phba->host->host_lock); 661 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 662 if (rc == MBX_SUCCESS) 663 return (0); 664 } 665 } 666 667 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 668 lpfc_printf_log(phba, 669 KERN_INFO, 670 LOG_MBOX | LOG_SLI, 671 "%d:0307 Mailbox cmd x%x Cmpl x%p " 672 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 673 phba->brd_no, 674 pmbox->mbxCommand, 675 pmb->mbox_cmpl, 676 *((uint32_t *) pmbox), 677 pmbox->un.varWords[0], 678 pmbox->un.varWords[1], 679 pmbox->un.varWords[2], 680 pmbox->un.varWords[3], 681 pmbox->un.varWords[4], 682 pmbox->un.varWords[5], 683 pmbox->un.varWords[6], 684 pmbox->un.varWords[7]); 685 686 if (pmb->mbox_cmpl) { 687 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE); 688 pmb->mbox_cmpl(phba,pmb); 689 } 690 } 691 692 693 do { 694 process_next = 0; /* by default don't loop */ 695 spin_lock_irq(phba->host->host_lock); 696 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 697 698 /* Process next mailbox command if there is one */ 699 if ((pmb = lpfc_mbox_get(phba))) { 700 spin_unlock_irq(phba->host->host_lock); 701 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 702 if (rc == MBX_NOT_FINISHED) { 703 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 704 pmb->mbox_cmpl(phba,pmb); 705 process_next = 1; 706 continue; /* loop back */ 707 } 708 } else { 709 spin_unlock_irq(phba->host->host_lock); 710 /* Turn on IOCB processing */ 711 for (i = 0; i < phba->sli.num_rings; i++) { 712 lpfc_sli_turn_on_ring(phba, i); 713 } 714 715 /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */ 716 while (!list_empty(&phba->freebufList)) { 717 struct lpfc_dmabuf *mp; 718 719 mp = NULL; 720 list_remove_head((&phba->freebufList), 721 mp, 722 struct lpfc_dmabuf, 723 list); 724 if (mp) { 725 lpfc_mbuf_free(phba, mp->virt, 726 mp->phys); 727 kfree(mp); 728 } 729 } 730 } 731 732 } while (process_next); 733 734 return (0); 735 } 736 static int 737 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 738 struct lpfc_iocbq *saveq) 739 { 740 IOCB_t * irsp; 741 WORD5 * w5p; 742 uint32_t Rctl, Type; 743 uint32_t match, i; 744 745 match = 0; 746 irsp = &(saveq->iocb); 747 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) 748 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) { 749 Rctl = FC_ELS_REQ; 750 Type = FC_ELS_DATA; 751 } else { 752 w5p = 753 (WORD5 *) & (saveq->iocb.un. 754 ulpWord[5]); 755 Rctl = w5p->hcsw.Rctl; 756 Type = w5p->hcsw.Type; 757 758 /* Firmware Workaround */ 759 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 760 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) { 761 Rctl = FC_ELS_REQ; 762 Type = FC_ELS_DATA; 763 w5p->hcsw.Rctl = Rctl; 764 w5p->hcsw.Type = Type; 765 } 766 } 767 /* unSolicited Responses */ 768 if (pring->prt[0].profile) { 769 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 770 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 771 saveq); 772 match = 1; 773 } else { 774 /* We must search, based on rctl / type 775 for the right routine */ 776 for (i = 0; i < pring->num_mask; 777 i++) { 778 if ((pring->prt[i].rctl == 779 Rctl) 780 && (pring->prt[i]. 781 type == Type)) { 782 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 783 (pring->prt[i].lpfc_sli_rcv_unsol_event) 784 (phba, pring, saveq); 785 match = 1; 786 break; 787 } 788 } 789 } 790 if (match == 0) { 791 /* Unexpected Rctl / Type received */ 792 /* Ring <ringno> handler: unexpected 793 Rctl <Rctl> Type <Type> received */ 794 lpfc_printf_log(phba, 795 KERN_WARNING, 796 LOG_SLI, 797 "%d:0313 Ring %d handler: unexpected Rctl x%x " 798 "Type x%x received \n", 799 phba->brd_no, 800 pring->ringno, 801 Rctl, 802 Type); 803 } 804 return(1); 805 } 806 807 static struct lpfc_iocbq * 808 lpfc_sli_iocbq_lookup(struct lpfc_hba * phba, 809 struct lpfc_sli_ring * pring, 810 struct lpfc_iocbq * prspiocb) 811 { 812 struct lpfc_iocbq *cmd_iocb = NULL; 813 uint16_t iotag; 814 815 iotag = prspiocb->iocb.ulpIoTag; 816 817 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 818 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 819 list_del(&cmd_iocb->list); 820 pring->txcmplq_cnt--; 821 return cmd_iocb; 822 } 823 824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 825 "%d:0317 iotag x%x is out off " 826 "range: max iotag x%x wd0 x%x\n", 827 phba->brd_no, iotag, 828 phba->sli.last_iotag, 829 *(((uint32_t *) &prspiocb->iocb) + 7)); 830 return NULL; 831 } 832 833 static int 834 lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 835 struct lpfc_iocbq *saveq) 836 { 837 struct lpfc_iocbq * cmdiocbp; 838 int rc = 1; 839 unsigned long iflag; 840 841 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 842 spin_lock_irqsave(phba->host->host_lock, iflag); 843 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 844 if (cmdiocbp) { 845 if (cmdiocbp->iocb_cmpl) { 846 /* 847 * Post all ELS completions to the worker thread. 848 * All other are passed to the completion callback. 849 */ 850 if (pring->ringno == LPFC_ELS_RING) { 851 spin_unlock_irqrestore(phba->host->host_lock, 852 iflag); 853 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 854 spin_lock_irqsave(phba->host->host_lock, iflag); 855 } 856 else { 857 spin_unlock_irqrestore(phba->host->host_lock, 858 iflag); 859 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 860 spin_lock_irqsave(phba->host->host_lock, iflag); 861 } 862 } else 863 lpfc_sli_release_iocbq(phba, cmdiocbp); 864 } else { 865 /* 866 * Unknown initiating command based on the response iotag. 867 * This could be the case on the ELS ring because of 868 * lpfc_els_abort(). 869 */ 870 if (pring->ringno != LPFC_ELS_RING) { 871 /* 872 * Ring <ringno> handler: unexpected completion IoTag 873 * <IoTag> 874 */ 875 lpfc_printf_log(phba, 876 KERN_WARNING, 877 LOG_SLI, 878 "%d:0322 Ring %d handler: unexpected " 879 "completion IoTag x%x Data: x%x x%x x%x x%x\n", 880 phba->brd_no, 881 pring->ringno, 882 saveq->iocb.ulpIoTag, 883 saveq->iocb.ulpStatus, 884 saveq->iocb.un.ulpWord[4], 885 saveq->iocb.ulpCommand, 886 saveq->iocb.ulpContext); 887 } 888 } 889 890 spin_unlock_irqrestore(phba->host->host_lock, iflag); 891 return rc; 892 } 893 894 static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba, 895 struct lpfc_sli_ring * pring) 896 { 897 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 898 /* 899 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 900 * rsp ring <portRspMax> 901 */ 902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 903 "%d:0312 Ring %d handler: portRspPut %d " 904 "is bigger then rsp ring %d\n", 905 phba->brd_no, pring->ringno, 906 le32_to_cpu(pgp->rspPutInx), 907 pring->numRiocb); 908 909 phba->hba_state = LPFC_HBA_ERROR; 910 911 /* 912 * All error attention handlers are posted to 913 * worker thread 914 */ 915 phba->work_ha |= HA_ERATT; 916 phba->work_hs = HS_FFER3; 917 if (phba->work_wait) 918 wake_up(phba->work_wait); 919 920 return; 921 } 922 923 void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) 924 { 925 struct lpfc_sli * psli = &phba->sli; 926 struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING]; 927 IOCB_t *irsp = NULL; 928 IOCB_t *entry = NULL; 929 struct lpfc_iocbq *cmdiocbq = NULL; 930 struct lpfc_iocbq rspiocbq; 931 struct lpfc_pgp *pgp; 932 uint32_t status; 933 uint32_t portRspPut, portRspMax; 934 int type; 935 uint32_t rsp_cmpl = 0; 936 void __iomem *to_slim; 937 uint32_t ha_copy; 938 939 pring->stats.iocb_event++; 940 941 /* The driver assumes SLI-2 mode */ 942 pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 943 944 /* 945 * The next available response entry should never exceed the maximum 946 * entries. If it does, treat it as an adapter hardware error. 947 */ 948 portRspMax = pring->numRiocb; 949 portRspPut = le32_to_cpu(pgp->rspPutInx); 950 if (unlikely(portRspPut >= portRspMax)) { 951 lpfc_sli_rsp_pointers_error(phba, pring); 952 return; 953 } 954 955 rmb(); 956 while (pring->rspidx != portRspPut) { 957 958 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 959 960 if (++pring->rspidx >= portRspMax) 961 pring->rspidx = 0; 962 963 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 964 (uint32_t *) &rspiocbq.iocb, 965 sizeof (IOCB_t)); 966 irsp = &rspiocbq.iocb; 967 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 968 pring->stats.iocb_rsp++; 969 rsp_cmpl++; 970 971 if (unlikely(irsp->ulpStatus)) { 972 /* Rsp ring <ringno> error: IOCB */ 973 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 974 "%d:0326 Rsp Ring %d error: IOCB Data: " 975 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 976 phba->brd_no, pring->ringno, 977 irsp->un.ulpWord[0], 978 irsp->un.ulpWord[1], 979 irsp->un.ulpWord[2], 980 irsp->un.ulpWord[3], 981 irsp->un.ulpWord[4], 982 irsp->un.ulpWord[5], 983 *(((uint32_t *) irsp) + 6), 984 *(((uint32_t *) irsp) + 7)); 985 } 986 987 switch (type) { 988 case LPFC_ABORT_IOCB: 989 case LPFC_SOL_IOCB: 990 /* 991 * Idle exchange closed via ABTS from port. No iocb 992 * resources need to be recovered. 993 */ 994 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 995 printk(KERN_INFO "%s: IOCB cmd 0x%x processed." 996 " Skipping completion\n", __FUNCTION__, 997 irsp->ulpCommand); 998 break; 999 } 1000 1001 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1002 &rspiocbq); 1003 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1004 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1005 &rspiocbq); 1006 } 1007 break; 1008 default: 1009 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1010 char adaptermsg[LPFC_MAX_ADPTMSG]; 1011 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 1012 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1013 MAX_MSG_DATA); 1014 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s", 1015 phba->brd_no, adaptermsg); 1016 } else { 1017 /* Unknown IOCB command */ 1018 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1019 "%d:0321 Unknown IOCB command " 1020 "Data: x%x, x%x x%x x%x x%x\n", 1021 phba->brd_no, type, 1022 irsp->ulpCommand, 1023 irsp->ulpStatus, 1024 irsp->ulpIoTag, 1025 irsp->ulpContext); 1026 } 1027 break; 1028 } 1029 1030 /* 1031 * The response IOCB has been processed. Update the ring 1032 * pointer in SLIM. If the port response put pointer has not 1033 * been updated, sync the pgp->rspPutInx and fetch the new port 1034 * response put pointer. 1035 */ 1036 to_slim = phba->MBslimaddr + 1037 (SLIMOFF + (pring->ringno * 2) + 1) * 4; 1038 writeb(pring->rspidx, to_slim); 1039 1040 if (pring->rspidx == portRspPut) 1041 portRspPut = le32_to_cpu(pgp->rspPutInx); 1042 } 1043 1044 ha_copy = readl(phba->HAregaddr); 1045 ha_copy >>= (LPFC_FCP_RING * 4); 1046 1047 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { 1048 pring->stats.iocb_rsp_full++; 1049 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); 1050 writel(status, phba->CAregaddr); 1051 readl(phba->CAregaddr); 1052 } 1053 if ((ha_copy & HA_R0CE_RSP) && 1054 (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1055 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1056 pring->stats.iocb_cmd_empty++; 1057 1058 /* Force update of the local copy of cmdGetInx */ 1059 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1060 lpfc_sli_resume_iocb(phba, pring); 1061 1062 if ((pring->lpfc_sli_cmd_available)) 1063 (pring->lpfc_sli_cmd_available) (phba, pring); 1064 1065 } 1066 1067 return; 1068 } 1069 1070 /* 1071 * This routine presumes LPFC_FCP_RING handling and doesn't bother 1072 * to check it explicitly. 1073 */ 1074 static int 1075 lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, 1076 struct lpfc_sli_ring * pring, uint32_t mask) 1077 { 1078 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1079 IOCB_t *irsp = NULL; 1080 IOCB_t *entry = NULL; 1081 struct lpfc_iocbq *cmdiocbq = NULL; 1082 struct lpfc_iocbq rspiocbq; 1083 uint32_t status; 1084 uint32_t portRspPut, portRspMax; 1085 int rc = 1; 1086 lpfc_iocb_type type; 1087 unsigned long iflag; 1088 uint32_t rsp_cmpl = 0; 1089 void __iomem *to_slim; 1090 1091 spin_lock_irqsave(phba->host->host_lock, iflag); 1092 pring->stats.iocb_event++; 1093 1094 /* 1095 * The next available response entry should never exceed the maximum 1096 * entries. If it does, treat it as an adapter hardware error. 1097 */ 1098 portRspMax = pring->numRiocb; 1099 portRspPut = le32_to_cpu(pgp->rspPutInx); 1100 if (unlikely(portRspPut >= portRspMax)) { 1101 lpfc_sli_rsp_pointers_error(phba, pring); 1102 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1103 return 1; 1104 } 1105 1106 rmb(); 1107 while (pring->rspidx != portRspPut) { 1108 /* 1109 * Fetch an entry off the ring and copy it into a local data 1110 * structure. The copy involves a byte-swap since the 1111 * network byte order and pci byte orders are different. 1112 */ 1113 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1114 1115 if (++pring->rspidx >= portRspMax) 1116 pring->rspidx = 0; 1117 1118 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1119 (uint32_t *) &rspiocbq.iocb, 1120 sizeof (IOCB_t)); 1121 irsp = &rspiocbq.iocb; 1122 1123 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1124 pring->stats.iocb_rsp++; 1125 rsp_cmpl++; 1126 1127 if (unlikely(irsp->ulpStatus)) { 1128 /* Rsp ring <ringno> error: IOCB */ 1129 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1130 "%d:0326 Rsp Ring %d error: IOCB Data: " 1131 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1132 phba->brd_no, pring->ringno, 1133 irsp->un.ulpWord[0], irsp->un.ulpWord[1], 1134 irsp->un.ulpWord[2], irsp->un.ulpWord[3], 1135 irsp->un.ulpWord[4], irsp->un.ulpWord[5], 1136 *(((uint32_t *) irsp) + 6), 1137 *(((uint32_t *) irsp) + 7)); 1138 } 1139 1140 switch (type) { 1141 case LPFC_ABORT_IOCB: 1142 case LPFC_SOL_IOCB: 1143 /* 1144 * Idle exchange closed via ABTS from port. No iocb 1145 * resources need to be recovered. 1146 */ 1147 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 1148 printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " 1149 "Skipping completion\n", __FUNCTION__, 1150 irsp->ulpCommand); 1151 break; 1152 } 1153 1154 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1155 &rspiocbq); 1156 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1157 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1158 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1159 &rspiocbq); 1160 } else { 1161 spin_unlock_irqrestore( 1162 phba->host->host_lock, iflag); 1163 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1164 &rspiocbq); 1165 spin_lock_irqsave(phba->host->host_lock, 1166 iflag); 1167 } 1168 } 1169 break; 1170 default: 1171 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1172 char adaptermsg[LPFC_MAX_ADPTMSG]; 1173 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 1174 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1175 MAX_MSG_DATA); 1176 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s", 1177 phba->brd_no, adaptermsg); 1178 } else { 1179 /* Unknown IOCB command */ 1180 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1181 "%d:0321 Unknown IOCB command " 1182 "Data: x%x, x%x x%x x%x x%x\n", 1183 phba->brd_no, type, irsp->ulpCommand, 1184 irsp->ulpStatus, irsp->ulpIoTag, 1185 irsp->ulpContext); 1186 } 1187 break; 1188 } 1189 1190 /* 1191 * The response IOCB has been processed. Update the ring 1192 * pointer in SLIM. If the port response put pointer has not 1193 * been updated, sync the pgp->rspPutInx and fetch the new port 1194 * response put pointer. 1195 */ 1196 to_slim = phba->MBslimaddr + 1197 (SLIMOFF + (pring->ringno * 2) + 1) * 4; 1198 writel(pring->rspidx, to_slim); 1199 1200 if (pring->rspidx == portRspPut) 1201 portRspPut = le32_to_cpu(pgp->rspPutInx); 1202 } 1203 1204 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 1205 pring->stats.iocb_rsp_full++; 1206 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 1207 writel(status, phba->CAregaddr); 1208 readl(phba->CAregaddr); 1209 } 1210 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1211 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1212 pring->stats.iocb_cmd_empty++; 1213 1214 /* Force update of the local copy of cmdGetInx */ 1215 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1216 lpfc_sli_resume_iocb(phba, pring); 1217 1218 if ((pring->lpfc_sli_cmd_available)) 1219 (pring->lpfc_sli_cmd_available) (phba, pring); 1220 1221 } 1222 1223 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1224 return rc; 1225 } 1226 1227 1228 int 1229 lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, 1230 struct lpfc_sli_ring * pring, uint32_t mask) 1231 { 1232 IOCB_t *entry; 1233 IOCB_t *irsp = NULL; 1234 struct lpfc_iocbq *rspiocbp = NULL; 1235 struct lpfc_iocbq *next_iocb; 1236 struct lpfc_iocbq *cmdiocbp; 1237 struct lpfc_iocbq *saveq; 1238 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1239 uint8_t iocb_cmd_type; 1240 lpfc_iocb_type type; 1241 uint32_t status, free_saveq; 1242 uint32_t portRspPut, portRspMax; 1243 int rc = 1; 1244 unsigned long iflag; 1245 void __iomem *to_slim; 1246 1247 spin_lock_irqsave(phba->host->host_lock, iflag); 1248 pring->stats.iocb_event++; 1249 1250 /* 1251 * The next available response entry should never exceed the maximum 1252 * entries. If it does, treat it as an adapter hardware error. 1253 */ 1254 portRspMax = pring->numRiocb; 1255 portRspPut = le32_to_cpu(pgp->rspPutInx); 1256 if (portRspPut >= portRspMax) { 1257 /* 1258 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 1259 * rsp ring <portRspMax> 1260 */ 1261 lpfc_printf_log(phba, 1262 KERN_ERR, 1263 LOG_SLI, 1264 "%d:0312 Ring %d handler: portRspPut %d " 1265 "is bigger then rsp ring %d\n", 1266 phba->brd_no, 1267 pring->ringno, portRspPut, portRspMax); 1268 1269 phba->hba_state = LPFC_HBA_ERROR; 1270 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1271 1272 phba->work_hs = HS_FFER3; 1273 lpfc_handle_eratt(phba); 1274 1275 return 1; 1276 } 1277 1278 rmb(); 1279 while (pring->rspidx != portRspPut) { 1280 /* 1281 * Build a completion list and call the appropriate handler. 1282 * The process is to get the next available response iocb, get 1283 * a free iocb from the list, copy the response data into the 1284 * free iocb, insert to the continuation list, and update the 1285 * next response index to slim. This process makes response 1286 * iocb's in the ring available to DMA as fast as possible but 1287 * pays a penalty for a copy operation. Since the iocb is 1288 * only 32 bytes, this penalty is considered small relative to 1289 * the PCI reads for register values and a slim write. When 1290 * the ulpLe field is set, the entire Command has been 1291 * received. 1292 */ 1293 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1294 rspiocbp = lpfc_sli_get_iocbq(phba); 1295 if (rspiocbp == NULL) { 1296 printk(KERN_ERR "%s: out of buffers! Failing " 1297 "completion.\n", __FUNCTION__); 1298 break; 1299 } 1300 1301 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t)); 1302 irsp = &rspiocbp->iocb; 1303 1304 if (++pring->rspidx >= portRspMax) 1305 pring->rspidx = 0; 1306 1307 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2) 1308 + 1) * 4; 1309 writel(pring->rspidx, to_slim); 1310 1311 if (list_empty(&(pring->iocb_continueq))) { 1312 list_add(&rspiocbp->list, &(pring->iocb_continueq)); 1313 } else { 1314 list_add_tail(&rspiocbp->list, 1315 &(pring->iocb_continueq)); 1316 } 1317 1318 pring->iocb_continueq_cnt++; 1319 if (irsp->ulpLe) { 1320 /* 1321 * By default, the driver expects to free all resources 1322 * associated with this iocb completion. 1323 */ 1324 free_saveq = 1; 1325 saveq = list_get_first(&pring->iocb_continueq, 1326 struct lpfc_iocbq, list); 1327 irsp = &(saveq->iocb); 1328 list_del_init(&pring->iocb_continueq); 1329 pring->iocb_continueq_cnt = 0; 1330 1331 pring->stats.iocb_rsp++; 1332 1333 if (irsp->ulpStatus) { 1334 /* Rsp ring <ringno> error: IOCB */ 1335 lpfc_printf_log(phba, 1336 KERN_WARNING, 1337 LOG_SLI, 1338 "%d:0328 Rsp Ring %d error: IOCB Data: " 1339 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1340 phba->brd_no, 1341 pring->ringno, 1342 irsp->un.ulpWord[0], 1343 irsp->un.ulpWord[1], 1344 irsp->un.ulpWord[2], 1345 irsp->un.ulpWord[3], 1346 irsp->un.ulpWord[4], 1347 irsp->un.ulpWord[5], 1348 *(((uint32_t *) irsp) + 6), 1349 *(((uint32_t *) irsp) + 7)); 1350 } 1351 1352 /* 1353 * Fetch the IOCB command type and call the correct 1354 * completion routine. Solicited and Unsolicited 1355 * IOCBs on the ELS ring get freed back to the 1356 * lpfc_iocb_list by the discovery kernel thread. 1357 */ 1358 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 1359 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 1360 if (type == LPFC_SOL_IOCB) { 1361 spin_unlock_irqrestore(phba->host->host_lock, 1362 iflag); 1363 rc = lpfc_sli_process_sol_iocb(phba, pring, 1364 saveq); 1365 spin_lock_irqsave(phba->host->host_lock, iflag); 1366 } else if (type == LPFC_UNSOL_IOCB) { 1367 spin_unlock_irqrestore(phba->host->host_lock, 1368 iflag); 1369 rc = lpfc_sli_process_unsol_iocb(phba, pring, 1370 saveq); 1371 spin_lock_irqsave(phba->host->host_lock, iflag); 1372 } else if (type == LPFC_ABORT_IOCB) { 1373 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && 1374 ((cmdiocbp = 1375 lpfc_sli_iocbq_lookup(phba, pring, 1376 saveq)))) { 1377 /* Call the specified completion 1378 routine */ 1379 if (cmdiocbp->iocb_cmpl) { 1380 spin_unlock_irqrestore( 1381 phba->host->host_lock, 1382 iflag); 1383 (cmdiocbp->iocb_cmpl) (phba, 1384 cmdiocbp, saveq); 1385 spin_lock_irqsave( 1386 phba->host->host_lock, 1387 iflag); 1388 } else 1389 lpfc_sli_release_iocbq(phba, 1390 cmdiocbp); 1391 } 1392 } else if (type == LPFC_UNKNOWN_IOCB) { 1393 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1394 1395 char adaptermsg[LPFC_MAX_ADPTMSG]; 1396 1397 memset(adaptermsg, 0, 1398 LPFC_MAX_ADPTMSG); 1399 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1400 MAX_MSG_DATA); 1401 dev_warn(&((phba->pcidev)->dev), 1402 "lpfc%d: %s", 1403 phba->brd_no, adaptermsg); 1404 } else { 1405 /* Unknown IOCB command */ 1406 lpfc_printf_log(phba, 1407 KERN_ERR, 1408 LOG_SLI, 1409 "%d:0321 Unknown IOCB command " 1410 "Data: x%x x%x x%x x%x\n", 1411 phba->brd_no, 1412 irsp->ulpCommand, 1413 irsp->ulpStatus, 1414 irsp->ulpIoTag, 1415 irsp->ulpContext); 1416 } 1417 } 1418 1419 if (free_saveq) { 1420 if (!list_empty(&saveq->list)) { 1421 list_for_each_entry_safe(rspiocbp, 1422 next_iocb, 1423 &saveq->list, 1424 list) { 1425 lpfc_sli_release_iocbq(phba, 1426 rspiocbp); 1427 } 1428 } 1429 1430 lpfc_sli_release_iocbq(phba, saveq); 1431 } 1432 } 1433 1434 /* 1435 * If the port response put pointer has not been updated, sync 1436 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 1437 * response put pointer. 1438 */ 1439 if (pring->rspidx == portRspPut) { 1440 portRspPut = le32_to_cpu(pgp->rspPutInx); 1441 } 1442 } /* while (pring->rspidx != portRspPut) */ 1443 1444 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) { 1445 /* At least one response entry has been freed */ 1446 pring->stats.iocb_rsp_full++; 1447 /* SET RxRE_RSP in Chip Att register */ 1448 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 1449 writel(status, phba->CAregaddr); 1450 readl(phba->CAregaddr); /* flush */ 1451 } 1452 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1453 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1454 pring->stats.iocb_cmd_empty++; 1455 1456 /* Force update of the local copy of cmdGetInx */ 1457 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1458 lpfc_sli_resume_iocb(phba, pring); 1459 1460 if ((pring->lpfc_sli_cmd_available)) 1461 (pring->lpfc_sli_cmd_available) (phba, pring); 1462 1463 } 1464 1465 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1466 return rc; 1467 } 1468 1469 int 1470 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1471 { 1472 struct lpfc_iocbq *iocb, *next_iocb; 1473 IOCB_t *icmd = NULL, *cmd = NULL; 1474 int errcnt; 1475 1476 errcnt = 0; 1477 1478 /* Error everything on txq and txcmplq 1479 * First do the txq. 1480 */ 1481 spin_lock_irq(phba->host->host_lock); 1482 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 1483 list_del_init(&iocb->list); 1484 if (iocb->iocb_cmpl) { 1485 icmd = &iocb->iocb; 1486 icmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1487 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1488 spin_unlock_irq(phba->host->host_lock); 1489 (iocb->iocb_cmpl) (phba, iocb, iocb); 1490 spin_lock_irq(phba->host->host_lock); 1491 } else 1492 lpfc_sli_release_iocbq(phba, iocb); 1493 } 1494 pring->txq_cnt = 0; 1495 INIT_LIST_HEAD(&(pring->txq)); 1496 1497 /* Next issue ABTS for everything on the txcmplq */ 1498 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1499 cmd = &iocb->iocb; 1500 1501 /* 1502 * Imediate abort of IOCB, deque and call compl 1503 */ 1504 1505 list_del_init(&iocb->list); 1506 pring->txcmplq_cnt--; 1507 1508 if (iocb->iocb_cmpl) { 1509 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1510 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1511 spin_unlock_irq(phba->host->host_lock); 1512 (iocb->iocb_cmpl) (phba, iocb, iocb); 1513 spin_lock_irq(phba->host->host_lock); 1514 } else 1515 lpfc_sli_release_iocbq(phba, iocb); 1516 } 1517 1518 INIT_LIST_HEAD(&pring->txcmplq); 1519 pring->txcmplq_cnt = 0; 1520 spin_unlock_irq(phba->host->host_lock); 1521 1522 return errcnt; 1523 } 1524 1525 int 1526 lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) 1527 { 1528 uint32_t status; 1529 int i = 0; 1530 int retval = 0; 1531 1532 /* Read the HBA Host Status Register */ 1533 status = readl(phba->HSregaddr); 1534 1535 /* 1536 * Check status register every 100ms for 5 retries, then every 1537 * 500ms for 5, then every 2.5 sec for 5, then reset board and 1538 * every 2.5 sec for 4. 1539 * Break our of the loop if errors occurred during init. 1540 */ 1541 while (((status & mask) != mask) && 1542 !(status & HS_FFERM) && 1543 i++ < 20) { 1544 1545 if (i <= 5) 1546 msleep(10); 1547 else if (i <= 10) 1548 msleep(500); 1549 else 1550 msleep(2500); 1551 1552 if (i == 15) { 1553 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ 1554 lpfc_sli_brdrestart(phba); 1555 } 1556 /* Read the HBA Host Status Register */ 1557 status = readl(phba->HSregaddr); 1558 } 1559 1560 /* Check to see if any errors occurred during init */ 1561 if ((status & HS_FFERM) || (i >= 20)) { 1562 phba->hba_state = LPFC_HBA_ERROR; 1563 retval = 1; 1564 } 1565 1566 return retval; 1567 } 1568 1569 #define BARRIER_TEST_PATTERN (0xdeadbeef) 1570 1571 void lpfc_reset_barrier(struct lpfc_hba * phba) 1572 { 1573 uint32_t * resp_buf; 1574 uint32_t * mbox_buf; 1575 volatile uint32_t mbox; 1576 uint32_t hc_copy; 1577 int i; 1578 uint8_t hdrtype; 1579 1580 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 1581 if (hdrtype != 0x80 || 1582 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 1583 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 1584 return; 1585 1586 /* 1587 * Tell the other part of the chip to suspend temporarily all 1588 * its DMA activity. 1589 */ 1590 resp_buf = (uint32_t *)phba->MBslimaddr; 1591 1592 /* Disable the error attention */ 1593 hc_copy = readl(phba->HCregaddr); 1594 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 1595 readl(phba->HCregaddr); /* flush */ 1596 1597 if (readl(phba->HAregaddr) & HA_ERATT) { 1598 /* Clear Chip error bit */ 1599 writel(HA_ERATT, phba->HAregaddr); 1600 phba->stopped = 1; 1601 } 1602 1603 mbox = 0; 1604 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 1605 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 1606 1607 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 1608 mbox_buf = (uint32_t *)phba->MBslimaddr; 1609 writel(mbox, mbox_buf); 1610 1611 for (i = 0; 1612 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 1613 mdelay(1); 1614 1615 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 1616 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 1617 phba->stopped) 1618 goto restore_hc; 1619 else 1620 goto clear_errat; 1621 } 1622 1623 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 1624 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 1625 mdelay(1); 1626 1627 clear_errat: 1628 1629 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 1630 mdelay(1); 1631 1632 if (readl(phba->HAregaddr) & HA_ERATT) { 1633 writel(HA_ERATT, phba->HAregaddr); 1634 phba->stopped = 1; 1635 } 1636 1637 restore_hc: 1638 writel(hc_copy, phba->HCregaddr); 1639 readl(phba->HCregaddr); /* flush */ 1640 } 1641 1642 int 1643 lpfc_sli_brdkill(struct lpfc_hba * phba) 1644 { 1645 struct lpfc_sli *psli; 1646 LPFC_MBOXQ_t *pmb; 1647 uint32_t status; 1648 uint32_t ha_copy; 1649 int retval; 1650 int i = 0; 1651 1652 psli = &phba->sli; 1653 1654 /* Kill HBA */ 1655 lpfc_printf_log(phba, 1656 KERN_INFO, 1657 LOG_SLI, 1658 "%d:0329 Kill HBA Data: x%x x%x\n", 1659 phba->brd_no, 1660 phba->hba_state, 1661 psli->sli_flag); 1662 1663 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1664 GFP_KERNEL)) == 0) 1665 return 1; 1666 1667 /* Disable the error attention */ 1668 spin_lock_irq(phba->host->host_lock); 1669 status = readl(phba->HCregaddr); 1670 status &= ~HC_ERINT_ENA; 1671 writel(status, phba->HCregaddr); 1672 readl(phba->HCregaddr); /* flush */ 1673 spin_unlock_irq(phba->host->host_lock); 1674 1675 lpfc_kill_board(phba, pmb); 1676 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1677 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1678 1679 if (retval != MBX_SUCCESS) { 1680 if (retval != MBX_BUSY) 1681 mempool_free(pmb, phba->mbox_mem_pool); 1682 return 1; 1683 } 1684 1685 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1686 1687 mempool_free(pmb, phba->mbox_mem_pool); 1688 1689 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 1690 * attention every 100ms for 3 seconds. If we don't get ERATT after 1691 * 3 seconds we still set HBA_ERROR state because the status of the 1692 * board is now undefined. 1693 */ 1694 ha_copy = readl(phba->HAregaddr); 1695 1696 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 1697 mdelay(100); 1698 ha_copy = readl(phba->HAregaddr); 1699 } 1700 1701 del_timer_sync(&psli->mbox_tmo); 1702 if (ha_copy & HA_ERATT) { 1703 writel(HA_ERATT, phba->HAregaddr); 1704 phba->stopped = 1; 1705 } 1706 spin_lock_irq(phba->host->host_lock); 1707 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1708 spin_unlock_irq(phba->host->host_lock); 1709 1710 psli->mbox_active = NULL; 1711 lpfc_hba_down_post(phba); 1712 phba->hba_state = LPFC_HBA_ERROR; 1713 1714 return (ha_copy & HA_ERATT ? 0 : 1); 1715 } 1716 1717 int 1718 lpfc_sli_brdreset(struct lpfc_hba * phba) 1719 { 1720 struct lpfc_sli *psli; 1721 struct lpfc_sli_ring *pring; 1722 uint16_t cfg_value; 1723 int i; 1724 1725 psli = &phba->sli; 1726 1727 /* Reset HBA */ 1728 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1729 "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no, 1730 phba->hba_state, psli->sli_flag); 1731 1732 /* perform board reset */ 1733 phba->fc_eventTag = 0; 1734 phba->fc_myDID = 0; 1735 phba->fc_prevDID = 0; 1736 1737 psli->sli_flag = 0; 1738 1739 /* Turn off parity checking and serr during the physical reset */ 1740 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 1741 pci_write_config_word(phba->pcidev, PCI_COMMAND, 1742 (cfg_value & 1743 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 1744 1745 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1746 /* Now toggle INITFF bit in the Host Control Register */ 1747 writel(HC_INITFF, phba->HCregaddr); 1748 mdelay(1); 1749 readl(phba->HCregaddr); /* flush */ 1750 writel(0, phba->HCregaddr); 1751 readl(phba->HCregaddr); /* flush */ 1752 1753 /* Restore PCI cmd register */ 1754 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 1755 1756 /* Initialize relevant SLI info */ 1757 for (i = 0; i < psli->num_rings; i++) { 1758 pring = &psli->ring[i]; 1759 pring->flag = 0; 1760 pring->rspidx = 0; 1761 pring->next_cmdidx = 0; 1762 pring->local_getidx = 0; 1763 pring->cmdidx = 0; 1764 pring->missbufcnt = 0; 1765 } 1766 1767 phba->hba_state = LPFC_WARM_START; 1768 return 0; 1769 } 1770 1771 int 1772 lpfc_sli_brdrestart(struct lpfc_hba * phba) 1773 { 1774 MAILBOX_t *mb; 1775 struct lpfc_sli *psli; 1776 uint16_t skip_post; 1777 volatile uint32_t word0; 1778 void __iomem *to_slim; 1779 1780 spin_lock_irq(phba->host->host_lock); 1781 1782 psli = &phba->sli; 1783 1784 /* Restart HBA */ 1785 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1786 "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, 1787 phba->hba_state, psli->sli_flag); 1788 1789 word0 = 0; 1790 mb = (MAILBOX_t *) &word0; 1791 mb->mbxCommand = MBX_RESTART; 1792 mb->mbxHc = 1; 1793 1794 lpfc_reset_barrier(phba); 1795 1796 to_slim = phba->MBslimaddr; 1797 writel(*(uint32_t *) mb, to_slim); 1798 readl(to_slim); /* flush */ 1799 1800 /* Only skip post after fc_ffinit is completed */ 1801 if (phba->hba_state) { 1802 skip_post = 1; 1803 word0 = 1; /* This is really setting up word1 */ 1804 } else { 1805 skip_post = 0; 1806 word0 = 0; /* This is really setting up word1 */ 1807 } 1808 to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t); 1809 writel(*(uint32_t *) mb, to_slim); 1810 readl(to_slim); /* flush */ 1811 1812 lpfc_sli_brdreset(phba); 1813 phba->stopped = 0; 1814 phba->hba_state = LPFC_INIT_START; 1815 1816 spin_unlock_irq(phba->host->host_lock); 1817 1818 if (skip_post) 1819 mdelay(100); 1820 else 1821 mdelay(2000); 1822 1823 lpfc_hba_down_post(phba); 1824 1825 return 0; 1826 } 1827 1828 static int 1829 lpfc_sli_chipset_init(struct lpfc_hba *phba) 1830 { 1831 uint32_t status, i = 0; 1832 1833 /* Read the HBA Host Status Register */ 1834 status = readl(phba->HSregaddr); 1835 1836 /* Check status register to see what current state is */ 1837 i = 0; 1838 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 1839 1840 /* Check every 100ms for 5 retries, then every 500ms for 5, then 1841 * every 2.5 sec for 5, then reset board and every 2.5 sec for 1842 * 4. 1843 */ 1844 if (i++ >= 20) { 1845 /* Adapter failed to init, timeout, status reg 1846 <status> */ 1847 lpfc_printf_log(phba, 1848 KERN_ERR, 1849 LOG_INIT, 1850 "%d:0436 Adapter failed to init, " 1851 "timeout, status reg x%x\n", 1852 phba->brd_no, 1853 status); 1854 phba->hba_state = LPFC_HBA_ERROR; 1855 return -ETIMEDOUT; 1856 } 1857 1858 /* Check to see if any errors occurred during init */ 1859 if (status & HS_FFERM) { 1860 /* ERROR: During chipset initialization */ 1861 /* Adapter failed to init, chipset, status reg 1862 <status> */ 1863 lpfc_printf_log(phba, 1864 KERN_ERR, 1865 LOG_INIT, 1866 "%d:0437 Adapter failed to init, " 1867 "chipset, status reg x%x\n", 1868 phba->brd_no, 1869 status); 1870 phba->hba_state = LPFC_HBA_ERROR; 1871 return -EIO; 1872 } 1873 1874 if (i <= 5) { 1875 msleep(10); 1876 } else if (i <= 10) { 1877 msleep(500); 1878 } else { 1879 msleep(2500); 1880 } 1881 1882 if (i == 15) { 1883 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ 1884 lpfc_sli_brdrestart(phba); 1885 } 1886 /* Read the HBA Host Status Register */ 1887 status = readl(phba->HSregaddr); 1888 } 1889 1890 /* Check to see if any errors occurred during init */ 1891 if (status & HS_FFERM) { 1892 /* ERROR: During chipset initialization */ 1893 /* Adapter failed to init, chipset, status reg <status> */ 1894 lpfc_printf_log(phba, 1895 KERN_ERR, 1896 LOG_INIT, 1897 "%d:0438 Adapter failed to init, chipset, " 1898 "status reg x%x\n", 1899 phba->brd_no, 1900 status); 1901 phba->hba_state = LPFC_HBA_ERROR; 1902 return -EIO; 1903 } 1904 1905 /* Clear all interrupt enable conditions */ 1906 writel(0, phba->HCregaddr); 1907 readl(phba->HCregaddr); /* flush */ 1908 1909 /* setup host attn register */ 1910 writel(0xffffffff, phba->HAregaddr); 1911 readl(phba->HAregaddr); /* flush */ 1912 return 0; 1913 } 1914 1915 int 1916 lpfc_sli_hba_setup(struct lpfc_hba * phba) 1917 { 1918 LPFC_MBOXQ_t *pmb; 1919 uint32_t resetcount = 0, rc = 0, done = 0; 1920 1921 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1922 if (!pmb) { 1923 phba->hba_state = LPFC_HBA_ERROR; 1924 return -ENOMEM; 1925 } 1926 1927 while (resetcount < 2 && !done) { 1928 phba->hba_state = LPFC_STATE_UNKNOWN; 1929 lpfc_sli_brdrestart(phba); 1930 msleep(2500); 1931 rc = lpfc_sli_chipset_init(phba); 1932 if (rc) 1933 break; 1934 1935 resetcount++; 1936 1937 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 1938 * means the call was successful. Any other nonzero value is a failure, 1939 * but if ERESTART is returned, the driver may reset the HBA and try 1940 * again. 1941 */ 1942 rc = lpfc_config_port_prep(phba); 1943 if (rc == -ERESTART) { 1944 phba->hba_state = 0; 1945 continue; 1946 } else if (rc) { 1947 break; 1948 } 1949 1950 phba->hba_state = LPFC_INIT_MBX_CMDS; 1951 lpfc_config_port(phba, pmb); 1952 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1953 if (rc == MBX_SUCCESS) 1954 done = 1; 1955 else { 1956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1957 "%d:0442 Adapter failed to init, mbxCmd x%x " 1958 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 1959 phba->brd_no, pmb->mb.mbxCommand, 1960 pmb->mb.mbxStatus, 0); 1961 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 1962 } 1963 } 1964 if (!done) 1965 goto lpfc_sli_hba_setup_error; 1966 1967 rc = lpfc_sli_ring_map(phba, pmb); 1968 1969 if (rc) 1970 goto lpfc_sli_hba_setup_error; 1971 1972 phba->sli.sli_flag |= LPFC_PROCESS_LA; 1973 1974 rc = lpfc_config_port_post(phba); 1975 if (rc) 1976 goto lpfc_sli_hba_setup_error; 1977 1978 goto lpfc_sli_hba_setup_exit; 1979 lpfc_sli_hba_setup_error: 1980 phba->hba_state = LPFC_HBA_ERROR; 1981 lpfc_sli_hba_setup_exit: 1982 mempool_free(pmb, phba->mbox_mem_pool); 1983 return rc; 1984 } 1985 1986 static void 1987 lpfc_mbox_abort(struct lpfc_hba * phba) 1988 { 1989 LPFC_MBOXQ_t *pmbox; 1990 MAILBOX_t *mb; 1991 1992 if (phba->sli.mbox_active) { 1993 del_timer_sync(&phba->sli.mbox_tmo); 1994 phba->work_hba_events &= ~WORKER_MBOX_TMO; 1995 pmbox = phba->sli.mbox_active; 1996 mb = &pmbox->mb; 1997 phba->sli.mbox_active = NULL; 1998 if (pmbox->mbox_cmpl) { 1999 mb->mbxStatus = MBX_NOT_FINISHED; 2000 (pmbox->mbox_cmpl) (phba, pmbox); 2001 } 2002 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2003 } 2004 2005 /* Abort all the non active mailbox commands. */ 2006 spin_lock_irq(phba->host->host_lock); 2007 pmbox = lpfc_mbox_get(phba); 2008 while (pmbox) { 2009 mb = &pmbox->mb; 2010 if (pmbox->mbox_cmpl) { 2011 mb->mbxStatus = MBX_NOT_FINISHED; 2012 spin_unlock_irq(phba->host->host_lock); 2013 (pmbox->mbox_cmpl) (phba, pmbox); 2014 spin_lock_irq(phba->host->host_lock); 2015 } 2016 pmbox = lpfc_mbox_get(phba); 2017 } 2018 spin_unlock_irq(phba->host->host_lock); 2019 return; 2020 } 2021 2022 /*! lpfc_mbox_timeout 2023 * 2024 * \pre 2025 * \post 2026 * \param hba Pointer to per struct lpfc_hba structure 2027 * \param l1 Pointer to the driver's mailbox queue. 2028 * \return 2029 * void 2030 * 2031 * \b Description: 2032 * 2033 * This routine handles mailbox timeout events at timer interrupt context. 2034 */ 2035 void 2036 lpfc_mbox_timeout(unsigned long ptr) 2037 { 2038 struct lpfc_hba *phba; 2039 unsigned long iflag; 2040 2041 phba = (struct lpfc_hba *)ptr; 2042 spin_lock_irqsave(phba->host->host_lock, iflag); 2043 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { 2044 phba->work_hba_events |= WORKER_MBOX_TMO; 2045 if (phba->work_wait) 2046 wake_up(phba->work_wait); 2047 } 2048 spin_unlock_irqrestore(phba->host->host_lock, iflag); 2049 } 2050 2051 void 2052 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 2053 { 2054 LPFC_MBOXQ_t *pmbox; 2055 MAILBOX_t *mb; 2056 2057 spin_lock_irq(phba->host->host_lock); 2058 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { 2059 spin_unlock_irq(phba->host->host_lock); 2060 return; 2061 } 2062 2063 phba->work_hba_events &= ~WORKER_MBOX_TMO; 2064 2065 pmbox = phba->sli.mbox_active; 2066 mb = &pmbox->mb; 2067 2068 /* Mbox cmd <mbxCommand> timeout */ 2069 lpfc_printf_log(phba, 2070 KERN_ERR, 2071 LOG_MBOX | LOG_SLI, 2072 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 2073 phba->brd_no, 2074 mb->mbxCommand, 2075 phba->hba_state, 2076 phba->sli.sli_flag, 2077 phba->sli.mbox_active); 2078 2079 phba->sli.mbox_active = NULL; 2080 if (pmbox->mbox_cmpl) { 2081 mb->mbxStatus = MBX_NOT_FINISHED; 2082 spin_unlock_irq(phba->host->host_lock); 2083 (pmbox->mbox_cmpl) (phba, pmbox); 2084 spin_lock_irq(phba->host->host_lock); 2085 } 2086 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2087 2088 spin_unlock_irq(phba->host->host_lock); 2089 lpfc_mbox_abort(phba); 2090 return; 2091 } 2092 2093 int 2094 lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) 2095 { 2096 MAILBOX_t *mb; 2097 struct lpfc_sli *psli; 2098 uint32_t status, evtctr; 2099 uint32_t ha_copy; 2100 int i; 2101 unsigned long drvr_flag = 0; 2102 volatile uint32_t word0, ldata; 2103 void __iomem *to_slim; 2104 2105 psli = &phba->sli; 2106 2107 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2108 2109 2110 mb = &pmbox->mb; 2111 status = MBX_SUCCESS; 2112 2113 if (phba->hba_state == LPFC_HBA_ERROR) { 2114 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2115 2116 /* Mbox command <mbxCommand> cannot issue */ 2117 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2118 return (MBX_NOT_FINISHED); 2119 } 2120 2121 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2122 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2123 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2124 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2125 return (MBX_NOT_FINISHED); 2126 } 2127 2128 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 2129 /* Polling for a mbox command when another one is already active 2130 * is not allowed in SLI. Also, the driver must have established 2131 * SLI2 mode to queue and process multiple mbox commands. 2132 */ 2133 2134 if (flag & MBX_POLL) { 2135 spin_unlock_irqrestore(phba->host->host_lock, 2136 drvr_flag); 2137 2138 /* Mbox command <mbxCommand> cannot issue */ 2139 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2140 return (MBX_NOT_FINISHED); 2141 } 2142 2143 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2144 spin_unlock_irqrestore(phba->host->host_lock, 2145 drvr_flag); 2146 /* Mbox command <mbxCommand> cannot issue */ 2147 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2148 return (MBX_NOT_FINISHED); 2149 } 2150 2151 /* Handle STOP IOCB processing flag. This is only meaningful 2152 * if we are not polling for mbox completion. 2153 */ 2154 if (flag & MBX_STOP_IOCB) { 2155 flag &= ~MBX_STOP_IOCB; 2156 /* Now flag each ring */ 2157 for (i = 0; i < psli->num_rings; i++) { 2158 /* If the ring is active, flag it */ 2159 if (psli->ring[i].cmdringaddr) { 2160 psli->ring[i].flag |= 2161 LPFC_STOP_IOCB_MBX; 2162 } 2163 } 2164 } 2165 2166 /* Another mailbox command is still being processed, queue this 2167 * command to be processed later. 2168 */ 2169 lpfc_mbox_put(phba, pmbox); 2170 2171 /* Mbox cmd issue - BUSY */ 2172 lpfc_printf_log(phba, 2173 KERN_INFO, 2174 LOG_MBOX | LOG_SLI, 2175 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n", 2176 phba->brd_no, 2177 mb->mbxCommand, 2178 phba->hba_state, 2179 psli->sli_flag, 2180 flag); 2181 2182 psli->slistat.mbox_busy++; 2183 spin_unlock_irqrestore(phba->host->host_lock, 2184 drvr_flag); 2185 2186 return (MBX_BUSY); 2187 } 2188 2189 /* Handle STOP IOCB processing flag. This is only meaningful 2190 * if we are not polling for mbox completion. 2191 */ 2192 if (flag & MBX_STOP_IOCB) { 2193 flag &= ~MBX_STOP_IOCB; 2194 if (flag == MBX_NOWAIT) { 2195 /* Now flag each ring */ 2196 for (i = 0; i < psli->num_rings; i++) { 2197 /* If the ring is active, flag it */ 2198 if (psli->ring[i].cmdringaddr) { 2199 psli->ring[i].flag |= 2200 LPFC_STOP_IOCB_MBX; 2201 } 2202 } 2203 } 2204 } 2205 2206 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2207 2208 /* If we are not polling, we MUST be in SLI2 mode */ 2209 if (flag != MBX_POLL) { 2210 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 2211 (mb->mbxCommand != MBX_KILL_BOARD)) { 2212 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2213 spin_unlock_irqrestore(phba->host->host_lock, 2214 drvr_flag); 2215 /* Mbox command <mbxCommand> cannot issue */ 2216 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag); 2217 return (MBX_NOT_FINISHED); 2218 } 2219 /* timeout active mbox command */ 2220 mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); 2221 } 2222 2223 /* Mailbox cmd <cmd> issue */ 2224 lpfc_printf_log(phba, 2225 KERN_INFO, 2226 LOG_MBOX | LOG_SLI, 2227 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n", 2228 phba->brd_no, 2229 mb->mbxCommand, 2230 phba->hba_state, 2231 psli->sli_flag, 2232 flag); 2233 2234 psli->slistat.mbox_cmd++; 2235 evtctr = psli->slistat.mbox_event; 2236 2237 /* next set own bit for the adapter and copy over command word */ 2238 mb->mbxOwner = OWN_CHIP; 2239 2240 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2241 /* First copy command data to host SLIM area */ 2242 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); 2243 } else { 2244 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2245 /* copy command data into host mbox for cmpl */ 2246 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2247 MAILBOX_CMD_SIZE); 2248 } 2249 2250 /* First copy mbox command data to HBA SLIM, skip past first 2251 word */ 2252 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2253 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 2254 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 2255 2256 /* Next copy over first word, with mbxOwner set */ 2257 ldata = *((volatile uint32_t *)mb); 2258 to_slim = phba->MBslimaddr; 2259 writel(ldata, to_slim); 2260 readl(to_slim); /* flush */ 2261 2262 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2263 /* switch over to host mailbox */ 2264 psli->sli_flag |= LPFC_SLI2_ACTIVE; 2265 } 2266 } 2267 2268 wmb(); 2269 /* interrupt board to doit right away */ 2270 writel(CA_MBATT, phba->CAregaddr); 2271 readl(phba->CAregaddr); /* flush */ 2272 2273 switch (flag) { 2274 case MBX_NOWAIT: 2275 /* Don't wait for it to finish, just return */ 2276 psli->mbox_active = pmbox; 2277 break; 2278 2279 case MBX_POLL: 2280 i = 0; 2281 psli->mbox_active = NULL; 2282 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2283 /* First read mbox status word */ 2284 word0 = *((volatile uint32_t *)&phba->slim2p->mbx); 2285 word0 = le32_to_cpu(word0); 2286 } else { 2287 /* First read mbox status word */ 2288 word0 = readl(phba->MBslimaddr); 2289 } 2290 2291 /* Read the HBA Host Attention Register */ 2292 ha_copy = readl(phba->HAregaddr); 2293 2294 /* Wait for command to complete */ 2295 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2296 (!(ha_copy & HA_MBATT) && 2297 (phba->hba_state > LPFC_WARM_START))) { 2298 if (i++ >= 100) { 2299 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2300 spin_unlock_irqrestore(phba->host->host_lock, 2301 drvr_flag); 2302 return (MBX_NOT_FINISHED); 2303 } 2304 2305 /* Check if we took a mbox interrupt while we were 2306 polling */ 2307 if (((word0 & OWN_CHIP) != OWN_CHIP) 2308 && (evtctr != psli->slistat.mbox_event)) 2309 break; 2310 2311 spin_unlock_irqrestore(phba->host->host_lock, 2312 drvr_flag); 2313 2314 /* Can be in interrupt context, do not sleep */ 2315 /* (or might be called with interrupts disabled) */ 2316 mdelay(i); 2317 2318 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2319 2320 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2321 /* First copy command data */ 2322 word0 = *((volatile uint32_t *) 2323 &phba->slim2p->mbx); 2324 word0 = le32_to_cpu(word0); 2325 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2326 MAILBOX_t *slimmb; 2327 volatile uint32_t slimword0; 2328 /* Check real SLIM for any errors */ 2329 slimword0 = readl(phba->MBslimaddr); 2330 slimmb = (MAILBOX_t *) & slimword0; 2331 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 2332 && slimmb->mbxStatus) { 2333 psli->sli_flag &= 2334 ~LPFC_SLI2_ACTIVE; 2335 word0 = slimword0; 2336 } 2337 } 2338 } else { 2339 /* First copy command data */ 2340 word0 = readl(phba->MBslimaddr); 2341 } 2342 /* Read the HBA Host Attention Register */ 2343 ha_copy = readl(phba->HAregaddr); 2344 } 2345 2346 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2347 /* copy results back to user */ 2348 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 2349 MAILBOX_CMD_SIZE); 2350 } else { 2351 /* First copy command data */ 2352 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 2353 MAILBOX_CMD_SIZE); 2354 if ((mb->mbxCommand == MBX_DUMP_MEMORY) && 2355 pmbox->context2) { 2356 lpfc_memcpy_from_slim((void *)pmbox->context2, 2357 phba->MBslimaddr + DMP_RSP_OFFSET, 2358 mb->un.varDmp.word_cnt); 2359 } 2360 } 2361 2362 writel(HA_MBATT, phba->HAregaddr); 2363 readl(phba->HAregaddr); /* flush */ 2364 2365 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2366 status = mb->mbxStatus; 2367 } 2368 2369 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2370 return (status); 2371 } 2372 2373 static int 2374 lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 2375 struct lpfc_iocbq * piocb) 2376 { 2377 /* Insert the caller's iocb in the txq tail for later processing. */ 2378 list_add_tail(&piocb->list, &pring->txq); 2379 pring->txq_cnt++; 2380 return (0); 2381 } 2382 2383 static struct lpfc_iocbq * 2384 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2385 struct lpfc_iocbq ** piocb) 2386 { 2387 struct lpfc_iocbq * nextiocb; 2388 2389 nextiocb = lpfc_sli_ringtx_get(phba, pring); 2390 if (!nextiocb) { 2391 nextiocb = *piocb; 2392 *piocb = NULL; 2393 } 2394 2395 return nextiocb; 2396 } 2397 2398 int 2399 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2400 struct lpfc_iocbq *piocb, uint32_t flag) 2401 { 2402 struct lpfc_iocbq *nextiocb; 2403 IOCB_t *iocb; 2404 2405 /* 2406 * We should never get an IOCB if we are in a < LINK_DOWN state 2407 */ 2408 if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) 2409 return IOCB_ERROR; 2410 2411 /* 2412 * Check to see if we are blocking IOCB processing because of a 2413 * outstanding mbox command. 2414 */ 2415 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX)) 2416 goto iocb_busy; 2417 2418 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) { 2419 /* 2420 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF 2421 * can be issued if the link is not up. 2422 */ 2423 switch (piocb->iocb.ulpCommand) { 2424 case CMD_QUE_RING_BUF_CN: 2425 case CMD_QUE_RING_BUF64_CN: 2426 /* 2427 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 2428 * completion, iocb_cmpl MUST be 0. 2429 */ 2430 if (piocb->iocb_cmpl) 2431 piocb->iocb_cmpl = NULL; 2432 /*FALLTHROUGH*/ 2433 case CMD_CREATE_XRI_CR: 2434 break; 2435 default: 2436 goto iocb_busy; 2437 } 2438 2439 /* 2440 * For FCP commands, we must be in a state where we can process link 2441 * attention events. 2442 */ 2443 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 2444 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) 2445 goto iocb_busy; 2446 2447 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2448 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 2449 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 2450 2451 if (iocb) 2452 lpfc_sli_update_ring(phba, pring); 2453 else 2454 lpfc_sli_update_full_ring(phba, pring); 2455 2456 if (!piocb) 2457 return IOCB_SUCCESS; 2458 2459 goto out_busy; 2460 2461 iocb_busy: 2462 pring->stats.iocb_cmd_delay++; 2463 2464 out_busy: 2465 2466 if (!(flag & SLI_IOCB_RET_IOCB)) { 2467 lpfc_sli_ringtx_put(phba, pring, piocb); 2468 return IOCB_SUCCESS; 2469 } 2470 2471 return IOCB_BUSY; 2472 } 2473 2474 static int 2475 lpfc_extra_ring_setup( struct lpfc_hba *phba) 2476 { 2477 struct lpfc_sli *psli; 2478 struct lpfc_sli_ring *pring; 2479 2480 psli = &phba->sli; 2481 2482 /* Adjust cmd/rsp ring iocb entries more evenly */ 2483 pring = &psli->ring[psli->fcp_ring]; 2484 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2485 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2486 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2487 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2488 2489 pring = &psli->ring[1]; 2490 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2491 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2492 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2493 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2494 2495 /* Setup default profile for this ring */ 2496 pring->iotag_max = 4096; 2497 pring->num_mask = 1; 2498 pring->prt[0].profile = 0; /* Mask 0 */ 2499 pring->prt[0].rctl = FC_UNSOL_DATA; 2500 pring->prt[0].type = 5; 2501 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 2502 return 0; 2503 } 2504 2505 int 2506 lpfc_sli_setup(struct lpfc_hba *phba) 2507 { 2508 int i, totiocb = 0; 2509 struct lpfc_sli *psli = &phba->sli; 2510 struct lpfc_sli_ring *pring; 2511 2512 psli->num_rings = MAX_CONFIGURED_RINGS; 2513 psli->sli_flag = 0; 2514 psli->fcp_ring = LPFC_FCP_RING; 2515 psli->next_ring = LPFC_FCP_NEXT_RING; 2516 psli->ip_ring = LPFC_IP_RING; 2517 2518 psli->iocbq_lookup = NULL; 2519 psli->iocbq_lookup_len = 0; 2520 psli->last_iotag = 0; 2521 2522 for (i = 0; i < psli->num_rings; i++) { 2523 pring = &psli->ring[i]; 2524 switch (i) { 2525 case LPFC_FCP_RING: /* ring 0 - FCP */ 2526 /* numCiocb and numRiocb are used in config_port */ 2527 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 2528 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 2529 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2530 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2531 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2532 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2533 pring->iotag_ctr = 0; 2534 pring->iotag_max = 2535 (phba->cfg_hba_queue_depth * 2); 2536 pring->fast_iotag = pring->iotag_max; 2537 pring->num_mask = 0; 2538 break; 2539 case LPFC_IP_RING: /* ring 1 - IP */ 2540 /* numCiocb and numRiocb are used in config_port */ 2541 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 2542 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 2543 pring->num_mask = 0; 2544 break; 2545 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 2546 /* numCiocb and numRiocb are used in config_port */ 2547 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 2548 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 2549 pring->fast_iotag = 0; 2550 pring->iotag_ctr = 0; 2551 pring->iotag_max = 4096; 2552 pring->num_mask = 4; 2553 pring->prt[0].profile = 0; /* Mask 0 */ 2554 pring->prt[0].rctl = FC_ELS_REQ; 2555 pring->prt[0].type = FC_ELS_DATA; 2556 pring->prt[0].lpfc_sli_rcv_unsol_event = 2557 lpfc_els_unsol_event; 2558 pring->prt[1].profile = 0; /* Mask 1 */ 2559 pring->prt[1].rctl = FC_ELS_RSP; 2560 pring->prt[1].type = FC_ELS_DATA; 2561 pring->prt[1].lpfc_sli_rcv_unsol_event = 2562 lpfc_els_unsol_event; 2563 pring->prt[2].profile = 0; /* Mask 2 */ 2564 /* NameServer Inquiry */ 2565 pring->prt[2].rctl = FC_UNSOL_CTL; 2566 /* NameServer */ 2567 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 2568 pring->prt[2].lpfc_sli_rcv_unsol_event = 2569 lpfc_ct_unsol_event; 2570 pring->prt[3].profile = 0; /* Mask 3 */ 2571 /* NameServer response */ 2572 pring->prt[3].rctl = FC_SOL_CTL; 2573 /* NameServer */ 2574 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 2575 pring->prt[3].lpfc_sli_rcv_unsol_event = 2576 lpfc_ct_unsol_event; 2577 break; 2578 } 2579 totiocb += (pring->numCiocb + pring->numRiocb); 2580 } 2581 if (totiocb > MAX_SLI2_IOCB) { 2582 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 2583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2584 "%d:0462 Too many cmd / rsp ring entries in " 2585 "SLI2 SLIM Data: x%x x%x\n", 2586 phba->brd_no, totiocb, MAX_SLI2_IOCB); 2587 } 2588 if (phba->cfg_multi_ring_support == 2) 2589 lpfc_extra_ring_setup(phba); 2590 2591 return 0; 2592 } 2593 2594 int 2595 lpfc_sli_queue_setup(struct lpfc_hba * phba) 2596 { 2597 struct lpfc_sli *psli; 2598 struct lpfc_sli_ring *pring; 2599 int i; 2600 2601 psli = &phba->sli; 2602 spin_lock_irq(phba->host->host_lock); 2603 INIT_LIST_HEAD(&psli->mboxq); 2604 /* Initialize list headers for txq and txcmplq as double linked lists */ 2605 for (i = 0; i < psli->num_rings; i++) { 2606 pring = &psli->ring[i]; 2607 pring->ringno = i; 2608 pring->next_cmdidx = 0; 2609 pring->local_getidx = 0; 2610 pring->cmdidx = 0; 2611 INIT_LIST_HEAD(&pring->txq); 2612 INIT_LIST_HEAD(&pring->txcmplq); 2613 INIT_LIST_HEAD(&pring->iocb_continueq); 2614 INIT_LIST_HEAD(&pring->postbufq); 2615 } 2616 spin_unlock_irq(phba->host->host_lock); 2617 return (1); 2618 } 2619 2620 int 2621 lpfc_sli_hba_down(struct lpfc_hba * phba) 2622 { 2623 struct lpfc_sli *psli; 2624 struct lpfc_sli_ring *pring; 2625 LPFC_MBOXQ_t *pmb; 2626 struct lpfc_iocbq *iocb, *next_iocb; 2627 IOCB_t *icmd = NULL; 2628 int i; 2629 unsigned long flags = 0; 2630 2631 psli = &phba->sli; 2632 lpfc_hba_down_prep(phba); 2633 2634 spin_lock_irqsave(phba->host->host_lock, flags); 2635 2636 for (i = 0; i < psli->num_rings; i++) { 2637 pring = &psli->ring[i]; 2638 pring->flag |= LPFC_DEFERRED_RING_EVENT; 2639 2640 /* 2641 * Error everything on the txq since these iocbs have not been 2642 * given to the FW yet. 2643 */ 2644 pring->txq_cnt = 0; 2645 2646 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 2647 list_del_init(&iocb->list); 2648 if (iocb->iocb_cmpl) { 2649 icmd = &iocb->iocb; 2650 icmd->ulpStatus = IOSTAT_LOCAL_REJECT; 2651 icmd->un.ulpWord[4] = IOERR_SLI_DOWN; 2652 spin_unlock_irqrestore(phba->host->host_lock, 2653 flags); 2654 (iocb->iocb_cmpl) (phba, iocb, iocb); 2655 spin_lock_irqsave(phba->host->host_lock, flags); 2656 } else 2657 lpfc_sli_release_iocbq(phba, iocb); 2658 } 2659 2660 INIT_LIST_HEAD(&(pring->txq)); 2661 2662 kfree(pring->fast_lookup); 2663 pring->fast_lookup = NULL; 2664 } 2665 2666 spin_unlock_irqrestore(phba->host->host_lock, flags); 2667 2668 /* Return any active mbox cmds */ 2669 del_timer_sync(&psli->mbox_tmo); 2670 spin_lock_irqsave(phba->host->host_lock, flags); 2671 phba->work_hba_events &= ~WORKER_MBOX_TMO; 2672 if (psli->mbox_active) { 2673 pmb = psli->mbox_active; 2674 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 2675 if (pmb->mbox_cmpl) { 2676 spin_unlock_irqrestore(phba->host->host_lock, flags); 2677 pmb->mbox_cmpl(phba,pmb); 2678 spin_lock_irqsave(phba->host->host_lock, flags); 2679 } 2680 } 2681 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2682 psli->mbox_active = NULL; 2683 2684 /* Return any pending mbox cmds */ 2685 while ((pmb = lpfc_mbox_get(phba)) != NULL) { 2686 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 2687 if (pmb->mbox_cmpl) { 2688 spin_unlock_irqrestore(phba->host->host_lock, flags); 2689 pmb->mbox_cmpl(phba,pmb); 2690 spin_lock_irqsave(phba->host->host_lock, flags); 2691 } 2692 } 2693 2694 INIT_LIST_HEAD(&psli->mboxq); 2695 2696 spin_unlock_irqrestore(phba->host->host_lock, flags); 2697 2698 return 1; 2699 } 2700 2701 void 2702 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 2703 { 2704 uint32_t *src = srcp; 2705 uint32_t *dest = destp; 2706 uint32_t ldata; 2707 int i; 2708 2709 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 2710 ldata = *src; 2711 ldata = le32_to_cpu(ldata); 2712 *dest = ldata; 2713 src++; 2714 dest++; 2715 } 2716 } 2717 2718 int 2719 lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 2720 struct lpfc_dmabuf * mp) 2721 { 2722 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 2723 later */ 2724 list_add_tail(&mp->list, &pring->postbufq); 2725 2726 pring->postbufq_cnt++; 2727 return 0; 2728 } 2729 2730 2731 struct lpfc_dmabuf * 2732 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2733 dma_addr_t phys) 2734 { 2735 struct lpfc_dmabuf *mp, *next_mp; 2736 struct list_head *slp = &pring->postbufq; 2737 2738 /* Search postbufq, from the begining, looking for a match on phys */ 2739 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 2740 if (mp->phys == phys) { 2741 list_del_init(&mp->list); 2742 pring->postbufq_cnt--; 2743 return mp; 2744 } 2745 } 2746 2747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2748 "%d:0410 Cannot find virtual addr for mapped buf on " 2749 "ring %d Data x%llx x%p x%p x%x\n", 2750 phba->brd_no, pring->ringno, (unsigned long long)phys, 2751 slp->next, slp->prev, pring->postbufq_cnt); 2752 return NULL; 2753 } 2754 2755 static void 2756 lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2757 struct lpfc_iocbq * rspiocb) 2758 { 2759 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 2760 /* Free the resources associated with the ELS_REQUEST64 IOCB the driver 2761 * just aborted. 2762 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl 2763 */ 2764 if (cmdiocb->context2) { 2765 buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2; 2766 2767 /* Free the response IOCB before completing the abort 2768 command. */ 2769 buf_ptr = NULL; 2770 list_remove_head((&buf_ptr1->list), buf_ptr, 2771 struct lpfc_dmabuf, list); 2772 if (buf_ptr) { 2773 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2774 kfree(buf_ptr); 2775 } 2776 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 2777 kfree(buf_ptr1); 2778 } 2779 2780 if (cmdiocb->context3) { 2781 buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3; 2782 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2783 kfree(buf_ptr); 2784 } 2785 2786 lpfc_sli_release_iocbq(phba, cmdiocb); 2787 return; 2788 } 2789 2790 int 2791 lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba, 2792 struct lpfc_sli_ring * pring, 2793 struct lpfc_iocbq * cmdiocb) 2794 { 2795 struct lpfc_iocbq *abtsiocbp; 2796 IOCB_t *icmd = NULL; 2797 IOCB_t *iabt = NULL; 2798 2799 /* issue ABTS for this IOCB based on iotag */ 2800 abtsiocbp = lpfc_sli_get_iocbq(phba); 2801 if (abtsiocbp == NULL) 2802 return 0; 2803 2804 iabt = &abtsiocbp->iocb; 2805 icmd = &cmdiocb->iocb; 2806 switch (icmd->ulpCommand) { 2807 case CMD_ELS_REQUEST64_CR: 2808 /* Even though we abort the ELS command, the firmware may access 2809 * the BPL or other resources before it processes our 2810 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb 2811 * resources till the actual abort request completes. 2812 */ 2813 abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand); 2814 abtsiocbp->context2 = cmdiocb->context2; 2815 abtsiocbp->context3 = cmdiocb->context3; 2816 cmdiocb->context2 = NULL; 2817 cmdiocb->context3 = NULL; 2818 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl; 2819 break; 2820 default: 2821 lpfc_sli_release_iocbq(phba, abtsiocbp); 2822 return 0; 2823 } 2824 2825 iabt->un.amxri.abortType = ABORT_TYPE_ABTS; 2826 iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32; 2827 2828 iabt->ulpLe = 1; 2829 iabt->ulpClass = CLASS3; 2830 iabt->ulpCommand = CMD_ABORT_MXRI64_CN; 2831 2832 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) { 2833 lpfc_sli_release_iocbq(phba, abtsiocbp); 2834 return 0; 2835 } 2836 2837 return 1; 2838 } 2839 2840 static int 2841 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id, 2842 uint64_t lun_id, uint32_t ctx, 2843 lpfc_ctx_cmd ctx_cmd) 2844 { 2845 struct lpfc_scsi_buf *lpfc_cmd; 2846 struct scsi_cmnd *cmnd; 2847 int rc = 1; 2848 2849 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 2850 return rc; 2851 2852 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 2853 cmnd = lpfc_cmd->pCmd; 2854 2855 if (cmnd == NULL) 2856 return rc; 2857 2858 switch (ctx_cmd) { 2859 case LPFC_CTX_LUN: 2860 if ((cmnd->device->id == tgt_id) && 2861 (cmnd->device->lun == lun_id)) 2862 rc = 0; 2863 break; 2864 case LPFC_CTX_TGT: 2865 if (cmnd->device->id == tgt_id) 2866 rc = 0; 2867 break; 2868 case LPFC_CTX_CTX: 2869 if (iocbq->iocb.ulpContext == ctx) 2870 rc = 0; 2871 break; 2872 case LPFC_CTX_HOST: 2873 rc = 0; 2874 break; 2875 default: 2876 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 2877 __FUNCTION__, ctx_cmd); 2878 break; 2879 } 2880 2881 return rc; 2882 } 2883 2884 int 2885 lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2886 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) 2887 { 2888 struct lpfc_iocbq *iocbq; 2889 int sum, i; 2890 2891 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 2892 iocbq = phba->sli.iocbq_lookup[i]; 2893 2894 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id, 2895 0, ctx_cmd) == 0) 2896 sum++; 2897 } 2898 2899 return sum; 2900 } 2901 2902 void 2903 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2904 struct lpfc_iocbq * rspiocb) 2905 { 2906 spin_lock_irq(phba->host->host_lock); 2907 lpfc_sli_release_iocbq(phba, cmdiocb); 2908 spin_unlock_irq(phba->host->host_lock); 2909 return; 2910 } 2911 2912 int 2913 lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2914 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx, 2915 lpfc_ctx_cmd abort_cmd) 2916 { 2917 struct lpfc_iocbq *iocbq; 2918 struct lpfc_iocbq *abtsiocb; 2919 IOCB_t *cmd = NULL; 2920 int errcnt = 0, ret_val = 0; 2921 int i; 2922 2923 for (i = 1; i <= phba->sli.last_iotag; i++) { 2924 iocbq = phba->sli.iocbq_lookup[i]; 2925 2926 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id, 2927 0, abort_cmd) != 0) 2928 continue; 2929 2930 /* issue ABTS for this IOCB based on iotag */ 2931 abtsiocb = lpfc_sli_get_iocbq(phba); 2932 if (abtsiocb == NULL) { 2933 errcnt++; 2934 continue; 2935 } 2936 2937 cmd = &iocbq->iocb; 2938 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 2939 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 2940 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 2941 abtsiocb->iocb.ulpLe = 1; 2942 abtsiocb->iocb.ulpClass = cmd->ulpClass; 2943 2944 if (phba->hba_state >= LPFC_LINK_UP) 2945 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 2946 else 2947 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 2948 2949 /* Setup callback routine and issue the command. */ 2950 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 2951 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 2952 if (ret_val == IOCB_ERROR) { 2953 lpfc_sli_release_iocbq(phba, abtsiocb); 2954 errcnt++; 2955 continue; 2956 } 2957 } 2958 2959 return errcnt; 2960 } 2961 2962 static void 2963 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 2964 struct lpfc_iocbq *cmdiocbq, 2965 struct lpfc_iocbq *rspiocbq) 2966 { 2967 wait_queue_head_t *pdone_q; 2968 unsigned long iflags; 2969 2970 spin_lock_irqsave(phba->host->host_lock, iflags); 2971 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 2972 if (cmdiocbq->context2 && rspiocbq) 2973 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 2974 &rspiocbq->iocb, sizeof(IOCB_t)); 2975 2976 pdone_q = cmdiocbq->context_un.wait_queue; 2977 spin_unlock_irqrestore(phba->host->host_lock, iflags); 2978 if (pdone_q) 2979 wake_up(pdone_q); 2980 return; 2981 } 2982 2983 /* 2984 * Issue the caller's iocb and wait for its completion, but no longer than the 2985 * caller's timeout. Note that iocb_flags is cleared before the 2986 * lpfc_sli_issue_call since the wake routine sets a unique value and by 2987 * definition this is a wait function. 2988 */ 2989 int 2990 lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, 2991 struct lpfc_sli_ring * pring, 2992 struct lpfc_iocbq * piocb, 2993 struct lpfc_iocbq * prspiocbq, 2994 uint32_t timeout) 2995 { 2996 DECLARE_WAIT_QUEUE_HEAD(done_q); 2997 long timeleft, timeout_req = 0; 2998 int retval = IOCB_SUCCESS; 2999 uint32_t creg_val; 3000 3001 /* 3002 * If the caller has provided a response iocbq buffer, then context2 3003 * is NULL or its an error. 3004 */ 3005 if (prspiocbq) { 3006 if (piocb->context2) 3007 return IOCB_ERROR; 3008 piocb->context2 = prspiocbq; 3009 } 3010 3011 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 3012 piocb->context_un.wait_queue = &done_q; 3013 piocb->iocb_flag &= ~LPFC_IO_WAKE; 3014 3015 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 3016 creg_val = readl(phba->HCregaddr); 3017 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 3018 writel(creg_val, phba->HCregaddr); 3019 readl(phba->HCregaddr); /* flush */ 3020 } 3021 3022 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 3023 if (retval == IOCB_SUCCESS) { 3024 timeout_req = timeout * HZ; 3025 spin_unlock_irq(phba->host->host_lock); 3026 timeleft = wait_event_timeout(done_q, 3027 piocb->iocb_flag & LPFC_IO_WAKE, 3028 timeout_req); 3029 spin_lock_irq(phba->host->host_lock); 3030 3031 if (timeleft == 0) { 3032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3033 "%d:0329 IOCB wait timeout error - no " 3034 "wake response Data x%x\n", 3035 phba->brd_no, timeout); 3036 retval = IOCB_TIMEDOUT; 3037 } else if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 3038 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3039 "%d:0330 IOCB wake NOT set, " 3040 "Data x%x x%lx\n", phba->brd_no, 3041 timeout, (timeleft / jiffies)); 3042 retval = IOCB_TIMEDOUT; 3043 } else { 3044 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3045 "%d:0331 IOCB wake signaled\n", 3046 phba->brd_no); 3047 } 3048 } else { 3049 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3050 "%d:0332 IOCB wait issue failed, Data x%x\n", 3051 phba->brd_no, retval); 3052 retval = IOCB_ERROR; 3053 } 3054 3055 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 3056 creg_val = readl(phba->HCregaddr); 3057 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 3058 writel(creg_val, phba->HCregaddr); 3059 readl(phba->HCregaddr); /* flush */ 3060 } 3061 3062 if (prspiocbq) 3063 piocb->context2 = NULL; 3064 3065 piocb->context_un.wait_queue = NULL; 3066 piocb->iocb_cmpl = NULL; 3067 return retval; 3068 } 3069 3070 int 3071 lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 3072 uint32_t timeout) 3073 { 3074 DECLARE_WAIT_QUEUE_HEAD(done_q); 3075 DECLARE_WAITQUEUE(wq_entry, current); 3076 uint32_t timeleft = 0; 3077 int retval; 3078 3079 /* The caller must leave context1 empty. */ 3080 if (pmboxq->context1 != 0) { 3081 return (MBX_NOT_FINISHED); 3082 } 3083 3084 /* setup wake call as IOCB callback */ 3085 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 3086 /* setup context field to pass wait_queue pointer to wake function */ 3087 pmboxq->context1 = &done_q; 3088 3089 /* start to sleep before we wait, to avoid races */ 3090 set_current_state(TASK_INTERRUPTIBLE); 3091 add_wait_queue(&done_q, &wq_entry); 3092 3093 /* now issue the command */ 3094 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3095 3096 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 3097 timeleft = schedule_timeout(timeout * HZ); 3098 pmboxq->context1 = NULL; 3099 /* if schedule_timeout returns 0, we timed out and were not 3100 woken up */ 3101 if ((timeleft == 0) || signal_pending(current)) 3102 retval = MBX_TIMEOUT; 3103 else 3104 retval = MBX_SUCCESS; 3105 } 3106 3107 3108 set_current_state(TASK_RUNNING); 3109 remove_wait_queue(&done_q, &wq_entry); 3110 return retval; 3111 } 3112 3113 irqreturn_t 3114 lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) 3115 { 3116 struct lpfc_hba *phba; 3117 uint32_t ha_copy; 3118 uint32_t work_ha_copy; 3119 unsigned long status; 3120 int i; 3121 uint32_t control; 3122 3123 /* 3124 * Get the driver's phba structure from the dev_id and 3125 * assume the HBA is not interrupting. 3126 */ 3127 phba = (struct lpfc_hba *) dev_id; 3128 3129 if (unlikely(!phba)) 3130 return IRQ_NONE; 3131 3132 phba->sli.slistat.sli_intr++; 3133 3134 /* 3135 * Call the HBA to see if it is interrupting. If not, don't claim 3136 * the interrupt 3137 */ 3138 3139 /* Ignore all interrupts during initialization. */ 3140 if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) 3141 return IRQ_NONE; 3142 3143 /* 3144 * Read host attention register to determine interrupt source 3145 * Clear Attention Sources, except Error Attention (to 3146 * preserve status) and Link Attention 3147 */ 3148 spin_lock(phba->host->host_lock); 3149 ha_copy = readl(phba->HAregaddr); 3150 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 3151 readl(phba->HAregaddr); /* flush */ 3152 spin_unlock(phba->host->host_lock); 3153 3154 if (unlikely(!ha_copy)) 3155 return IRQ_NONE; 3156 3157 work_ha_copy = ha_copy & phba->work_ha_mask; 3158 3159 if (unlikely(work_ha_copy)) { 3160 if (work_ha_copy & HA_LATT) { 3161 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 3162 /* 3163 * Turn off Link Attention interrupts 3164 * until CLEAR_LA done 3165 */ 3166 spin_lock(phba->host->host_lock); 3167 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 3168 control = readl(phba->HCregaddr); 3169 control &= ~HC_LAINT_ENA; 3170 writel(control, phba->HCregaddr); 3171 readl(phba->HCregaddr); /* flush */ 3172 spin_unlock(phba->host->host_lock); 3173 } 3174 else 3175 work_ha_copy &= ~HA_LATT; 3176 } 3177 3178 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) { 3179 for (i = 0; i < phba->sli.num_rings; i++) { 3180 if (work_ha_copy & (HA_RXATT << (4*i))) { 3181 /* 3182 * Turn off Slow Rings interrupts 3183 */ 3184 spin_lock(phba->host->host_lock); 3185 control = readl(phba->HCregaddr); 3186 control &= ~(HC_R0INT_ENA << i); 3187 writel(control, phba->HCregaddr); 3188 readl(phba->HCregaddr); /* flush */ 3189 spin_unlock(phba->host->host_lock); 3190 } 3191 } 3192 } 3193 3194 if (work_ha_copy & HA_ERATT) { 3195 phba->hba_state = LPFC_HBA_ERROR; 3196 /* 3197 * There was a link/board error. Read the 3198 * status register to retrieve the error event 3199 * and process it. 3200 */ 3201 phba->sli.slistat.err_attn_event++; 3202 /* Save status info */ 3203 phba->work_hs = readl(phba->HSregaddr); 3204 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 3205 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 3206 3207 /* Clear Chip error bit */ 3208 writel(HA_ERATT, phba->HAregaddr); 3209 readl(phba->HAregaddr); /* flush */ 3210 phba->stopped = 1; 3211 } 3212 3213 spin_lock(phba->host->host_lock); 3214 phba->work_ha |= work_ha_copy; 3215 if (phba->work_wait) 3216 wake_up(phba->work_wait); 3217 spin_unlock(phba->host->host_lock); 3218 } 3219 3220 ha_copy &= ~(phba->work_ha_mask); 3221 3222 /* 3223 * Process all events on FCP ring. Take the optimized path for 3224 * FCP IO. Any other IO is slow path and is handled by 3225 * the worker thread. 3226 */ 3227 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 3228 status >>= (4*LPFC_FCP_RING); 3229 if (status & HA_RXATT) 3230 lpfc_sli_handle_fast_ring_event(phba, 3231 &phba->sli.ring[LPFC_FCP_RING], 3232 status); 3233 return IRQ_HANDLED; 3234 3235 } /* lpfc_intr_handler */ 3236