1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_cmnd.h> 29 #include <scsi/scsi_device.h> 30 #include <scsi/scsi_host.h> 31 #include <scsi/scsi_transport_fc.h> 32 33 #include "lpfc_hw.h" 34 #include "lpfc_sli.h" 35 #include "lpfc_disc.h" 36 #include "lpfc_scsi.h" 37 #include "lpfc.h" 38 #include "lpfc_crtn.h" 39 #include "lpfc_logmsg.h" 40 #include "lpfc_compat.h" 41 42 /* 43 * Define macro to log: Mailbox command x%x cannot issue Data 44 * This allows multiple uses of lpfc_msgBlk0311 45 * w/o perturbing log msg utility. 46 */ 47 #define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \ 48 lpfc_printf_log(phba, \ 49 KERN_INFO, \ 50 LOG_MBOX | LOG_SLI, \ 51 "%d:0311 Mailbox command x%x cannot issue " \ 52 "Data: x%x x%x x%x\n", \ 53 phba->brd_no, \ 54 mb->mbxCommand, \ 55 phba->hba_state, \ 56 psli->sli_flag, \ 57 flag); 58 59 60 /* There are only four IOCB completion types. */ 61 typedef enum _lpfc_iocb_type { 62 LPFC_UNKNOWN_IOCB, 63 LPFC_UNSOL_IOCB, 64 LPFC_SOL_IOCB, 65 LPFC_ABORT_IOCB 66 } lpfc_iocb_type; 67 68 struct lpfc_iocbq * 69 lpfc_sli_get_iocbq(struct lpfc_hba * phba) 70 { 71 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 72 struct lpfc_iocbq * iocbq = NULL; 73 74 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 75 return iocbq; 76 } 77 78 void 79 lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) 80 { 81 size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb); 82 83 /* 84 * Clean all volatile data fields, preserve iotag and node struct. 85 */ 86 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 87 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 88 } 89 90 /* 91 * Translate the iocb command to an iocb command type used to decide the final 92 * disposition of each completed IOCB. 93 */ 94 static lpfc_iocb_type 95 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 96 { 97 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 98 99 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 100 return 0; 101 102 switch (iocb_cmnd) { 103 case CMD_XMIT_SEQUENCE_CR: 104 case CMD_XMIT_SEQUENCE_CX: 105 case CMD_XMIT_BCAST_CN: 106 case CMD_XMIT_BCAST_CX: 107 case CMD_ELS_REQUEST_CR: 108 case CMD_ELS_REQUEST_CX: 109 case CMD_CREATE_XRI_CR: 110 case CMD_CREATE_XRI_CX: 111 case CMD_GET_RPI_CN: 112 case CMD_XMIT_ELS_RSP_CX: 113 case CMD_GET_RPI_CR: 114 case CMD_FCP_IWRITE_CR: 115 case CMD_FCP_IWRITE_CX: 116 case CMD_FCP_IREAD_CR: 117 case CMD_FCP_IREAD_CX: 118 case CMD_FCP_ICMND_CR: 119 case CMD_FCP_ICMND_CX: 120 case CMD_ADAPTER_MSG: 121 case CMD_ADAPTER_DUMP: 122 case CMD_XMIT_SEQUENCE64_CR: 123 case CMD_XMIT_SEQUENCE64_CX: 124 case CMD_XMIT_BCAST64_CN: 125 case CMD_XMIT_BCAST64_CX: 126 case CMD_ELS_REQUEST64_CR: 127 case CMD_ELS_REQUEST64_CX: 128 case CMD_FCP_IWRITE64_CR: 129 case CMD_FCP_IWRITE64_CX: 130 case CMD_FCP_IREAD64_CR: 131 case CMD_FCP_IREAD64_CX: 132 case CMD_FCP_ICMND64_CR: 133 case CMD_FCP_ICMND64_CX: 134 case CMD_GEN_REQUEST64_CR: 135 case CMD_GEN_REQUEST64_CX: 136 case CMD_XMIT_ELS_RSP64_CX: 137 type = LPFC_SOL_IOCB; 138 break; 139 case CMD_ABORT_XRI_CN: 140 case CMD_ABORT_XRI_CX: 141 case CMD_CLOSE_XRI_CN: 142 case CMD_CLOSE_XRI_CX: 143 case CMD_XRI_ABORTED_CX: 144 case CMD_ABORT_MXRI64_CN: 145 type = LPFC_ABORT_IOCB; 146 break; 147 case CMD_RCV_SEQUENCE_CX: 148 case CMD_RCV_ELS_REQ_CX: 149 case CMD_RCV_SEQUENCE64_CX: 150 case CMD_RCV_ELS_REQ64_CX: 151 type = LPFC_UNSOL_IOCB; 152 break; 153 default: 154 type = LPFC_UNKNOWN_IOCB; 155 break; 156 } 157 158 return type; 159 } 160 161 static int 162 lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb) 163 { 164 struct lpfc_sli *psli = &phba->sli; 165 MAILBOX_t *pmbox = &pmb->mb; 166 int i, rc; 167 168 for (i = 0; i < psli->num_rings; i++) { 169 phba->hba_state = LPFC_INIT_MBX_CMDS; 170 lpfc_config_ring(phba, i, pmb); 171 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 172 if (rc != MBX_SUCCESS) { 173 lpfc_printf_log(phba, 174 KERN_ERR, 175 LOG_INIT, 176 "%d:0446 Adapter failed to init, " 177 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 178 "ring %d\n", 179 phba->brd_no, 180 pmbox->mbxCommand, 181 pmbox->mbxStatus, 182 i); 183 phba->hba_state = LPFC_HBA_ERROR; 184 return -ENXIO; 185 } 186 } 187 return 0; 188 } 189 190 static int 191 lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, 192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) 193 { 194 list_add_tail(&piocb->list, &pring->txcmplq); 195 pring->txcmplq_cnt++; 196 if (unlikely(pring->ringno == LPFC_ELS_RING)) 197 mod_timer(&phba->els_tmofunc, 198 jiffies + HZ * (phba->fc_ratov << 1)); 199 200 return (0); 201 } 202 203 static struct lpfc_iocbq * 204 lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 205 { 206 struct list_head *dlp; 207 struct lpfc_iocbq *cmd_iocb; 208 209 dlp = &pring->txq; 210 cmd_iocb = NULL; 211 list_remove_head((&pring->txq), cmd_iocb, 212 struct lpfc_iocbq, 213 list); 214 if (cmd_iocb) { 215 /* If the first ptr is not equal to the list header, 216 * deque the IOCBQ_t and return it. 217 */ 218 pring->txq_cnt--; 219 } 220 return (cmd_iocb); 221 } 222 223 static IOCB_t * 224 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 225 { 226 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 227 uint32_t max_cmd_idx = pring->numCiocb; 228 IOCB_t *iocb = NULL; 229 230 if ((pring->next_cmdidx == pring->cmdidx) && 231 (++pring->next_cmdidx >= max_cmd_idx)) 232 pring->next_cmdidx = 0; 233 234 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 235 236 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 237 238 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 239 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 240 "%d:0315 Ring %d issue: portCmdGet %d " 241 "is bigger then cmd ring %d\n", 242 phba->brd_no, pring->ringno, 243 pring->local_getidx, max_cmd_idx); 244 245 phba->hba_state = LPFC_HBA_ERROR; 246 /* 247 * All error attention handlers are posted to 248 * worker thread 249 */ 250 phba->work_ha |= HA_ERATT; 251 phba->work_hs = HS_FFER3; 252 if (phba->work_wait) 253 wake_up(phba->work_wait); 254 255 return NULL; 256 } 257 258 if (pring->local_getidx == pring->next_cmdidx) 259 return NULL; 260 } 261 262 iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx); 263 264 return iocb; 265 } 266 267 uint16_t 268 lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) 269 { 270 struct lpfc_iocbq ** new_arr; 271 struct lpfc_iocbq ** old_arr; 272 size_t new_len; 273 struct lpfc_sli *psli = &phba->sli; 274 uint16_t iotag; 275 276 spin_lock_irq(phba->host->host_lock); 277 iotag = psli->last_iotag; 278 if(++iotag < psli->iocbq_lookup_len) { 279 psli->last_iotag = iotag; 280 psli->iocbq_lookup[iotag] = iocbq; 281 spin_unlock_irq(phba->host->host_lock); 282 iocbq->iotag = iotag; 283 return iotag; 284 } 285 else if (psli->iocbq_lookup_len < (0xffff 286 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 287 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 288 spin_unlock_irq(phba->host->host_lock); 289 new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *), 290 GFP_KERNEL); 291 if (new_arr) { 292 memset((char *)new_arr, 0, 293 new_len * sizeof (struct lpfc_iocbq *)); 294 spin_lock_irq(phba->host->host_lock); 295 old_arr = psli->iocbq_lookup; 296 if (new_len <= psli->iocbq_lookup_len) { 297 /* highly unprobable case */ 298 kfree(new_arr); 299 iotag = psli->last_iotag; 300 if(++iotag < psli->iocbq_lookup_len) { 301 psli->last_iotag = iotag; 302 psli->iocbq_lookup[iotag] = iocbq; 303 spin_unlock_irq(phba->host->host_lock); 304 iocbq->iotag = iotag; 305 return iotag; 306 } 307 spin_unlock_irq(phba->host->host_lock); 308 return 0; 309 } 310 if (psli->iocbq_lookup) 311 memcpy(new_arr, old_arr, 312 ((psli->last_iotag + 1) * 313 sizeof (struct lpfc_iocbq *))); 314 psli->iocbq_lookup = new_arr; 315 psli->iocbq_lookup_len = new_len; 316 psli->last_iotag = iotag; 317 psli->iocbq_lookup[iotag] = iocbq; 318 spin_unlock_irq(phba->host->host_lock); 319 iocbq->iotag = iotag; 320 kfree(old_arr); 321 return iotag; 322 } 323 } else 324 spin_unlock_irq(phba->host->host_lock); 325 326 lpfc_printf_log(phba, KERN_ERR,LOG_SLI, 327 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", 328 phba->brd_no, psli->last_iotag); 329 330 return 0; 331 } 332 333 static void 334 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 335 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 336 { 337 /* 338 * Set up an iotag 339 */ 340 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 341 342 /* 343 * Issue iocb command to adapter 344 */ 345 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t)); 346 wmb(); 347 pring->stats.iocb_cmd++; 348 349 /* 350 * If there is no completion routine to call, we can release the 351 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 352 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 353 */ 354 if (nextiocb->iocb_cmpl) 355 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 356 else 357 lpfc_sli_release_iocbq(phba, nextiocb); 358 359 /* 360 * Let the HBA know what IOCB slot will be the next one the 361 * driver will put a command into. 362 */ 363 pring->cmdidx = pring->next_cmdidx; 364 writel(pring->cmdidx, phba->MBslimaddr 365 + (SLIMOFF + (pring->ringno * 2)) * 4); 366 } 367 368 static void 369 lpfc_sli_update_full_ring(struct lpfc_hba * phba, 370 struct lpfc_sli_ring *pring) 371 { 372 int ringno = pring->ringno; 373 374 pring->flag |= LPFC_CALL_RING_AVAILABLE; 375 376 wmb(); 377 378 /* 379 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 380 * The HBA will tell us when an IOCB entry is available. 381 */ 382 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 383 readl(phba->CAregaddr); /* flush */ 384 385 pring->stats.iocb_cmd_full++; 386 } 387 388 static void 389 lpfc_sli_update_ring(struct lpfc_hba * phba, 390 struct lpfc_sli_ring *pring) 391 { 392 int ringno = pring->ringno; 393 394 /* 395 * Tell the HBA that there is work to do in this ring. 396 */ 397 wmb(); 398 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 399 readl(phba->CAregaddr); /* flush */ 400 } 401 402 static void 403 lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 404 { 405 IOCB_t *iocb; 406 struct lpfc_iocbq *nextiocb; 407 408 /* 409 * Check to see if: 410 * (a) there is anything on the txq to send 411 * (b) link is up 412 * (c) link attention events can be processed (fcp ring only) 413 * (d) IOCB processing is not blocked by the outstanding mbox command. 414 */ 415 if (pring->txq_cnt && 416 (phba->hba_state > LPFC_LINK_DOWN) && 417 (pring->ringno != phba->sli.fcp_ring || 418 phba->sli.sli_flag & LPFC_PROCESS_LA) && 419 !(pring->flag & LPFC_STOP_IOCB_MBX)) { 420 421 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 422 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 423 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 424 425 if (iocb) 426 lpfc_sli_update_ring(phba, pring); 427 else 428 lpfc_sli_update_full_ring(phba, pring); 429 } 430 431 return; 432 } 433 434 /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */ 435 static void 436 lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno) 437 { 438 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno]; 439 440 /* If the ring is active, flag it */ 441 if (phba->sli.ring[ringno].cmdringaddr) { 442 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) { 443 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX; 444 /* 445 * Force update of the local copy of cmdGetInx 446 */ 447 phba->sli.ring[ringno].local_getidx 448 = le32_to_cpu(pgp->cmdGetInx); 449 spin_lock_irq(phba->host->host_lock); 450 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]); 451 spin_unlock_irq(phba->host->host_lock); 452 } 453 } 454 } 455 456 static int 457 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 458 { 459 uint8_t ret; 460 461 switch (mbxCommand) { 462 case MBX_LOAD_SM: 463 case MBX_READ_NV: 464 case MBX_WRITE_NV: 465 case MBX_RUN_BIU_DIAG: 466 case MBX_INIT_LINK: 467 case MBX_DOWN_LINK: 468 case MBX_CONFIG_LINK: 469 case MBX_CONFIG_RING: 470 case MBX_RESET_RING: 471 case MBX_READ_CONFIG: 472 case MBX_READ_RCONFIG: 473 case MBX_READ_SPARM: 474 case MBX_READ_STATUS: 475 case MBX_READ_RPI: 476 case MBX_READ_XRI: 477 case MBX_READ_REV: 478 case MBX_READ_LNK_STAT: 479 case MBX_REG_LOGIN: 480 case MBX_UNREG_LOGIN: 481 case MBX_READ_LA: 482 case MBX_CLEAR_LA: 483 case MBX_DUMP_MEMORY: 484 case MBX_DUMP_CONTEXT: 485 case MBX_RUN_DIAGS: 486 case MBX_RESTART: 487 case MBX_UPDATE_CFG: 488 case MBX_DOWN_LOAD: 489 case MBX_DEL_LD_ENTRY: 490 case MBX_RUN_PROGRAM: 491 case MBX_SET_MASK: 492 case MBX_SET_SLIM: 493 case MBX_UNREG_D_ID: 494 case MBX_KILL_BOARD: 495 case MBX_CONFIG_FARP: 496 case MBX_BEACON: 497 case MBX_LOAD_AREA: 498 case MBX_RUN_BIU_DIAG64: 499 case MBX_CONFIG_PORT: 500 case MBX_READ_SPARM64: 501 case MBX_READ_RPI64: 502 case MBX_REG_LOGIN64: 503 case MBX_READ_LA64: 504 case MBX_FLASH_WR_ULA: 505 case MBX_SET_DEBUG: 506 case MBX_LOAD_EXP_ROM: 507 ret = mbxCommand; 508 break; 509 default: 510 ret = MBX_SHUTDOWN; 511 break; 512 } 513 return (ret); 514 } 515 static void 516 lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 517 { 518 wait_queue_head_t *pdone_q; 519 520 /* 521 * If pdone_q is empty, the driver thread gave up waiting and 522 * continued running. 523 */ 524 pdone_q = (wait_queue_head_t *) pmboxq->context1; 525 if (pdone_q) 526 wake_up_interruptible(pdone_q); 527 return; 528 } 529 530 void 531 lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 532 { 533 struct lpfc_dmabuf *mp; 534 mp = (struct lpfc_dmabuf *) (pmb->context1); 535 if (mp) { 536 lpfc_mbuf_free(phba, mp->virt, mp->phys); 537 kfree(mp); 538 } 539 mempool_free( pmb, phba->mbox_mem_pool); 540 return; 541 } 542 543 int 544 lpfc_sli_handle_mb_event(struct lpfc_hba * phba) 545 { 546 MAILBOX_t *mbox; 547 MAILBOX_t *pmbox; 548 LPFC_MBOXQ_t *pmb; 549 struct lpfc_sli *psli; 550 int i, rc; 551 uint32_t process_next; 552 553 psli = &phba->sli; 554 /* We should only get here if we are in SLI2 mode */ 555 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) { 556 return (1); 557 } 558 559 phba->sli.slistat.mbox_event++; 560 561 /* Get a Mailbox buffer to setup mailbox commands for callback */ 562 if ((pmb = phba->sli.mbox_active)) { 563 pmbox = &pmb->mb; 564 mbox = &phba->slim2p->mbx; 565 566 /* First check out the status word */ 567 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t)); 568 569 /* Sanity check to ensure the host owns the mailbox */ 570 if (pmbox->mbxOwner != OWN_HOST) { 571 /* Lets try for a while */ 572 for (i = 0; i < 10240; i++) { 573 /* First copy command data */ 574 lpfc_sli_pcimem_bcopy(mbox, pmbox, 575 sizeof (uint32_t)); 576 if (pmbox->mbxOwner == OWN_HOST) 577 goto mbout; 578 } 579 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus 580 <status> */ 581 lpfc_printf_log(phba, 582 KERN_WARNING, 583 LOG_MBOX | LOG_SLI, 584 "%d:0304 Stray Mailbox Interrupt " 585 "mbxCommand x%x mbxStatus x%x\n", 586 phba->brd_no, 587 pmbox->mbxCommand, 588 pmbox->mbxStatus); 589 590 spin_lock_irq(phba->host->host_lock); 591 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 592 spin_unlock_irq(phba->host->host_lock); 593 return (1); 594 } 595 596 mbout: 597 del_timer_sync(&phba->sli.mbox_tmo); 598 phba->work_hba_events &= ~WORKER_MBOX_TMO; 599 600 /* 601 * It is a fatal error if unknown mbox command completion. 602 */ 603 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 604 MBX_SHUTDOWN) { 605 606 /* Unknow mailbox command compl */ 607 lpfc_printf_log(phba, 608 KERN_ERR, 609 LOG_MBOX | LOG_SLI, 610 "%d:0323 Unknown Mailbox command %x Cmpl\n", 611 phba->brd_no, 612 pmbox->mbxCommand); 613 phba->hba_state = LPFC_HBA_ERROR; 614 phba->work_hs = HS_FFER3; 615 lpfc_handle_eratt(phba); 616 return (0); 617 } 618 619 phba->sli.mbox_active = NULL; 620 if (pmbox->mbxStatus) { 621 phba->sli.slistat.mbox_stat_err++; 622 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 623 /* Mbox cmd cmpl error - RETRYing */ 624 lpfc_printf_log(phba, 625 KERN_INFO, 626 LOG_MBOX | LOG_SLI, 627 "%d:0305 Mbox cmd cmpl error - " 628 "RETRYing Data: x%x x%x x%x x%x\n", 629 phba->brd_no, 630 pmbox->mbxCommand, 631 pmbox->mbxStatus, 632 pmbox->un.varWords[0], 633 phba->hba_state); 634 pmbox->mbxStatus = 0; 635 pmbox->mbxOwner = OWN_HOST; 636 spin_lock_irq(phba->host->host_lock); 637 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 638 spin_unlock_irq(phba->host->host_lock); 639 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 640 if (rc == MBX_SUCCESS) 641 return (0); 642 } 643 } 644 645 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 646 lpfc_printf_log(phba, 647 KERN_INFO, 648 LOG_MBOX | LOG_SLI, 649 "%d:0307 Mailbox cmd x%x Cmpl x%p " 650 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 651 phba->brd_no, 652 pmbox->mbxCommand, 653 pmb->mbox_cmpl, 654 *((uint32_t *) pmbox), 655 pmbox->un.varWords[0], 656 pmbox->un.varWords[1], 657 pmbox->un.varWords[2], 658 pmbox->un.varWords[3], 659 pmbox->un.varWords[4], 660 pmbox->un.varWords[5], 661 pmbox->un.varWords[6], 662 pmbox->un.varWords[7]); 663 664 if (pmb->mbox_cmpl) { 665 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE); 666 pmb->mbox_cmpl(phba,pmb); 667 } 668 } 669 670 671 do { 672 process_next = 0; /* by default don't loop */ 673 spin_lock_irq(phba->host->host_lock); 674 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 675 676 /* Process next mailbox command if there is one */ 677 if ((pmb = lpfc_mbox_get(phba))) { 678 spin_unlock_irq(phba->host->host_lock); 679 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 680 if (rc == MBX_NOT_FINISHED) { 681 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 682 pmb->mbox_cmpl(phba,pmb); 683 process_next = 1; 684 continue; /* loop back */ 685 } 686 } else { 687 spin_unlock_irq(phba->host->host_lock); 688 /* Turn on IOCB processing */ 689 for (i = 0; i < phba->sli.num_rings; i++) { 690 lpfc_sli_turn_on_ring(phba, i); 691 } 692 693 /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */ 694 while (!list_empty(&phba->freebufList)) { 695 struct lpfc_dmabuf *mp; 696 697 mp = NULL; 698 list_remove_head((&phba->freebufList), 699 mp, 700 struct lpfc_dmabuf, 701 list); 702 if (mp) { 703 lpfc_mbuf_free(phba, mp->virt, 704 mp->phys); 705 kfree(mp); 706 } 707 } 708 } 709 710 } while (process_next); 711 712 return (0); 713 } 714 static int 715 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 716 struct lpfc_iocbq *saveq) 717 { 718 IOCB_t * irsp; 719 WORD5 * w5p; 720 uint32_t Rctl, Type; 721 uint32_t match, i; 722 723 match = 0; 724 irsp = &(saveq->iocb); 725 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) 726 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) { 727 Rctl = FC_ELS_REQ; 728 Type = FC_ELS_DATA; 729 } else { 730 w5p = 731 (WORD5 *) & (saveq->iocb.un. 732 ulpWord[5]); 733 Rctl = w5p->hcsw.Rctl; 734 Type = w5p->hcsw.Type; 735 736 /* Firmware Workaround */ 737 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 738 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) { 739 Rctl = FC_ELS_REQ; 740 Type = FC_ELS_DATA; 741 w5p->hcsw.Rctl = Rctl; 742 w5p->hcsw.Type = Type; 743 } 744 } 745 /* unSolicited Responses */ 746 if (pring->prt[0].profile) { 747 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 748 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 749 saveq); 750 match = 1; 751 } else { 752 /* We must search, based on rctl / type 753 for the right routine */ 754 for (i = 0; i < pring->num_mask; 755 i++) { 756 if ((pring->prt[i].rctl == 757 Rctl) 758 && (pring->prt[i]. 759 type == Type)) { 760 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 761 (pring->prt[i].lpfc_sli_rcv_unsol_event) 762 (phba, pring, saveq); 763 match = 1; 764 break; 765 } 766 } 767 } 768 if (match == 0) { 769 /* Unexpected Rctl / Type received */ 770 /* Ring <ringno> handler: unexpected 771 Rctl <Rctl> Type <Type> received */ 772 lpfc_printf_log(phba, 773 KERN_WARNING, 774 LOG_SLI, 775 "%d:0313 Ring %d handler: unexpected Rctl x%x " 776 "Type x%x received \n", 777 phba->brd_no, 778 pring->ringno, 779 Rctl, 780 Type); 781 } 782 return(1); 783 } 784 785 static struct lpfc_iocbq * 786 lpfc_sli_iocbq_lookup(struct lpfc_hba * phba, 787 struct lpfc_sli_ring * pring, 788 struct lpfc_iocbq * prspiocb) 789 { 790 struct lpfc_iocbq *cmd_iocb = NULL; 791 uint16_t iotag; 792 793 iotag = prspiocb->iocb.ulpIoTag; 794 795 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 796 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 797 list_del(&cmd_iocb->list); 798 pring->txcmplq_cnt--; 799 return cmd_iocb; 800 } 801 802 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 803 "%d:0317 iotag x%x is out off " 804 "range: max iotag x%x wd0 x%x\n", 805 phba->brd_no, iotag, 806 phba->sli.last_iotag, 807 *(((uint32_t *) &prspiocb->iocb) + 7)); 808 return NULL; 809 } 810 811 static int 812 lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 813 struct lpfc_iocbq *saveq) 814 { 815 struct lpfc_iocbq * cmdiocbp; 816 int rc = 1; 817 unsigned long iflag; 818 819 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 820 spin_lock_irqsave(phba->host->host_lock, iflag); 821 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 822 if (cmdiocbp) { 823 if (cmdiocbp->iocb_cmpl) { 824 /* 825 * Post all ELS completions to the worker thread. 826 * All other are passed to the completion callback. 827 */ 828 if (pring->ringno == LPFC_ELS_RING) { 829 spin_unlock_irqrestore(phba->host->host_lock, 830 iflag); 831 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 832 spin_lock_irqsave(phba->host->host_lock, iflag); 833 } 834 else { 835 spin_unlock_irqrestore(phba->host->host_lock, 836 iflag); 837 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 838 spin_lock_irqsave(phba->host->host_lock, iflag); 839 } 840 } else 841 lpfc_sli_release_iocbq(phba, cmdiocbp); 842 } else { 843 /* 844 * Unknown initiating command based on the response iotag. 845 * This could be the case on the ELS ring because of 846 * lpfc_els_abort(). 847 */ 848 if (pring->ringno != LPFC_ELS_RING) { 849 /* 850 * Ring <ringno> handler: unexpected completion IoTag 851 * <IoTag> 852 */ 853 lpfc_printf_log(phba, 854 KERN_WARNING, 855 LOG_SLI, 856 "%d:0322 Ring %d handler: unexpected " 857 "completion IoTag x%x Data: x%x x%x x%x x%x\n", 858 phba->brd_no, 859 pring->ringno, 860 saveq->iocb.ulpIoTag, 861 saveq->iocb.ulpStatus, 862 saveq->iocb.un.ulpWord[4], 863 saveq->iocb.ulpCommand, 864 saveq->iocb.ulpContext); 865 } 866 } 867 868 spin_unlock_irqrestore(phba->host->host_lock, iflag); 869 return rc; 870 } 871 872 static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba, 873 struct lpfc_sli_ring * pring) 874 { 875 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 876 /* 877 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 878 * rsp ring <portRspMax> 879 */ 880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 881 "%d:0312 Ring %d handler: portRspPut %d " 882 "is bigger then rsp ring %d\n", 883 phba->brd_no, pring->ringno, 884 le32_to_cpu(pgp->rspPutInx), 885 pring->numRiocb); 886 887 phba->hba_state = LPFC_HBA_ERROR; 888 889 /* 890 * All error attention handlers are posted to 891 * worker thread 892 */ 893 phba->work_ha |= HA_ERATT; 894 phba->work_hs = HS_FFER3; 895 if (phba->work_wait) 896 wake_up(phba->work_wait); 897 898 return; 899 } 900 901 void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) 902 { 903 struct lpfc_sli * psli = &phba->sli; 904 struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING]; 905 IOCB_t *irsp = NULL; 906 IOCB_t *entry = NULL; 907 struct lpfc_iocbq *cmdiocbq = NULL; 908 struct lpfc_iocbq rspiocbq; 909 struct lpfc_pgp *pgp; 910 uint32_t status; 911 uint32_t portRspPut, portRspMax; 912 int type; 913 uint32_t rsp_cmpl = 0; 914 void __iomem *to_slim; 915 uint32_t ha_copy; 916 917 pring->stats.iocb_event++; 918 919 /* The driver assumes SLI-2 mode */ 920 pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 921 922 /* 923 * The next available response entry should never exceed the maximum 924 * entries. If it does, treat it as an adapter hardware error. 925 */ 926 portRspMax = pring->numRiocb; 927 portRspPut = le32_to_cpu(pgp->rspPutInx); 928 if (unlikely(portRspPut >= portRspMax)) { 929 lpfc_sli_rsp_pointers_error(phba, pring); 930 return; 931 } 932 933 rmb(); 934 while (pring->rspidx != portRspPut) { 935 936 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 937 938 if (++pring->rspidx >= portRspMax) 939 pring->rspidx = 0; 940 941 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 942 (uint32_t *) &rspiocbq.iocb, 943 sizeof (IOCB_t)); 944 irsp = &rspiocbq.iocb; 945 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 946 pring->stats.iocb_rsp++; 947 rsp_cmpl++; 948 949 if (unlikely(irsp->ulpStatus)) { 950 /* Rsp ring <ringno> error: IOCB */ 951 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 952 "%d:0326 Rsp Ring %d error: IOCB Data: " 953 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 954 phba->brd_no, pring->ringno, 955 irsp->un.ulpWord[0], 956 irsp->un.ulpWord[1], 957 irsp->un.ulpWord[2], 958 irsp->un.ulpWord[3], 959 irsp->un.ulpWord[4], 960 irsp->un.ulpWord[5], 961 *(((uint32_t *) irsp) + 6), 962 *(((uint32_t *) irsp) + 7)); 963 } 964 965 switch (type) { 966 case LPFC_ABORT_IOCB: 967 case LPFC_SOL_IOCB: 968 /* 969 * Idle exchange closed via ABTS from port. No iocb 970 * resources need to be recovered. 971 */ 972 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 973 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 974 "%d:0314 IOCB cmd 0x%x" 975 " processed. Skipping" 976 " completion", phba->brd_no, 977 irsp->ulpCommand); 978 break; 979 } 980 981 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 982 &rspiocbq); 983 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 984 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 985 &rspiocbq); 986 } 987 break; 988 default: 989 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 990 char adaptermsg[LPFC_MAX_ADPTMSG]; 991 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 992 memcpy(&adaptermsg[0], (uint8_t *) irsp, 993 MAX_MSG_DATA); 994 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s", 995 phba->brd_no, adaptermsg); 996 } else { 997 /* Unknown IOCB command */ 998 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 999 "%d:0321 Unknown IOCB command " 1000 "Data: x%x, x%x x%x x%x x%x\n", 1001 phba->brd_no, type, 1002 irsp->ulpCommand, 1003 irsp->ulpStatus, 1004 irsp->ulpIoTag, 1005 irsp->ulpContext); 1006 } 1007 break; 1008 } 1009 1010 /* 1011 * The response IOCB has been processed. Update the ring 1012 * pointer in SLIM. If the port response put pointer has not 1013 * been updated, sync the pgp->rspPutInx and fetch the new port 1014 * response put pointer. 1015 */ 1016 to_slim = phba->MBslimaddr + 1017 (SLIMOFF + (pring->ringno * 2) + 1) * 4; 1018 writeb(pring->rspidx, to_slim); 1019 1020 if (pring->rspidx == portRspPut) 1021 portRspPut = le32_to_cpu(pgp->rspPutInx); 1022 } 1023 1024 ha_copy = readl(phba->HAregaddr); 1025 ha_copy >>= (LPFC_FCP_RING * 4); 1026 1027 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { 1028 pring->stats.iocb_rsp_full++; 1029 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); 1030 writel(status, phba->CAregaddr); 1031 readl(phba->CAregaddr); 1032 } 1033 if ((ha_copy & HA_R0CE_RSP) && 1034 (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1035 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1036 pring->stats.iocb_cmd_empty++; 1037 1038 /* Force update of the local copy of cmdGetInx */ 1039 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1040 lpfc_sli_resume_iocb(phba, pring); 1041 1042 if ((pring->lpfc_sli_cmd_available)) 1043 (pring->lpfc_sli_cmd_available) (phba, pring); 1044 1045 } 1046 1047 return; 1048 } 1049 1050 /* 1051 * This routine presumes LPFC_FCP_RING handling and doesn't bother 1052 * to check it explicitly. 1053 */ 1054 static int 1055 lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, 1056 struct lpfc_sli_ring * pring, uint32_t mask) 1057 { 1058 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1059 IOCB_t *irsp = NULL; 1060 IOCB_t *entry = NULL; 1061 struct lpfc_iocbq *cmdiocbq = NULL; 1062 struct lpfc_iocbq rspiocbq; 1063 uint32_t status; 1064 uint32_t portRspPut, portRspMax; 1065 int rc = 1; 1066 lpfc_iocb_type type; 1067 unsigned long iflag; 1068 uint32_t rsp_cmpl = 0; 1069 void __iomem *to_slim; 1070 1071 spin_lock_irqsave(phba->host->host_lock, iflag); 1072 pring->stats.iocb_event++; 1073 1074 /* 1075 * The next available response entry should never exceed the maximum 1076 * entries. If it does, treat it as an adapter hardware error. 1077 */ 1078 portRspMax = pring->numRiocb; 1079 portRspPut = le32_to_cpu(pgp->rspPutInx); 1080 if (unlikely(portRspPut >= portRspMax)) { 1081 lpfc_sli_rsp_pointers_error(phba, pring); 1082 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1083 return 1; 1084 } 1085 1086 rmb(); 1087 while (pring->rspidx != portRspPut) { 1088 /* 1089 * Fetch an entry off the ring and copy it into a local data 1090 * structure. The copy involves a byte-swap since the 1091 * network byte order and pci byte orders are different. 1092 */ 1093 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1094 1095 if (++pring->rspidx >= portRspMax) 1096 pring->rspidx = 0; 1097 1098 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1099 (uint32_t *) &rspiocbq.iocb, 1100 sizeof (IOCB_t)); 1101 irsp = &rspiocbq.iocb; 1102 1103 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1104 pring->stats.iocb_rsp++; 1105 rsp_cmpl++; 1106 1107 if (unlikely(irsp->ulpStatus)) { 1108 /* Rsp ring <ringno> error: IOCB */ 1109 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1110 "%d:0336 Rsp Ring %d error: IOCB Data: " 1111 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1112 phba->brd_no, pring->ringno, 1113 irsp->un.ulpWord[0], irsp->un.ulpWord[1], 1114 irsp->un.ulpWord[2], irsp->un.ulpWord[3], 1115 irsp->un.ulpWord[4], irsp->un.ulpWord[5], 1116 *(((uint32_t *) irsp) + 6), 1117 *(((uint32_t *) irsp) + 7)); 1118 } 1119 1120 switch (type) { 1121 case LPFC_ABORT_IOCB: 1122 case LPFC_SOL_IOCB: 1123 /* 1124 * Idle exchange closed via ABTS from port. No iocb 1125 * resources need to be recovered. 1126 */ 1127 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 1128 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1129 "%d:0333 IOCB cmd 0x%x" 1130 " processed. Skipping" 1131 " completion\n", phba->brd_no, 1132 irsp->ulpCommand); 1133 break; 1134 } 1135 1136 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1137 &rspiocbq); 1138 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1139 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1140 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1141 &rspiocbq); 1142 } else { 1143 spin_unlock_irqrestore( 1144 phba->host->host_lock, iflag); 1145 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1146 &rspiocbq); 1147 spin_lock_irqsave(phba->host->host_lock, 1148 iflag); 1149 } 1150 } 1151 break; 1152 default: 1153 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1154 char adaptermsg[LPFC_MAX_ADPTMSG]; 1155 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 1156 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1157 MAX_MSG_DATA); 1158 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s", 1159 phba->brd_no, adaptermsg); 1160 } else { 1161 /* Unknown IOCB command */ 1162 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1163 "%d:0334 Unknown IOCB command " 1164 "Data: x%x, x%x x%x x%x x%x\n", 1165 phba->brd_no, type, irsp->ulpCommand, 1166 irsp->ulpStatus, irsp->ulpIoTag, 1167 irsp->ulpContext); 1168 } 1169 break; 1170 } 1171 1172 /* 1173 * The response IOCB has been processed. Update the ring 1174 * pointer in SLIM. If the port response put pointer has not 1175 * been updated, sync the pgp->rspPutInx and fetch the new port 1176 * response put pointer. 1177 */ 1178 to_slim = phba->MBslimaddr + 1179 (SLIMOFF + (pring->ringno * 2) + 1) * 4; 1180 writel(pring->rspidx, to_slim); 1181 1182 if (pring->rspidx == portRspPut) 1183 portRspPut = le32_to_cpu(pgp->rspPutInx); 1184 } 1185 1186 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 1187 pring->stats.iocb_rsp_full++; 1188 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 1189 writel(status, phba->CAregaddr); 1190 readl(phba->CAregaddr); 1191 } 1192 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1193 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1194 pring->stats.iocb_cmd_empty++; 1195 1196 /* Force update of the local copy of cmdGetInx */ 1197 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1198 lpfc_sli_resume_iocb(phba, pring); 1199 1200 if ((pring->lpfc_sli_cmd_available)) 1201 (pring->lpfc_sli_cmd_available) (phba, pring); 1202 1203 } 1204 1205 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1206 return rc; 1207 } 1208 1209 1210 int 1211 lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, 1212 struct lpfc_sli_ring * pring, uint32_t mask) 1213 { 1214 IOCB_t *entry; 1215 IOCB_t *irsp = NULL; 1216 struct lpfc_iocbq *rspiocbp = NULL; 1217 struct lpfc_iocbq *next_iocb; 1218 struct lpfc_iocbq *cmdiocbp; 1219 struct lpfc_iocbq *saveq; 1220 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1221 uint8_t iocb_cmd_type; 1222 lpfc_iocb_type type; 1223 uint32_t status, free_saveq; 1224 uint32_t portRspPut, portRspMax; 1225 int rc = 1; 1226 unsigned long iflag; 1227 void __iomem *to_slim; 1228 1229 spin_lock_irqsave(phba->host->host_lock, iflag); 1230 pring->stats.iocb_event++; 1231 1232 /* 1233 * The next available response entry should never exceed the maximum 1234 * entries. If it does, treat it as an adapter hardware error. 1235 */ 1236 portRspMax = pring->numRiocb; 1237 portRspPut = le32_to_cpu(pgp->rspPutInx); 1238 if (portRspPut >= portRspMax) { 1239 /* 1240 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 1241 * rsp ring <portRspMax> 1242 */ 1243 lpfc_printf_log(phba, 1244 KERN_ERR, 1245 LOG_SLI, 1246 "%d:0303 Ring %d handler: portRspPut %d " 1247 "is bigger then rsp ring %d\n", 1248 phba->brd_no, 1249 pring->ringno, portRspPut, portRspMax); 1250 1251 phba->hba_state = LPFC_HBA_ERROR; 1252 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1253 1254 phba->work_hs = HS_FFER3; 1255 lpfc_handle_eratt(phba); 1256 1257 return 1; 1258 } 1259 1260 rmb(); 1261 while (pring->rspidx != portRspPut) { 1262 /* 1263 * Build a completion list and call the appropriate handler. 1264 * The process is to get the next available response iocb, get 1265 * a free iocb from the list, copy the response data into the 1266 * free iocb, insert to the continuation list, and update the 1267 * next response index to slim. This process makes response 1268 * iocb's in the ring available to DMA as fast as possible but 1269 * pays a penalty for a copy operation. Since the iocb is 1270 * only 32 bytes, this penalty is considered small relative to 1271 * the PCI reads for register values and a slim write. When 1272 * the ulpLe field is set, the entire Command has been 1273 * received. 1274 */ 1275 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1276 rspiocbp = lpfc_sli_get_iocbq(phba); 1277 if (rspiocbp == NULL) { 1278 printk(KERN_ERR "%s: out of buffers! Failing " 1279 "completion.\n", __FUNCTION__); 1280 break; 1281 } 1282 1283 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t)); 1284 irsp = &rspiocbp->iocb; 1285 1286 if (++pring->rspidx >= portRspMax) 1287 pring->rspidx = 0; 1288 1289 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2) 1290 + 1) * 4; 1291 writel(pring->rspidx, to_slim); 1292 1293 if (list_empty(&(pring->iocb_continueq))) { 1294 list_add(&rspiocbp->list, &(pring->iocb_continueq)); 1295 } else { 1296 list_add_tail(&rspiocbp->list, 1297 &(pring->iocb_continueq)); 1298 } 1299 1300 pring->iocb_continueq_cnt++; 1301 if (irsp->ulpLe) { 1302 /* 1303 * By default, the driver expects to free all resources 1304 * associated with this iocb completion. 1305 */ 1306 free_saveq = 1; 1307 saveq = list_get_first(&pring->iocb_continueq, 1308 struct lpfc_iocbq, list); 1309 irsp = &(saveq->iocb); 1310 list_del_init(&pring->iocb_continueq); 1311 pring->iocb_continueq_cnt = 0; 1312 1313 pring->stats.iocb_rsp++; 1314 1315 if (irsp->ulpStatus) { 1316 /* Rsp ring <ringno> error: IOCB */ 1317 lpfc_printf_log(phba, 1318 KERN_WARNING, 1319 LOG_SLI, 1320 "%d:0328 Rsp Ring %d error: IOCB Data: " 1321 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1322 phba->brd_no, 1323 pring->ringno, 1324 irsp->un.ulpWord[0], 1325 irsp->un.ulpWord[1], 1326 irsp->un.ulpWord[2], 1327 irsp->un.ulpWord[3], 1328 irsp->un.ulpWord[4], 1329 irsp->un.ulpWord[5], 1330 *(((uint32_t *) irsp) + 6), 1331 *(((uint32_t *) irsp) + 7)); 1332 } 1333 1334 /* 1335 * Fetch the IOCB command type and call the correct 1336 * completion routine. Solicited and Unsolicited 1337 * IOCBs on the ELS ring get freed back to the 1338 * lpfc_iocb_list by the discovery kernel thread. 1339 */ 1340 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 1341 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 1342 if (type == LPFC_SOL_IOCB) { 1343 spin_unlock_irqrestore(phba->host->host_lock, 1344 iflag); 1345 rc = lpfc_sli_process_sol_iocb(phba, pring, 1346 saveq); 1347 spin_lock_irqsave(phba->host->host_lock, iflag); 1348 } else if (type == LPFC_UNSOL_IOCB) { 1349 spin_unlock_irqrestore(phba->host->host_lock, 1350 iflag); 1351 rc = lpfc_sli_process_unsol_iocb(phba, pring, 1352 saveq); 1353 spin_lock_irqsave(phba->host->host_lock, iflag); 1354 } else if (type == LPFC_ABORT_IOCB) { 1355 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && 1356 ((cmdiocbp = 1357 lpfc_sli_iocbq_lookup(phba, pring, 1358 saveq)))) { 1359 /* Call the specified completion 1360 routine */ 1361 if (cmdiocbp->iocb_cmpl) { 1362 spin_unlock_irqrestore( 1363 phba->host->host_lock, 1364 iflag); 1365 (cmdiocbp->iocb_cmpl) (phba, 1366 cmdiocbp, saveq); 1367 spin_lock_irqsave( 1368 phba->host->host_lock, 1369 iflag); 1370 } else 1371 lpfc_sli_release_iocbq(phba, 1372 cmdiocbp); 1373 } 1374 } else if (type == LPFC_UNKNOWN_IOCB) { 1375 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1376 1377 char adaptermsg[LPFC_MAX_ADPTMSG]; 1378 1379 memset(adaptermsg, 0, 1380 LPFC_MAX_ADPTMSG); 1381 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1382 MAX_MSG_DATA); 1383 dev_warn(&((phba->pcidev)->dev), 1384 "lpfc%d: %s", 1385 phba->brd_no, adaptermsg); 1386 } else { 1387 /* Unknown IOCB command */ 1388 lpfc_printf_log(phba, 1389 KERN_ERR, 1390 LOG_SLI, 1391 "%d:0335 Unknown IOCB command " 1392 "Data: x%x x%x x%x x%x\n", 1393 phba->brd_no, 1394 irsp->ulpCommand, 1395 irsp->ulpStatus, 1396 irsp->ulpIoTag, 1397 irsp->ulpContext); 1398 } 1399 } 1400 1401 if (free_saveq) { 1402 if (!list_empty(&saveq->list)) { 1403 list_for_each_entry_safe(rspiocbp, 1404 next_iocb, 1405 &saveq->list, 1406 list) { 1407 list_del(&rspiocbp->list); 1408 lpfc_sli_release_iocbq(phba, 1409 rspiocbp); 1410 } 1411 } 1412 lpfc_sli_release_iocbq(phba, saveq); 1413 } 1414 } 1415 1416 /* 1417 * If the port response put pointer has not been updated, sync 1418 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 1419 * response put pointer. 1420 */ 1421 if (pring->rspidx == portRspPut) { 1422 portRspPut = le32_to_cpu(pgp->rspPutInx); 1423 } 1424 } /* while (pring->rspidx != portRspPut) */ 1425 1426 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) { 1427 /* At least one response entry has been freed */ 1428 pring->stats.iocb_rsp_full++; 1429 /* SET RxRE_RSP in Chip Att register */ 1430 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 1431 writel(status, phba->CAregaddr); 1432 readl(phba->CAregaddr); /* flush */ 1433 } 1434 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1435 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1436 pring->stats.iocb_cmd_empty++; 1437 1438 /* Force update of the local copy of cmdGetInx */ 1439 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1440 lpfc_sli_resume_iocb(phba, pring); 1441 1442 if ((pring->lpfc_sli_cmd_available)) 1443 (pring->lpfc_sli_cmd_available) (phba, pring); 1444 1445 } 1446 1447 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1448 return rc; 1449 } 1450 1451 int 1452 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1453 { 1454 struct lpfc_iocbq *iocb, *next_iocb; 1455 IOCB_t *icmd = NULL, *cmd = NULL; 1456 int errcnt; 1457 1458 errcnt = 0; 1459 1460 /* Error everything on txq and txcmplq 1461 * First do the txq. 1462 */ 1463 spin_lock_irq(phba->host->host_lock); 1464 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 1465 list_del_init(&iocb->list); 1466 if (iocb->iocb_cmpl) { 1467 icmd = &iocb->iocb; 1468 icmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1469 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1470 spin_unlock_irq(phba->host->host_lock); 1471 (iocb->iocb_cmpl) (phba, iocb, iocb); 1472 spin_lock_irq(phba->host->host_lock); 1473 } else 1474 lpfc_sli_release_iocbq(phba, iocb); 1475 } 1476 pring->txq_cnt = 0; 1477 INIT_LIST_HEAD(&(pring->txq)); 1478 1479 /* Next issue ABTS for everything on the txcmplq */ 1480 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1481 cmd = &iocb->iocb; 1482 1483 /* 1484 * Imediate abort of IOCB, deque and call compl 1485 */ 1486 1487 list_del_init(&iocb->list); 1488 pring->txcmplq_cnt--; 1489 1490 if (iocb->iocb_cmpl) { 1491 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1492 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1493 spin_unlock_irq(phba->host->host_lock); 1494 (iocb->iocb_cmpl) (phba, iocb, iocb); 1495 spin_lock_irq(phba->host->host_lock); 1496 } else 1497 lpfc_sli_release_iocbq(phba, iocb); 1498 } 1499 1500 INIT_LIST_HEAD(&pring->txcmplq); 1501 pring->txcmplq_cnt = 0; 1502 spin_unlock_irq(phba->host->host_lock); 1503 1504 return errcnt; 1505 } 1506 1507 int 1508 lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) 1509 { 1510 uint32_t status; 1511 int i = 0; 1512 int retval = 0; 1513 1514 /* Read the HBA Host Status Register */ 1515 status = readl(phba->HSregaddr); 1516 1517 /* 1518 * Check status register every 100ms for 5 retries, then every 1519 * 500ms for 5, then every 2.5 sec for 5, then reset board and 1520 * every 2.5 sec for 4. 1521 * Break our of the loop if errors occurred during init. 1522 */ 1523 while (((status & mask) != mask) && 1524 !(status & HS_FFERM) && 1525 i++ < 20) { 1526 1527 if (i <= 5) 1528 msleep(10); 1529 else if (i <= 10) 1530 msleep(500); 1531 else 1532 msleep(2500); 1533 1534 if (i == 15) { 1535 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ 1536 lpfc_sli_brdrestart(phba); 1537 } 1538 /* Read the HBA Host Status Register */ 1539 status = readl(phba->HSregaddr); 1540 } 1541 1542 /* Check to see if any errors occurred during init */ 1543 if ((status & HS_FFERM) || (i >= 20)) { 1544 phba->hba_state = LPFC_HBA_ERROR; 1545 retval = 1; 1546 } 1547 1548 return retval; 1549 } 1550 1551 #define BARRIER_TEST_PATTERN (0xdeadbeef) 1552 1553 void lpfc_reset_barrier(struct lpfc_hba * phba) 1554 { 1555 uint32_t __iomem *resp_buf; 1556 uint32_t __iomem *mbox_buf; 1557 volatile uint32_t mbox; 1558 uint32_t hc_copy; 1559 int i; 1560 uint8_t hdrtype; 1561 1562 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 1563 if (hdrtype != 0x80 || 1564 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 1565 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 1566 return; 1567 1568 /* 1569 * Tell the other part of the chip to suspend temporarily all 1570 * its DMA activity. 1571 */ 1572 resp_buf = phba->MBslimaddr; 1573 1574 /* Disable the error attention */ 1575 hc_copy = readl(phba->HCregaddr); 1576 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 1577 readl(phba->HCregaddr); /* flush */ 1578 1579 if (readl(phba->HAregaddr) & HA_ERATT) { 1580 /* Clear Chip error bit */ 1581 writel(HA_ERATT, phba->HAregaddr); 1582 phba->stopped = 1; 1583 } 1584 1585 mbox = 0; 1586 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 1587 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 1588 1589 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 1590 mbox_buf = phba->MBslimaddr; 1591 writel(mbox, mbox_buf); 1592 1593 for (i = 0; 1594 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 1595 mdelay(1); 1596 1597 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 1598 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 1599 phba->stopped) 1600 goto restore_hc; 1601 else 1602 goto clear_errat; 1603 } 1604 1605 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 1606 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 1607 mdelay(1); 1608 1609 clear_errat: 1610 1611 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 1612 mdelay(1); 1613 1614 if (readl(phba->HAregaddr) & HA_ERATT) { 1615 writel(HA_ERATT, phba->HAregaddr); 1616 phba->stopped = 1; 1617 } 1618 1619 restore_hc: 1620 writel(hc_copy, phba->HCregaddr); 1621 readl(phba->HCregaddr); /* flush */ 1622 } 1623 1624 int 1625 lpfc_sli_brdkill(struct lpfc_hba * phba) 1626 { 1627 struct lpfc_sli *psli; 1628 LPFC_MBOXQ_t *pmb; 1629 uint32_t status; 1630 uint32_t ha_copy; 1631 int retval; 1632 int i = 0; 1633 1634 psli = &phba->sli; 1635 1636 /* Kill HBA */ 1637 lpfc_printf_log(phba, 1638 KERN_INFO, 1639 LOG_SLI, 1640 "%d:0329 Kill HBA Data: x%x x%x\n", 1641 phba->brd_no, 1642 phba->hba_state, 1643 psli->sli_flag); 1644 1645 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1646 GFP_KERNEL)) == 0) 1647 return 1; 1648 1649 /* Disable the error attention */ 1650 spin_lock_irq(phba->host->host_lock); 1651 status = readl(phba->HCregaddr); 1652 status &= ~HC_ERINT_ENA; 1653 writel(status, phba->HCregaddr); 1654 readl(phba->HCregaddr); /* flush */ 1655 spin_unlock_irq(phba->host->host_lock); 1656 1657 lpfc_kill_board(phba, pmb); 1658 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1659 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1660 1661 if (retval != MBX_SUCCESS) { 1662 if (retval != MBX_BUSY) 1663 mempool_free(pmb, phba->mbox_mem_pool); 1664 return 1; 1665 } 1666 1667 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1668 1669 mempool_free(pmb, phba->mbox_mem_pool); 1670 1671 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 1672 * attention every 100ms for 3 seconds. If we don't get ERATT after 1673 * 3 seconds we still set HBA_ERROR state because the status of the 1674 * board is now undefined. 1675 */ 1676 ha_copy = readl(phba->HAregaddr); 1677 1678 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 1679 mdelay(100); 1680 ha_copy = readl(phba->HAregaddr); 1681 } 1682 1683 del_timer_sync(&psli->mbox_tmo); 1684 if (ha_copy & HA_ERATT) { 1685 writel(HA_ERATT, phba->HAregaddr); 1686 phba->stopped = 1; 1687 } 1688 spin_lock_irq(phba->host->host_lock); 1689 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1690 spin_unlock_irq(phba->host->host_lock); 1691 1692 psli->mbox_active = NULL; 1693 lpfc_hba_down_post(phba); 1694 phba->hba_state = LPFC_HBA_ERROR; 1695 1696 return (ha_copy & HA_ERATT ? 0 : 1); 1697 } 1698 1699 int 1700 lpfc_sli_brdreset(struct lpfc_hba * phba) 1701 { 1702 struct lpfc_sli *psli; 1703 struct lpfc_sli_ring *pring; 1704 uint16_t cfg_value; 1705 int i; 1706 1707 psli = &phba->sli; 1708 1709 /* Reset HBA */ 1710 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1711 "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no, 1712 phba->hba_state, psli->sli_flag); 1713 1714 /* perform board reset */ 1715 phba->fc_eventTag = 0; 1716 phba->fc_myDID = 0; 1717 phba->fc_prevDID = 0; 1718 1719 /* Turn off parity checking and serr during the physical reset */ 1720 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 1721 pci_write_config_word(phba->pcidev, PCI_COMMAND, 1722 (cfg_value & 1723 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 1724 1725 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 1726 /* Now toggle INITFF bit in the Host Control Register */ 1727 writel(HC_INITFF, phba->HCregaddr); 1728 mdelay(1); 1729 readl(phba->HCregaddr); /* flush */ 1730 writel(0, phba->HCregaddr); 1731 readl(phba->HCregaddr); /* flush */ 1732 1733 /* Restore PCI cmd register */ 1734 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 1735 1736 /* Initialize relevant SLI info */ 1737 for (i = 0; i < psli->num_rings; i++) { 1738 pring = &psli->ring[i]; 1739 pring->flag = 0; 1740 pring->rspidx = 0; 1741 pring->next_cmdidx = 0; 1742 pring->local_getidx = 0; 1743 pring->cmdidx = 0; 1744 pring->missbufcnt = 0; 1745 } 1746 1747 phba->hba_state = LPFC_WARM_START; 1748 return 0; 1749 } 1750 1751 int 1752 lpfc_sli_brdrestart(struct lpfc_hba * phba) 1753 { 1754 MAILBOX_t *mb; 1755 struct lpfc_sli *psli; 1756 uint16_t skip_post; 1757 volatile uint32_t word0; 1758 void __iomem *to_slim; 1759 1760 spin_lock_irq(phba->host->host_lock); 1761 1762 psli = &phba->sli; 1763 1764 /* Restart HBA */ 1765 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1766 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, 1767 phba->hba_state, psli->sli_flag); 1768 1769 word0 = 0; 1770 mb = (MAILBOX_t *) &word0; 1771 mb->mbxCommand = MBX_RESTART; 1772 mb->mbxHc = 1; 1773 1774 lpfc_reset_barrier(phba); 1775 1776 to_slim = phba->MBslimaddr; 1777 writel(*(uint32_t *) mb, to_slim); 1778 readl(to_slim); /* flush */ 1779 1780 /* Only skip post after fc_ffinit is completed */ 1781 if (phba->hba_state) { 1782 skip_post = 1; 1783 word0 = 1; /* This is really setting up word1 */ 1784 } else { 1785 skip_post = 0; 1786 word0 = 0; /* This is really setting up word1 */ 1787 } 1788 to_slim = phba->MBslimaddr + sizeof (uint32_t); 1789 writel(*(uint32_t *) mb, to_slim); 1790 readl(to_slim); /* flush */ 1791 1792 lpfc_sli_brdreset(phba); 1793 phba->stopped = 0; 1794 phba->hba_state = LPFC_INIT_START; 1795 1796 spin_unlock_irq(phba->host->host_lock); 1797 1798 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 1799 psli->stats_start = get_seconds(); 1800 1801 if (skip_post) 1802 mdelay(100); 1803 else 1804 mdelay(2000); 1805 1806 lpfc_hba_down_post(phba); 1807 1808 return 0; 1809 } 1810 1811 static int 1812 lpfc_sli_chipset_init(struct lpfc_hba *phba) 1813 { 1814 uint32_t status, i = 0; 1815 1816 /* Read the HBA Host Status Register */ 1817 status = readl(phba->HSregaddr); 1818 1819 /* Check status register to see what current state is */ 1820 i = 0; 1821 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 1822 1823 /* Check every 100ms for 5 retries, then every 500ms for 5, then 1824 * every 2.5 sec for 5, then reset board and every 2.5 sec for 1825 * 4. 1826 */ 1827 if (i++ >= 20) { 1828 /* Adapter failed to init, timeout, status reg 1829 <status> */ 1830 lpfc_printf_log(phba, 1831 KERN_ERR, 1832 LOG_INIT, 1833 "%d:0436 Adapter failed to init, " 1834 "timeout, status reg x%x\n", 1835 phba->brd_no, 1836 status); 1837 phba->hba_state = LPFC_HBA_ERROR; 1838 return -ETIMEDOUT; 1839 } 1840 1841 /* Check to see if any errors occurred during init */ 1842 if (status & HS_FFERM) { 1843 /* ERROR: During chipset initialization */ 1844 /* Adapter failed to init, chipset, status reg 1845 <status> */ 1846 lpfc_printf_log(phba, 1847 KERN_ERR, 1848 LOG_INIT, 1849 "%d:0437 Adapter failed to init, " 1850 "chipset, status reg x%x\n", 1851 phba->brd_no, 1852 status); 1853 phba->hba_state = LPFC_HBA_ERROR; 1854 return -EIO; 1855 } 1856 1857 if (i <= 5) { 1858 msleep(10); 1859 } else if (i <= 10) { 1860 msleep(500); 1861 } else { 1862 msleep(2500); 1863 } 1864 1865 if (i == 15) { 1866 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ 1867 lpfc_sli_brdrestart(phba); 1868 } 1869 /* Read the HBA Host Status Register */ 1870 status = readl(phba->HSregaddr); 1871 } 1872 1873 /* Check to see if any errors occurred during init */ 1874 if (status & HS_FFERM) { 1875 /* ERROR: During chipset initialization */ 1876 /* Adapter failed to init, chipset, status reg <status> */ 1877 lpfc_printf_log(phba, 1878 KERN_ERR, 1879 LOG_INIT, 1880 "%d:0438 Adapter failed to init, chipset, " 1881 "status reg x%x\n", 1882 phba->brd_no, 1883 status); 1884 phba->hba_state = LPFC_HBA_ERROR; 1885 return -EIO; 1886 } 1887 1888 /* Clear all interrupt enable conditions */ 1889 writel(0, phba->HCregaddr); 1890 readl(phba->HCregaddr); /* flush */ 1891 1892 /* setup host attn register */ 1893 writel(0xffffffff, phba->HAregaddr); 1894 readl(phba->HAregaddr); /* flush */ 1895 return 0; 1896 } 1897 1898 int 1899 lpfc_sli_hba_setup(struct lpfc_hba * phba) 1900 { 1901 LPFC_MBOXQ_t *pmb; 1902 uint32_t resetcount = 0, rc = 0, done = 0; 1903 1904 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1905 if (!pmb) { 1906 phba->hba_state = LPFC_HBA_ERROR; 1907 return -ENOMEM; 1908 } 1909 1910 while (resetcount < 2 && !done) { 1911 spin_lock_irq(phba->host->host_lock); 1912 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 1913 spin_unlock_irq(phba->host->host_lock); 1914 phba->hba_state = LPFC_STATE_UNKNOWN; 1915 lpfc_sli_brdrestart(phba); 1916 msleep(2500); 1917 rc = lpfc_sli_chipset_init(phba); 1918 if (rc) 1919 break; 1920 1921 spin_lock_irq(phba->host->host_lock); 1922 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1923 spin_unlock_irq(phba->host->host_lock); 1924 resetcount++; 1925 1926 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 1927 * means the call was successful. Any other nonzero value is a failure, 1928 * but if ERESTART is returned, the driver may reset the HBA and try 1929 * again. 1930 */ 1931 rc = lpfc_config_port_prep(phba); 1932 if (rc == -ERESTART) { 1933 phba->hba_state = 0; 1934 continue; 1935 } else if (rc) { 1936 break; 1937 } 1938 1939 phba->hba_state = LPFC_INIT_MBX_CMDS; 1940 lpfc_config_port(phba, pmb); 1941 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1942 if (rc == MBX_SUCCESS) 1943 done = 1; 1944 else { 1945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1946 "%d:0442 Adapter failed to init, mbxCmd x%x " 1947 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 1948 phba->brd_no, pmb->mb.mbxCommand, 1949 pmb->mb.mbxStatus, 0); 1950 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 1951 } 1952 } 1953 if (!done) 1954 goto lpfc_sli_hba_setup_error; 1955 1956 rc = lpfc_sli_ring_map(phba, pmb); 1957 1958 if (rc) 1959 goto lpfc_sli_hba_setup_error; 1960 1961 phba->sli.sli_flag |= LPFC_PROCESS_LA; 1962 1963 rc = lpfc_config_port_post(phba); 1964 if (rc) 1965 goto lpfc_sli_hba_setup_error; 1966 1967 goto lpfc_sli_hba_setup_exit; 1968 lpfc_sli_hba_setup_error: 1969 phba->hba_state = LPFC_HBA_ERROR; 1970 lpfc_sli_hba_setup_exit: 1971 mempool_free(pmb, phba->mbox_mem_pool); 1972 return rc; 1973 } 1974 1975 static void 1976 lpfc_mbox_abort(struct lpfc_hba * phba) 1977 { 1978 LPFC_MBOXQ_t *pmbox; 1979 MAILBOX_t *mb; 1980 1981 if (phba->sli.mbox_active) { 1982 del_timer_sync(&phba->sli.mbox_tmo); 1983 phba->work_hba_events &= ~WORKER_MBOX_TMO; 1984 pmbox = phba->sli.mbox_active; 1985 mb = &pmbox->mb; 1986 phba->sli.mbox_active = NULL; 1987 if (pmbox->mbox_cmpl) { 1988 mb->mbxStatus = MBX_NOT_FINISHED; 1989 (pmbox->mbox_cmpl) (phba, pmbox); 1990 } 1991 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1992 } 1993 1994 /* Abort all the non active mailbox commands. */ 1995 spin_lock_irq(phba->host->host_lock); 1996 pmbox = lpfc_mbox_get(phba); 1997 while (pmbox) { 1998 mb = &pmbox->mb; 1999 if (pmbox->mbox_cmpl) { 2000 mb->mbxStatus = MBX_NOT_FINISHED; 2001 spin_unlock_irq(phba->host->host_lock); 2002 (pmbox->mbox_cmpl) (phba, pmbox); 2003 spin_lock_irq(phba->host->host_lock); 2004 } 2005 pmbox = lpfc_mbox_get(phba); 2006 } 2007 spin_unlock_irq(phba->host->host_lock); 2008 return; 2009 } 2010 2011 /*! lpfc_mbox_timeout 2012 * 2013 * \pre 2014 * \post 2015 * \param hba Pointer to per struct lpfc_hba structure 2016 * \param l1 Pointer to the driver's mailbox queue. 2017 * \return 2018 * void 2019 * 2020 * \b Description: 2021 * 2022 * This routine handles mailbox timeout events at timer interrupt context. 2023 */ 2024 void 2025 lpfc_mbox_timeout(unsigned long ptr) 2026 { 2027 struct lpfc_hba *phba; 2028 unsigned long iflag; 2029 2030 phba = (struct lpfc_hba *)ptr; 2031 spin_lock_irqsave(phba->host->host_lock, iflag); 2032 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { 2033 phba->work_hba_events |= WORKER_MBOX_TMO; 2034 if (phba->work_wait) 2035 wake_up(phba->work_wait); 2036 } 2037 spin_unlock_irqrestore(phba->host->host_lock, iflag); 2038 } 2039 2040 void 2041 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 2042 { 2043 LPFC_MBOXQ_t *pmbox; 2044 MAILBOX_t *mb; 2045 2046 spin_lock_irq(phba->host->host_lock); 2047 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { 2048 spin_unlock_irq(phba->host->host_lock); 2049 return; 2050 } 2051 2052 phba->work_hba_events &= ~WORKER_MBOX_TMO; 2053 2054 pmbox = phba->sli.mbox_active; 2055 mb = &pmbox->mb; 2056 2057 /* Mbox cmd <mbxCommand> timeout */ 2058 lpfc_printf_log(phba, 2059 KERN_ERR, 2060 LOG_MBOX | LOG_SLI, 2061 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 2062 phba->brd_no, 2063 mb->mbxCommand, 2064 phba->hba_state, 2065 phba->sli.sli_flag, 2066 phba->sli.mbox_active); 2067 2068 phba->sli.mbox_active = NULL; 2069 if (pmbox->mbox_cmpl) { 2070 mb->mbxStatus = MBX_NOT_FINISHED; 2071 spin_unlock_irq(phba->host->host_lock); 2072 (pmbox->mbox_cmpl) (phba, pmbox); 2073 spin_lock_irq(phba->host->host_lock); 2074 } 2075 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2076 2077 spin_unlock_irq(phba->host->host_lock); 2078 lpfc_mbox_abort(phba); 2079 return; 2080 } 2081 2082 int 2083 lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) 2084 { 2085 MAILBOX_t *mb; 2086 struct lpfc_sli *psli; 2087 uint32_t status, evtctr; 2088 uint32_t ha_copy; 2089 int i; 2090 unsigned long drvr_flag = 0; 2091 volatile uint32_t word0, ldata; 2092 void __iomem *to_slim; 2093 2094 psli = &phba->sli; 2095 2096 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2097 2098 2099 mb = &pmbox->mb; 2100 status = MBX_SUCCESS; 2101 2102 if (phba->hba_state == LPFC_HBA_ERROR) { 2103 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2104 2105 /* Mbox command <mbxCommand> cannot issue */ 2106 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2107 return (MBX_NOT_FINISHED); 2108 } 2109 2110 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2111 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2112 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2113 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2114 return (MBX_NOT_FINISHED); 2115 } 2116 2117 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 2118 /* Polling for a mbox command when another one is already active 2119 * is not allowed in SLI. Also, the driver must have established 2120 * SLI2 mode to queue and process multiple mbox commands. 2121 */ 2122 2123 if (flag & MBX_POLL) { 2124 spin_unlock_irqrestore(phba->host->host_lock, 2125 drvr_flag); 2126 2127 /* Mbox command <mbxCommand> cannot issue */ 2128 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2129 return (MBX_NOT_FINISHED); 2130 } 2131 2132 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2133 spin_unlock_irqrestore(phba->host->host_lock, 2134 drvr_flag); 2135 /* Mbox command <mbxCommand> cannot issue */ 2136 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2137 return (MBX_NOT_FINISHED); 2138 } 2139 2140 /* Handle STOP IOCB processing flag. This is only meaningful 2141 * if we are not polling for mbox completion. 2142 */ 2143 if (flag & MBX_STOP_IOCB) { 2144 flag &= ~MBX_STOP_IOCB; 2145 /* Now flag each ring */ 2146 for (i = 0; i < psli->num_rings; i++) { 2147 /* If the ring is active, flag it */ 2148 if (psli->ring[i].cmdringaddr) { 2149 psli->ring[i].flag |= 2150 LPFC_STOP_IOCB_MBX; 2151 } 2152 } 2153 } 2154 2155 /* Another mailbox command is still being processed, queue this 2156 * command to be processed later. 2157 */ 2158 lpfc_mbox_put(phba, pmbox); 2159 2160 /* Mbox cmd issue - BUSY */ 2161 lpfc_printf_log(phba, 2162 KERN_INFO, 2163 LOG_MBOX | LOG_SLI, 2164 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n", 2165 phba->brd_no, 2166 mb->mbxCommand, 2167 phba->hba_state, 2168 psli->sli_flag, 2169 flag); 2170 2171 psli->slistat.mbox_busy++; 2172 spin_unlock_irqrestore(phba->host->host_lock, 2173 drvr_flag); 2174 2175 return (MBX_BUSY); 2176 } 2177 2178 /* Handle STOP IOCB processing flag. This is only meaningful 2179 * if we are not polling for mbox completion. 2180 */ 2181 if (flag & MBX_STOP_IOCB) { 2182 flag &= ~MBX_STOP_IOCB; 2183 if (flag == MBX_NOWAIT) { 2184 /* Now flag each ring */ 2185 for (i = 0; i < psli->num_rings; i++) { 2186 /* If the ring is active, flag it */ 2187 if (psli->ring[i].cmdringaddr) { 2188 psli->ring[i].flag |= 2189 LPFC_STOP_IOCB_MBX; 2190 } 2191 } 2192 } 2193 } 2194 2195 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2196 2197 /* If we are not polling, we MUST be in SLI2 mode */ 2198 if (flag != MBX_POLL) { 2199 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 2200 (mb->mbxCommand != MBX_KILL_BOARD)) { 2201 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2202 spin_unlock_irqrestore(phba->host->host_lock, 2203 drvr_flag); 2204 /* Mbox command <mbxCommand> cannot issue */ 2205 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag); 2206 return (MBX_NOT_FINISHED); 2207 } 2208 /* timeout active mbox command */ 2209 mod_timer(&psli->mbox_tmo, (jiffies + 2210 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 2211 } 2212 2213 /* Mailbox cmd <cmd> issue */ 2214 lpfc_printf_log(phba, 2215 KERN_INFO, 2216 LOG_MBOX | LOG_SLI, 2217 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n", 2218 phba->brd_no, 2219 mb->mbxCommand, 2220 phba->hba_state, 2221 psli->sli_flag, 2222 flag); 2223 2224 psli->slistat.mbox_cmd++; 2225 evtctr = psli->slistat.mbox_event; 2226 2227 /* next set own bit for the adapter and copy over command word */ 2228 mb->mbxOwner = OWN_CHIP; 2229 2230 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2231 /* First copy command data to host SLIM area */ 2232 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); 2233 } else { 2234 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2235 /* copy command data into host mbox for cmpl */ 2236 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2237 MAILBOX_CMD_SIZE); 2238 } 2239 2240 /* First copy mbox command data to HBA SLIM, skip past first 2241 word */ 2242 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2243 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 2244 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 2245 2246 /* Next copy over first word, with mbxOwner set */ 2247 ldata = *((volatile uint32_t *)mb); 2248 to_slim = phba->MBslimaddr; 2249 writel(ldata, to_slim); 2250 readl(to_slim); /* flush */ 2251 2252 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2253 /* switch over to host mailbox */ 2254 psli->sli_flag |= LPFC_SLI2_ACTIVE; 2255 } 2256 } 2257 2258 wmb(); 2259 /* interrupt board to doit right away */ 2260 writel(CA_MBATT, phba->CAregaddr); 2261 readl(phba->CAregaddr); /* flush */ 2262 2263 switch (flag) { 2264 case MBX_NOWAIT: 2265 /* Don't wait for it to finish, just return */ 2266 psli->mbox_active = pmbox; 2267 break; 2268 2269 case MBX_POLL: 2270 psli->mbox_active = NULL; 2271 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2272 /* First read mbox status word */ 2273 word0 = *((volatile uint32_t *)&phba->slim2p->mbx); 2274 word0 = le32_to_cpu(word0); 2275 } else { 2276 /* First read mbox status word */ 2277 word0 = readl(phba->MBslimaddr); 2278 } 2279 2280 /* Read the HBA Host Attention Register */ 2281 ha_copy = readl(phba->HAregaddr); 2282 2283 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); 2284 i *= 1000; /* Convert to ms */ 2285 2286 /* Wait for command to complete */ 2287 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2288 (!(ha_copy & HA_MBATT) && 2289 (phba->hba_state > LPFC_WARM_START))) { 2290 if (i-- <= 0) { 2291 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2292 spin_unlock_irqrestore(phba->host->host_lock, 2293 drvr_flag); 2294 return (MBX_NOT_FINISHED); 2295 } 2296 2297 /* Check if we took a mbox interrupt while we were 2298 polling */ 2299 if (((word0 & OWN_CHIP) != OWN_CHIP) 2300 && (evtctr != psli->slistat.mbox_event)) 2301 break; 2302 2303 spin_unlock_irqrestore(phba->host->host_lock, 2304 drvr_flag); 2305 2306 /* Can be in interrupt context, do not sleep */ 2307 /* (or might be called with interrupts disabled) */ 2308 mdelay(1); 2309 2310 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2311 2312 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2313 /* First copy command data */ 2314 word0 = *((volatile uint32_t *) 2315 &phba->slim2p->mbx); 2316 word0 = le32_to_cpu(word0); 2317 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2318 MAILBOX_t *slimmb; 2319 volatile uint32_t slimword0; 2320 /* Check real SLIM for any errors */ 2321 slimword0 = readl(phba->MBslimaddr); 2322 slimmb = (MAILBOX_t *) & slimword0; 2323 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 2324 && slimmb->mbxStatus) { 2325 psli->sli_flag &= 2326 ~LPFC_SLI2_ACTIVE; 2327 word0 = slimword0; 2328 } 2329 } 2330 } else { 2331 /* First copy command data */ 2332 word0 = readl(phba->MBslimaddr); 2333 } 2334 /* Read the HBA Host Attention Register */ 2335 ha_copy = readl(phba->HAregaddr); 2336 } 2337 2338 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2339 /* copy results back to user */ 2340 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 2341 MAILBOX_CMD_SIZE); 2342 } else { 2343 /* First copy command data */ 2344 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 2345 MAILBOX_CMD_SIZE); 2346 if ((mb->mbxCommand == MBX_DUMP_MEMORY) && 2347 pmbox->context2) { 2348 lpfc_memcpy_from_slim((void *)pmbox->context2, 2349 phba->MBslimaddr + DMP_RSP_OFFSET, 2350 mb->un.varDmp.word_cnt); 2351 } 2352 } 2353 2354 writel(HA_MBATT, phba->HAregaddr); 2355 readl(phba->HAregaddr); /* flush */ 2356 2357 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2358 status = mb->mbxStatus; 2359 } 2360 2361 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2362 return (status); 2363 } 2364 2365 static int 2366 lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 2367 struct lpfc_iocbq * piocb) 2368 { 2369 /* Insert the caller's iocb in the txq tail for later processing. */ 2370 list_add_tail(&piocb->list, &pring->txq); 2371 pring->txq_cnt++; 2372 return (0); 2373 } 2374 2375 static struct lpfc_iocbq * 2376 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2377 struct lpfc_iocbq ** piocb) 2378 { 2379 struct lpfc_iocbq * nextiocb; 2380 2381 nextiocb = lpfc_sli_ringtx_get(phba, pring); 2382 if (!nextiocb) { 2383 nextiocb = *piocb; 2384 *piocb = NULL; 2385 } 2386 2387 return nextiocb; 2388 } 2389 2390 int 2391 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2392 struct lpfc_iocbq *piocb, uint32_t flag) 2393 { 2394 struct lpfc_iocbq *nextiocb; 2395 IOCB_t *iocb; 2396 2397 /* 2398 * We should never get an IOCB if we are in a < LINK_DOWN state 2399 */ 2400 if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) 2401 return IOCB_ERROR; 2402 2403 /* 2404 * Check to see if we are blocking IOCB processing because of a 2405 * outstanding mbox command. 2406 */ 2407 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX)) 2408 goto iocb_busy; 2409 2410 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) { 2411 /* 2412 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF 2413 * can be issued if the link is not up. 2414 */ 2415 switch (piocb->iocb.ulpCommand) { 2416 case CMD_QUE_RING_BUF_CN: 2417 case CMD_QUE_RING_BUF64_CN: 2418 /* 2419 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 2420 * completion, iocb_cmpl MUST be 0. 2421 */ 2422 if (piocb->iocb_cmpl) 2423 piocb->iocb_cmpl = NULL; 2424 /*FALLTHROUGH*/ 2425 case CMD_CREATE_XRI_CR: 2426 break; 2427 default: 2428 goto iocb_busy; 2429 } 2430 2431 /* 2432 * For FCP commands, we must be in a state where we can process link 2433 * attention events. 2434 */ 2435 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 2436 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) 2437 goto iocb_busy; 2438 2439 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2440 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 2441 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 2442 2443 if (iocb) 2444 lpfc_sli_update_ring(phba, pring); 2445 else 2446 lpfc_sli_update_full_ring(phba, pring); 2447 2448 if (!piocb) 2449 return IOCB_SUCCESS; 2450 2451 goto out_busy; 2452 2453 iocb_busy: 2454 pring->stats.iocb_cmd_delay++; 2455 2456 out_busy: 2457 2458 if (!(flag & SLI_IOCB_RET_IOCB)) { 2459 lpfc_sli_ringtx_put(phba, pring, piocb); 2460 return IOCB_SUCCESS; 2461 } 2462 2463 return IOCB_BUSY; 2464 } 2465 2466 static int 2467 lpfc_extra_ring_setup( struct lpfc_hba *phba) 2468 { 2469 struct lpfc_sli *psli; 2470 struct lpfc_sli_ring *pring; 2471 2472 psli = &phba->sli; 2473 2474 /* Adjust cmd/rsp ring iocb entries more evenly */ 2475 pring = &psli->ring[psli->fcp_ring]; 2476 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2477 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2478 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2479 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2480 2481 pring = &psli->ring[1]; 2482 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2483 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2484 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2485 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2486 2487 /* Setup default profile for this ring */ 2488 pring->iotag_max = 4096; 2489 pring->num_mask = 1; 2490 pring->prt[0].profile = 0; /* Mask 0 */ 2491 pring->prt[0].rctl = FC_UNSOL_DATA; 2492 pring->prt[0].type = 5; 2493 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 2494 return 0; 2495 } 2496 2497 int 2498 lpfc_sli_setup(struct lpfc_hba *phba) 2499 { 2500 int i, totiocb = 0; 2501 struct lpfc_sli *psli = &phba->sli; 2502 struct lpfc_sli_ring *pring; 2503 2504 psli->num_rings = MAX_CONFIGURED_RINGS; 2505 psli->sli_flag = 0; 2506 psli->fcp_ring = LPFC_FCP_RING; 2507 psli->next_ring = LPFC_FCP_NEXT_RING; 2508 psli->ip_ring = LPFC_IP_RING; 2509 2510 psli->iocbq_lookup = NULL; 2511 psli->iocbq_lookup_len = 0; 2512 psli->last_iotag = 0; 2513 2514 for (i = 0; i < psli->num_rings; i++) { 2515 pring = &psli->ring[i]; 2516 switch (i) { 2517 case LPFC_FCP_RING: /* ring 0 - FCP */ 2518 /* numCiocb and numRiocb are used in config_port */ 2519 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 2520 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 2521 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2522 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2523 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2524 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2525 pring->iotag_ctr = 0; 2526 pring->iotag_max = 2527 (phba->cfg_hba_queue_depth * 2); 2528 pring->fast_iotag = pring->iotag_max; 2529 pring->num_mask = 0; 2530 break; 2531 case LPFC_IP_RING: /* ring 1 - IP */ 2532 /* numCiocb and numRiocb are used in config_port */ 2533 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 2534 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 2535 pring->num_mask = 0; 2536 break; 2537 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 2538 /* numCiocb and numRiocb are used in config_port */ 2539 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 2540 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 2541 pring->fast_iotag = 0; 2542 pring->iotag_ctr = 0; 2543 pring->iotag_max = 4096; 2544 pring->num_mask = 4; 2545 pring->prt[0].profile = 0; /* Mask 0 */ 2546 pring->prt[0].rctl = FC_ELS_REQ; 2547 pring->prt[0].type = FC_ELS_DATA; 2548 pring->prt[0].lpfc_sli_rcv_unsol_event = 2549 lpfc_els_unsol_event; 2550 pring->prt[1].profile = 0; /* Mask 1 */ 2551 pring->prt[1].rctl = FC_ELS_RSP; 2552 pring->prt[1].type = FC_ELS_DATA; 2553 pring->prt[1].lpfc_sli_rcv_unsol_event = 2554 lpfc_els_unsol_event; 2555 pring->prt[2].profile = 0; /* Mask 2 */ 2556 /* NameServer Inquiry */ 2557 pring->prt[2].rctl = FC_UNSOL_CTL; 2558 /* NameServer */ 2559 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 2560 pring->prt[2].lpfc_sli_rcv_unsol_event = 2561 lpfc_ct_unsol_event; 2562 pring->prt[3].profile = 0; /* Mask 3 */ 2563 /* NameServer response */ 2564 pring->prt[3].rctl = FC_SOL_CTL; 2565 /* NameServer */ 2566 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 2567 pring->prt[3].lpfc_sli_rcv_unsol_event = 2568 lpfc_ct_unsol_event; 2569 break; 2570 } 2571 totiocb += (pring->numCiocb + pring->numRiocb); 2572 } 2573 if (totiocb > MAX_SLI2_IOCB) { 2574 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 2575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2576 "%d:0462 Too many cmd / rsp ring entries in " 2577 "SLI2 SLIM Data: x%x x%x\n", 2578 phba->brd_no, totiocb, MAX_SLI2_IOCB); 2579 } 2580 if (phba->cfg_multi_ring_support == 2) 2581 lpfc_extra_ring_setup(phba); 2582 2583 return 0; 2584 } 2585 2586 int 2587 lpfc_sli_queue_setup(struct lpfc_hba * phba) 2588 { 2589 struct lpfc_sli *psli; 2590 struct lpfc_sli_ring *pring; 2591 int i; 2592 2593 psli = &phba->sli; 2594 spin_lock_irq(phba->host->host_lock); 2595 INIT_LIST_HEAD(&psli->mboxq); 2596 /* Initialize list headers for txq and txcmplq as double linked lists */ 2597 for (i = 0; i < psli->num_rings; i++) { 2598 pring = &psli->ring[i]; 2599 pring->ringno = i; 2600 pring->next_cmdidx = 0; 2601 pring->local_getidx = 0; 2602 pring->cmdidx = 0; 2603 INIT_LIST_HEAD(&pring->txq); 2604 INIT_LIST_HEAD(&pring->txcmplq); 2605 INIT_LIST_HEAD(&pring->iocb_continueq); 2606 INIT_LIST_HEAD(&pring->postbufq); 2607 } 2608 spin_unlock_irq(phba->host->host_lock); 2609 return (1); 2610 } 2611 2612 int 2613 lpfc_sli_hba_down(struct lpfc_hba * phba) 2614 { 2615 struct lpfc_sli *psli; 2616 struct lpfc_sli_ring *pring; 2617 LPFC_MBOXQ_t *pmb; 2618 struct lpfc_iocbq *iocb, *next_iocb; 2619 IOCB_t *icmd = NULL; 2620 int i; 2621 unsigned long flags = 0; 2622 2623 psli = &phba->sli; 2624 lpfc_hba_down_prep(phba); 2625 2626 spin_lock_irqsave(phba->host->host_lock, flags); 2627 2628 for (i = 0; i < psli->num_rings; i++) { 2629 pring = &psli->ring[i]; 2630 pring->flag |= LPFC_DEFERRED_RING_EVENT; 2631 2632 /* 2633 * Error everything on the txq since these iocbs have not been 2634 * given to the FW yet. 2635 */ 2636 pring->txq_cnt = 0; 2637 2638 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 2639 list_del_init(&iocb->list); 2640 if (iocb->iocb_cmpl) { 2641 icmd = &iocb->iocb; 2642 icmd->ulpStatus = IOSTAT_LOCAL_REJECT; 2643 icmd->un.ulpWord[4] = IOERR_SLI_DOWN; 2644 spin_unlock_irqrestore(phba->host->host_lock, 2645 flags); 2646 (iocb->iocb_cmpl) (phba, iocb, iocb); 2647 spin_lock_irqsave(phba->host->host_lock, flags); 2648 } else 2649 lpfc_sli_release_iocbq(phba, iocb); 2650 } 2651 2652 INIT_LIST_HEAD(&(pring->txq)); 2653 2654 } 2655 2656 spin_unlock_irqrestore(phba->host->host_lock, flags); 2657 2658 /* Return any active mbox cmds */ 2659 del_timer_sync(&psli->mbox_tmo); 2660 spin_lock_irqsave(phba->host->host_lock, flags); 2661 phba->work_hba_events &= ~WORKER_MBOX_TMO; 2662 if (psli->mbox_active) { 2663 pmb = psli->mbox_active; 2664 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 2665 if (pmb->mbox_cmpl) { 2666 spin_unlock_irqrestore(phba->host->host_lock, flags); 2667 pmb->mbox_cmpl(phba,pmb); 2668 spin_lock_irqsave(phba->host->host_lock, flags); 2669 } 2670 } 2671 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2672 psli->mbox_active = NULL; 2673 2674 /* Return any pending mbox cmds */ 2675 while ((pmb = lpfc_mbox_get(phba)) != NULL) { 2676 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 2677 if (pmb->mbox_cmpl) { 2678 spin_unlock_irqrestore(phba->host->host_lock, flags); 2679 pmb->mbox_cmpl(phba,pmb); 2680 spin_lock_irqsave(phba->host->host_lock, flags); 2681 } 2682 } 2683 2684 INIT_LIST_HEAD(&psli->mboxq); 2685 2686 spin_unlock_irqrestore(phba->host->host_lock, flags); 2687 2688 return 1; 2689 } 2690 2691 void 2692 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 2693 { 2694 uint32_t *src = srcp; 2695 uint32_t *dest = destp; 2696 uint32_t ldata; 2697 int i; 2698 2699 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 2700 ldata = *src; 2701 ldata = le32_to_cpu(ldata); 2702 *dest = ldata; 2703 src++; 2704 dest++; 2705 } 2706 } 2707 2708 int 2709 lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 2710 struct lpfc_dmabuf * mp) 2711 { 2712 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 2713 later */ 2714 list_add_tail(&mp->list, &pring->postbufq); 2715 2716 pring->postbufq_cnt++; 2717 return 0; 2718 } 2719 2720 2721 struct lpfc_dmabuf * 2722 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2723 dma_addr_t phys) 2724 { 2725 struct lpfc_dmabuf *mp, *next_mp; 2726 struct list_head *slp = &pring->postbufq; 2727 2728 /* Search postbufq, from the begining, looking for a match on phys */ 2729 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 2730 if (mp->phys == phys) { 2731 list_del_init(&mp->list); 2732 pring->postbufq_cnt--; 2733 return mp; 2734 } 2735 } 2736 2737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2738 "%d:0410 Cannot find virtual addr for mapped buf on " 2739 "ring %d Data x%llx x%p x%p x%x\n", 2740 phba->brd_no, pring->ringno, (unsigned long long)phys, 2741 slp->next, slp->prev, pring->postbufq_cnt); 2742 return NULL; 2743 } 2744 2745 static void 2746 lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2747 struct lpfc_iocbq * rspiocb) 2748 { 2749 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 2750 /* Free the resources associated with the ELS_REQUEST64 IOCB the driver 2751 * just aborted. 2752 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl 2753 */ 2754 if (cmdiocb->context2) { 2755 buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2; 2756 2757 /* Free the response IOCB before completing the abort 2758 command. */ 2759 buf_ptr = NULL; 2760 list_remove_head((&buf_ptr1->list), buf_ptr, 2761 struct lpfc_dmabuf, list); 2762 if (buf_ptr) { 2763 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2764 kfree(buf_ptr); 2765 } 2766 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 2767 kfree(buf_ptr1); 2768 } 2769 2770 if (cmdiocb->context3) { 2771 buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3; 2772 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2773 kfree(buf_ptr); 2774 } 2775 2776 lpfc_sli_release_iocbq(phba, cmdiocb); 2777 return; 2778 } 2779 2780 int 2781 lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba, 2782 struct lpfc_sli_ring * pring, 2783 struct lpfc_iocbq * cmdiocb) 2784 { 2785 struct lpfc_iocbq *abtsiocbp; 2786 IOCB_t *icmd = NULL; 2787 IOCB_t *iabt = NULL; 2788 2789 /* issue ABTS for this IOCB based on iotag */ 2790 abtsiocbp = lpfc_sli_get_iocbq(phba); 2791 if (abtsiocbp == NULL) 2792 return 0; 2793 2794 iabt = &abtsiocbp->iocb; 2795 icmd = &cmdiocb->iocb; 2796 switch (icmd->ulpCommand) { 2797 case CMD_ELS_REQUEST64_CR: 2798 /* Even though we abort the ELS command, the firmware may access 2799 * the BPL or other resources before it processes our 2800 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb 2801 * resources till the actual abort request completes. 2802 */ 2803 abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand); 2804 abtsiocbp->context2 = cmdiocb->context2; 2805 abtsiocbp->context3 = cmdiocb->context3; 2806 cmdiocb->context2 = NULL; 2807 cmdiocb->context3 = NULL; 2808 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl; 2809 break; 2810 default: 2811 lpfc_sli_release_iocbq(phba, abtsiocbp); 2812 return 0; 2813 } 2814 2815 iabt->un.amxri.abortType = ABORT_TYPE_ABTS; 2816 iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32; 2817 2818 iabt->ulpLe = 1; 2819 iabt->ulpClass = CLASS3; 2820 iabt->ulpCommand = CMD_ABORT_MXRI64_CN; 2821 2822 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) { 2823 lpfc_sli_release_iocbq(phba, abtsiocbp); 2824 return 0; 2825 } 2826 2827 return 1; 2828 } 2829 2830 static int 2831 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id, 2832 uint64_t lun_id, uint32_t ctx, 2833 lpfc_ctx_cmd ctx_cmd) 2834 { 2835 struct lpfc_scsi_buf *lpfc_cmd; 2836 struct scsi_cmnd *cmnd; 2837 int rc = 1; 2838 2839 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 2840 return rc; 2841 2842 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 2843 cmnd = lpfc_cmd->pCmd; 2844 2845 if (cmnd == NULL) 2846 return rc; 2847 2848 switch (ctx_cmd) { 2849 case LPFC_CTX_LUN: 2850 if ((cmnd->device->id == tgt_id) && 2851 (cmnd->device->lun == lun_id)) 2852 rc = 0; 2853 break; 2854 case LPFC_CTX_TGT: 2855 if (cmnd->device->id == tgt_id) 2856 rc = 0; 2857 break; 2858 case LPFC_CTX_CTX: 2859 if (iocbq->iocb.ulpContext == ctx) 2860 rc = 0; 2861 break; 2862 case LPFC_CTX_HOST: 2863 rc = 0; 2864 break; 2865 default: 2866 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 2867 __FUNCTION__, ctx_cmd); 2868 break; 2869 } 2870 2871 return rc; 2872 } 2873 2874 int 2875 lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2876 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) 2877 { 2878 struct lpfc_iocbq *iocbq; 2879 int sum, i; 2880 2881 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 2882 iocbq = phba->sli.iocbq_lookup[i]; 2883 2884 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id, 2885 0, ctx_cmd) == 0) 2886 sum++; 2887 } 2888 2889 return sum; 2890 } 2891 2892 void 2893 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2894 struct lpfc_iocbq * rspiocb) 2895 { 2896 spin_lock_irq(phba->host->host_lock); 2897 lpfc_sli_release_iocbq(phba, cmdiocb); 2898 spin_unlock_irq(phba->host->host_lock); 2899 return; 2900 } 2901 2902 int 2903 lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2904 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx, 2905 lpfc_ctx_cmd abort_cmd) 2906 { 2907 struct lpfc_iocbq *iocbq; 2908 struct lpfc_iocbq *abtsiocb; 2909 IOCB_t *cmd = NULL; 2910 int errcnt = 0, ret_val = 0; 2911 int i; 2912 2913 for (i = 1; i <= phba->sli.last_iotag; i++) { 2914 iocbq = phba->sli.iocbq_lookup[i]; 2915 2916 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id, 2917 0, abort_cmd) != 0) 2918 continue; 2919 2920 /* issue ABTS for this IOCB based on iotag */ 2921 abtsiocb = lpfc_sli_get_iocbq(phba); 2922 if (abtsiocb == NULL) { 2923 errcnt++; 2924 continue; 2925 } 2926 2927 cmd = &iocbq->iocb; 2928 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 2929 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 2930 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 2931 abtsiocb->iocb.ulpLe = 1; 2932 abtsiocb->iocb.ulpClass = cmd->ulpClass; 2933 2934 if (phba->hba_state >= LPFC_LINK_UP) 2935 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 2936 else 2937 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 2938 2939 /* Setup callback routine and issue the command. */ 2940 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 2941 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 2942 if (ret_val == IOCB_ERROR) { 2943 lpfc_sli_release_iocbq(phba, abtsiocb); 2944 errcnt++; 2945 continue; 2946 } 2947 } 2948 2949 return errcnt; 2950 } 2951 2952 static void 2953 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 2954 struct lpfc_iocbq *cmdiocbq, 2955 struct lpfc_iocbq *rspiocbq) 2956 { 2957 wait_queue_head_t *pdone_q; 2958 unsigned long iflags; 2959 2960 spin_lock_irqsave(phba->host->host_lock, iflags); 2961 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 2962 if (cmdiocbq->context2 && rspiocbq) 2963 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 2964 &rspiocbq->iocb, sizeof(IOCB_t)); 2965 2966 pdone_q = cmdiocbq->context_un.wait_queue; 2967 spin_unlock_irqrestore(phba->host->host_lock, iflags); 2968 if (pdone_q) 2969 wake_up(pdone_q); 2970 return; 2971 } 2972 2973 /* 2974 * Issue the caller's iocb and wait for its completion, but no longer than the 2975 * caller's timeout. Note that iocb_flags is cleared before the 2976 * lpfc_sli_issue_call since the wake routine sets a unique value and by 2977 * definition this is a wait function. 2978 */ 2979 int 2980 lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, 2981 struct lpfc_sli_ring * pring, 2982 struct lpfc_iocbq * piocb, 2983 struct lpfc_iocbq * prspiocbq, 2984 uint32_t timeout) 2985 { 2986 DECLARE_WAIT_QUEUE_HEAD(done_q); 2987 long timeleft, timeout_req = 0; 2988 int retval = IOCB_SUCCESS; 2989 uint32_t creg_val; 2990 2991 /* 2992 * If the caller has provided a response iocbq buffer, then context2 2993 * is NULL or its an error. 2994 */ 2995 if (prspiocbq) { 2996 if (piocb->context2) 2997 return IOCB_ERROR; 2998 piocb->context2 = prspiocbq; 2999 } 3000 3001 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 3002 piocb->context_un.wait_queue = &done_q; 3003 piocb->iocb_flag &= ~LPFC_IO_WAKE; 3004 3005 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 3006 creg_val = readl(phba->HCregaddr); 3007 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 3008 writel(creg_val, phba->HCregaddr); 3009 readl(phba->HCregaddr); /* flush */ 3010 } 3011 3012 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 3013 if (retval == IOCB_SUCCESS) { 3014 timeout_req = timeout * HZ; 3015 spin_unlock_irq(phba->host->host_lock); 3016 timeleft = wait_event_timeout(done_q, 3017 piocb->iocb_flag & LPFC_IO_WAKE, 3018 timeout_req); 3019 spin_lock_irq(phba->host->host_lock); 3020 3021 if (timeleft == 0) { 3022 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3023 "%d:0338 IOCB wait timeout error - no " 3024 "wake response Data x%x\n", 3025 phba->brd_no, timeout); 3026 retval = IOCB_TIMEDOUT; 3027 } else if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 3028 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3029 "%d:0330 IOCB wake NOT set, " 3030 "Data x%x x%lx\n", phba->brd_no, 3031 timeout, (timeleft / jiffies)); 3032 retval = IOCB_TIMEDOUT; 3033 } else { 3034 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3035 "%d:0331 IOCB wake signaled\n", 3036 phba->brd_no); 3037 } 3038 } else { 3039 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3040 "%d:0332 IOCB wait issue failed, Data x%x\n", 3041 phba->brd_no, retval); 3042 retval = IOCB_ERROR; 3043 } 3044 3045 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 3046 creg_val = readl(phba->HCregaddr); 3047 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 3048 writel(creg_val, phba->HCregaddr); 3049 readl(phba->HCregaddr); /* flush */ 3050 } 3051 3052 if (prspiocbq) 3053 piocb->context2 = NULL; 3054 3055 piocb->context_un.wait_queue = NULL; 3056 piocb->iocb_cmpl = NULL; 3057 return retval; 3058 } 3059 3060 int 3061 lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 3062 uint32_t timeout) 3063 { 3064 DECLARE_WAIT_QUEUE_HEAD(done_q); 3065 DECLARE_WAITQUEUE(wq_entry, current); 3066 uint32_t timeleft = 0; 3067 int retval; 3068 3069 /* The caller must leave context1 empty. */ 3070 if (pmboxq->context1 != 0) { 3071 return (MBX_NOT_FINISHED); 3072 } 3073 3074 /* setup wake call as IOCB callback */ 3075 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 3076 /* setup context field to pass wait_queue pointer to wake function */ 3077 pmboxq->context1 = &done_q; 3078 3079 /* start to sleep before we wait, to avoid races */ 3080 set_current_state(TASK_INTERRUPTIBLE); 3081 add_wait_queue(&done_q, &wq_entry); 3082 3083 /* now issue the command */ 3084 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3085 3086 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 3087 timeleft = schedule_timeout(timeout * HZ); 3088 pmboxq->context1 = NULL; 3089 /* if schedule_timeout returns 0, we timed out and were not 3090 woken up */ 3091 if ((timeleft == 0) || signal_pending(current)) 3092 retval = MBX_TIMEOUT; 3093 else 3094 retval = MBX_SUCCESS; 3095 } 3096 3097 3098 set_current_state(TASK_RUNNING); 3099 remove_wait_queue(&done_q, &wq_entry); 3100 return retval; 3101 } 3102 3103 int 3104 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 3105 { 3106 int i = 0; 3107 3108 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) { 3109 if (i++ > LPFC_MBOX_TMO * 1000) 3110 return 1; 3111 3112 if (lpfc_sli_handle_mb_event(phba) == 0) 3113 i = 0; 3114 3115 msleep(1); 3116 } 3117 3118 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 3119 } 3120 3121 irqreturn_t 3122 lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) 3123 { 3124 struct lpfc_hba *phba; 3125 uint32_t ha_copy; 3126 uint32_t work_ha_copy; 3127 unsigned long status; 3128 int i; 3129 uint32_t control; 3130 3131 /* 3132 * Get the driver's phba structure from the dev_id and 3133 * assume the HBA is not interrupting. 3134 */ 3135 phba = (struct lpfc_hba *) dev_id; 3136 3137 if (unlikely(!phba)) 3138 return IRQ_NONE; 3139 3140 phba->sli.slistat.sli_intr++; 3141 3142 /* 3143 * Call the HBA to see if it is interrupting. If not, don't claim 3144 * the interrupt 3145 */ 3146 3147 /* Ignore all interrupts during initialization. */ 3148 if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) 3149 return IRQ_NONE; 3150 3151 /* 3152 * Read host attention register to determine interrupt source 3153 * Clear Attention Sources, except Error Attention (to 3154 * preserve status) and Link Attention 3155 */ 3156 spin_lock(phba->host->host_lock); 3157 ha_copy = readl(phba->HAregaddr); 3158 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 3159 readl(phba->HAregaddr); /* flush */ 3160 spin_unlock(phba->host->host_lock); 3161 3162 if (unlikely(!ha_copy)) 3163 return IRQ_NONE; 3164 3165 work_ha_copy = ha_copy & phba->work_ha_mask; 3166 3167 if (unlikely(work_ha_copy)) { 3168 if (work_ha_copy & HA_LATT) { 3169 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 3170 /* 3171 * Turn off Link Attention interrupts 3172 * until CLEAR_LA done 3173 */ 3174 spin_lock(phba->host->host_lock); 3175 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 3176 control = readl(phba->HCregaddr); 3177 control &= ~HC_LAINT_ENA; 3178 writel(control, phba->HCregaddr); 3179 readl(phba->HCregaddr); /* flush */ 3180 spin_unlock(phba->host->host_lock); 3181 } 3182 else 3183 work_ha_copy &= ~HA_LATT; 3184 } 3185 3186 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) { 3187 for (i = 0; i < phba->sli.num_rings; i++) { 3188 if (work_ha_copy & (HA_RXATT << (4*i))) { 3189 /* 3190 * Turn off Slow Rings interrupts 3191 */ 3192 spin_lock(phba->host->host_lock); 3193 control = readl(phba->HCregaddr); 3194 control &= ~(HC_R0INT_ENA << i); 3195 writel(control, phba->HCregaddr); 3196 readl(phba->HCregaddr); /* flush */ 3197 spin_unlock(phba->host->host_lock); 3198 } 3199 } 3200 } 3201 3202 if (work_ha_copy & HA_ERATT) { 3203 phba->hba_state = LPFC_HBA_ERROR; 3204 /* 3205 * There was a link/board error. Read the 3206 * status register to retrieve the error event 3207 * and process it. 3208 */ 3209 phba->sli.slistat.err_attn_event++; 3210 /* Save status info */ 3211 phba->work_hs = readl(phba->HSregaddr); 3212 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 3213 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 3214 3215 /* Clear Chip error bit */ 3216 writel(HA_ERATT, phba->HAregaddr); 3217 readl(phba->HAregaddr); /* flush */ 3218 phba->stopped = 1; 3219 } 3220 3221 spin_lock(phba->host->host_lock); 3222 phba->work_ha |= work_ha_copy; 3223 if (phba->work_wait) 3224 wake_up(phba->work_wait); 3225 spin_unlock(phba->host->host_lock); 3226 } 3227 3228 ha_copy &= ~(phba->work_ha_mask); 3229 3230 /* 3231 * Process all events on FCP ring. Take the optimized path for 3232 * FCP IO. Any other IO is slow path and is handled by 3233 * the worker thread. 3234 */ 3235 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 3236 status >>= (4*LPFC_FCP_RING); 3237 if (status & HA_RXATT) 3238 lpfc_sli_handle_fast_ring_event(phba, 3239 &phba->sli.ring[LPFC_FCP_RING], 3240 status); 3241 return IRQ_HANDLED; 3242 3243 } /* lpfc_intr_handler */ 3244