/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Enterprise Fibre Channel Host Bus Adapters.                     *
 * Refer to the README file included with this package for         *
 * driver version and adapter support.                             *
 * Copyright (C) 2004 Emulex Corporation.                          *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of the GNU General Public License     *
 * as published by the Free Software Foundation; either version 2  *
 * of the License, or (at your option) any later version.          *
 *                                                                 *
 * This program is distributed in the hope that it will be useful, *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
 * GNU General Public License for more details, a copy of which    *
 * can be found in the file COPYING included with this package.    *
 *******************************************************************/

/*
 * $Id: lpfc_sli.c 1.232 2005/04/13 11:59:16EDT sf_support Exp $
 */

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"

/*
 * Define a macro to log: "Mailbox command x%x cannot issue Data ...".
 * This allows message 0311 to be issued from multiple call sites
 * without perturbing the log message utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag) \
        lpfc_printf_log(phba, \
                        KERN_INFO, \
                        LOG_MBOX | LOG_SLI, \
                        "%d:0311 Mailbox command x%x cannot issue " \
                        "Data: x%x x%x x%x\n", \
                        phba->brd_no, \
                        mb->mbxCommand, \
                        phba->hba_state, \
                        psli->sli_flag, \
                        flag);

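/*
 * Note that the macro body above already ends in a semicolon, so call
 * sites in this file may invoke it with or without a trailing semicolon
 * of their own, e.g.:
 *
 *      LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag)
 *      return (MBX_NOT_FINISHED);
 */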
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;

/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return LPFC_UNKNOWN_IOCB;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}

static int
lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_sli *psli = &phba->sli;
        MAILBOX_t *pmbox = &pmb->mb;
        int i, rc;

        for (i = 0; i < psli->num_rings; i++) {
                phba->hba_state = LPFC_INIT_MBX_CMDS;
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "%d:0446 Adapter failed to init, "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        phba->brd_no, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->hba_state = LPFC_HBA_ERROR;
                        return -ENXIO;
                }
        }
        return 0;
}

static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
                        struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
{
        uint16_t iotag;

        list_add_tail(&piocb->list, &pring->txcmplq);
        pring->txcmplq_cnt++;
        if (unlikely(pring->ringno == LPFC_ELS_RING))
                mod_timer(&phba->els_tmofunc,
                          jiffies + HZ * (phba->fc_ratov << 1));

        if (pring->fast_lookup) {
                /* Set up fast lookup based on iotag for completion */
                iotag = piocb->iocb.ulpIoTag;
                if (iotag && (iotag < pring->fast_iotag))
                        *(pring->fast_lookup + iotag) = piocb;
                else {
                        /* Cmd ring <ringno> put: iotag <iotag> greater than
                           configured max <fast_iotag> wd0 <icmd> */
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "%d:0316 Cmd ring %d put: iotag x%x "
                                        "greater than configured max x%x "
                                        "wd0 x%x\n",
                                        phba->brd_no, pring->ringno, iotag,
                                        pring->fast_iotag,
                                        *(((uint32_t *)(&piocb->iocb)) + 7));
                }
        }
        return (0);
}

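/*
 * Remove the first IOCB, if any, from the ring's transmit queue and hand
 * it back to the caller; pring->txq_cnt is kept in step with the list.
 */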
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
        struct list_head *dlp;
        struct lpfc_iocbq *cmd_iocb;

        dlp = &pring->txq;
        cmd_iocb = NULL;
        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        if (cmd_iocb) {
                /* If the first ptr is not equal to the list header,
                 * dequeue the IOCBQ_t and return it.
                 */
                pring->txq_cnt--;
        }
        return (cmd_iocb);
}

static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        MAILBOX_t *mbox = (MAILBOX_t *)phba->sli.MBhostaddr;
        PGP *pgp = (PGP *)&mbox->us.s2.port[pring->ringno];
        uint32_t max_cmd_idx = pring->numCiocb;
        IOCB_t *iocb = NULL;

        if ((pring->next_cmdidx == pring->cmdidx) &&
            (++pring->next_cmdidx >= max_cmd_idx))
                pring->next_cmdidx = 0;

        if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "%d:0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        phba->brd_no, pring->ringno,
                                        pring->local_getidx, max_cmd_idx);

                        phba->hba_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to the
                         * worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;
                        if (phba->work_wait)
                                wake_up(phba->work_wait);

                        return NULL;
                }

                if (pring->local_getidx == pring->next_cmdidx)
                        return NULL;
        }

        iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);

        return iocb;
}

static uint32_t
lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
        uint32_t search_start;

        if (pring->fast_lookup == NULL) {
                pring->iotag_ctr++;
                if (pring->iotag_ctr >= pring->iotag_max)
                        pring->iotag_ctr = 1;
                return pring->iotag_ctr;
        }

        search_start = pring->iotag_ctr;

        do {
                pring->iotag_ctr++;
                if (pring->iotag_ctr >= pring->fast_iotag)
                        pring->iotag_ctr = 1;

                if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
                        return pring->iotag_ctr;

        } while (pring->iotag_ctr != search_start);

        /*
         * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
         */
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
                        phba->brd_no, pring->ringno, pring->fast_iotag);
        return (0);
}

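/*
 * Copy nextiocb into the ring slot and either queue it on the txcmplq to
 * await its completion (iocb_cmpl set) or release it back to the
 * lpfc_iocb_list right away, then tell the HBA which slot the driver will
 * fill next.
 */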
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
        /*
         * Allocate and set up an iotag
         */
        nextiocb->iocb.ulpIoTag =
                lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);

        /*
         * Issue iocb command to adapter
         */
        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
        wmb();
        pring->stats.iocb_cmd++;

        /*
         * If there is no completion routine to call, we can release the
         * IOCB buffer back right now.  For IOCBs, like QUE_RING_BUF,
         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
         */
        if (nextiocb->iocb_cmpl)
                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
        else {
                list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list);
        }

        /*
         * Let the HBA know what IOCB slot will be the next one the
         * driver will put a command into.
         */
        pring->cmdidx = pring->next_cmdidx;
        writeb(pring->cmdidx, phba->MBslimaddr
               + (SLIMOFF + (pring->ringno * 2)) * 4);
}

static void
lpfc_sli_update_full_ring(struct lpfc_hba * phba,
                          struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        pring->flag |= LPFC_CALL_RING_AVAILABLE;

        wmb();

        /*
         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
         * The HBA will tell us when an IOCB entry is available.
         */
        writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */

        pring->stats.iocb_cmd_full++;
}

static void
lpfc_sli_update_ring(struct lpfc_hba * phba,
                     struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        /*
         * Tell the HBA that there is work to do in this ring.
         */
        wmb();
        writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */
}

static void
lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
        IOCB_t *iocb;
        struct lpfc_iocbq *nextiocb;

        /*
         * Check to see if:
         *  (a) there is anything on the txq to send
         *  (b) link is up
         *  (c) link attention events can be processed (fcp ring only)
         *  (d) IOCB processing is not blocked by the outstanding mbox command.
         */
        if (pring->txq_cnt &&
            (phba->hba_state > LPFC_LINK_DOWN) &&
            (pring->ringno != phba->sli.fcp_ring ||
             phba->sli.sli_flag & LPFC_PROCESS_LA) &&
            !(pring->flag & LPFC_STOP_IOCB_MBX)) {

                while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
                       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
                        lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

                if (iocb)
                        lpfc_sli_update_ring(phba, pring);
                else
                        lpfc_sli_update_full_ring(phba, pring);
        }

        return;
}

/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
{
        PGP *pgp =
                ((PGP *) &
                 (((MAILBOX_t *)phba->sli.MBhostaddr)->us.s2.port[ringno]));

        /* If the ring is active, flag it */
        if (phba->sli.ring[ringno].cmdringaddr) {
                if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
                        phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
                        /*
                         * Force update of the local copy of cmdGetInx
                         */
                        phba->sli.ring[ringno].local_getidx
                                = le32_to_cpu(pgp->cmdGetInx);
                        spin_lock_irq(phba->host->host_lock);
                        lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
                        spin_unlock_irq(phba->host->host_lock);
                }
        }
}

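/*
 * Validate a mailbox command code: known commands are returned unchanged,
 * anything else maps to MBX_SHUTDOWN so callers can treat the completion
 * as fatal.
 */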
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
        uint8_t ret;

        switch (mbxCommand) {
        case MBX_LOAD_SM:
        case MBX_READ_NV:
        case MBX_WRITE_NV:
        case MBX_RUN_BIU_DIAG:
        case MBX_INIT_LINK:
        case MBX_DOWN_LINK:
        case MBX_CONFIG_LINK:
        case MBX_CONFIG_RING:
        case MBX_RESET_RING:
        case MBX_READ_CONFIG:
        case MBX_READ_RCONFIG:
        case MBX_READ_SPARM:
        case MBX_READ_STATUS:
        case MBX_READ_RPI:
        case MBX_READ_XRI:
        case MBX_READ_REV:
        case MBX_READ_LNK_STAT:
        case MBX_REG_LOGIN:
        case MBX_UNREG_LOGIN:
        case MBX_READ_LA:
        case MBX_CLEAR_LA:
        case MBX_DUMP_MEMORY:
        case MBX_DUMP_CONTEXT:
        case MBX_RUN_DIAGS:
        case MBX_RESTART:
        case MBX_UPDATE_CFG:
        case MBX_DOWN_LOAD:
        case MBX_DEL_LD_ENTRY:
        case MBX_RUN_PROGRAM:
        case MBX_SET_MASK:
        case MBX_SET_SLIM:
        case MBX_UNREG_D_ID:
        case MBX_CONFIG_FARP:
        case MBX_LOAD_AREA:
        case MBX_RUN_BIU_DIAG64:
        case MBX_CONFIG_PORT:
        case MBX_READ_SPARM64:
        case MBX_READ_RPI64:
        case MBX_REG_LOGIN64:
        case MBX_READ_LA64:
        case MBX_FLASH_WR_ULA:
        case MBX_SET_DEBUG:
        case MBX_LOAD_EXP_ROM:
                ret = mbxCommand;
                break;
        default:
                ret = MBX_SHUTDOWN;
                break;
        }
        return (ret);
}

static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        wait_queue_head_t *pdone_q;

        /*
         * If pdone_q is empty, the driver thread gave up waiting and
         * continued running.
         */
        pdone_q = (wait_queue_head_t *) pmboxq->context1;
        if (pdone_q)
                wake_up_interruptible(pdone_q);
        return;
}

void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
        struct lpfc_dmabuf *mp;

        mp = (struct lpfc_dmabuf *) (pmb->context1);
        if (mp) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}

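/*
 * Process a mailbox completion: verify that ownership has returned to the
 * host, run the completion handler for the active command, then issue any
 * queued mailbox commands and turn rings that were stopped for the mailbox
 * back on.
 */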
int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
{
        MAILBOX_t *mbox;
        MAILBOX_t *pmbox;
        LPFC_MBOXQ_t *pmb;
        struct lpfc_sli *psli;
        int i, rc;
        uint32_t process_next;

        psli = &phba->sli;
        /* We should only get here if we are in SLI2 mode */
        if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
                return (1);
        }

        phba->sli.slistat.mbox_event++;

        /* Get a Mailbox buffer to set up mailbox commands for callback */
        if ((pmb = phba->sli.mbox_active)) {
                pmbox = &pmb->mb;
                mbox = (MAILBOX_t *) phba->sli.MBhostaddr;

                /* First check out the status word */
                lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));

                /* Sanity check to ensure the host owns the mailbox */
                if (pmbox->mbxOwner != OWN_HOST) {
                        /* Let's retry for a while */
                        for (i = 0; i < 10240; i++) {
                                /* First copy command data */
                                lpfc_sli_pcimem_bcopy(mbox, pmbox,
                                                      sizeof (uint32_t));
                                if (pmbox->mbxOwner == OWN_HOST)
                                        goto mbout;
                        }
                        /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
                           <status> */
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                                        "%d:0304 Stray Mailbox Interrupt "
                                        "mbxCommand x%x mbxStatus x%x\n",
                                        phba->brd_no, pmbox->mbxCommand,
                                        pmbox->mbxStatus);

                        spin_lock_irq(phba->host->host_lock);
                        phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
                        spin_unlock_irq(phba->host->host_lock);
                        return (1);
                }

mbout:
                del_timer_sync(&phba->sli.mbox_tmo);
                phba->work_hba_events &= ~WORKER_MBOX_TMO;

                /*
                 * It is a fatal error if an unknown mbox command completes.
                 */
                if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
                    MBX_SHUTDOWN) {

                        /* Unknown mailbox command completion */
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                                        "%d:0323 Unknown Mailbox command %x Cmpl\n",
                                        phba->brd_no, pmbox->mbxCommand);
                        phba->hba_state = LPFC_HBA_ERROR;
                        phba->work_hs = HS_FFER3;
                        lpfc_handle_eratt(phba);
                        return (0);
                }

                phba->sli.mbox_active = NULL;
                if (pmbox->mbxStatus) {
                        phba->sli.slistat.mbox_stat_err++;
                        if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
                                /* Mbox cmd cmpl error - RETRYing */
                                lpfc_printf_log(phba, KERN_INFO,
                                                LOG_MBOX | LOG_SLI,
                                                "%d:0305 Mbox cmd cmpl error - "
                                                "RETRYing Data: x%x x%x x%x x%x\n",
                                                phba->brd_no, pmbox->mbxCommand,
                                                pmbox->mbxStatus,
                                                pmbox->un.varWords[0],
                                                phba->hba_state);
                                pmbox->mbxStatus = 0;
                                pmbox->mbxOwner = OWN_HOST;
                                spin_lock_irq(phba->host->host_lock);
                                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                                spin_unlock_irq(phba->host->host_lock);
                                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                                if (rc == MBX_SUCCESS)
                                        return (0);
                        }
                }

                /* Mailbox cmd <cmd> Cmpl <cmpl> */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "%d:0307 Mailbox cmd x%x Cmpl x%p "
                                "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                phba->brd_no, pmbox->mbxCommand,
                                pmb->mbox_cmpl,
                                *((uint32_t *) pmbox),
                                pmbox->un.varWords[0], pmbox->un.varWords[1],
                                pmbox->un.varWords[2], pmbox->un.varWords[3],
                                pmbox->un.varWords[4], pmbox->un.varWords[5],
                                pmbox->un.varWords[6], pmbox->un.varWords[7]);

                if (pmb->mbox_cmpl) {
                        lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
                        pmb->mbox_cmpl(phba, pmb);
                }
        }

        do {
                process_next = 0;       /* by default don't loop */
                spin_lock_irq(phba->host->host_lock);
                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

                /* Process next mailbox command if there is one */
                if ((pmb = lpfc_mbox_get(phba))) {
                        spin_unlock_irq(phba->host->host_lock);
                        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                        if (rc == MBX_NOT_FINISHED) {
                                pmb->mb.mbxStatus = MBX_NOT_FINISHED;
                                pmb->mbox_cmpl(phba, pmb);
                                process_next = 1;
                                continue;       /* loop back */
                        }
                } else {
                        spin_unlock_irq(phba->host->host_lock);
                        /* Turn on IOCB processing */
                        for (i = 0; i < phba->sli.num_rings; i++) {
                                lpfc_sli_turn_on_ring(phba, i);
                        }

                        /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
                        while (!list_empty(&phba->freebufList)) {
                                struct lpfc_dmabuf *mp;

                                mp = NULL;
                                list_remove_head((&phba->freebufList), mp,
                                                 struct lpfc_dmabuf, list);
                                if (mp) {
                                        lpfc_mbuf_free(phba, mp->virt,
                                                       mp->phys);
                                        kfree(mp);
                                }
                        }
                }

        } while (process_next);

        return (0);
}

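/*
 * Dispatch an unsolicited IOCB: derive the FC R_CTL and Type either from
 * the command itself (ELS receives) or from word 5 of the IOCB, then hand
 * the IOCB to the ring's matching unsolicited-event handler.
 */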
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                            struct lpfc_iocbq *saveq)
{
        IOCB_t * irsp;
        WORD5 * w5p;
        uint32_t Rctl, Type;
        uint32_t match, i;

        match = 0;
        irsp = &(saveq->iocb);
        if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
            || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
                Rctl = FC_ELS_REQ;
                Type = FC_ELS_DATA;
        } else {
                w5p = (WORD5 *) & (saveq->iocb.un.ulpWord[5]);
                Rctl = w5p->hcsw.Rctl;
                Type = w5p->hcsw.Type;

                /* Firmware Workaround */
                if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
                    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
                        Rctl = FC_ELS_REQ;
                        Type = FC_ELS_DATA;
                        w5p->hcsw.Rctl = Rctl;
                        w5p->hcsw.Type = Type;
                }
        }
        /* Unsolicited responses */
        if (pring->prt[0].profile) {
                (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
                match = 1;
        } else {
                /* We must search, based on rctl / type
                   for the right routine */
                for (i = 0; i < pring->num_mask; i++) {
                        if ((pring->prt[i].rctl == Rctl)
                            && (pring->prt[i].type == Type)) {
                                (pring->prt[i].lpfc_sli_rcv_unsol_event)
                                        (phba, pring, saveq);
                                match = 1;
                                break;
                        }
                }
        }
        if (match == 0) {
                /* Unexpected Rctl / Type received */
                /* Ring <ringno> handler: unexpected
                   Rctl <Rctl> Type <Type> received */
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "%d:0313 Ring %d handler: unexpected Rctl x%x "
                                "Type x%x received\n",
                                phba->brd_no, pring->ringno, Rctl, Type);
        }
        return (1);
}

static struct lpfc_iocbq *
lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring,
                                 struct lpfc_iocbq * prspiocb)
{
        IOCB_t *icmd = NULL;
        IOCB_t *irsp = NULL;
        struct lpfc_iocbq *cmd_iocb;
        struct lpfc_iocbq *iocb, *next_iocb;
        uint16_t iotag;

        irsp = &prspiocb->iocb;
        iotag = irsp->ulpIoTag;
        cmd_iocb = NULL;

        /* Search through txcmplq from the beginning */
        list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
                icmd = &iocb->iocb;
                if (iotag == icmd->ulpIoTag) {
                        /* Found a match. */
                        cmd_iocb = iocb;
                        list_del(&iocb->list);
                        pring->txcmplq_cnt--;
                        break;
                }
        }

        return (cmd_iocb);
}

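/*
 * Fast-path variant of the txcmplq search: the response iotag indexes
 * pring->fast_lookup directly and the slot is cleared on a hit.  A missing
 * or out-of-range tag is logged as an error and NULL is returned; there is
 * no fallback to the slow search here.
 */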
static struct lpfc_iocbq *
lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
                                  struct lpfc_sli_ring * pring,
                                  struct lpfc_iocbq * prspiocb)
{
        IOCB_t *irsp = NULL;
        struct lpfc_iocbq *cmd_iocb = NULL;
        uint16_t iotag;

        if (unlikely(pring->fast_lookup == NULL))
                return NULL;

        /* Use fast lookup based on iotag for completion */
        irsp = &prspiocb->iocb;
        iotag = irsp->ulpIoTag;
        if (iotag < pring->fast_iotag) {
                cmd_iocb = *(pring->fast_lookup + iotag);
                *(pring->fast_lookup + iotag) = NULL;
                if (cmd_iocb) {
                        list_del(&cmd_iocb->list);
                        pring->txcmplq_cnt--;
                        return cmd_iocb;
                } else {
                        /*
                         * This is clearly an error.  A ring that uses iotags
                         * should never have an interrupt for a completion that
                         * is not on the ring.  Return NULL and log an error.
                         */
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "%d:0327 Rsp ring %d error - command "
                                        "completion for iotag x%x not found\n",
                                        phba->brd_no, pring->ringno, iotag);
                        return NULL;
                }
        }

        /*
         * Rsp ring <ringno> get: iotag <iotag> greater than
         * configured max <fast_iotag> wd0 <irsp>.  This is an
         * error.  Just return NULL.
         */
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "%d:0317 Rsp ring %d get: iotag x%x greater than "
                        "configured max x%x wd0 x%x\n",
                        phba->brd_no, pring->ringno, iotag, pring->fast_iotag,
                        *(((uint32_t *) irsp) + 7));
        return NULL;
}

static int
lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
                          struct lpfc_iocbq *saveq)
{
        struct lpfc_iocbq * cmdiocbp;
        int rc = 1;
        unsigned long iflag;

        /* Based on the iotag field, get the cmd IOCB from the txcmplq */
        spin_lock_irqsave(phba->host->host_lock, iflag);
        cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq);
        if (cmdiocbp) {
                if (cmdiocbp->iocb_cmpl) {
                        /*
                         * Post all ELS completions to the worker thread.
                         * All others are passed to the completion callback.
                         */
                        if (pring->ringno == LPFC_ELS_RING) {
                                spin_unlock_irqrestore(phba->host->host_lock,
                                                       iflag);
                                (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
                                spin_lock_irqsave(phba->host->host_lock,
                                                  iflag);
                        } else {
                                if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
                                        rc = 0;

                                spin_unlock_irqrestore(phba->host->host_lock,
                                                       iflag);
                                (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
                                spin_lock_irqsave(phba->host->host_lock,
                                                  iflag);
                        }
                } else {
                        list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list);
                }
        } else {
                /*
                 * Unknown initiating command based on the response iotag.
                 * This could be the case on the ELS ring because of
                 * lpfc_els_abort().
                 */
                if (pring->ringno != LPFC_ELS_RING) {
                        /*
                         * Ring <ringno> handler: unexpected completion IoTag
                         * <IoTag>
                         */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                        "%d:0322 Ring %d handler: unexpected "
                                        "completion IoTag x%x Data: x%x x%x x%x x%x\n",
                                        phba->brd_no, pring->ringno,
                                        saveq->iocb.ulpIoTag,
                                        saveq->iocb.ulpStatus,
                                        saveq->iocb.un.ulpWord[4],
                                        saveq->iocb.ulpCommand,
                                        saveq->iocb.ulpContext);
                }
        }
        spin_unlock_irqrestore(phba->host->host_lock, iflag);
        return rc;
}

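/*
 * The FCP fast path below completes response entries without allocating an
 * iocbq from lpfc_iocb_list: only the words the completion handler needs
 * (ulpWord[4], ulpStatus, ulpContext, ulpIoTag) are copied into an on-stack
 * IOCB before the lookup and callback.
 */
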
/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
                                struct lpfc_sli_ring * pring, uint32_t mask)
{
        IOCB_t *irsp = NULL;
        struct lpfc_iocbq *cmdiocbq = NULL;
        struct lpfc_iocbq rspiocbq;
        PGP *pgp;
        uint32_t status;
        uint32_t portRspPut, portRspMax;
        int rc = 1;
        lpfc_iocb_type type;
        unsigned long iflag;
        uint32_t rsp_cmpl = 0;
        void __iomem *to_slim;

        spin_lock_irqsave(phba->host->host_lock, iflag);
        pring->stats.iocb_event++;

        /* The driver assumes SLI-2 mode */
        pgp = (PGP *) &((MAILBOX_t *) phba->sli.MBhostaddr)
                ->us.s2.port[pring->ringno];

        /*
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
        portRspMax = pring->numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (unlikely(portRspPut >= portRspMax)) {
                /*
                 * Ring <ringno> handler: portRspPut <portRspPut> is bigger
                 * than rsp ring <portRspMax>
                 */
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "%d:0312 Ring %d handler: portRspPut %d "
                                "is bigger than rsp ring %d\n",
                                phba->brd_no, pring->ringno, portRspPut,
                                portRspMax);

                phba->hba_state = LPFC_HBA_ERROR;

                /* All error attention handlers are posted to worker thread */
                phba->work_ha |= HA_ERATT;
                phba->work_hs = HS_FFER3;
                if (phba->work_wait)
                        wake_up(phba->work_wait);

                spin_unlock_irqrestore(phba->host->host_lock, iflag);
                return 1;
        }

        rmb();
        while (pring->rspidx != portRspPut) {
                irsp = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
                pring->stats.iocb_rsp++;
                rsp_cmpl++;

                if (unlikely(irsp->ulpStatus)) {
                        /* Rsp ring <ringno> error: IOCB */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                        "%d:0326 Rsp Ring %d error: IOCB Data: "
                                        "x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                        phba->brd_no, pring->ringno,
                                        irsp->un.ulpWord[0], irsp->un.ulpWord[1],
                                        irsp->un.ulpWord[2], irsp->un.ulpWord[3],
                                        irsp->un.ulpWord[4], irsp->un.ulpWord[5],
                                        *(((uint32_t *) irsp) + 6),
                                        *(((uint32_t *) irsp) + 7));
                }

                switch (type) {
                case LPFC_ABORT_IOCB:
                case LPFC_SOL_IOCB:
                        /*
                         * Idle exchange closed via ABTS from port.  No iocb
                         * resources need to be recovered.
                         */
                        if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
                                printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
                                       "Skipping completion\n", __FUNCTION__,
                                       irsp->ulpCommand);
                                break;
                        }

                        rspiocbq.iocb.un.ulpWord[4] = irsp->un.ulpWord[4];
                        rspiocbq.iocb.ulpStatus = irsp->ulpStatus;
                        rspiocbq.iocb.ulpContext = irsp->ulpContext;
                        rspiocbq.iocb.ulpIoTag = irsp->ulpIoTag;
                        cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba,
                                                                     pring,
                                                                     &rspiocbq);
                        if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
                                spin_unlock_irqrestore(phba->host->host_lock,
                                                       iflag);
                                (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
                                                      &rspiocbq);
                                spin_lock_irqsave(phba->host->host_lock,
                                                  iflag);
                        }
                        break;
                default:
                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
                                char adaptermsg[LPFC_MAX_ADPTMSG];

                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
                                memcpy(&adaptermsg[0], (uint8_t *) irsp,
                                       MAX_MSG_DATA);
                                dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
                                         phba->brd_no, adaptermsg);
                        } else {
                                /* Unknown IOCB command */
                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                                "%d:0321 Unknown IOCB command "
                                                "Data: x%x, x%x x%x x%x x%x\n",
                                                phba->brd_no, type,
                                                irsp->ulpCommand,
                                                irsp->ulpStatus,
                                                irsp->ulpIoTag,
                                                irsp->ulpContext);
                        }
                        break;
                }

                /*
                 * The response IOCB has been processed.  Update the ring
                 * pointer in SLIM.  If the port response put pointer has not
                 * been updated, sync the pgp->rspPutInx and fetch the new port
                 * response put pointer.
                 */
                if (++pring->rspidx >= portRspMax)
                        pring->rspidx = 0;

                to_slim = phba->MBslimaddr +
                          (SLIMOFF + (pring->ringno * 2) + 1) * 4;
                writeb(pring->rspidx, to_slim);

                if (pring->rspidx == portRspPut)
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
        }

        if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
                pring->stats.iocb_rsp_full++;
                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
                writel(status, phba->CAregaddr);
                readl(phba->CAregaddr);
        }
        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
                pring->stats.iocb_cmd_empty++;

                /* Force update of the local copy of cmdGetInx */
                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);

                if ((pring->lpfc_sli_cmd_available))
                        (pring->lpfc_sli_cmd_available) (phba, pring);

        }

        spin_unlock_irqrestore(phba->host->host_lock, iflag);
        return rc;
}

int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
                                struct lpfc_sli_ring * pring, uint32_t mask)
{
        IOCB_t *entry;
        IOCB_t *irsp = NULL;
        struct lpfc_iocbq *rspiocbp = NULL;
        struct lpfc_iocbq *next_iocb;
        struct lpfc_iocbq *cmdiocbp;
        struct lpfc_iocbq *saveq;
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        HGP *hgp;
        PGP *pgp;
        MAILBOX_t *mbox;
        uint8_t iocb_cmd_type;
        lpfc_iocb_type type;
        uint32_t status, free_saveq;
        uint32_t portRspPut, portRspMax;
        int rc = 1;
        unsigned long iflag;
        void __iomem *to_slim;

        spin_lock_irqsave(phba->host->host_lock, iflag);
        pring->stats.iocb_event++;

        /* The driver assumes SLI-2 mode */
        mbox = (MAILBOX_t *) phba->sli.MBhostaddr;
        pgp = (PGP *) &mbox->us.s2.port[pring->ringno];
        hgp = (HGP *) &mbox->us.s2.host[pring->ringno];

        /*
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
        portRspMax = pring->numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (portRspPut >= portRspMax) {
                /*
                 * Ring <ringno> handler: portRspPut <portRspPut> is bigger
                 * than rsp ring <portRspMax>
                 */
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "%d:0312 Ring %d handler: portRspPut %d "
                                "is bigger than rsp ring %d\n",
                                phba->brd_no, pring->ringno, portRspPut,
                                portRspMax);

                phba->hba_state = LPFC_HBA_ERROR;
                spin_unlock_irqrestore(phba->host->host_lock, iflag);

                phba->work_hs = HS_FFER3;
                lpfc_handle_eratt(phba);

                return 1;
        }

        rmb();
        lpfc_iocb_list = &phba->lpfc_iocb_list;
        while (pring->rspidx != portRspPut) {
                /*
                 * Build a completion list and call the appropriate handler.
                 * The process is to get the next available response iocb, get
                 * a free iocb from the list, copy the response data into the
                 * free iocb, insert it on the continuation list, and update
                 * the next response index to slim.  This process makes
                 * response iocbs in the ring available to DMA as fast as
                 * possible but pays a penalty for a copy operation.  Since
                 * the iocb is only 32 bytes, this penalty is considered small
                 * relative to the PCI reads for register values and a slim
                 * write.  When the ulpLe field is set, the entire command has
                 * been received.
                 */
                entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
                list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq,
                                 list);
                if (rspiocbp == NULL) {
                        printk(KERN_ERR "%s: out of buffers! Failing "
                               "completion.\n", __FUNCTION__);
                        break;
                }

                lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
                irsp = &rspiocbp->iocb;

                if (++pring->rspidx >= portRspMax)
                        pring->rspidx = 0;

                to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
                                              + 1) * 4;
                writeb(pring->rspidx, to_slim);

                if (list_empty(&(pring->iocb_continueq))) {
                        list_add(&rspiocbp->list, &(pring->iocb_continueq));
                } else {
                        list_add_tail(&rspiocbp->list,
                                      &(pring->iocb_continueq));
                }

                pring->iocb_continueq_cnt++;
                if (irsp->ulpLe) {
                        /*
                         * By default, the driver expects to free all resources
                         * associated with this iocb completion.
                         */
                        free_saveq = 1;
                        saveq = list_get_first(&pring->iocb_continueq,
                                               struct lpfc_iocbq, list);
                        irsp = &(saveq->iocb);
                        list_del_init(&pring->iocb_continueq);
                        pring->iocb_continueq_cnt = 0;

                        pring->stats.iocb_rsp++;

                        if (irsp->ulpStatus) {
                                /* Rsp ring <ringno> error: IOCB */
                                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                                "%d:0328 Rsp Ring %d error: IOCB Data: "
                                                "x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                                phba->brd_no, pring->ringno,
                                                irsp->un.ulpWord[0],
                                                irsp->un.ulpWord[1],
                                                irsp->un.ulpWord[2],
                                                irsp->un.ulpWord[3],
                                                irsp->un.ulpWord[4],
                                                irsp->un.ulpWord[5],
                                                *(((uint32_t *) irsp) + 6),
                                                *(((uint32_t *) irsp) + 7));
                        }

                        /*
                         * Fetch the IOCB command type and call the correct
                         * completion routine.  Solicited and Unsolicited
                         * IOCBs on the ELS ring get freed back to the
                         * lpfc_iocb_list by the discovery kernel thread.
                         */
                        iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
                        type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
                        if (type == LPFC_SOL_IOCB) {
                                spin_unlock_irqrestore(phba->host->host_lock,
                                                       iflag);
                                rc = lpfc_sli_process_sol_iocb(phba, pring,
                                                               saveq);
                                spin_lock_irqsave(phba->host->host_lock,
                                                  iflag);
                        } else if (type == LPFC_UNSOL_IOCB) {
                                spin_unlock_irqrestore(phba->host->host_lock,
                                                       iflag);
                                rc = lpfc_sli_process_unsol_iocb(phba, pring,
                                                                 saveq);
                                spin_lock_irqsave(phba->host->host_lock,
                                                  iflag);
                        } else if (type == LPFC_ABORT_IOCB) {
                                if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
                                    ((cmdiocbp =
                                      lpfc_sli_txcmpl_ring_search_slow(pring,
                                                                       saveq)))) {
                                        /* Call the specified completion
                                           routine */
                                        if (cmdiocbp->iocb_cmpl) {
                                                spin_unlock_irqrestore(
                                                        phba->host->host_lock,
                                                        iflag);
                                                (cmdiocbp->iocb_cmpl) (phba,
                                                        cmdiocbp, saveq);
                                                spin_lock_irqsave(
                                                        phba->host->host_lock,
                                                        iflag);
                                        } else {
                                                list_add_tail(&cmdiocbp->list,
                                                              lpfc_iocb_list);
                                        }
                                }
                        } else if (type == LPFC_UNKNOWN_IOCB) {
                                if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

                                        char adaptermsg[LPFC_MAX_ADPTMSG];

                                        memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
                                        memcpy(&adaptermsg[0], (uint8_t *) irsp,
                                               MAX_MSG_DATA);
                                        dev_warn(&((phba->pcidev)->dev),
                                                 "lpfc%d: %s",
                                                 phba->brd_no, adaptermsg);
                                } else {
                                        /* Unknown IOCB command */
                                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                                        "%d:0321 Unknown IOCB command "
                                                        "Data: x%x x%x x%x x%x\n",
                                                        phba->brd_no,
                                                        irsp->ulpCommand,
                                                        irsp->ulpStatus,
                                                        irsp->ulpIoTag,
                                                        irsp->ulpContext);
                                }
                        }

                        if (free_saveq) {
                                if (!list_empty(&saveq->list)) {
                                        list_for_each_entry_safe(rspiocbp,
                                                                 next_iocb,
                                                                 &saveq->list,
                                                                 list) {
                                                list_add_tail(&rspiocbp->list,
                                                              lpfc_iocb_list);
                                        }
                                }

                                list_add_tail(&saveq->list, lpfc_iocb_list);
                        }
                }

                /*
                 * If the port response put pointer has not been updated, sync
                 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
                 * response put pointer.
                 */
                if (pring->rspidx == portRspPut) {
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
                }
        } /* while (pring->rspidx != portRspPut) */

        if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
                /* At least one response entry has been freed */
                pring->stats.iocb_rsp_full++;
                /* SET RxRE_RSP in Chip Att register */
                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
                writel(status, phba->CAregaddr);
                readl(phba->CAregaddr); /* flush */
        }
        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
                pring->stats.iocb_cmd_empty++;

                /* Force update of the local copy of cmdGetInx */
                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);

                if ((pring->lpfc_sli_cmd_available))
                        (pring->lpfc_sli_cmd_available) (phba, pring);

        }

        spin_unlock_irqrestore(phba->host->host_lock, iflag);
        return rc;
}

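/*
 * Flush a ring: every IOCB on the txq and txcmplq is completed with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED (and any fast_lookup entry
 * cleared) so its completion handler can release resources.
 */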
int
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *iocb, *next_iocb;
        IOCB_t *icmd = NULL, *cmd = NULL;
        int errcnt;
        uint16_t iotag;

        errcnt = 0;

        /* Error everything on txq and txcmplq.
         * First do the txq.
         */
        spin_lock_irq(phba->host->host_lock);
        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
                list_del_init(&iocb->list);
                if (iocb->iocb_cmpl) {
                        icmd = &iocb->iocb;
                        icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        spin_unlock_irq(phba->host->host_lock);
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
                        spin_lock_irq(phba->host->host_lock);
                } else {
                        list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
                }
        }
        pring->txq_cnt = 0;
        INIT_LIST_HEAD(&(pring->txq));

        /* Next issue ABTS for everything on the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
                cmd = &iocb->iocb;

                /*
                 * Immediate abort of IOCB: clear the fast_lookup entry,
                 * if any, dequeue it, and call its completion routine.
                 */
                iotag = cmd->ulpIoTag;
                if (iotag && pring->fast_lookup &&
                    (iotag < pring->fast_iotag))
                        pring->fast_lookup[iotag] = NULL;

                list_del_init(&iocb->list);
                pring->txcmplq_cnt--;

                if (iocb->iocb_cmpl) {
                        cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        spin_unlock_irq(phba->host->host_lock);
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
                        spin_lock_irq(phba->host->host_lock);
                } else {
                        list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
                }
        }

        INIT_LIST_HEAD(&pring->txcmplq);
        pring->txcmplq_cnt = 0;
        spin_unlock_irq(phba->host->host_lock);

        return errcnt;
}

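/*
 * Illustrative caller sequence for lpfc_sli_send_reset() (this is what
 * lpfc_sli_brdreset() below actually does):
 *
 *      lpfc_sli_send_reset(phba, skip_post);
 *      mdelay(1);
 *      readl(phba->HCregaddr);         (flush)
 */
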
/******************************************************************************
 * lpfc_sli_send_reset
 *
 * Note: After returning from this function, the HBA cannot be accessed for
 * 1 ms.  Since we do not wish to delay in interrupt context, it is the
 * responsibility of the caller to perform the mdelay(1) and flush via readl().
 ******************************************************************************/
static int
lpfc_sli_send_reset(struct lpfc_hba * phba, uint16_t skip_post)
{
        MAILBOX_t *swpmb;
        volatile uint32_t word0;
        void __iomem *to_slim;
        unsigned long flags = 0;

        spin_lock_irqsave(phba->host->host_lock, flags);

        /* A board reset must use REAL SLIM. */
        phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;

        word0 = 0;
        swpmb = (MAILBOX_t *) &word0;
        swpmb->mbxCommand = MBX_RESTART;
        swpmb->mbxHc = 1;

        to_slim = phba->MBslimaddr;
        writel(*(uint32_t *) swpmb, to_slim);
        readl(to_slim); /* flush */

        /* Only skip post after fc_ffinit is completed */
        if (skip_post) {
                word0 = 1;      /* This is really setting up word1 */
        } else {
                word0 = 0;      /* This is really setting up word1 */
        }
        to_slim = phba->MBslimaddr + sizeof (uint32_t);
        writel(*(uint32_t *) swpmb, to_slim);
        readl(to_slim); /* flush */

        /* Turn off parity checking and serr during the physical reset */
        pci_read_config_word(phba->pcidev, PCI_COMMAND, &phba->pci_cfg_value);
        pci_write_config_word(phba->pcidev, PCI_COMMAND,
                              (phba->pci_cfg_value &
                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

        writel(HC_INITFF, phba->HCregaddr);

        phba->hba_state = LPFC_INIT_START;
        spin_unlock_irqrestore(phba->host->host_lock, flags);

        return 0;
}

static int
lpfc_sli_brdreset(struct lpfc_hba * phba, uint16_t skip_post)
{
        struct lpfc_sli_ring *pring;
        int i;
        struct lpfc_dmabuf *mp, *next_mp;
        unsigned long flags = 0;

        lpfc_sli_send_reset(phba, skip_post);
        mdelay(1);

        spin_lock_irqsave(phba->host->host_lock, flags);
        /* Risk the write-on-flush case, i.e. no delay after the readl */
        readl(phba->HCregaddr); /* flush */
        /* Now toggle the INITFF bit set by lpfc_sli_send_reset */
        writel(0, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */

        /* Restore PCI cmd register */
        pci_write_config_word(phba->pcidev, PCI_COMMAND, phba->pci_cfg_value);

        /* perform board reset */
        phba->fc_eventTag = 0;
        phba->fc_myDID = 0;
        phba->fc_prevDID = Mask_DID;

        /* Reset HBA */
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "%d:0325 Reset HBA Data: x%x x%x x%x\n",
                        phba->brd_no, phba->hba_state, phba->sli.sli_flag,
                        skip_post);

        /* Initialize relevant SLI info */
        for (i = 0; i < phba->sli.num_rings; i++) {
                pring = &phba->sli.ring[i];
                pring->flag = 0;
                pring->rspidx = 0;
                pring->next_cmdidx = 0;
                pring->local_getidx = 0;
                pring->cmdidx = 0;
                pring->missbufcnt = 0;
        }
        spin_unlock_irqrestore(phba->host->host_lock, flags);

        if (skip_post) {
                mdelay(100);
        } else {
                mdelay(2000);
        }

        spin_lock_irqsave(phba->host->host_lock, flags);
        /* Clean up preposted buffers on the ELS ring */
        pring = &phba->sli.ring[LPFC_ELS_RING];
        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
                list_del(&mp->list);
                pring->postbufq_cnt--;
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
        }
        spin_unlock_irqrestore(phba->host->host_lock, flags);

        for (i = 0; i < phba->sli.num_rings; i++)
                lpfc_sli_abort_iocb_ring(phba, &phba->sli.ring[i]);

        return 0;
}

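/*
 * Poll the Host Status register until the chipset reports ready
 * (HS_FFRDY | HS_MBRDY), resetting the board partway through the wait.
 * Returns 0 on success, -ETIMEDOUT if the HBA never becomes ready, or
 * -EIO if an error is flagged during initialization.
 */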
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
        uint32_t status, i = 0;

        /* Read the HBA Host Status Register */
        status = readl(phba->HSregaddr);

        /* Check status register to see what current state is */
        i = 0;
        while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

                /* Check every 10ms for 5 retries, then every 500ms for 5,
                 * then every 2.5 sec for 5, then reset the board and check
                 * every 2.5 sec for 4 more.
                 */
                if (i++ >= 20) {
                        /* Adapter failed to init, timeout, status reg
                           <status> */
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "%d:0436 Adapter failed to init, "
                                        "timeout, status reg x%x\n",
                                        phba->brd_no, status);
                        phba->hba_state = LPFC_HBA_ERROR;
                        return -ETIMEDOUT;
                }

                /* Check to see if any errors occurred during init */
                if (status & HS_FFERM) {
                        /* ERROR: During chipset initialization */
                        /* Adapter failed to init, chipset, status reg
                           <status> */
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "%d:0437 Adapter failed to init, "
                                        "chipset, status reg x%x\n",
                                        phba->brd_no, status);
                        phba->hba_state = LPFC_HBA_ERROR;
                        return -EIO;
                }

                if (i <= 5) {
                        msleep(10);
                } else if (i <= 10) {
                        msleep(500);
                } else {
                        msleep(2500);
                }

                if (i == 15) {
                        lpfc_sli_brdreset(phba, 0);
                }
                /* Read the HBA Host Status Register */
                status = readl(phba->HSregaddr);
        }

        /* Check to see if any errors occurred during init */
        if (status & HS_FFERM) {
                /* ERROR: During chipset initialization */
                /* Adapter failed to init, chipset, status reg <status> */
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "%d:0438 Adapter failed to init, chipset, "
                                "status reg x%x\n",
                                phba->brd_no, status);
                phba->hba_state = LPFC_HBA_ERROR;
                return -EIO;
        }

        /* Clear all interrupt enable conditions */
        writel(0, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */

        /* Set up the host attn register */
        writel(0xffffffff, phba->HAregaddr);
        readl(phba->HAregaddr); /* flush */
        return 0;
}

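/*
 * Bring the HBA to an operational state: reset the board (at most twice),
 * initialize the chipset, run the pre-CONFIG_PORT preparation, issue
 * CONFIG_PORT, map the rings, and finish with the post-configuration setup.
 */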
int
lpfc_sli_hba_setup(struct lpfc_hba * phba)
{
        LPFC_MBOXQ_t *pmb;
        uint32_t resetcount = 0, rc = 0, done = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->hba_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        while (resetcount < 2 && !done) {
                phba->hba_state = 0;
                lpfc_sli_brdreset(phba, 0);
                msleep(2500);
                rc = lpfc_sli_chipset_init(phba);
                if (rc)
                        break;

                resetcount++;

                /* Call pre CONFIG_PORT mailbox command initialization.  A
                 * value of 0 means the call was successful.  Any nonzero
                 * value is a failure, but if ERESTART is returned, the
                 * driver may reset the HBA and try again.
                 */
                rc = lpfc_config_port_prep(phba);
                if (rc == -ERESTART) {
                        phba->hba_state = 0;
                        continue;
                } else if (rc) {
                        break;
                }

                phba->hba_state = LPFC_INIT_MBX_CMDS;
                lpfc_config_port(phba, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc == MBX_SUCCESS)
                        done = 1;
                else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "%d:0442 Adapter failed to init, mbxCmd x%x "
                                        "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
                                        phba->brd_no, pmb->mb.mbxCommand,
                                        pmb->mb.mbxStatus, 0);
                        phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
                }
        }
        if (!done)
                goto lpfc_sli_hba_setup_error;

        rc = lpfc_sli_ring_map(phba, pmb);

        if (rc)
                goto lpfc_sli_hba_setup_error;

        phba->sli.sli_flag |= LPFC_PROCESS_LA;

        rc = lpfc_config_port_post(phba);
        if (rc)
                goto lpfc_sli_hba_setup_error;

        goto lpfc_sli_hba_setup_exit;
lpfc_sli_hba_setup_error:
        phba->hba_state = LPFC_HBA_ERROR;
lpfc_sli_hba_setup_exit:
        mempool_free(pmb, phba->mbox_mem_pool);
        return rc;
}

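/*
 * Complete the active mailbox command, then every queued one, with
 * MBX_NOT_FINISHED so their completion handlers can release resources.
 */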
static void
lpfc_mbox_abort(struct lpfc_hba * phba)
{
        LPFC_MBOXQ_t *pmbox;
        MAILBOX_t *mb;

        if (phba->sli.mbox_active) {
                del_timer_sync(&phba->sli.mbox_tmo);
                phba->work_hba_events &= ~WORKER_MBOX_TMO;
                pmbox = phba->sli.mbox_active;
                mb = &pmbox->mb;
                phba->sli.mbox_active = NULL;
                if (pmbox->mbox_cmpl) {
                        mb->mbxStatus = MBX_NOT_FINISHED;
                        (pmbox->mbox_cmpl) (phba, pmbox);
                }
                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
        }

        /* Abort all the non active mailbox commands. */
        spin_lock_irq(phba->host->host_lock);
        pmbox = lpfc_mbox_get(phba);
        while (pmbox) {
                mb = &pmbox->mb;
                if (pmbox->mbox_cmpl) {
                        mb->mbxStatus = MBX_NOT_FINISHED;
                        spin_unlock_irq(phba->host->host_lock);
                        (pmbox->mbox_cmpl) (phba, pmbox);
                        spin_lock_irq(phba->host->host_lock);
                }
                pmbox = lpfc_mbox_get(phba);
        }
        spin_unlock_irq(phba->host->host_lock);
        return;
}

/*! lpfc_mbox_timeout
 *
 * \pre
 * \post
 * \param hba Pointer to the per-HBA struct lpfc_hba structure
 * \param l1  Pointer to the driver's mailbox queue.
 * \return
 *   void
 *
 * \b Description:
 *
 * This routine handles mailbox timeout events at timer interrupt context.
 */
void
lpfc_mbox_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba;
        unsigned long iflag;

        phba = (struct lpfc_hba *)ptr;
        spin_lock_irqsave(phba->host->host_lock, iflag);
        if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
                phba->work_hba_events |= WORKER_MBOX_TMO;
                if (phba->work_wait)
                        wake_up(phba->work_wait);
        }
        spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmbox;
        MAILBOX_t *mb;

        spin_lock_irq(phba->host->host_lock);
        if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
                spin_unlock_irq(phba->host->host_lock);
                return;
        }

        pmbox = phba->sli.mbox_active;
        mb = &pmbox->mb;

        /* Mbox cmd <mbxCommand> timeout */
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                        "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
                        phba->brd_no, mb->mbxCommand, phba->hba_state,
                        phba->sli.sli_flag, phba->sli.mbox_active);

        if (phba->sli.mbox_active == pmbox) {
                phba->sli.mbox_active = NULL;
                if (pmbox->mbox_cmpl) {
                        mb->mbxStatus = MBX_NOT_FINISHED;
                        spin_unlock_irq(phba->host->host_lock);
                        (pmbox->mbox_cmpl) (phba, pmbox);
                        spin_lock_irq(phba->host->host_lock);
                }
                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
        }

        spin_unlock_irq(phba->host->host_lock);
        lpfc_mbox_abort(phba);
        return;
}

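/*
 * Issue a mailbox command.  With MBX_NOWAIT the command is started (or
 * queued behind the active one, returning MBX_BUSY) and completed later
 * through the mailbox interrupt; with MBX_POLL this routine busy-waits and
 * returns the final mbxStatus.  MBX_NOT_FINISHED means the command could
 * not be issued at all.
 */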
int
lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
{
        MAILBOX_t *mbox;
        MAILBOX_t *mb;
        struct lpfc_sli *psli;
        uint32_t status, evtctr;
        uint32_t ha_copy;
        int i;
        unsigned long drvr_flag = 0;
        volatile uint32_t word0, ldata;
        void __iomem *to_slim;

        psli = &phba->sli;

        spin_lock_irqsave(phba->host->host_lock, drvr_flag);

        mb = &pmbox->mb;
        status = MBX_SUCCESS;

        if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
                /* Polling for a mbox command when another one is already
                 * active is not allowed in SLI.  Also, the driver must have
                 * established SLI2 mode to queue and process multiple mbox
                 * commands.
                 */

                if (flag & MBX_POLL) {
                        spin_unlock_irqrestore(phba->host->host_lock,
                                               drvr_flag);

                        /* Mbox command <mbxCommand> cannot issue */
                        LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag)
                        return (MBX_NOT_FINISHED);
                }

                if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
                        spin_unlock_irqrestore(phba->host->host_lock,
                                               drvr_flag);
                        /* Mbox command <mbxCommand> cannot issue */
                        LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag)
                        return (MBX_NOT_FINISHED);
                }

                /* Handle the STOP IOCB processing flag.  This is only
                 * meaningful if we are not polling for mbox completion.
                 */
                if (flag & MBX_STOP_IOCB) {
                        flag &= ~MBX_STOP_IOCB;
                        /* Now flag each ring */
                        for (i = 0; i < psli->num_rings; i++) {
                                /* If the ring is active, flag it */
                                if (psli->ring[i].cmdringaddr) {
                                        psli->ring[i].flag |=
                                                LPFC_STOP_IOCB_MBX;
                                }
                        }
                }

                /* Another mailbox command is still being processed, queue this
                 * command to be processed later.
                 */
                lpfc_mbox_put(phba, pmbox);

                /* Mbox cmd issue - BUSY */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
                                phba->brd_no, mb->mbxCommand, phba->hba_state,
                                psli->sli_flag, flag);

                psli->slistat.mbox_busy++;
                spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);

                return (MBX_BUSY);
        }

        /* Handle the STOP IOCB processing flag.  This is only meaningful
         * if we are not polling for mbox completion.
         */
        if (flag & MBX_STOP_IOCB) {
                flag &= ~MBX_STOP_IOCB;
                if (flag == MBX_NOWAIT) {
                        /* Now flag each ring */
                        for (i = 0; i < psli->num_rings; i++) {
                                /* If the ring is active, flag it */
                                if (psli->ring[i].cmdringaddr) {
                                        psli->ring[i].flag |=
                                                LPFC_STOP_IOCB_MBX;
                                }
                        }
                }
        }

        psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

        /* If we are not polling, we MUST be in SLI2 mode */
        if (flag != MBX_POLL) {
                if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
                        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                        spin_unlock_irqrestore(phba->host->host_lock,
                                               drvr_flag);
                        /* Mbox command <mbxCommand> cannot issue */
                        LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
                        return (MBX_NOT_FINISHED);
                }
                /* timeout active mbox command */
                mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
        }

        /* Mailbox cmd <cmd> issue */
        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                        "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
                        phba->brd_no, mb->mbxCommand, phba->hba_state,
                        psli->sli_flag, flag);

        psli->slistat.mbox_cmd++;
        evtctr = psli->slistat.mbox_event;

        /* next set own bit for the adapter and copy over command word */
        mb->mbxOwner = OWN_CHIP;

        if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
                /* First copy command data to host SLIM area */
                mbox = (MAILBOX_t *) psli->MBhostaddr;
                lpfc_sli_pcimem_bcopy(mb, mbox, MAILBOX_CMD_SIZE);
        } else {
                if (mb->mbxCommand == MBX_CONFIG_PORT) {
                        /* copy command data into host mbox for cmpl */
                        mbox = (MAILBOX_t *) psli->MBhostaddr;
                        lpfc_sli_pcimem_bcopy(mb, mbox, MAILBOX_CMD_SIZE);
                }

                /* First copy mbox command data to HBA SLIM, skip past first
                   word */
                to_slim = phba->MBslimaddr + sizeof (uint32_t);
                lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
                                    MAILBOX_CMD_SIZE - sizeof (uint32_t));

                /* Next copy over first word, with mbxOwner set */
                ldata = *((volatile uint32_t *)mb);
                to_slim = phba->MBslimaddr;
                writel(ldata, to_slim);
                readl(to_slim); /* flush */

                if (mb->mbxCommand == MBX_CONFIG_PORT) {
                        /* switch over to host mailbox */
                        psli->sli_flag |= LPFC_SLI2_ACTIVE;
                }
        }

        wmb();
        /* interrupt the board to do it right away */
        writel(CA_MBATT, phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */

        switch (flag) {
        case MBX_NOWAIT:
                /* Don't wait for it to finish, just return */
                psli->mbox_active = pmbox;
                break;

        case MBX_POLL:
                i = 0;
                psli->mbox_active = NULL;
                if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
                        /* First read mbox status word */
                        mbox = (MAILBOX_t *) psli->MBhostaddr;
                        word0 = *((volatile uint32_t *)mbox);
                        word0 = le32_to_cpu(word0);
                } else {
                        /* First read mbox status word */
                        word0 = readl(phba->MBslimaddr);
                }

                /* Read the HBA Host Attention Register */
                ha_copy = readl(phba->HAregaddr);

                /* Wait for command to complete */
                while (((word0 & OWN_CHIP) == OWN_CHIP)
                       || !(ha_copy & HA_MBATT)) {
                        if (i++ >= 100) {
                                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                                spin_unlock_irqrestore(phba->host->host_lock,
                                                       drvr_flag);
                                return (MBX_NOT_FINISHED);
                        }

                        /* Check if we took a mbox interrupt while we were
                           polling */
                        if (((word0 & OWN_CHIP) != OWN_CHIP)
                            && (evtctr != psli->slistat.mbox_event))
                                break;

                        spin_unlock_irqrestore(phba->host->host_lock,
                                               drvr_flag);

                        /* Can be in interrupt context, do not sleep */
                        /* (or might be called with interrupts disabled) */
                        mdelay(i);

                        spin_lock_irqsave(phba->host->host_lock, drvr_flag);

                        if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
                                /* First copy command data */
                                mbox = (MAILBOX_t *) psli->MBhostaddr;
                                word0 = *((volatile uint32_t *)mbox);
                                word0 = le32_to_cpu(word0);
                                if (mb->mbxCommand == MBX_CONFIG_PORT) {
                                        MAILBOX_t *slimmb;
                                        volatile uint32_t slimword0;

                                        /* Check real SLIM for any errors */
                                        slimword0 = readl(phba->MBslimaddr);
                                        slimmb = (MAILBOX_t *) &slimword0;
                                        if (((slimword0 & OWN_CHIP) != OWN_CHIP)
                                            && slimmb->mbxStatus) {
                                                psli->sli_flag &=
                                                        ~LPFC_SLI2_ACTIVE;
                                                word0 = slimword0;
                                        }
                                }
                        } else {
                                /* First copy command data */
                                word0 = readl(phba->MBslimaddr);
                        }
                        /* Read the HBA Host Attention Register */
                        ha_copy = readl(phba->HAregaddr);
                }

                if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
                        /* First copy command data */
                        mbox = (MAILBOX_t *) psli->MBhostaddr;
                        /* copy results back to user */
                        lpfc_sli_pcimem_bcopy(mbox, mb, MAILBOX_CMD_SIZE);
                } else {
                        /* First copy command data */
                        lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
                                              MAILBOX_CMD_SIZE);
                        if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
                            pmbox->context2) {
                                lpfc_memcpy_from_slim((void *)pmbox->context2,
                                                      phba->MBslimaddr
                                                      + DMP_RSP_OFFSET,
                                                      mb->un.varDmp.word_cnt);
                        }
                }

                writel(HA_MBATT, phba->HAregaddr);
                readl(phba->HAregaddr); /* flush */

                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                status = mb->mbxStatus;
        }

        spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
        return (status);
}

static int
lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
	return (0);
}

static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	/* Drain the txq first; fall back to the caller's iocb once the
	   txq is empty. */
	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
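/*
 * lpfc_sli_issue_iocb: hand a command iocb to the SLI layer.  Returns
 * IOCB_SUCCESS when the iocb (or a previously queued one) was placed in
 * a ring slot or parked on the txq, IOCB_BUSY when SLI_IOCB_RET_IOCB
 * asked for an unsubmittable iocb back, and IOCB_ERROR when the link
 * state forbids the command.  SLI_IOCB_HIGH_PRIORITY bypasses the txq
 * whenever a ring slot is free.
 */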
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding mbox command.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
		goto iocb_busy;

	if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
		case CMD_CLOSE_XRI_CN:
		case CMD_ABORT_XRI_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be NULL.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
		goto iocb_busy;

	/*
	 * Check to see if this is a high priority command.
	 * If so bypass tx queue processing.
	 */
	if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
		     (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
		lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
		piocb = NULL;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}

int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocb = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->ip_ring = LPFC_IP_RING;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->iotag_ctr = 0;
			pring->iotag_max = (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_IP_RING:	/* ring 1 - IP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->num_mask = 4;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_ELS_REQ;
			pring->prt[0].type = FC_ELS_DATA;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_ELS_RSP;
			pring->prt[1].type = FC_ELS_DATA;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocb += (pring->numCiocb + pring->numRiocb);
	}
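	/*
	 * The per-ring entry counts configured above must collectively fit
	 * in the SLI2 SLIM area; log an error (but keep going) if the total
	 * is larger than the SLIM can hold.
	 */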
	if (totiocb > MAX_SLI2_IOCB) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0462 Too many cmd / rsp ring entries in "
				"SLI2 SLIM Data: x%x x%x\n",
				phba->brd_no, totiocb, MAX_SLI2_IOCB);
	}

	return 0;
}

int
lpfc_sli_queue_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i, cnt;

	psli = &phba->sli;
	spin_lock_irq(phba->host->host_lock);
	INIT_LIST_HEAD(&psli->mboxq);
	/* Initialize list headers for txq and txcmplq as double linked
	   lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->postbufq);
		cnt = pring->fast_iotag;

		/* Drop the lock across the allocation; GFP_KERNEL may
		   sleep. */
		spin_unlock_irq(phba->host->host_lock);
		if (cnt) {
			pring->fast_lookup =
			    kmalloc(cnt * sizeof (struct lpfc_iocbq *),
				    GFP_KERNEL);
			if (pring->fast_lookup == NULL)
				return (0);
			memset((char *)pring->fast_lookup, 0,
			       cnt * sizeof (struct lpfc_iocbq *));
		}
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);
	return (1);
}
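/*
 * lpfc_sli_hba_down: take the SLI layer down in preparation for an HBA
 * reset or removal.  Iocbs still waiting on a txq are either failed back
 * to their owners with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN or returned
 * to the free list, active and queued mailbox commands are completed
 * with MBX_NOT_FINISHED, and the board is reset unless it is already in
 * the error state.
 */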
int
lpfc_sli_hba_down(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL;
	int i;
	unsigned long flags = 0;

	psli = &phba->sli;
	lpfc_hba_down_prep(phba);

	spin_lock_irqsave(phba->host->host_lock, flags);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag |= LPFC_DEFERRED_RING_EVENT;

		/*
		 * Error everything on the txq since these iocbs have not
		 * been given to the FW yet.
		 */
		pring->txq_cnt = 0;

		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			list_del_init(&iocb->list);
			if (iocb->iocb_cmpl) {
				icmd = &iocb->iocb;
				icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
				icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
				spin_unlock_irqrestore(phba->host->host_lock,
						       flags);
				(iocb->iocb_cmpl) (phba, iocb, iocb);
				spin_lock_irqsave(phba->host->host_lock,
						  flags);
			} else {
				list_add_tail(&iocb->list,
					      &phba->lpfc_iocb_list);
			}
		}

		INIT_LIST_HEAD(&(pring->txq));

		if (pring->fast_lookup) {
			kfree(pring->fast_lookup);
			pring->fast_lookup = NULL;
		}
	}

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);
	spin_lock_irqsave(phba->host->host_lock, flags);
	phba->work_hba_events &= ~WORKER_MBOX_TMO;
	if (psli->mbox_active) {
		pmb = psli->mbox_active;
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba, pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;

	/* Return any pending mbox cmds */
	while ((pmb = lpfc_mbox_get(phba)) != NULL) {
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba, pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}

	INIT_LIST_HEAD(&psli->mboxq);

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/*
	 * Provided the hba is not in an error state, reset it.  It is not
	 * capable of IO anymore.
	 */
	if (phba->hba_state != LPFC_HBA_ERROR) {
		phba->hba_state = LPFC_INIT_START;
		lpfc_sli_brdreset(phba, 1);
	}

	return 1;
}
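/*
 * lpfc_sli_pcimem_bcopy: copy cnt bytes between a little-endian SLI2
 * SLIM image and a host-endian buffer, one 32-bit word at a time,
 * swapping bytes as needed on big-endian hosts.
 */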
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

int
lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			 struct lpfc_dmabuf * mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
	   up later */
	list_add_tail(&mp->list, &pring->postbufq);

	pring->postbufq_cnt++;
	return 0;
}


struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			return mp;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"%d:0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			phba->brd_no, pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}

static void
lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			   struct lpfc_iocbq * rspiocb)
{
	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;

	/* Free the resources associated with the ELS_REQUEST64 IOCB the
	 * driver just aborted.
	 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl.
	 */
	if (cmdiocb->context2) {
		buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;

		/* Free the response IOCB before completing the abort
		   command. */
		buf_ptr = NULL;
		list_remove_head((&buf_ptr1->list), buf_ptr,
				 struct lpfc_dmabuf, list);
		if (buf_ptr) {
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
		lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
		kfree(buf_ptr1);
	}

	if (cmdiocb->context3) {
		buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
	return;
}
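/*
 * lpfc_sli_issue_abort_iotag32: build and issue an ABORT_MXRI64_CN for
 * an outstanding ELS_REQUEST64 command, matching it by its 32-bit iotag.
 * Returns 1 if the abort was issued, 0 if no free iocb was available or
 * the command type cannot be aborted this way.
 */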
int
lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
			     struct lpfc_sli_ring * pring,
			     struct lpfc_iocbq * cmdiocb)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *abtsiocbp = NULL;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;

	/* issue ABTS for this IOCB based on iotag */
	list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list);
	if (abtsiocbp == NULL)
		return 0;
	memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));

	iabt = &abtsiocbp->iocb;
	icmd = &cmdiocb->iocb;
	switch (icmd->ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		/* Even though we abort the ELS command, the firmware may
		 * access the BPL or other resources before it processes our
		 * ABORT_MXRI64.  Thus we must delay reusing the cmdiocb
		 * resources till the actual abort request completes.
		 */
		abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
		abtsiocbp->context2 = cmdiocb->context2;
		abtsiocbp->context3 = cmdiocb->context3;
		cmdiocb->context2 = NULL;
		cmdiocb->context3 = NULL;
		abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
		break;
	default:
		list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
		return 0;
	}

	iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;

	iabt->ulpLe = 1;
	iabt->ulpClass = CLASS3;
	iabt->ulpCommand = CMD_ABORT_MXRI64_CN;

	if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
		list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
		return 0;
	}

	return 1;
}

static int
lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id,
			   uint64_t lun_id, struct lpfc_iocbq *iocb,
			   uint32_t ctx, lpfc_ctx_cmd ctx_cmd)
{
	int rc = 1;

	if (lpfc_cmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->pCmd->device->id == tgt_id) &&
		    (lpfc_cmd->pCmd->device->lun == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if (lpfc_cmd->pCmd->device->id == tgt_id)
			rc = 0;
		break;
	case LPFC_CTX_CTX:
		if (iocb->iocb.ulpContext == ctx)
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __FUNCTION__, ctx_cmd);
		break;
	}

	return rc;
}

int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		  uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *cmd = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	int sum = 0, ret_val = 0;

	/* Count the matching FCP commands on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/* Must be a FCP command */
		if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
			continue;
		}

		/* context1 MUST be a struct lpfc_scsi_buf */
		lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
		ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
						     NULL, 0, ctx_cmd);
		if (ret_val != 0)
			continue;
		sum++;
	}
	return sum;
}
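/*
 * lpfc_sli_abort_iocb: walk the ring's txcmplq and issue an abort for
 * every outstanding FCP command matching the tgt_id / lun_id / ctx
 * criteria.  ABORT_XRI_CN is used while the link is up, CLOSE_XRI_CN
 * otherwise.  Returns the number of commands that could not be aborted.
 */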
int
lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
		    lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocb = NULL;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	IOCB_t *cmd = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	int errcnt = 0, ret_val = 0;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/* Must be a FCP command */
		if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
			continue;
		}

		/* context1 MUST be a struct lpfc_scsi_buf */
		lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
		ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
						     iocb, ctx, abort_cmd);
		if (ret_val != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq,
				 list);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}
		memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;

		if (phba->hba_state >= LPFC_LINK_UP)
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}

void
lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
				 struct lpfc_iocbq * queue1,
				 struct lpfc_iocbq * queue2)
{
	if (queue1->context2 && queue2)
		memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));

	/* The waiter is looking for the LPFC_IO_HIPRI bit to be set
	   as a signal to wake up */
	queue1->iocb_flag |= LPFC_IO_HIPRI;
	return;
}

int
lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
				       struct lpfc_sli_ring * pring,
				       struct lpfc_iocbq * piocb,
				       uint32_t flag,
				       struct lpfc_iocbq * prspiocbq,
				       uint32_t timeout)
{
	int j, delay_time, retval = IOCB_ERROR;

	/* The caller must leave context1 empty. */
	if (piocb->context_un.hipri_wait_queue != 0) {
		return IOCB_ERROR;
	}

	/*
	 * If the caller has provided a response iocbq buffer, context2 must
	 * be NULL or it's an error.
	 */
	if (prspiocbq && piocb->context2) {
		return IOCB_ERROR;
	}

	piocb->context2 = prspiocbq;

	/* Setup callback routine and issue the command. */
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
	retval = lpfc_sli_issue_iocb(phba, pring, piocb,
				     flag | SLI_IOCB_HIGH_PRIORITY);
	if (retval != IOCB_SUCCESS) {
		piocb->context2 = NULL;
		return IOCB_ERROR;
	}

	/*
	 * This high-priority iocb was sent out-of-band.  Poll for its
	 * completion rather than wait for a signal.  Note that the host_lock
	 * is held by the midlayer and must be released here to allow the
	 * interrupt handlers to complete the IO and signal this routine via
	 * the iocb_flag.
	 * Also, the delay_time is computed to be one second longer than
	 * the scsi command timeout to give the FW time to abort on
	 * timeout rather than the driver just giving up.  Typically,
	 * the midlayer does not specify a time for this command so the
	 * driver is free to enforce its own timeout.
	 */
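	/*
	 * (timeout + 1) seconds, expressed in milliseconds, split across
	 * the 64 polling passes below: ((timeout + 1) * 1000) >> 6 is
	 * (timeout + 1) * 1000 / 64 ms per pass.
	 */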
	delay_time = ((timeout + 1) * 1000) >> 6;
	retval = IOCB_ERROR;
	spin_unlock_irq(phba->host->host_lock);
	for (j = 0; j < 64; j++) {
		msleep(delay_time);
		if (piocb->iocb_flag & LPFC_IO_HIPRI) {
			piocb->iocb_flag &= ~LPFC_IO_HIPRI;
			retval = IOCB_SUCCESS;
			break;
		}
	}

	spin_lock_irq(phba->host->host_lock);
	piocb->context2 = NULL;
	return retval;
}

int
lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD(done_q);
	DECLARE_WAITQUEUE(wq_entry, current);
	uint32_t timeleft = 0;
	int retval;

	/* The caller must leave context1 empty. */
	if (pmboxq->context1 != 0) {
		return (MBX_NOT_FINISHED);
	}

	/* setup wake call as mailbox callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* start to sleep before we wait, to avoid races */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&done_q, &wq_entry);

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		timeleft = schedule_timeout(timeout * HZ);
		pmboxq->context1 = NULL;
		/* if schedule_timeout returns 0, we timed out and were not
		   woken up */
		if (timeleft == 0) {
			retval = MBX_TIMEOUT;
		} else {
			retval = MBX_SUCCESS;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&done_q, &wq_entry);
	return retval;
}
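/*
 * lpfc_intr_handler: the driver's interrupt service routine.  Fast-path
 * work (FCP ring events) is handled here in interrupt context, while
 * link attention, slow-ring and error attention events are recorded in
 * phba->work_ha and left for the worker thread.  Returns IRQ_NONE when
 * the interrupt did not come from this HBA.
 */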
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	int i;
	uint32_t control;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	phba->sli.slistat.sli_intr++;

	/*
	 * Call the HBA to see if it is interrupting.  If not, don't claim
	 * the interrupt.
	 */

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IRQ_NONE;

	/*
	 * Read host attention register to determine interrupt source.
	 * Clear Attention Sources, except Error Attention (to
	 * preserve status) and Link Attention.
	 */
	spin_lock(phba->host->host_lock);
	ha_copy = readl(phba->HAregaddr);
	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(phba->host->host_lock);

	if (unlikely(!ha_copy))
		return IRQ_NONE;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (unlikely(work_ha_copy)) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock(phba->host->host_lock);
			} else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			for (i = 0; i < phba->sli.num_rings; i++) {
				if (work_ha_copy & (HA_RXATT << (4*i))) {
					/*
					 * Turn off Slow Rings interrupts
					 */
					spin_lock(phba->host->host_lock);
					control = readl(phba->HCregaddr);
					control &= ~(HC_R0INT_ENA << i);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
					spin_unlock(phba->host->host_lock);
				}
			}
		}

		if (work_ha_copy & HA_ERATT) {
			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * There was a link/board error.  Read the
			 * status register to retrieve the error event
			 * and process it.
			 */
			phba->sli.slistat.err_attn_event++;
			/* Save status info */
			phba->work_hs = readl(phba->HSregaddr);
			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);

			/* Clear Chip error bit */
			writel(HA_ERATT, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			/*
			 * Resetting the HBA is the only reliable way
			 * to shut down interrupts when there is an
			 * ERROR.
			 */
			lpfc_sli_send_reset(phba, phba->hba_state);
		}

		spin_lock(phba->host->host_lock);
		phba->work_ha |= work_ha_copy;
		if (phba->work_wait)
			wake_up(phba->work_wait);
		spin_unlock(phba->host->host_lock);
	}

	ha_copy &= ~(phba->work_ha_mask);

	/*
	 * Process all events on the FCP ring.  Take the optimized path for
	 * FCP IO.  Any other IO is slow path and is handled by
	 * the worker thread.
	 */
	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXATT)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	return IRQ_HANDLED;

} /* lpfc_intr_handler */