/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright 2009 QLogic Corporation */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"Copyright 2009 QLogic Corporation; ql_iocb.c"

/*
 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
 *
 * ***********************************************************************
 * *									**
 * *				NOTICE					**
 * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
 * *			ALL RIGHTS RESERVED				**
 * *									**
 * ***********************************************************************
 *
 */

#include <ql_apps.h>
#include <ql_api.h>
#include <ql_debug.h>
#include <ql_iocb.h>
#include <ql_isr.h>
#include <ql_xioctl.h>

/*
 * Local Function Prototypes.
 */
static void ql_continuation_iocb(ql_adapter_state_t *, ddi_dma_cookie_t *,
    uint16_t, boolean_t);
static void ql_isp24xx_rcvbuf(ql_adapter_state_t *);

/*
 * ql_start_iocb
 *	The start IOCB is responsible for building request packets
 *	on request ring and modifying ISP input pointer.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer, or NULL to drain the pending queue.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_start_iocb(ql_adapter_state_t *vha, ql_srb_t *sp)
{
	ql_link_t		*link;
	request_t		*pkt;
	uint64_t		*ptr64;
	uint32_t		cnt;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire ring lock. */
	REQUEST_RING_LOCK(ha);

	if (sp != NULL) {
		/*
		 * If the pending queue is not empty maintain order
		 * by puting this srb at the tail and geting the head.
		 */
		if ((link = ha->pending_cmds.first) != NULL) {
			ql_add_link_b(&ha->pending_cmds, &sp->cmd);
			/* Remove command from pending command queue */
			sp = link->base_address;
			ql_remove_link(&ha->pending_cmds, &sp->cmd);
		}
	} else {
		/* Get command from pending command queue if not empty. */
		if ((link = ha->pending_cmds.first) == NULL) {
			/* Release ring specific lock */
			REQUEST_RING_UNLOCK(ha);
			QL_PRINT_3(CE_CONT, "(%d): empty done\n",
			    ha->instance);
			return;
		}
		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* start this request and as many others as possible */
	for (;;) {
		if (ha->req_q_cnt < sp->req_cnt) {
			/* Calculate number of free request entries. */
			cnt = RD16_IO_REG(ha, req_out);
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = (uint16_t)
				    (cnt - ha->req_ring_index);
			} else {
				ha->req_q_cnt = (uint16_t)(REQUEST_ENTRY_CNT -
				    (ha->req_ring_index - cnt));
			}
			/* Keep one entry in reserve so in != out on full. */
			if (ha->req_q_cnt != 0) {
				ha->req_q_cnt--;
			}

			/*
			 * If no room in request ring put this srb at
			 * the head of the pending queue and exit.
			 */
			if (ha->req_q_cnt < sp->req_cnt) {
				QL_PRINT_8(CE_CONT, "(%d): request ring full,"
				    " req_q_cnt=%d, req_ring_index=%d\n",
				    ha->instance, ha->req_q_cnt,
				    ha->req_ring_index);
				ql_add_link_t(&ha->pending_cmds, &sp->cmd);
				break;
			}
		}

		/* Check for room in outstanding command list. */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			ha->osc_index++;
			if (ha->osc_index == MAX_OUTSTANDING_COMMANDS) {
				/* index 0 is never used as a handle */
				ha->osc_index = 1;
			}
			if (ha->outstanding_cmds[ha->osc_index] == NULL) {
				break;
			}
		}
		/*
		 * If no room in outstanding array put this srb at
		 * the head of the pending queue and exit.
		 */
		if (cnt == MAX_OUTSTANDING_COMMANDS) {
			QL_PRINT_8(CE_CONT, "(%d): no room in outstanding "
			    "array\n", ha->instance);
			ql_add_link_t(&ha->pending_cmds, &sp->cmd);
			break;
		}

		/* nothing to stop us now. */
		ha->outstanding_cmds[ha->osc_index] = sp;
		/* create and save a unique response identifier in the srb */
		sp->handle = ha->adapter_stats->ncmds << OSC_INDEX_SHIFT |
		    ha->osc_index;
		ha->req_q_cnt -= sp->req_cnt;

		/* build the iocb in the request ring */
		pkt = ha->request_ring_ptr;
		sp->flags |= SRB_IN_TOKEN_ARRAY;

		/* Zero out packet. */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/* Setup IOCB common data. */
		pkt->entry_count = (uint8_t)sp->req_cnt;
		pkt->sys_define = (uint8_t)ha->req_ring_index;
		/* mark the iocb with the response identifier */
		ddi_put32(ha->hba_buf.acc_handle, &pkt->handle,
		    (uint32_t)sp->handle);

		/* Setup IOCB unique data via the srb's iocb build callback. */
		(sp->iocb)(vha, sp, pkt);

		sp->flags |= SRB_ISP_STARTED;

		QL_PRINT_5(CE_CONT, "(%d,%d): req packet, sp=%p\n",
		    ha->instance, vha->vp_index, (void *)sp);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);

		/* Sync DMA buffer before the chip can see the entry. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
		    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Adjust ring index. */
		ha->req_ring_index++;
		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
			ha->req_ring_index = 0;
			ha->request_ring_ptr = ha->request_ring_bp;
		} else {
			ha->request_ring_ptr++;
		}

		/* Reset watchdog timer */
		sp->wdg_q_time = sp->init_wdg_q_time;

		/*
		 * Send it by setting the new ring index in the ISP Request
		 * Ring In Pointer register.  This is the mechanism
		 * used to notify the isp that a new iocb has been
		 * placed on the request ring.
		 */
		WRT16_IO_REG(ha, req_in, ha->req_ring_index);

		/* Update outstanding command count statistic. */
		ha->adapter_stats->ncmds++;

		/* if there is a pending command, try to start it. */
		if ((link = ha->pending_cmds.first) == NULL) {
			break;
		}

		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* Release ring specific lock */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_req_pkt
 *	Function is responsible for locking ring and
 *	getting a zeroed out request packet.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pkt:	address for packet pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
int
ql_req_pkt(ql_adapter_state_t *vha, request_t **pktp)
{
	uint16_t		cnt;
	uint32_t		*long_ptr;
	uint32_t		timer;
	int			rval = QL_FUNCTION_TIMEOUT;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for 30 seconds for slot. */
	for (timer = 30000; timer != 0; timer--) {
		/* Acquire ring lock. */
		REQUEST_RING_LOCK(ha);

		if (ha->req_q_cnt == 0) {
			/* Calculate number of free request entries. */
			cnt = RD16_IO_REG(ha, req_out);
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = (uint16_t)
				    (cnt - ha->req_ring_index);
			} else {
				ha->req_q_cnt = (uint16_t)
				    (REQUEST_ENTRY_CNT -
				    (ha->req_ring_index - cnt));
			}
			/* Keep one entry in reserve so in != out on full. */
			if (ha->req_q_cnt != 0) {
				ha->req_q_cnt--;
			}
		}

		/* Found empty request ring slot? */
		if (ha->req_q_cnt != 0) {
			ha->req_q_cnt--;
			*pktp = ha->request_ring_ptr;

			/* Zero out packet. */
			long_ptr = (uint32_t *)ha->request_ring_ptr;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE/4; cnt++) {
				*long_ptr++ = 0;
			}

			/* Setup IOCB common data. */
			ha->request_ring_ptr->entry_count = 1;
			ha->request_ring_ptr->sys_define =
			    (uint8_t)ha->req_ring_index;
			ddi_put32(ha->hba_buf.acc_handle,
			    &ha->request_ring_ptr->handle,
			    (uint32_t)QL_FCA_BRAND);

			rval = QL_SUCCESS;

			/*
			 * NOTE: success exits with the request ring lock
			 * still held; the caller is expected to release it
			 * via ql_isp_cmd() after filling in the packet.
			 */
			break;
		}

		/* Release request queue lock. */
		REQUEST_RING_UNLOCK(ha);

		drv_usecwait(MILLISEC);

		/* Check for pending interrupts. */
		/*
		 * XXX protect interrupt routine from calling itself.
		 * Need to revisit this routine. So far we never
		 * hit this case as req slot was available
		 */
		if ((!(curthread->t_flag & T_INTR_THREAD)) &&
		    (RD16_IO_REG(ha, istatus) & RISC_INT)) {
			(void) ql_isr((caddr_t)ha);
			INTR_LOCK(ha);
			ha->intr_claimed = TRUE;
			INTR_UNLOCK(ha);
		}
	}

	if (rval != QL_SUCCESS) {
		/* No slot freed up in 30s; request an adapter reset. */
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
		EL(ha, "failed, rval = %xh, isp_abort_needed\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_isp_cmd
 *	Function is responsible for modifying ISP input pointer.
 *	This action notifies the isp that a new request has been
 *	added to the request ring.
 *
 *	Releases ring lock.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_isp_cmd(ql_adapter_state_t *vha)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PRINT_5(CE_CONT, "(%d): req packet:\n", ha->instance);
	QL_DUMP_5((uint8_t *)ha->request_ring_ptr, 8, REQUEST_ENTRY_SIZE);

	/* Sync DMA buffer before the chip can see the entry. */
	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
	    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
	    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
	    DDI_DMA_SYNC_FORDEV);

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring_bp;
	} else {
		ha->request_ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT16_IO_REG(ha, req_in, ha->req_ring_index);

	/* Release ring lock (acquired by ql_req_pkt()). */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_command_iocb
 *	Setup of command IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_command_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set LUN number */
	pkt->lun_l = LSB(sp->lun_queue->lun_no);
	pkt->lun_h = MSB(sp->lun_queue->lun_no);

	/* Set target ID */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->target_l = LSB(tq->loop_id);
		pkt->target_h = MSB(tq->loop_id);
	} else {
		pkt->target_h = LSB(tq->loop_id);
	}

	/* Set tag queue control flags */
	if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_HTAG);
	} else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_OTAG);
	/* else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) */
	} else {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_STAG);
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Load SCSI CDB */
	ddi_rep_put8(ha->hba_buf.acc_handle, fcp->fcp_cdb,
	    pkt->scsi_cdb, MAX_CMDSZ, DDI_DEV_AUTOINCR);

	/* Pick entry type by addressing mode; cnt = segs in base entry. */
	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		pkt->entry_type = IOCB_CMD_TYPE_3;
		cnt = CMD_TYPE_3_DATA_SEGMENTS;
	} else {
		pkt->entry_type = IOCB_CMD_TYPE_2;
		cnt = CMD_TYPE_2_DATA_SEGMENTS;
	}

	if (fcp->fcp_data_len == 0) {
		/* No data phase; nothing more to fill in. */
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->IOControlRequests++;
		return;
	}

	/*
	 * Set transfer direction. Load Data segments.
	 */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_OUT);
		ha->xioctl->IOOutputRequests++;
		ha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_IN);
		ha->xioctl->IOInputRequests++;
		ha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count, fcp->fcp_data_len);

	/* Load command data segment. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_data_cookie;
	while (cnt && seg_cnt) {
		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
			    cp->dmac_notused);
		}
		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
		    (uint32_t)cp->dmac_size);
		seg_cnt--;
		cnt--;
		cp++;
	}

	/*
	 * Build continuation packets for cookies that did not fit.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_continuation_iocb
 *	Setup of continuation IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cp:	cookie list pointer.
 *	seg_cnt:	number of segments.
 *	addr64:	64 bit addresses.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_continuation_iocb(ql_adapter_state_t *ha, ddi_dma_cookie_t *cp,
    uint16_t seg_cnt, boolean_t addr64)
{
	cont_entry_t	*pkt;
	uint64_t	*ptr64;
	uint32_t	*ptr32, cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Build continuation packets.
	 */
	while (seg_cnt) {
		/* Sync the entry just built before advancing past it. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
		    REQUEST_Q_BUFFER_OFFSET), REQUEST_ENTRY_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Adjust ring pointer, and deal with wrap. */
		ha->req_ring_index++;
		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
			ha->req_ring_index = 0;
			ha->request_ring_ptr = ha->request_ring_bp;
		} else {
			ha->request_ring_ptr++;
		}
		pkt = (cont_entry_t *)ha->request_ring_ptr;

		/* Zero out packet. */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/*
		 * Build continuation packet.
		 */
		pkt->entry_count = 1;
		pkt->sys_define = (uint8_t)ha->req_ring_index;
		if (addr64) {
			/* Type 1: 64-bit address/length pairs. */
			pkt->entry_type = CONTINUATION_TYPE_1;
			cnt = CONT_TYPE_1_DATA_SEGMENTS;
			ptr32 = (uint32_t *)
			    &((cont_type_1_entry_t *)pkt)->dseg_0_address;
			while (cnt && seg_cnt) {
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_notused);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		} else {
			/* Type 0: 32-bit address/length pairs. */
			pkt->entry_type = CONTINUATION_TYPE_0;
			cnt = CONT_TYPE_0_DATA_SEGMENTS;
			ptr32 = (uint32_t *)&pkt->dseg_0_address;
			while (cnt && seg_cnt) {
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		}

		QL_PRINT_5(CE_CONT, "(%d): packet:\n", ha->instance);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_command_24xx_iocb
 *	Setup of ISP24xx command IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_command_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd_24xx_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	pkt->entry_type = IOCB_CMD_TYPE_7;

	/* Set LUN number */
	pkt->fcp_lun[2] = LSB(sp->lun_queue->lun_no);
	pkt->fcp_lun[3] = MSB(sp->lun_queue->lun_no);

	/* Set N_port handle */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/* Set ISP command timeout. */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB, then byte-swap each word for the 24xx layout. */
	ddi_rep_put8(pha->hba_buf.acc_handle, fcp->fcp_cdb, pkt->scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
	for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
		ql_chg_endian((uint8_t *)&pkt->scsi_cdb + cnt, 4);
	}

	/*
	 * Set tag queue control flags
	 * Note:
	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 *	problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		pkt->task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		pkt->task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		pkt->task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		pkt->task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		pkt->task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	if (fcp->fcp_data_len == 0) {
		/* No data phase; nothing more to fill in. */
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		pha->xioctl->IOControlRequests++;
		return;
	}

	/* Set transfer direction. */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);

	/* Load command data segment (entry holds one 64-bit segment). */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_data_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for the remaining cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_marker
 *	Function issues marker IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	loop_id:	device loop ID
 *	lun:	device LUN
 *	type:	marker modifier
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
int
ql_marker(ql_adapter_state_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t	*pkt;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* On success, ql_req_pkt() returns with the ring lock held. */
	rval = ql_req_pkt(ha, (request_t **)&pkt);
	if (rval == QL_SUCCESS) {
		pkt->entry_type = MARKER_TYPE;

		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* ISP24xx/25xx marker layout. */
			marker_24xx_entry_t	*pkt24 =
			    (marker_24xx_entry_t *)pkt;

			pkt24->modifier = type;

			/* Set LUN number */
			pkt24->fcp_lun[2] = LSB(lun);
			pkt24->fcp_lun[3] = MSB(lun);

			pkt24->vp_index = ha->vp_index;

			/* Set N_port handle */
			ddi_put16(ha->pha->hba_buf.acc_handle,
			    &pkt24->n_port_hdl, loop_id);

		} else {
			pkt->modifier = type;

			pkt->lun_l = LSB(lun);
			pkt->lun_h = MSB(lun);

			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->target_l = LSB(loop_id);
				pkt->target_h = MSB(loop_id);
			} else {
				pkt->target_h = LSB(loop_id);
			}
		}

		/* Issue command to ISP (also releases the ring lock). */
		ql_isp_cmd(ha);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_ms_iocb
 *	Setup of name/management server IOCB.
 *
 * Input:
 *	ha = adapter state pointer.
 *	sp = srb structure pointer.
 *	arg = request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ms_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
#if 0
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
#endif
	/*
	 * Build command packet.
	 */
	pkt->entry_type = MS_TYPE;

	/* Set loop ID */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set cmd data segment count. */
	pkt->cmd_dseg_count_l = 1;

	/* Set total data segment count: one cmd segment + resp segments. */
	seg_cnt = (uint16_t)(sp->pkt->pkt_resp_cookie_cnt + 1);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->total_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
	seg_cnt--;

	/* Load MS response entry data segments. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for remaining response cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ms_24xx_iocb
 *	Setup of name/management server IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ct_passthru_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
#if 0
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
#endif
	/*
	 * Build command packet.
	 */
	pkt->entry_type = CT_PASSTHRU_TYPE;

	/* Set loop ID */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	pkt->vp_index = ha->vp_index;

	/* Set ISP command timeout. */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Set cmd/response data segment counts. */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->cmd_dseg_count, 1);
	seg_cnt = (uint16_t)sp->pkt->pkt_resp_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->resp_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command entry data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);

	/* Load MS response entry data segments. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for remaining response cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_iocb
 *	Setup of IP IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set loop ID */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set control flags */
	pkt->control_flags_l = BIT_6;
	if (sp->pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
		pkt->control_flags_h = BIT_7;
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/*
	 * Build command packet; entry type and base-entry segment count
	 * depend on the addressing mode.
	 */
	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		pkt->entry_type = IP_A64_TYPE;
		cnt = IP_A64_DATA_SEGMENTS;
	} else {
		pkt->entry_type = IP_TYPE;
		cnt = IP_DATA_SEGMENTS;
	}

	/* Load command entry data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	while (cnt && seg_cnt) {
		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
			    cp->dmac_notused);
		}
		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
		    (uint32_t)cp->dmac_size);
		seg_cnt--;
		cnt--;
		cp++;
	}

	/*
	 * Build continuation packets for cookies that did not fit.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_24xx_iocb
 *	Setup of IP IOCB for ISP24xx.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_cmd_entry_t		*pkt = arg;

	pkt->entry_type = IP_CMD_TYPE;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set N_port handle */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->hdl_status, tq->loop_id);

	/* Set ISP command timeout. */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout_hdl,
		    sp->isp_timeout);
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Set control flags */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->control_flags,
	    (uint16_t)(BIT_0));

	/* Set frame header control flags */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->frame_hdr_cntrl_flgs,
	    (uint16_t)(IPCF_LAST_SEQ | IPCF_FIRST_SEQ));

	/* Load command data segment (entry holds one 64-bit segment). */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for the remaining cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_isp_rcvbuf
 *	Locates free buffers and places it on the receive buffer queue.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_isp_rcvbuf(ql_adapter_state_t *ha)
{
	rcvbuf_t	*container;
	uint16_t	rcv_q_cnt;
	uint16_t	index = 0;
	uint16_t	index1 = 1;
	int		debounce_count = QL_MAX_DEBOUNCE;
	ql_srb_t	*sp;
	fc_unsol_buf_t	*ubp;
	int		ring_updated = FALSE;

	/* ISP24xx/25xx uses IOCB-based buffer pool entries instead. */
	if (CFG_IST(ha, CFG_CTRL_2425)) {
		ql_isp24xx_rcvbuf(ha);
		return;
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Calculate number of free receive buffer entries.
	 * Re-read mailbox 8 until two consecutive reads agree (debounce).
	 */
	index = RD16_IO_REG(ha, mailbox[8]);
	do {
		index1 = RD16_IO_REG(ha, mailbox[8]);
		if (index1 == index) {
			break;
		} else {
			index = index1;
		}
	} while (debounce_count--);

	if (debounce_count < 0) {
		/* This should never happen */
		EL(ha, "max mb8 debounce retries exceeded\n");
	}

	rcv_q_cnt = (uint16_t)(ha->rcvbuf_ring_index < index ?
	    index - ha->rcvbuf_ring_index : RCVBUF_CONTAINER_CNT -
	    (ha->rcvbuf_ring_index - index));

	/* Keep one container in reserve so in != out on full. */
	if (rcv_q_cnt == RCVBUF_CONTAINER_CNT) {
		rcv_q_cnt--;
	}

	/* Load all free buffers in ISP receive buffer ring. */
	index = 0;
	while (rcv_q_cnt > (uint16_t)0 && index < QL_UB_LIMIT) {
		/* Locate a buffer to give. */
		QL_UB_LOCK(ha);
		while (index < QL_UB_LIMIT) {
			ubp = ha->ub_array[index];
			if (ubp != NULL) {
				sp = ubp->ub_fca_private;
				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
				    (ha->flags & IP_INITIALIZED) &&
				    (sp->flags & SRB_UB_IN_FCA) &&
				    (!(sp->flags & (SRB_UB_IN_ISP |
				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED)))) {
					sp->flags |= SRB_UB_IN_ISP;
					break;
				}
			}
			index++;
		}

		if (index < QL_UB_LIMIT) {
			rcv_q_cnt--;
			index++;
			container = ha->rcvbuf_ring_ptr;

			/*
			 * Build container.
			 */
			ddi_put32(ha->hba_buf.acc_handle,
			    (uint32_t *)(void *)&container->bufp[0],
			    sp->ub_buffer.cookie.dmac_address);

			ddi_put32(ha->hba_buf.acc_handle,
			    (uint32_t *)(void *)&container->bufp[1],
			    sp->ub_buffer.cookie.dmac_notused);

			ddi_put16(ha->hba_buf.acc_handle, &container->handle,
			    LSW(sp->handle));

			ha->ub_outcnt++;

			/* Adjust ring index. */
			ha->rcvbuf_ring_index++;
			if (ha->rcvbuf_ring_index == RCVBUF_CONTAINER_CNT) {
				ha->rcvbuf_ring_index = 0;
				ha->rcvbuf_ring_ptr = ha->rcvbuf_ring_bp;
			} else {
				ha->rcvbuf_ring_ptr++;
			}

			ring_updated = TRUE;
		}
		QL_UB_UNLOCK(ha);
	}

	if (ring_updated) {
		/* Sync queue. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)RCVBUF_Q_BUFFER_OFFSET, (size_t)RCVBUF_QUEUE_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Set chip new ring index. */
		WRT16_IO_REG(ha, mailbox[8], ha->rcvbuf_ring_index);
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_isp24xx_rcvbuf
 *	Locates free buffers and send it to adapter.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_isp24xx_rcvbuf(ql_adapter_state_t *ha)
{
	rcvbuf_t		*container;
	uint16_t		index;
	ql_srb_t		*sp;
	fc_unsol_buf_t		*ubp;
	int			rval;
	ip_buf_pool_entry_t	*pkt = NULL;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	for (;;) {
		/* Locate a buffer to give. */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			ubp = ha->ub_array[index];
			if (ubp != NULL) {
				sp = ubp->ub_fca_private;
				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
				    (ha->flags & IP_INITIALIZED) &&
				    (sp->flags & SRB_UB_IN_FCA) &&
				    (!(sp->flags & (SRB_UB_IN_ISP |
				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED)))) {
					ha->ub_outcnt++;
					sp->flags |= SRB_UB_IN_ISP;
					break;
				}
			}
		}
		QL_UB_UNLOCK(ha);
		if (index == QL_UB_LIMIT) {
			break;
		}

		/* Get IOCB packet for buffers. */
		if (pkt == NULL) {
			/* On success the ring lock is held until ql_isp_cmd */
			rval = ql_req_pkt(ha, (request_t **)&pkt);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed, ql_req_pkt=%x\n", rval);
				/* Undo the claim made above. */
				QL_UB_LOCK(ha);
				ha->ub_outcnt--;
				sp->flags &= ~SRB_UB_IN_ISP;
				QL_UB_UNLOCK(ha);
				break;
			}
			pkt->entry_type = IP_BUF_POOL_TYPE;
			container = &pkt->buffers[0];
		}

		/*
		 * Build container.
		 */
		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[0],
		    sp->ub_buffer.cookie.dmac_address);
		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[1],
		    sp->ub_buffer.cookie.dmac_notused);
		ddi_put16(ha->hba_buf.acc_handle, &container->handle,
		    LSW(sp->handle));

		pkt->buffer_count++;
		container++;

		/* Entry full: issue it (releases ring lock), start a new one */
		if (pkt->buffer_count == IP_POOL_BUFFERS) {
			ql_isp_cmd(ha);
			pkt = NULL;
		}
	}

	/* Issue any partially filled entry. */
	if (pkt != NULL) {
		ql_isp_cmd(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_modify_lun
 *	Function enables, modifies or disables ISP to respond as a target.
 *
 * Input:
 *	ha = adapter state pointer.
 *	count = number buffers for incoming commands.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
int
ql_modify_lun(ql_adapter_state_t *ha)
{
	enable_lun_entry_t	*pkt;
	int			rval = QL_SUCCESS;
	uint32_t		index, ubcount;
	fc_unsol_buf_t		*ubp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Count the number of SCSI unsolicited buffers, that have been
	 * allocated.
1355 */ 1356 ADAPTER_STATE_LOCK(ha); 1357 1358 ubp = NULL; 1359 ubcount = 0; 1360 QL_UB_LOCK(ha); 1361 for (index = 0; index < QL_UB_LIMIT; index++) { 1362 ubp = ha->ub_array[index]; 1363 if (ubp != NULL) { 1364 ql_srb_t *sp = ubp->ub_fca_private; 1365 1366 if (sp->ub_type == FC_TYPE_SCSI_FCP && 1367 !(sp->flags & SRB_UB_FREE_REQUESTED)) { 1368 ubcount++; 1369 } 1370 } 1371 } 1372 QL_UB_UNLOCK(ha); 1373 1374 if (!(ha->flags & TARGET_MODE_INITIALIZED) && (ubcount == 0)) { 1375 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1376 return (rval); 1377 } 1378 1379 rval = ql_req_pkt(ha, (request_t **)&pkt); 1380 1381 if (ha->flags & TARGET_MODE_INITIALIZED) { 1382 if (ubcount == 0) { 1383 /* Disable the target mode Luns */ 1384 ASSERT(ha->ub_command_count != 0); 1385 ASSERT(ha->ub_notify_count != 0); 1386 1387 ha->flags &= ~(TARGET_MODE_INITIALIZED); 1388 1389 ha->ub_command_count = 0; 1390 ha->ub_notify_count = 0; 1391 1392 pkt->entry_type = ENABLE_LUN_TYPE; 1393 pkt->command_count = 0; 1394 pkt->immediate_notify_count = 0; 1395 1396 } else { 1397 /* Modify the command count for target mode */ 1398 modify_lun_entry_t *ml_pkt; 1399 uint8_t cmd_count, notify_count; 1400 1401 ASSERT(ha->ub_command_count != 0); 1402 ASSERT(ha->ub_notify_count != 0); 1403 1404 /* 1405 * calculate the new value of command count 1406 * and notify count and then issue the command 1407 * to change the values in the firmware. 1408 */ 1409 ml_pkt = (modify_lun_entry_t *)pkt; 1410 ml_pkt->entry_type = MODIFY_LUN_TYPE; 1411 if (ubcount < 255) { 1412 /* Save one for immediate notify. 
*/ 1413 if (ubcount > 1) { 1414 cmd_count = (uint8_t)(ubcount - 1); 1415 } else { 1416 cmd_count = (uint8_t)ubcount; 1417 } 1418 notify_count = 1; 1419 } else { 1420 cmd_count = 255; 1421 if (ubcount - 255 < 255) { 1422 notify_count = (uint8_t) 1423 (ubcount - 255); 1424 } else { 1425 notify_count = 255; 1426 } 1427 } 1428 1429 if (cmd_count > ha->ub_command_count) { 1430 /* cmd_count value increased */ 1431 ml_pkt->command_count = (uint8_t) 1432 (cmd_count - ha->ub_command_count); 1433 ml_pkt->operators = (uint8_t) 1434 (ml_pkt->operators | BIT_0); 1435 1436 if (notify_count > ha->ub_notify_count) { 1437 ml_pkt->immediate_notify_count = 1438 (uint8_t)(notify_count - 1439 ha->ub_notify_count); 1440 ml_pkt->operators = (uint8_t) 1441 (ml_pkt->operators | BIT_2); 1442 } else if (notify_count < 1443 ha->ub_notify_count) { 1444 ml_pkt->immediate_notify_count = 1445 (uint8_t)(ha->ub_notify_count - 1446 notify_count); 1447 ml_pkt->operators = (uint8_t) 1448 (ml_pkt->operators | BIT_3); 1449 } 1450 } else { 1451 /* cmd_count value reduced */ 1452 ml_pkt->command_count = (uint8_t) 1453 (ha->ub_command_count - cmd_count); 1454 if (ml_pkt->command_count != 0) { 1455 ml_pkt->operators = (uint8_t) 1456 (ml_pkt->operators | BIT_1); 1457 } 1458 if (notify_count > ha->ub_notify_count) { 1459 ml_pkt->immediate_notify_count = 1460 (uint8_t)(notify_count - 1461 ha->ub_notify_count); 1462 ml_pkt->operators = (uint8_t) 1463 (ml_pkt->operators | BIT_2); 1464 } else if (notify_count < 1465 ha->ub_notify_count) { 1466 ml_pkt->immediate_notify_count = 1467 (uint8_t)(ha->ub_notify_count - 1468 notify_count); 1469 ml_pkt->operators = (uint8_t) 1470 (ml_pkt->operators | BIT_3); 1471 } 1472 } 1473 1474 /* Update the driver's command/notify count values */ 1475 ha->ub_command_count = cmd_count; 1476 ha->ub_notify_count = notify_count; 1477 } 1478 } else { 1479 ASSERT(ubcount != 0); 1480 1481 /* Enable the Luns for the target mode */ 1482 pkt->entry_type = ENABLE_LUN_TYPE; 1483 1484 if (ubcount 
< 255) { 1485 /* Save one for immediate notify. */ 1486 if (ubcount > 1) { 1487 ha->ub_command_count = (uint8_t)(ubcount - 1); 1488 } else { 1489 ha->ub_command_count = (uint8_t)ubcount; 1490 } 1491 ha->ub_notify_count = 1; 1492 } else { 1493 ha->ub_command_count = 255; 1494 if (ubcount - 255 < 255) { 1495 ha->ub_notify_count = (uint8_t)(ubcount - 255); 1496 } else { 1497 ha->ub_notify_count = 255; 1498 } 1499 } 1500 ha->flags |= TARGET_MODE_INITIALIZED; 1501 1502 pkt->command_count = ha->ub_command_count; 1503 pkt->immediate_notify_count = ha->ub_notify_count; 1504 } 1505 ADAPTER_STATE_UNLOCK(ha); 1506 1507 /* Issue command to ISP */ 1508 ql_isp_cmd(ha); 1509 1510 if (rval != QL_SUCCESS) { 1511 EL(ha, "failed=%xh\n", rval); 1512 } else { 1513 /*EMPTY*/ 1514 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1515 } 1516 return (rval); 1517 } 1518 1519 /* 1520 * ql_notify_acknowledge_iocb 1521 * Setup of notify acknowledge IOCB for pending 1522 * immediate notify entry. 1523 * 1524 * Input: 1525 * ha: adapter state pointer. 1526 * cmd: target command context pointer. 1527 * pkt: request queue packet. 1528 * 1529 * Context: 1530 * Interrupt or Kernel context, no mailbox commands allowed. 1531 */ 1532 void 1533 ql_notify_acknowledge_iocb(ql_adapter_state_t *ha, tgt_cmd_t *cmd, 1534 notify_acknowledge_entry_t *pkt) 1535 { 1536 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1537 1538 pkt->entry_type = NOTIFY_ACKNOWLEDGE_TYPE; 1539 pkt->initiator_id_l = cmd->initiator_id_l; 1540 pkt->initiator_id_h = cmd->initiator_id_h; 1541 1542 /* Handle LIP reset event. 
*/ 1543 if (cmd->status == 0xe) { 1544 pkt->flags_l = BIT_5; 1545 } 1546 1547 pkt->flags_h = BIT_0; 1548 ddi_put16(ha->hba_buf.acc_handle, &pkt->status, cmd->status); 1549 pkt->task_flags_l = cmd->task_flags_l; 1550 pkt->task_flags_h = cmd->task_flags_h; 1551 pkt->sequence_id = cmd->rx_id; 1552 1553 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1554 } 1555 1556 /* 1557 * ql_continue_target_io_iocb 1558 * Setup of continue target I/O IOCB for pending 1559 * accept target I/O entry. 1560 * 1561 * Input: 1562 * ha = adapter state pointer. 1563 * sp = srb structure pointer. 1564 * arg = request queue packet. 1565 * 1566 * Context: 1567 * Interrupt or Kernel context, no mailbox commands allowed. 1568 */ 1569 void 1570 ql_continue_target_io_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg) 1571 { 1572 ddi_dma_cookie_t *cp; 1573 port_id_t d_id; 1574 ql_tgt_t *tq; 1575 ctio_entry_t *pkt = arg; 1576 1577 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1578 1579 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id; 1580 tq = ql_d_id_to_queue(ha, d_id); 1581 1582 if (tq == NULL) { 1583 EL(ha, "Unknown Initiator d_id %xh", d_id.b24); 1584 return; 1585 } 1586 1587 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 1588 pkt->initiator_id_l = LSB(tq->loop_id); 1589 pkt->initiator_id_h = MSB(tq->loop_id); 1590 } else { 1591 pkt->initiator_id_h = LSB(tq->loop_id); 1592 } 1593 pkt->rx_id = sp->pkt->pkt_cmd_fhdr.rx_id; 1594 1595 /* Set ISP command timeout. */ 1596 if (sp->isp_timeout < 0x1999) { 1597 ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, 1598 sp->isp_timeout); 1599 } 1600 1601 if (sp->flags & SRB_FCP_DATA_PKT) { 1602 1603 if (sp->pkt->pkt_tran_type == FC_PKT_OUTBOUND) { 1604 pkt->flags_l = BIT_6; 1605 } else if (sp->pkt->pkt_tran_type == FC_PKT_INBOUND) { 1606 pkt->flags_l = BIT_7; 1607 } 1608 1609 pkt->flags_h = BIT_1; 1610 /* Set relative offset. 
*/ 1611 ddi_put32(ha->hba_buf.acc_handle, 1612 (uint32_t *)(void *)&pkt->relative_offset, 1613 (uint32_t)sp->pkt->pkt_cmd_fhdr.ro); 1614 } else { 1615 /* (sp->flags & SRB_FCP_RSP_PKT) */ 1616 pkt->flags_l = BIT_7 | BIT_6 | BIT_1; 1617 pkt->flags_h = BIT_7 | BIT_1; 1618 } 1619 1620 /* 1621 * Load data segments. 1622 */ 1623 if (sp->pkt->pkt_cmdlen != 0) { 1624 cp = sp->pkt->pkt_cmd_cookie; 1625 1626 /* Transfer length. */ 1627 ddi_put32(ha->hba_buf.acc_handle, 1628 (uint32_t *)(void *)&pkt->type.s0_32bit.byte_count, 1629 (uint32_t)cp->dmac_size); 1630 1631 /* Load data segments. */ 1632 pkt->dseg_count_l = 1; 1633 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) { 1634 pkt->entry_type = CTIO_TYPE_3; 1635 ddi_put32(ha->hba_buf.acc_handle, 1636 (uint32_t *)(void *) 1637 &pkt->type.s0_64bit.dseg_0_address[0], 1638 cp->dmac_address); 1639 ddi_put32(ha->hba_buf.acc_handle, 1640 (uint32_t *)(void *) 1641 &pkt->type.s0_64bit.dseg_0_address[1], 1642 cp->dmac_notused); 1643 ddi_put32(ha->hba_buf.acc_handle, 1644 (uint32_t *)(void *) 1645 &pkt->type.s0_64bit.dseg_0_length, 1646 (uint32_t)cp->dmac_size); 1647 } else { 1648 pkt->entry_type = CTIO_TYPE_2; 1649 ddi_put32(ha->hba_buf.acc_handle, 1650 (uint32_t *)(void *) 1651 &pkt->type.s0_32bit.dseg_0_address, 1652 cp->dmac_address); 1653 ddi_put32(ha->hba_buf.acc_handle, 1654 (uint32_t *)(void *) 1655 &pkt->type.s0_32bit.dseg_0_length, 1656 (uint32_t)cp->dmac_size); 1657 } 1658 } 1659 1660 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1661 } 1662 1663 /* 1664 * ql_continue_target_io_2400_iocb 1665 * Setup of continue target I/O IOCB for pending 1666 * accept target I/O entry. 1667 * 1668 * Input: 1669 * ha = adapter state pointer. 1670 * sp = srb structure pointer. 1671 * arg = request queue packet. 1672 * 1673 * Context: 1674 * Interrupt or Kernel context, no mailbox commands allowed. 
1675 */ 1676 /* ARGSUSED */ 1677 void 1678 ql_continue_target_io_2400_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, 1679 void *arg) 1680 { 1681 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 1682 1683 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 1684 } 1685