/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
								0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}


			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}


	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
			tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}


/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;
	struct qla_qpair *qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
*/ 2042 req->current_outstanding_cmd = handle; 2043 req->outstanding_cmds[handle] = sp; 2044 sp->handle = handle; 2045 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2046 req->cnt -= req_cnt; 2047 2048 /* Fill-in common area */ 2049 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 2050 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2051 2052 clr_ptr = (uint32_t *)cmd_pkt + 2; 2053 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2054 2055 /* Set NPORT-ID and LUN number*/ 2056 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2057 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2058 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2059 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2060 2061 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2062 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2063 2064 /* Total Data and protection segment(s) */ 2065 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2066 2067 /* Build IOCB segments and adjust for data protection segments */ 2068 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 2069 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 2070 QLA_SUCCESS) 2071 goto queuing_error; 2072 2073 cmd_pkt->entry_count = (uint8_t)req_cnt; 2074 cmd_pkt->timeout = cpu_to_le16(0); 2075 wmb(); 2076 2077 /* Adjust ring index. */ 2078 req->ring_index++; 2079 if (req->ring_index == req->length) { 2080 req->ring_index = 0; 2081 req->ring_ptr = req->ring; 2082 } else 2083 req->ring_ptr++; 2084 2085 /* Set chip new ring index. */ 2086 WRT_REG_DWORD(req->req_q_in, req->ring_index); 2087 2088 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2089 if (vha->flags.process_response_queue && 2090 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2091 qla24xx_process_response_queue(vha, rsp); 2092 2093 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2094 2095 return QLA_SUCCESS; 2096 2097 queuing_error: 2098 if (status & QDSS_GOT_Q_SPACE) { 2099 req->outstanding_cmds[handle] = NULL; 2100 req->cnt += req_cnt; 2101 } 2102 /* Cleanup will be performed by the caller (queuecommand) */ 2103 2104 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2105 return QLA_FUNCTION_FAILED; 2106 } 2107 2108 /* Generic Control-SRB manipulation functions. */ 2109 2110 /* hardware_lock assumed to be held. */ 2111 void * 2112 qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp) 2113 { 2114 if (qla2x00_reset_active(vha)) 2115 return NULL; 2116 2117 return qla2x00_alloc_iocbs(vha, sp); 2118 } 2119 2120 void * 2121 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) 2122 { 2123 struct qla_hw_data *ha = vha->hw; 2124 struct req_que *req = ha->req_q_map[0]; 2125 device_reg_t *reg = ISP_QUE_REG(ha, req->id); 2126 uint32_t index, handle; 2127 request_t *pkt; 2128 uint16_t cnt, req_cnt; 2129 2130 pkt = NULL; 2131 req_cnt = 1; 2132 handle = 0; 2133 2134 if (!sp) 2135 goto skip_cmd_array; 2136 2137 /* Check for room in outstanding command list. */ 2138 handle = req->current_outstanding_cmd; 2139 for (index = 1; index < req->num_outstanding_cmds; index++) { 2140 handle++; 2141 if (handle == req->num_outstanding_cmds) 2142 handle = 1; 2143 if (!req->outstanding_cmds[handle]) 2144 break; 2145 } 2146 if (index == req->num_outstanding_cmds) { 2147 ql_log(ql_log_warn, vha, 0x700b, 2148 "No room on outstanding cmd array.\n"); 2149 goto queuing_error; 2150 } 2151 2152 /* Prep command array. */ 2153 req->current_outstanding_cmd = handle; 2154 req->outstanding_cmds[handle] = sp; 2155 sp->handle = handle; 2156 2157 /* Adjust entry-counts as needed. 
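	 * req_cnt stays 1 for SRB_SCSI_CMD; every other SRB type supplies its
	 * own pre-computed IOCB count in sp->iocbs.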
 */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	vha->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ?
BIT_1 : 0; 2232 if (HAS_EXTENDED_IDS(ha)) { 2233 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2234 mbx->mb10 = cpu_to_le16(opts); 2235 } else { 2236 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); 2237 } 2238 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2239 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2240 sp->fcport->d_id.b.al_pa); 2241 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 2242 } 2243 2244 static void 2245 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2246 { 2247 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2248 logio->control_flags = 2249 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 2250 if (!sp->fcport->tgt_session || 2251 !sp->fcport->tgt_session->keep_nport_handle) 2252 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); 2253 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2254 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2255 logio->port_id[1] = sp->fcport->d_id.b.area; 2256 logio->port_id[2] = sp->fcport->d_id.b.domain; 2257 logio->vp_index = sp->fcport->vha->vp_idx; 2258 } 2259 2260 static void 2261 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) 2262 { 2263 struct qla_hw_data *ha = sp->fcport->vha->hw; 2264 2265 mbx->entry_type = MBX_IOCB_TYPE; 2266 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2267 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 2268 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 2269 cpu_to_le16(sp->fcport->loop_id): 2270 cpu_to_le16(sp->fcport->loop_id << 8); 2271 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2272 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2273 sp->fcport->d_id.b.al_pa); 2274 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 2275 /* Implicit: mbx->mbx10 = 0. */ 2276 } 2277 2278 static void 2279 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2280 { 2281 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2282 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 2283 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2284 logio->vp_index = sp->fcport->vha->vp_idx; 2285 } 2286 2287 static void 2288 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) 2289 { 2290 struct qla_hw_data *ha = sp->fcport->vha->hw; 2291 2292 mbx->entry_type = MBX_IOCB_TYPE; 2293 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2294 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); 2295 if (HAS_EXTENDED_IDS(ha)) { 2296 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2297 mbx->mb10 = cpu_to_le16(BIT_0); 2298 } else { 2299 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); 2300 } 2301 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); 2302 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2303 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2304 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2305 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 2306 } 2307 2308 static void 2309 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) 2310 { 2311 uint32_t flags; 2312 uint64_t lun; 2313 struct fc_port *fcport = sp->fcport; 2314 scsi_qla_host_t *vha = fcport->vha; 2315 struct qla_hw_data *ha = vha->hw; 2316 struct srb_iocb *iocb = &sp->u.iocb_cmd; 2317 struct req_que *req = vha->req; 2318 2319 flags = iocb->u.tmf.flags; 2320 lun = iocb->u.tmf.lun; 2321 2322 tsk->entry_type = TSK_MGMT_IOCB_TYPE; 2323 tsk->entry_count = 1; 2324 tsk->handle = MAKE_HANDLE(req->id, tsk->handle); 2325 tsk->nport_handle = cpu_to_le16(fcport->loop_id); 2326 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2327 tsk->control_flags = cpu_to_le32(flags); 2328 tsk->port_id[0] = fcport->d_id.b.al_pa; 
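	/* The 24-bit N_Port ID is stored low byte first: AL_PA, area, domain. */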
2329 tsk->port_id[1] = fcport->d_id.b.area; 2330 tsk->port_id[2] = fcport->d_id.b.domain; 2331 tsk->vp_index = fcport->vha->vp_idx; 2332 2333 if (flags == TCF_LUN_RESET) { 2334 int_to_scsilun(lun, &tsk->lun); 2335 host_to_fcp_swap((uint8_t *)&tsk->lun, 2336 sizeof(tsk->lun)); 2337 } 2338 } 2339 2340 static void 2341 qla2x00_els_dcmd_sp_free(void *ptr, void *data) 2342 { 2343 struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr; 2344 struct qla_hw_data *ha = vha->hw; 2345 srb_t *sp = (srb_t *)data; 2346 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2347 2348 kfree(sp->fcport); 2349 2350 if (elsio->u.els_logo.els_logo_pyld) 2351 dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE, 2352 elsio->u.els_logo.els_logo_pyld, 2353 elsio->u.els_logo.els_logo_pyld_dma); 2354 2355 del_timer(&elsio->timer); 2356 qla2x00_rel_sp(vha, sp); 2357 } 2358 2359 static void 2360 qla2x00_els_dcmd_iocb_timeout(void *data) 2361 { 2362 srb_t *sp = (srb_t *)data; 2363 struct srb_iocb *lio = &sp->u.iocb_cmd; 2364 fc_port_t *fcport = sp->fcport; 2365 struct scsi_qla_host *vha = fcport->vha; 2366 struct qla_hw_data *ha = vha->hw; 2367 unsigned long flags = 0; 2368 2369 ql_dbg(ql_dbg_io, vha, 0x3069, 2370 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", 2371 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 2372 fcport->d_id.b.al_pa); 2373 2374 /* Abort the exchange */ 2375 spin_lock_irqsave(&ha->hardware_lock, flags); 2376 if (ha->isp_ops->abort_command(sp)) { 2377 ql_dbg(ql_dbg_io, vha, 0x3070, 2378 "mbx abort_command failed.\n"); 2379 } else { 2380 ql_dbg(ql_dbg_io, vha, 0x3071, 2381 "mbx abort_command success.\n"); 2382 } 2383 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2384 2385 complete(&lio->u.els_logo.comp); 2386 } 2387 2388 static void 2389 qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res) 2390 { 2391 srb_t *sp = (srb_t *)ptr; 2392 fc_port_t *fcport = sp->fcport; 2393 struct srb_iocb *lio = &sp->u.iocb_cmd; 2394 struct scsi_qla_host *vha = fcport->vha; 2395 2396 ql_dbg(ql_dbg_io, vha, 0x3072, 2397 "%s hdl=%x, portid=%02x%02x%02x done\n", 2398 sp->name, sp->handle, fcport->d_id.b.domain, 2399 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2400 2401 complete(&lio->u.els_logo.comp); 2402 } 2403 2404 int 2405 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, 2406 port_id_t remote_did) 2407 { 2408 srb_t *sp; 2409 fc_port_t *fcport = NULL; 2410 struct srb_iocb *elsio = NULL; 2411 struct qla_hw_data *ha = vha->hw; 2412 struct els_logo_payload logo_pyld; 2413 int rval = QLA_SUCCESS; 2414 2415 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2416 if (!fcport) { 2417 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); 2418 return -ENOMEM; 2419 } 2420 2421 /* Alloc SRB structure */ 2422 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2423 if (!sp) { 2424 kfree(fcport); 2425 ql_log(ql_log_info, vha, 0x70e6, 2426 "SRB allocation failed\n"); 2427 return -ENOMEM; 2428 } 2429 2430 elsio = &sp->u.iocb_cmd; 2431 fcport->loop_id = 0xFFFF; 2432 fcport->d_id.b.domain = remote_did.b.domain; 2433 fcport->d_id.b.area = remote_did.b.area; 2434 fcport->d_id.b.al_pa = remote_did.b.al_pa; 2435 2436 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", 2437 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 2438 2439 sp->type = SRB_ELS_DCMD; 2440 sp->name = "ELS_DCMD"; 2441 sp->fcport = fcport; 2442 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); 2443 elsio->timeout = qla2x00_els_dcmd_iocb_timeout; 2444 sp->done = qla2x00_els_dcmd_sp_done; 2445 sp->free = qla2x00_els_dcmd_sp_free; 2446 2447 
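	/* DMA-coherent buffer for the LOGO payload; qla2x00_els_dcmd_sp_free()
	 * releases it when the SRB is freed. */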
elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, 2448 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, 2449 GFP_KERNEL); 2450 2451 if (!elsio->u.els_logo.els_logo_pyld) { 2452 sp->free(vha, sp); 2453 return QLA_FUNCTION_FAILED; 2454 } 2455 2456 memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); 2457 2458 elsio->u.els_logo.els_cmd = els_opcode; 2459 logo_pyld.opcode = els_opcode; 2460 logo_pyld.s_id[0] = vha->d_id.b.al_pa; 2461 logo_pyld.s_id[1] = vha->d_id.b.area; 2462 logo_pyld.s_id[2] = vha->d_id.b.domain; 2463 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); 2464 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); 2465 2466 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, 2467 sizeof(struct els_logo_payload)); 2468 2469 rval = qla2x00_start_sp(sp); 2470 if (rval != QLA_SUCCESS) { 2471 sp->free(vha, sp); 2472 return QLA_FUNCTION_FAILED; 2473 } 2474 2475 ql_dbg(ql_dbg_io, vha, 0x3074, 2476 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", 2477 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, 2478 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2479 2480 wait_for_completion(&elsio->u.els_logo.comp); 2481 2482 sp->free(vha, sp); 2483 return rval; 2484 } 2485 2486 static void 2487 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2488 { 2489 scsi_qla_host_t *vha = sp->fcport->vha; 2490 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2491 2492 els_iocb->entry_type = ELS_IOCB_TYPE; 2493 els_iocb->entry_count = 1; 2494 els_iocb->sys_define = 0; 2495 els_iocb->entry_status = 0; 2496 els_iocb->handle = sp->handle; 2497 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2498 els_iocb->tx_dsd_count = 1; 2499 els_iocb->vp_index = vha->vp_idx; 2500 els_iocb->sof_type = EST_SOFI3; 2501 els_iocb->rx_dsd_count = 0; 2502 els_iocb->opcode = elsio->u.els_logo.els_cmd; 2503 2504 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2505 els_iocb->port_id[1] = sp->fcport->d_id.b.area; 2506 els_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2507 els_iocb->control_flags = 0; 2508 2509 els_iocb->tx_byte_count = sizeof(struct els_logo_payload); 2510 els_iocb->tx_address[0] = 2511 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma)); 2512 els_iocb->tx_address[1] = 2513 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma)); 2514 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); 2515 2516 els_iocb->rx_byte_count = 0; 2517 els_iocb->rx_address[0] = 0; 2518 els_iocb->rx_address[1] = 0; 2519 els_iocb->rx_len = 0; 2520 2521 sp->fcport->vha->qla_stats.control_requests++; 2522 } 2523 2524 static void 2525 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2526 { 2527 struct bsg_job *bsg_job = sp->u.bsg_job; 2528 struct fc_bsg_request *bsg_request = bsg_job->request; 2529 2530 els_iocb->entry_type = ELS_IOCB_TYPE; 2531 els_iocb->entry_count = 1; 2532 els_iocb->sys_define = 0; 2533 els_iocb->entry_status = 0; 2534 els_iocb->handle = sp->handle; 2535 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2536 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 2537 els_iocb->vp_index = sp->fcport->vha->vp_idx; 2538 els_iocb->sof_type = EST_SOFI3; 2539 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2540 2541 els_iocb->opcode = 2542 sp->type == SRB_ELS_CMD_RPT ? 
2543 bsg_request->rqst_data.r_els.els_code : 2544 bsg_request->rqst_data.h_els.command_code; 2545 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2546 els_iocb->port_id[1] = sp->fcport->d_id.b.area; 2547 els_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2548 els_iocb->control_flags = 0; 2549 els_iocb->rx_byte_count = 2550 cpu_to_le32(bsg_job->reply_payload.payload_len); 2551 els_iocb->tx_byte_count = 2552 cpu_to_le32(bsg_job->request_payload.payload_len); 2553 2554 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address 2555 (bsg_job->request_payload.sg_list))); 2556 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address 2557 (bsg_job->request_payload.sg_list))); 2558 els_iocb->tx_len = cpu_to_le32(sg_dma_len 2559 (bsg_job->request_payload.sg_list)); 2560 2561 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address 2562 (bsg_job->reply_payload.sg_list))); 2563 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address 2564 (bsg_job->reply_payload.sg_list))); 2565 els_iocb->rx_len = cpu_to_le32(sg_dma_len 2566 (bsg_job->reply_payload.sg_list)); 2567 2568 sp->fcport->vha->qla_stats.control_requests++; 2569 } 2570 2571 static void 2572 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 2573 { 2574 uint16_t avail_dsds; 2575 uint32_t *cur_dsd; 2576 struct scatterlist *sg; 2577 int index; 2578 uint16_t tot_dsds; 2579 scsi_qla_host_t *vha = sp->fcport->vha; 2580 struct qla_hw_data *ha = vha->hw; 2581 struct bsg_job *bsg_job = sp->u.bsg_job; 2582 int loop_iterartion = 0; 2583 int entry_count = 1; 2584 2585 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 2586 ct_iocb->entry_type = CT_IOCB_TYPE; 2587 ct_iocb->entry_status = 0; 2588 ct_iocb->handle1 = sp->handle; 2589 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 2590 ct_iocb->status = cpu_to_le16(0); 2591 ct_iocb->control_flags = cpu_to_le16(0); 2592 ct_iocb->timeout = 0; 2593 ct_iocb->cmd_dsd_count = 2594 cpu_to_le16(bsg_job->request_payload.sg_cnt); 2595 ct_iocb->total_dsd_count = 2596 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 2597 ct_iocb->req_bytecount = 2598 cpu_to_le32(bsg_job->request_payload.payload_len); 2599 ct_iocb->rsp_bytecount = 2600 cpu_to_le32(bsg_job->reply_payload.payload_len); 2601 2602 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address 2603 (bsg_job->request_payload.sg_list))); 2604 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address 2605 (bsg_job->request_payload.sg_list))); 2606 ct_iocb->dseg_req_length = ct_iocb->req_bytecount; 2607 2608 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address 2609 (bsg_job->reply_payload.sg_list))); 2610 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address 2611 (bsg_job->reply_payload.sg_list))); 2612 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount; 2613 2614 avail_dsds = 1; 2615 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address; 2616 index = 0; 2617 tot_dsds = bsg_job->reply_payload.sg_cnt; 2618 2619 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 2620 dma_addr_t sle_dma; 2621 cont_a64_entry_t *cont_pkt; 2622 2623 /* Allocate additional continuation packets? */ 2624 if (avail_dsds == 0) { 2625 /* 2626 * Five DSDs are available in the Cont. 2627 * Type 1 IOCB. 
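			 * Each DSD is three 32-bit words (a 64-bit address
			 * plus a length), which is why cur_dsd advances by
			 * three per scatterlist entry.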
2628 */ 2629 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 2630 vha->hw->req_q_map[0]); 2631 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2632 avail_dsds = 5; 2633 entry_count++; 2634 } 2635 2636 sle_dma = sg_dma_address(sg); 2637 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2638 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2639 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 2640 loop_iterartion++; 2641 avail_dsds--; 2642 } 2643 ct_iocb->entry_count = entry_count; 2644 2645 sp->fcport->vha->qla_stats.control_requests++; 2646 } 2647 2648 static void 2649 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 2650 { 2651 uint16_t avail_dsds; 2652 uint32_t *cur_dsd; 2653 struct scatterlist *sg; 2654 int index; 2655 uint16_t tot_dsds; 2656 scsi_qla_host_t *vha = sp->fcport->vha; 2657 struct qla_hw_data *ha = vha->hw; 2658 struct bsg_job *bsg_job = sp->u.bsg_job; 2659 int loop_iterartion = 0; 2660 int entry_count = 1; 2661 2662 ct_iocb->entry_type = CT_IOCB_TYPE; 2663 ct_iocb->entry_status = 0; 2664 ct_iocb->sys_define = 0; 2665 ct_iocb->handle = sp->handle; 2666 2667 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2668 ct_iocb->vp_index = sp->fcport->vha->vp_idx; 2669 ct_iocb->comp_status = cpu_to_le16(0); 2670 2671 ct_iocb->cmd_dsd_count = 2672 cpu_to_le16(bsg_job->request_payload.sg_cnt); 2673 ct_iocb->timeout = 0; 2674 ct_iocb->rsp_dsd_count = 2675 cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2676 ct_iocb->rsp_byte_count = 2677 cpu_to_le32(bsg_job->reply_payload.payload_len); 2678 ct_iocb->cmd_byte_count = 2679 cpu_to_le32(bsg_job->request_payload.payload_len); 2680 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address 2681 (bsg_job->request_payload.sg_list))); 2682 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address 2683 (bsg_job->request_payload.sg_list))); 2684 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len 2685 (bsg_job->request_payload.sg_list)); 2686 2687 avail_dsds = 1; 2688 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address; 2689 index = 0; 2690 tot_dsds = bsg_job->reply_payload.sg_cnt; 2691 2692 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 2693 dma_addr_t sle_dma; 2694 cont_a64_entry_t *cont_pkt; 2695 2696 /* Allocate additional continuation packets? */ 2697 if (avail_dsds == 0) { 2698 /* 2699 * Five DSDs are available in the Cont. 2700 * Type 1 IOCB. 2701 */ 2702 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 2703 ha->req_q_map[0]); 2704 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2705 avail_dsds = 5; 2706 entry_count++; 2707 } 2708 2709 sle_dma = sg_dma_address(sg); 2710 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2711 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2712 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 2713 loop_iterartion++; 2714 avail_dsds--; 2715 } 2716 ct_iocb->entry_count = entry_count; 2717 } 2718 2719 /* 2720 * qla82xx_start_scsi() - Send a SCSI command to the ISP 2721 * @sp: command to send to the ISP 2722 * 2723 * Returns non-zero if a failure occurred, else zero. 
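 *
 * Commands with more than ql2xshiftctondsd data segments are sent as
 * Command Type 6 IOCBs, with the FCP_CMND IU and the DSD lists held in
 * external DMA buffers; smaller commands use ordinary Command Type 7 IOCBs.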
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
				&reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
2849 goto queuing_error; 2850 } 2851 2852 ctx = sp->u.scmd.ctx = 2853 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2854 if (!ctx) { 2855 ql_log(ql_log_fatal, vha, 0x3010, 2856 "Failed to allocate ctx for cmd=%p.\n", cmd); 2857 goto queuing_error; 2858 } 2859 2860 memset(ctx, 0, sizeof(struct ct6_dsd)); 2861 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2862 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2863 if (!ctx->fcp_cmnd) { 2864 ql_log(ql_log_fatal, vha, 0x3011, 2865 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); 2866 goto queuing_error; 2867 } 2868 2869 /* Initialize the DSD list and dma handle */ 2870 INIT_LIST_HEAD(&ctx->dsd_list); 2871 ctx->dsd_use_cnt = 0; 2872 2873 if (cmd->cmd_len > 16) { 2874 additional_cdb_len = cmd->cmd_len - 16; 2875 if ((cmd->cmd_len % 4) != 0) { 2876 /* SCSI command bigger than 16 bytes must be 2877 * multiple of 4 2878 */ 2879 ql_log(ql_log_warn, vha, 0x3012, 2880 "scsi cmd len %d not multiple of 4 " 2881 "for cmd=%p.\n", cmd->cmd_len, cmd); 2882 goto queuing_error_fcp_cmnd; 2883 } 2884 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 2885 } else { 2886 additional_cdb_len = 0; 2887 ctx->fcp_cmnd_len = 12 + 16 + 4; 2888 } 2889 2890 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; 2891 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2892 2893 /* Zero out remaining portion of packet. */ 2894 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 2895 clr_ptr = (uint32_t *)cmd_pkt + 2; 2896 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2897 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2898 2899 /* Set NPORT-ID and LUN number*/ 2900 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2901 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2902 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2903 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2904 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 2905 2906 /* Build IOCB segments */ 2907 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2908 goto queuing_error_fcp_cmnd; 2909 2910 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2911 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2912 2913 /* build FCP_CMND IU */ 2914 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2915 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); 2916 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2917 2918 if (cmd->sc_data_direction == DMA_TO_DEVICE) 2919 ctx->fcp_cmnd->additional_cdb_len |= 1; 2920 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2921 ctx->fcp_cmnd->additional_cdb_len |= 2; 2922 2923 /* Populate the FCP_PRIO. */ 2924 if (ha->flags.fcp_prio_enabled) 2925 ctx->fcp_cmnd->task_attribute |= 2926 sp->fcport->fcp_prio << 3; 2927 2928 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 2929 2930 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + 2931 additional_cdb_len); 2932 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); 2933 2934 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); 2935 cmd_pkt->fcp_cmnd_dseg_address[0] = 2936 cpu_to_le32(LSD(ctx->fcp_cmnd_dma)); 2937 cmd_pkt->fcp_cmnd_dseg_address[1] = 2938 cpu_to_le32(MSD(ctx->fcp_cmnd_dma)); 2939 2940 sp->flags |= SRB_FCP_CMND_DMA_VALID; 2941 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2942 /* Set total data segment count. 
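		 * (entry_count is really the number of 64-byte request-queue
		 * entries the command occupies: the command IOCB plus any
		 * continuation IOCBs.)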
		 */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;
		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;

	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue.
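	 * With ZIO the firmware may defer completion interrupts, so poll the
	 * response ring here and reap anything it has already posted.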
*/ 3037 if (vha->flags.process_response_queue && 3038 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 3039 qla24xx_process_response_queue(vha, rsp); 3040 3041 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3042 return QLA_SUCCESS; 3043 3044 queuing_error_fcp_cmnd: 3045 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 3046 queuing_error: 3047 if (tot_dsds) 3048 scsi_dma_unmap(cmd); 3049 3050 if (sp->u.scmd.ctx) { 3051 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool); 3052 sp->u.scmd.ctx = NULL; 3053 } 3054 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3055 3056 return QLA_FUNCTION_FAILED; 3057 } 3058 3059 static void 3060 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 3061 { 3062 struct srb_iocb *aio = &sp->u.iocb_cmd; 3063 scsi_qla_host_t *vha = sp->fcport->vha; 3064 struct req_que *req = vha->req; 3065 3066 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3067 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3068 abt_iocb->entry_count = 1; 3069 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 3070 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3071 abt_iocb->handle_to_abort = 3072 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); 3073 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 3074 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 3075 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 3076 abt_iocb->vp_index = vha->vp_idx; 3077 abt_iocb->req_que_no = cpu_to_le16(req->id); 3078 /* Send the command to the firmware */ 3079 wmb(); 3080 } 3081 3082 int 3083 qla2x00_start_sp(srb_t *sp) 3084 { 3085 int rval; 3086 struct qla_hw_data *ha = sp->fcport->vha->hw; 3087 void *pkt; 3088 unsigned long flags; 3089 3090 rval = QLA_FUNCTION_FAILED; 3091 spin_lock_irqsave(&ha->hardware_lock, flags); 3092 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); 3093 if (!pkt) { 3094 ql_log(ql_log_warn, sp->fcport->vha, 0x700c, 3095 "qla2x00_alloc_iocbs failed.\n"); 3096 goto done; 3097 } 3098 3099 rval = QLA_SUCCESS; 3100 switch (sp->type) { 3101 case SRB_LOGIN_CMD: 3102 IS_FWI2_CAPABLE(ha) ? 3103 qla24xx_login_iocb(sp, pkt) : 3104 qla2x00_login_iocb(sp, pkt); 3105 break; 3106 case SRB_LOGOUT_CMD: 3107 IS_FWI2_CAPABLE(ha) ? 3108 qla24xx_logout_iocb(sp, pkt) : 3109 qla2x00_logout_iocb(sp, pkt); 3110 break; 3111 case SRB_ELS_CMD_RPT: 3112 case SRB_ELS_CMD_HST: 3113 qla24xx_els_iocb(sp, pkt); 3114 break; 3115 case SRB_CT_CMD: 3116 IS_FWI2_CAPABLE(ha) ? 3117 qla24xx_ct_iocb(sp, pkt) : 3118 qla2x00_ct_iocb(sp, pkt); 3119 break; 3120 case SRB_ADISC_CMD: 3121 IS_FWI2_CAPABLE(ha) ? 3122 qla24xx_adisc_iocb(sp, pkt) : 3123 qla2x00_adisc_iocb(sp, pkt); 3124 break; 3125 case SRB_TM_CMD: 3126 IS_QLAFX00(ha) ? 3127 qlafx00_tm_iocb(sp, pkt) : 3128 qla24xx_tm_iocb(sp, pkt); 3129 break; 3130 case SRB_FXIOCB_DCMD: 3131 case SRB_FXIOCB_BCMD: 3132 qlafx00_fxdisc_iocb(sp, pkt); 3133 break; 3134 case SRB_ABT_CMD: 3135 IS_QLAFX00(ha) ? 
3136 qlafx00_abort_iocb(sp, pkt) : 3137 qla24xx_abort_iocb(sp, pkt); 3138 break; 3139 case SRB_ELS_DCMD: 3140 qla24xx_els_logo_iocb(sp, pkt); 3141 break; 3142 default: 3143 break; 3144 } 3145 3146 wmb(); 3147 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]); 3148 done: 3149 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3150 return rval; 3151 } 3152 3153 static void 3154 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, 3155 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) 3156 { 3157 uint16_t avail_dsds; 3158 uint32_t *cur_dsd; 3159 uint32_t req_data_len = 0; 3160 uint32_t rsp_data_len = 0; 3161 struct scatterlist *sg; 3162 int index; 3163 int entry_count = 1; 3164 struct bsg_job *bsg_job = sp->u.bsg_job; 3165 3166 /*Update entry type to indicate bidir command */ 3167 *((uint32_t *)(&cmd_pkt->entry_type)) = 3168 cpu_to_le32(COMMAND_BIDIRECTIONAL); 3169 3170 /* Set the transfer direction, in this set both flags 3171 * Also set the BD_WRAP_BACK flag, firmware will take care 3172 * assigning DID=SID for outgoing pkts. 3173 */ 3174 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 3175 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3176 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | 3177 BD_WRAP_BACK); 3178 3179 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 3180 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); 3181 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); 3182 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); 3183 3184 vha->bidi_stats.transfer_bytes += req_data_len; 3185 vha->bidi_stats.io_count++; 3186 3187 vha->qla_stats.output_bytes += req_data_len; 3188 vha->qla_stats.output_requests++; 3189 3190 /* Only one dsd is available for bidirectional IOCB, remaining dsds 3191 * are bundled in continuation iocb 3192 */ 3193 avail_dsds = 1; 3194 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 3195 3196 index = 0; 3197 3198 for_each_sg(bsg_job->request_payload.sg_list, sg, 3199 bsg_job->request_payload.sg_cnt, index) { 3200 dma_addr_t sle_dma; 3201 cont_a64_entry_t *cont_pkt; 3202 3203 /* Allocate additional continuation packets */ 3204 if (avail_dsds == 0) { 3205 /* Continuation type 1 IOCB can accomodate 3206 * 5 DSDS 3207 */ 3208 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3209 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 3210 avail_dsds = 5; 3211 entry_count++; 3212 } 3213 sle_dma = sg_dma_address(sg); 3214 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 3215 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 3216 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 3217 avail_dsds--; 3218 } 3219 /* For read request DSD will always goes to continuation IOCB 3220 * and follow the write DSD. If there is room on the current IOCB 3221 * then it is added to that IOCB else new continuation IOCB is 3222 * allocated. 
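	 * (That is, read-direction DSDs are appended directly after the last
	 * write DSD, reusing any DSD slots still free in the current entry
	 * before a new continuation IOCB is started.)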
3223 */ 3224 for_each_sg(bsg_job->reply_payload.sg_list, sg, 3225 bsg_job->reply_payload.sg_cnt, index) { 3226 dma_addr_t sle_dma; 3227 cont_a64_entry_t *cont_pkt; 3228 3229 /* Allocate additional continuation packets */ 3230 if (avail_dsds == 0) { 3231 /* Continuation type 1 IOCB can accomodate 3232 * 5 DSDS 3233 */ 3234 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3235 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 3236 avail_dsds = 5; 3237 entry_count++; 3238 } 3239 sle_dma = sg_dma_address(sg); 3240 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 3241 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 3242 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 3243 avail_dsds--; 3244 } 3245 /* This value should be same as number of IOCB required for this cmd */ 3246 cmd_pkt->entry_count = entry_count; 3247 } 3248 3249 int 3250 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) 3251 { 3252 3253 struct qla_hw_data *ha = vha->hw; 3254 unsigned long flags; 3255 uint32_t handle; 3256 uint32_t index; 3257 uint16_t req_cnt; 3258 uint16_t cnt; 3259 uint32_t *clr_ptr; 3260 struct cmd_bidir *cmd_pkt = NULL; 3261 struct rsp_que *rsp; 3262 struct req_que *req; 3263 int rval = EXT_STATUS_OK; 3264 3265 rval = QLA_SUCCESS; 3266 3267 rsp = ha->rsp_q_map[0]; 3268 req = vha->req; 3269 3270 /* Send marker if required */ 3271 if (vha->marker_needed != 0) { 3272 if (qla2x00_marker(vha, req, 3273 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 3274 return EXT_STATUS_MAILBOX; 3275 vha->marker_needed = 0; 3276 } 3277 3278 /* Acquire ring specific lock */ 3279 spin_lock_irqsave(&ha->hardware_lock, flags); 3280 3281 /* Check for room in outstanding command list. */ 3282 handle = req->current_outstanding_cmd; 3283 for (index = 1; index < req->num_outstanding_cmds; index++) { 3284 handle++; 3285 if (handle == req->num_outstanding_cmds) 3286 handle = 1; 3287 if (!req->outstanding_cmds[handle]) 3288 break; 3289 } 3290 3291 if (index == req->num_outstanding_cmds) { 3292 rval = EXT_STATUS_BUSY; 3293 goto queuing_error; 3294 } 3295 3296 /* Calculate number of IOCB required */ 3297 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 3298 3299 /* Check for room on request queue. */ 3300 if (req->cnt < req_cnt + 2) { 3301 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 3302 RD_REG_DWORD_RELAXED(req->req_q_out); 3303 if (req->ring_index < cnt) 3304 req->cnt = cnt - req->ring_index; 3305 else 3306 req->cnt = req->length - 3307 (req->ring_index - cnt); 3308 } 3309 if (req->cnt < req_cnt + 2) { 3310 rval = EXT_STATUS_BUSY; 3311 goto queuing_error; 3312 } 3313 3314 cmd_pkt = (struct cmd_bidir *)req->ring_ptr; 3315 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 3316 3317 /* Zero out remaining portion of packet. */ 3318 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 3319 clr_ptr = (uint32_t *)cmd_pkt + 2; 3320 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3321 3322 /* Set NPORT-ID (of vha)*/ 3323 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); 3324 cmd_pkt->port_id[0] = vha->d_id.b.al_pa; 3325 cmd_pkt->port_id[1] = vha->d_id.b.area; 3326 cmd_pkt->port_id[2] = vha->d_id.b.domain; 3327 3328 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); 3329 cmd_pkt->entry_status = (uint8_t) rsp->id; 3330 /* Build command packet. 
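	 * Record the SRB in the outstanding-command array and charge the
	 * consumed entries to req->cnt before the doorbell is rung in
	 * qla2x00_start_iocbs().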
*/ 3331 req->current_outstanding_cmd = handle; 3332 req->outstanding_cmds[handle] = sp; 3333 sp->handle = handle; 3334 req->cnt -= req_cnt; 3335 3336 /* Send the command to the firmware */ 3337 wmb(); 3338 qla2x00_start_iocbs(vha, req); 3339 queuing_error: 3340 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3341 return rval; 3342 } 3343
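
/*
 * Illustrative sketch (not part of the driver, not compiled): the two pieces
 * of ring/DSD arithmetic that recur throughout this file, written out as
 * stand-alone helpers.  The names are hypothetical; the real code open-codes
 * the same calculations against struct req_que.
 */
#if 0
/* Free request-queue entries, given the driver's producer index ('in',
 * req->ring_index), the firmware's consumer index ('out', read from
 * req_q_out) and the ring length in entries.  Callers in this file also
 * demand two spare entries on top of what the command needs
 * ("req->cnt < req_cnt + 2").
 */
static uint16_t example_req_ring_space(uint16_t in, uint16_t out,
	uint16_t length)
{
	if (in < out)
		return out - in;
	return length - (in - out);
}

/* Request entries consumed by a CT/ELS/bidir command carrying @dsds data
 * segments: one DSD fits in the command IOCB itself and each Continuation
 * Type 1 IOCB carries five more.
 */
static uint16_t example_ct_entry_count(uint16_t dsds)
{
	if (dsds <= 1)
		return 1;
	return 1 + DIV_ROUND_UP(dsds - 1, 5);
}
#endif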