Lines matching "loss", "of", "lock" in drivers/scsi/csiostor/csio_scsi.c (Chelsio FCoE driver)
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 * ...
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT;
 * ... WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR ITS USE.
 */
/*
 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
 * ...
 * Should be called with lock held.
 */
	switch (sld->level) {
	case CSIO_LEV_LUN:
		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode) &&
			((uint64_t)scmnd->device->lun == sld->oslun));
	case CSIO_LEV_RNODE:
		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode));
	case CSIO_LEV_LNODE:
		return (ioreq->lnode == sld->lnode);
	...
	}
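
The match is hierarchical: a LUN-level match also requires the rnode and lnode to be equal, and an rnode-level match also requires the lnode. A minimal userspace sketch of the same idea (types and names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

enum match_level { LEV_LNODE, LEV_RNODE, LEV_LUN };

struct io_key { const void *lnode, *rnode; uint64_t lun; };

/* Return true if 'io' matches 'key' at the requested granularity. */
static bool match_io(const struct io_key *io, const struct io_key *key,
		     enum match_level level)
{
	switch (level) {
	case LEV_LUN:
		return io->lnode == key->lnode && io->rnode == key->rnode &&
		       io->lun == key->lun;
	case LEV_RNODE:
		return io->lnode == key->lnode && io->rnode == key->rnode;
	case LEV_LNODE:
		return io->lnode == key->lnode;
	}
	return false;
}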
/*
 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 * ...
 * Should be called with lock held.
 */
	if (list_empty(&scm->active_q))
		return;

	/* Gathering everything? Splice the whole active_q over in one go. */
	if (sld->level == CSIO_LEV_ALL) {
		list_splice_tail_init(&scm->active_q, dest);
		return;
	}

	list_for_each_safe(tmp, next, &scm->active_q) {
		...
	}
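
Gathering is O(1) when every active I/O is wanted (a single splice) and a safe-iterator walk otherwise, because entries are unlinked mid-iteration. A kernel-style sketch of the pattern, with a hypothetical entry type and tag field:

#include <linux/list.h>
#include <linux/types.h>

struct io_entry {
	struct list_head list;
	int tag;		/* hypothetical per-I/O match key */
};

/*
 * Move matching entries from 'src' to 'dst'. The _safe variant is
 * required because list_move_tail() unlinks the current node.
 */
static void gather(struct list_head *src, struct list_head *dst,
		   bool match_all, int tag)
{
	struct io_entry *e, *n;

	if (match_all) {
		list_splice_tail_init(src, dst);	/* O(1) fast path */
		return;
	}

	list_for_each_entry_safe(e, n, src, list) {
		if (e->tag == tag)
			list_move_tail(&e->list, dst);
	}
}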
/*
 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 * ...
 */
	if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = 0;
		fcp_cmnd->fc_cmdref = 0;

		memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
		fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
		fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

		if (req->nsge)
			if (req->datadir == DMA_TO_DEVICE)
				fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
			else
				fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
		else
			fcp_cmnd->fc_flags = 0;
	} else {
		...
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags;
	}
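
The data-direction flag in the FCP_CMND control field follows from the mapped SG count and the DMA direction. A small sketch of just that selection (the FCP_CFL_* values are as defined in include/scsi/fc/fc_fcp.h; the helper itself is hypothetical):

#include <stdint.h>

#define FCP_CFL_WRDATA	0x01	/* initiator will write data */
#define FCP_CFL_RDDATA	0x02	/* initiator will read data */

enum dir { DIR_NONE, DIR_TO_DEVICE, DIR_FROM_DEVICE };

/* No SG elements means no data phase; otherwise the direction decides. */
static uint8_t fcp_flags(enum dir d, unsigned int nsge)
{
	if (!nsge)
		return 0;
	return (d == DIR_TO_DEVICE) ? FCP_CFL_WRDATA : FCP_CFL_RDDATA;
}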
/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * ...
 * @size: Size of WR (including FW WR + immed data + rsp SG entry).
 * ...
 */
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	...
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | ...);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | ...);
	...
	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	wr->r3 = 0;
	memset(&wr->r5, 0, 8);
	...
	dma_buf = &req->dma_buf;
	...
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
	...
	wr->r6 = 0;
	...
	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r4_lo[0] = 0;
	wr->u.fcoe.r4_lo[1] = 0;
/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * ...
 */
	struct csio_hw *hw = req->lnode->hwp;
	...
	uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (unlikely(req->drv_status != 0))
		return;
	...
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the (wrapped) slot pieces.
		 */
		...
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
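
When a work request straddles the end of the egress queue, csio_wr_get() hands back two pieces (addr1/size1 and addr2); the WR is built contiguously in a scratch buffer and then split across the pieces. A self-contained sketch of that split copy (plain C, names are mine):

#include <string.h>
#include <stdint.h>

struct wr_pair {
	void	*addr1;		/* first piece, up to the ring's end */
	size_t	 size1;
	void	*addr2;		/* second piece, from the ring's start */
};

/* Copy a fully built WR of 'size' bytes into a possibly split slot. */
static void wr_copy(const struct wr_pair *p, const uint8_t *wr, size_t size)
{
	if (p->size1 >= size) {
		memcpy(p->addr1, wr, size);	/* fits in one piece */
	} else {
		memcpy(p->addr1, wr, p->size1);
		memcpy(p->addr2, wr + p->size1, size - p->size1);
	}
}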
/*
 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 * ...
 */
	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
			      ULPTX_NSGE_V(req->nsge));
	/* Mapped-SGL path */
	if (likely(!req->dcopy)) {
		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
				sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
				...
				continue;
			}
			if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[1] = cpu_to_be32(
							sg_dma_len(sgel));
				...
			} else {
				sge_pair->addr[0] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[0] = cpu_to_be32(
							sg_dma_len(sgel));
			}
		}
	} else {
		/* Driver-buffer (dcopy) path */
		list_for_each(tmp, &req->gen_list) {
			...
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(dma_buf->paddr);
				sgl->len0 = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				...
			} else if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[1] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				...
			} else {
				sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[0] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
			}
			xfer_len -= min(xfer_len, dma_buf->len);
			...
		}
	}
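
Apart from the first SGE, which lives inline in the ulptx_sgl header (addr0/len0), SGEs are packed two per ulptx_sge_pair; the (i - 1) & 0x1 test picks the slot and a full pair advances the pointer. A self-contained sketch of that packing (the layout is mirrored loosely, not the exact hardware structure):

#include <stdint.h>

struct sge_pair { uint64_t addr[2]; uint32_t len[2]; };

static void pack_dsgl(const uint64_t *addr, const uint32_t *len, int nsge,
		      uint64_t *addr0, uint32_t *len0,
		      struct sge_pair *pairs)
{
	for (int i = 0; i < nsge; i++) {
		if (i == 0) {
			*addr0 = addr[0];	/* inline header slot */
			*len0 = len[0];
		} else if ((i - 1) & 0x1) {
			pairs->addr[1] = addr[i];	/* second slot */
			pairs->len[1] = len[i];
			pairs++;			/* pair is full */
		} else {
			pairs->addr[0] = addr[i];	/* first slot */
			pairs->len[0] = len[i];
		}
	}
}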
/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * ...
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 * ...
 */
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	...
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	...
	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) | ...);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | ...);
	...
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	...
	dma_buf = &req->dma_buf;
	...
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
	...
	wr->r4 = 0;
	...
	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * ...
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 * ...
 */
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	...
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	...
	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) | ...);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | ...);
	...
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	...
	dma_buf = &req->dma_buf;
	...
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
	...
	wr->r4 = 0;
	...
	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				\
	...								\
	if (unlikely((req)->nsge > 1))					\
		(sz) += (sizeof(struct ulptx_sge_pair) *		\
			 (ALIGN(((req)->nsge - 1), 2) / 2));		\
	...
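
The macro charges one ulptx_sge_pair for every two SGEs beyond the first, rounding the remainder up. A small worked example of the arithmetic (ALIGN_UP is a stand-in for the kernel's ALIGN):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Pairs needed beyond the inline (addr0/len0) SGE. */
static unsigned int extra_pairs(unsigned int nsge)
{
	return nsge > 1 ? ALIGN_UP(nsge - 1, 2) / 2 : 0;
}

int main(void)
{
	for (unsigned int n = 1; n <= 6; n++)
		printf("nsge=%u -> %u sge_pair(s)\n", n, extra_pairs(n));
	/* prints 0, 1, 1, 2, 2, 3 */
	return 0;
}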
/*
 * csio_scsi_read - Create a SCSI READ WR.
 * ...
 */
	struct csio_hw *hw = req->lnode->hwp;
	...
	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
	...
	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		...
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the (wrapped) slot pieces.
		 */
		...
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 * ...
 */
	struct csio_hw *hw = req->lnode->hwp;
	...
	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
	...
	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		...
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the (wrapped) slot pieces.
		 */
		...
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
/*
 * csio_setup_ddp - Setup DDP buffers for Read request.
 * ...
 */
	struct csio_hw *hw = req->lnode->hwp;
	...
	/* Check whether the SGL is DDP-page aligned */
	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
		...
		buf_off = sg_addr & (ddp_pagesz - 1);

		/* Except for the last element, every SGE must end on a
		 * DDP page boundary for direct placement to work. */
		if ((i != (req->nsge - 1)) &&
		    ((buf_off + sg_len) & (ddp_pagesz - 1))) {
			...
		}
	}
	...
	/* Aligned: place data directly into the mapped SGL */
	req->dcopy = 0;
	...
	/* Unaligned: fall back to driver DDP buffers, copy out on completion */
	req->dcopy = 1;
	...
	INIT_LIST_HEAD(&req->gen_list);
	...
		if (dma_buf == NULL || i > scsim->max_sge) {
			req->drv_status = -EBUSY;
			...
		}
		alloc_len += dma_buf->len;
		...
		list_add_tail(&dma_buf->list, &req->gen_list);
	...
	if (!req->drv_status) {
		/* set number of ddp bufs used */
		req->nsge = i;
		...
	}
	...
	/* Error: release the DMA descriptors gathered so far */
	csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
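
DDP requires each SG element except the last to span whole DDP pages from its starting offset; a single misaligned interior element forces the copy (dcopy) path. A sketch of the predicate (plain C, assuming ddp_pagesz is a power of two, mirroring only the check shown above):

#include <stdbool.h>
#include <stdint.h>

static bool sgl_ddp_aligned(const uint64_t *addr, const uint32_t *len,
			    int nsge, uint32_t ddp_pagesz)
{
	for (int i = 0; i < nsge; i++) {
		uint32_t off = addr[i] & (ddp_pagesz - 1);

		/* Interior element ending mid-page breaks DDP. */
		if (i != nsge - 1 && ((off + len[i]) & (ddp_pagesz - 1)))
			return false;
	}
	return true;
}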
/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * ...
 * @size: Size of WR
 * ...
 */
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | ...);
	...
	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	...
	wr->sub_opcode_to_chk_all_io = ...;
	...
	wr->r3[0] = 0;
	wr->r3[1] = 0;
	wr->r3[2] = 0;
	wr->r3[3] = 0;
	/* Since we re-use the same ioreq for abort as well */
	wr->t_cookie = (uintptr_t) req;
/* csio_scsi_abrt_cls() */
	struct csio_hw *hw = req->lnode->hwp;
	...
	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (req->drv_status != 0)
		return;
	...
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the (wrapped) slot pieces.
		 */
		...
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
/* SM state: csio_scsis_uninit() */
	struct csio_hw *hw = req->lnode->hwp;
	...
	case CSIO_SCSIE_START_IO:
		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE) {
				req->dcopy = 0;
				...
			}
			...
		}
		...
		if (likely(req->drv_status == 0)) {
			...
			csio_set_state(&req->sm, csio_scsis_io_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			...
		}
		break;
	...
	case CSIO_SCSIE_START_TM:
		...
		if (req->drv_status == 0) {
			...
			csio_set_state(&req->sm, csio_scsis_tm_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			...
		}
		break;
	...
	default:
		/*
		 * An unhandled event here can indicate either:
		 * - a window in the cleanup path of the SCSI module
		 *   ..., or
		 * - a window in the time we tried to issue an abort/close
		 *   of a request to FW, and the FW completed the request
		 *   first.
		 */
		req->drv_status = -EINVAL;
/* SM state: csio_scsis_io_active() */
	struct csio_hw *hw = req->lnode->hwp;
	...
	case CSIO_SCSIE_COMPLETED:
		...
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		/*
		 * In MSIX mode, a completion can arrive on an IQ before the
		 * I-T nexus loss (link down, remote device logo etc.) has
		 * been reported to the upper layer. We cannot return such an
		 * I/O immediately, since we wouldn't have reported the I-T
		 * nexus loss itself. This forces us to serialize such
		 * completions with the reporting of the I-T nexus loss.
		 * Therefore, we hold these I/Os on an internal queue
		 * (rn->host_cmpl_q).
		 * The reporting of I-T nexus loss to the upper layer is then
		 * followed by the returning of I/Os in this internal queue.
		 */
		if (unlikely(req->wr_status != FW_SUCCESS)) {
			rn = req->rnode;
			...
			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
			    ...) {
				csio_set_state(&req->sm,
					       csio_scsis_shost_cmpl_await);
				list_add_tail(&req->sm.sm_list,
					      &rn->host_cmpl_q);
			}
			...
		}
		break;
	...
	case CSIO_SCSIE_ABORT:
		...
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		...
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		...
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
/* SM state: csio_scsis_tm_active() */
	struct csio_hw *hw = req->lnode->hwp;
	...
	case CSIO_SCSIE_COMPLETED:
		...
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	...
	case CSIO_SCSIE_ABORT:
		...
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		...
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		...
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
/* SM state: csio_scsis_aborting() */
	struct csio_hw *hw = req->lnode->hwp;
	...
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw, ...
			 "in aborting st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the ABORTED event that
		 * the original I/O was returned to the driver by FW. Whether
		 * the I/O was returned with success or failure by
		 * FW (because the ABORT and completion of the I/O crossed each
		 * other) does not matter: once we are in the aborting
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;
	...
	case CSIO_SCSIE_ABORTED:
		csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
			 req, req->wr_status, req->drv_status);
		/*
		 * Check if the original I/O WR completed before the Abort
		 * completion arrived.
		 */
		if (req->drv_status != -ECANCELED) {
			...
		}
		...
		/*
		 * Possible scenarios:
		 * 1. FW aborted the I/O cleanly.
		 * 2. The completion of an I/O and the receipt of the
		 *    abort crossed each other.
		 * 3. The I/O was not found in FW, e.g. because the reason
		 *    was an I-T nexus loss (link down, remote device logged
		 *    out etc). FW sent back an appropriate IT nexus loss
		 *    status.
		 * In every case the upper layer expects a consistent result
		 * for an I/O it asked to abort. Manipulate the return value
		 * of the request accordingly.
		 */
		if ((req->wr_status == FW_SUCCESS) ||
		    (req->wr_status == FW_EINVAL) ||
		    csio_scsi_itnexus_loss_error(req->wr_status))
			req->wr_status = FW_SCSI_ABORT_REQUESTED;
		...
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		...
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
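
The net effect of the ABORTED handling is to collapse several firmware outcomes (clean abort, invalid/not-found I/O, I-T nexus loss) into one consistent "abort requested" status for the midlayer. A hedged sketch of that normalization, with made-up status codes standing in for the FW_* values:

#include <stdbool.h>

enum fw_status {
	FW_OK, FW_BAD_PARAM, FW_LINK_DOWN, FW_DEV_LOGO,
	FW_ABORT_REQUESTED, FW_OTHER,
};

/* Hypothetical stand-in for csio_scsi_itnexus_loss_error(). */
static bool itnexus_loss(enum fw_status s)
{
	return s == FW_LINK_DOWN || s == FW_DEV_LOGO;
}

/* Collapse the post-abort outcomes into one reported status. */
static enum fw_status normalize_abort_status(enum fw_status s)
{
	if (s == FW_OK || s == FW_BAD_PARAM || itnexus_loss(s))
		return FW_ABORT_REQUESTED;
	return s;	/* anything else passes through */
}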
/* SM state: csio_scsis_closing() */
	struct csio_hw *hw = req->lnode->hwp;
	...
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw, ...
			 "in closing st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the CLOSED event that
		 * the original I/O was returned to the driver by FW. Whether
		 * the I/O was returned with success or failure by
		 * FW (because the CLOSE and completion of the I/O crossed each
		 * other) does not matter: once we are in the closing
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;
	...
	case CSIO_SCSIE_CLOSED:
		/*
		 * Check if the original I/O WR completed before the Close
		 * completion arrived.
		 */
		if (req->drv_status != -ECANCELED) {
			...
		}
		...
		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
			       (req->wr_status == FW_EINVAL));
		req->wr_status = FW_SCSI_CLOSE_REQUESTED;
		...
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	...
	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		...
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
/* SM state: csio_scsis_shost_cmpl_await() */
		/*
		 * ... amount of time.
		 * ...
		 * to the next level of error recovery.
		 */
		req->drv_status = 0;
		...
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	default:
		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
			 evt, req);
/*
 * csio_scsi_cmpl_handler - WR completion handler for SCSI.
 * @hw: HW module.
 * ...
 * @len: Length of the WR.
 * ...
 *
 * This is the WR completion handler, called per completion from the
 * ISR. It is called with lock held. It walks past the RSS and CPL message
 * headers.
 * It then gets the status, WR handle (ioreq pointer) and the len of
 * the WR, based on WR opcode. Only on a non-good status is the entire
 * WR copied into the WR cache (ioreq->fw_wr).
 */
	...
	if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
		csio_warn(hw, ...
			  cpl->opcode);
		...
	}

	tempwr = (uint8_t *)(cpl->data);
	...
		ioreq = (struct csio_ioreq *)((uintptr_t)
			 (((struct fw_scsi_read_wr *)tempwr)->cookie));
		...
		ioreq->wr_status = status;
	...
		ioreq = (struct csio_ioreq *)((uintptr_t)
			 (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
		...
		ioreq->wr_status = status;
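
The WR/completion association rides entirely on the 64-bit cookie field: the ioreq pointer goes in at submit time and is cast back out in the handler. A minimal userspace sketch of the round trip:

#include <stdint.h>

struct wr { uint64_t cookie; };
struct ioreq { int wr_status; };

static void submit(struct wr *wr, struct ioreq *req)
{
	wr->cookie = (uintptr_t)req;		/* pointer -> cookie */
}

static struct ioreq *on_complete(const struct wr *wr, int status)
{
	struct ioreq *req = (struct ioreq *)(uintptr_t)wr->cookie;

	req->wr_status = status;	/* stash HW status for the SM */
	return req;
}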
/*
 * csio_scsi_cleanup_io_q - Cleanup the given queue.
 * @scm: SCSI module.
 * @q: Queue to be cleaned up.
 *
 * Called with lock held. Has to exit with lock held.
 */
	struct csio_hw *hw = scm->hw;
	...
	/* Call back the completion routines of the active_q */
	...
		list_del_init(&ioreq->sm.sm_list);
		...
		/* Drop the HW lock across the upper-layer callback */
		spin_unlock_irq(&hw->lock);
		...
		ioreq->io_cbfn(hw, ioreq);
		...
		spin_lock_irq(&scm->freelist_lock);
		...
		spin_unlock_irq(&scm->freelist_lock);
		...
		spin_lock_irq(&hw->lock);
/* csio_abrt_cls() */
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_hw *hw = ln->hwp;
	...
/*
 * csio_scsi_abort_io_q - Abort all I/Os on given queue
 * ...
 * Attempts to abort all I/Os on the given queue, and waits for a max
 * of tmo milliseconds for them to complete. Returns success
 * if all I/Os are aborted. Else returns -ETIMEDOUT.
 * Should be entered with lock held. Exits with lock held.
 * NOTE:
 * Lock has to be held across the loop that aborts I/Os, since dropping the lock
 * in between can leave the list in an inconsistent state. In turn, the caller
 * of this function has to ensure that the number of I/os to be aborted
 * is finite enough to not cause lock-held-for-too-long issues.
 */
	struct csio_hw *hw = scm->hw;
	...
	/* Wait until all the aborted I/Os complete or the timeout expires */
	while (!list_empty(q) && count--) {
		spin_unlock_irq(&hw->lock);
		...	/* sleep briefly, then re-check under the lock */
		spin_lock_irq(&hw->lock);
	}
	...
	return -ETIMEDOUT;
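
The wait loops in this file all share one pattern: hold the lock to examine the queue, drop it around the sleep so completions (which take the same lock) can drain the queue, and re-check. A kernel-style sketch of the pattern (function and parameter names are mine, not the driver's):

#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/errno.h>

/* Poll for 'q' to drain; caller holds 'lock' on entry and exit. */
static int wait_drain(spinlock_t *lock, struct list_head *q,
		      unsigned int poll_ms, unsigned int max_polls)
{
	while (!list_empty(q) && max_polls--) {
		spin_unlock_irq(lock);
		msleep(poll_ms);	/* let completions take the lock */
		spin_lock_irq(lock);
	}
	return list_empty(q) ? 0 : -ETIMEDOUT;
}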
/*
 * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
 * ...
 * Called with lock held, should exit with lock held.
 * ...
 */
	struct csio_hw *hw = scm->hw;
	...
	/* No I/Os pending */
	if (list_empty(&scm->active_q))
		return 0;

	/* Wait until all active I/Os are completed/aborted/returned */
	while (!list_empty(&scm->active_q) && count--) {
		spin_unlock_irq(&hw->lock);
		...
		spin_lock_irq(&hw->lock);
	}
	...
	/* All I/Os completed */
	if (list_empty(&scm->active_q))
		return 0;
	...
	/* Else abort the remaining I/Os and wait for them to go away */
	rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
	...
	/* If aborts timed out, fail the stragglers back to the midlayer */
	csio_scsi_cleanup_io_q(scm, &scm->active_q);
	...
	CSIO_DB_ASSERT(list_empty(&scm->active_q));
/*
 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
 * ...
 * Called with lock held, should exit with lock held.
 * Can sleep (with dropped lock) when waiting for I/Os to complete.
 */
	struct csio_hw *hw = scm->hw;
	...
	/* Gather this lnode's active I/Os onto its completion queue */
	INIT_LIST_HEAD(&ln->cmpl_q);
	csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);

	/* No I/Os pending on this lnode */
	if (list_empty(&ln->cmpl_q))
		return 0;

	/* Wait until all active I/Os on this lnode are completed */
	while (!list_empty(&ln->cmpl_q) && count--) {
		spin_unlock_irq(&hw->lock);
		...
		spin_lock_irq(&hw->lock);
	}
	...
	/* All I/Os completed */
	if (list_empty(&ln->cmpl_q))
		return 0;
	...
	/* Else abort the remaining I/Os, then clean up any stragglers */
	rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
	...
	csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
	...
	CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));
/* csio_device_reset() */
	...
		return -EINVAL;
	...
	spin_lock_irq(&hw->lock);
	...
	spin_unlock_irq(&hw->lock);
	...

/* csio_disable_port() */
	...
		return -EINVAL;
	...
	csio_lnodes_block_by_port(hw, ln->portid);
	...
	spin_lock_irq(&hw->lock);
	csio_disable_lnodes(hw, ln->portid, disable);
	spin_unlock_irq(&hw->lock);
	...
	csio_lnodes_unblock_by_port(hw, ln->portid);
/* csio_show_dbg_level() */
	return sysfs_emit(buf, "%x\n", ln->params.log_level);

/* csio_store_dbg_level() */
	...
		return -EINVAL;
	...
		return -EINVAL;

	ln->params.log_level = dbg_level;
	hw->params.log_level = dbg_level;
	...

/* csio_show_num_reg_rnodes() */
	return sysfs_emit(buf, "%d\n", ln->num_reg_rnodes);
/* csio_scsi_copy_to_sgl() */
	dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
	...
	/* Copy data from driver buffer to SGs of SCSI CMD */
	...
		if (buf_off >= dma_buf->len) {
			...	/* advance to the next driver buffer */
		}
		...
		if (start_off >= sg->length) {
			start_off -= sg->length;
			...	/* advance to the next SG element */
		}
		...
		buf_addr = dma_buf->vaddr + buf_off;
		sg_off = sg->offset + start_off;
		/* Clamp the copy at the buffer end, the SG element end,
		 * and the page boundary of the mapped SG page. */
		bytes_copy = min((dma_buf->len - buf_off),
				 sg->length - start_off);
		bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
				 bytes_copy);
		...
			csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
				 ...);
		...
		bytes_left -= bytes_copy;
	...
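
Each copy chunk is clamped three ways: bytes remaining in the driver buffer, bytes remaining in the destination SG element, and the end of the currently mapped page (kmap makes only one page addressable at a time). A worked userspace example of the min() chain, with a 4 KiB page size assumed:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ 4096u

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

static uint32_t copy_chunk(uint32_t buf_len, uint32_t buf_off,
			   uint32_t sg_len, uint32_t sg_start,
			   uint32_t sg_off)
{
	uint32_t n = min_u32(buf_len - buf_off, sg_len - sg_start);

	return min_u32(PAGE_SZ - (sg_off & (PAGE_SZ - 1)), n);
}

int main(void)
{
	/* 10 KiB buffer at offset 0, 8 KiB SG element, page offset 0x300 */
	printf("%u\n", copy_chunk(10240, 0, 8192, 0, 0x300));	/* 3328 */
	return 0;
}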
/*
 * csio_scsi_err_handler - SCSI error handler.
 * @hw: HW module.
 * @req: IO request.
 */
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	...
	switch (req->wr_status) {
	...
	case FW_SCSI_RSP_ERR:
		dma_buf = &req->dma_buf;
		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
		...
		flags = fcp_resp->resp.fr_flags;
		scsi_status = fcp_resp->resp.fr_status;

		if (flags & FCP_RSP_LEN_VAL) {
			rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
			if (... ||
			    (rsp_info->rsp_code != FCP_TMF_CMPL)) {
				...
			}
		}

		if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
			sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
			...
			memcpy(cmnd->sense_buffer,
			       &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
		}
		...
		scsi_set_resid(cmnd, be32_to_cpu(fcp_resp->ext.fr_resid));
		...
		if (... &&
		    ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
						< cmnd->underflow))
			...
		break;
	...
	case FW_SCSI_OVER_FLOW_ERR:
		csio_warn(hw,
			  "Over-flow error,cmnd:0x%x expected len:0x%x"
			  " resid:0x%x\n", cmnd->cmnd[0],
			  ...);
		...
		break;

	case FW_SCSI_UNDER_FLOW_ERR:
		csio_warn(hw,
			  "Under-flow error,cmnd:0x%x expected"
			  ...,
			  cmnd->cmnd[0], scsi_bufflen(cmnd),
			  scsi_get_resid(cmnd), cmnd->device->lun,
			  rn->flowid);
		...
		break;
	...
	case FW_SCSI_ABORT_REQUESTED:
	case FW_SCSI_CLOSE_REQUESTED:
		csio_dbg(hw, ...
			 cmnd->cmnd[0],
			 (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
			 ...);
		...
		if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
			...
		break;
	...
		csio_dbg(hw, ...
			 req, cmnd, req->wr_status);
		...
		/*
		 * ...
		 * to device-disappeared!
		 */
		...
	default:
		csio_dbg(hw, ...
			 req->wr_status, req, cmnd);
	}
	...
	if (req->nsge > 0) {
		...
		if (req->dcopy && (host_status == DID_OK))
			...	/* copy DDP buffers back into the SGL */
	}
	...
	cmnd->result = (((host_status) << 16) | scsi_status);
	...
	complete(&req->cmplobj);
/*
 * csio_scsi_cbfn - SCSI callback function.
 * ...
 */
	if (likely(req->wr_status == FW_SUCCESS)) {
		if (req->nsge > 0) {
			...
			if (req->dcopy)
				...	/* copy DDP buffers back into the SGL */
		}
		...
	}
	...
	cmnd->result = (((host_status) << 16) | scsi_status);
/*
 * csio_queuecommand - Entry point to kickstart an I/O request.
 * ...
 * This routine does the following:
 *	- Checks for HW and Rnode module readiness.
 *	- Gets a free ioreq structure (which is already initialized
 *	  ...).
 *	- Maps SG elements.
 *	- Initializes ioreq members.
 *	- Kicks off the SCSI state machine for this IO.
 *	- Returns busy status on error.
 */
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	...
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	...
	sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
	...
		cmnd->result = nr;
	...
		cmnd->result = (DID_REQUEUE << 16);
	...
	/* Get req->nsge, if there are SG elements to be mapped */
	...
	if (unlikely(nsge > scsim->max_sge)) {
		csio_warn(hw, ...
			  " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
		...
	}

	/* Get a free ioreq structure - SM is already set to uninit */
	...
		csio_err(hw, "Out of I/O request elements. Active #:%d\n",
			 scsim->stats.n_active);
	...
	ioreq->nsge = nsge;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;
	ioreq->wr_status = 0;
	ioreq->drv_status = 0;
	...
	ioreq->tmo = 0;
	ioreq->datadir = cmnd->sc_data_direction;

	if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
		...
		ln->stats.n_output_bytes += scsi_bufflen(cmnd);
	} else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
		...
		ln->stats.n_input_bytes += scsi_bufflen(cmnd);
	}
	...
	ioreq->io_cbfn = csio_scsi_cbfn;
	...
	cmnd->host_scribble = (unsigned char *)ioreq;
	csio_priv(cmnd)->fc_tm_flags = 0;

	/* Kick off the SCSI state machine for this I/O */
	spin_lock_irqsave(&hw->lock, flags);
	...
	spin_unlock_irqrestore(&hw->lock, flags);
/* csio_do_abrt_cls() */
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];

	ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
	/*
	 * Use the current processor's egress queue to post the
	 * abort/close, but retain
	 * the ingress queue ID of the original I/O being aborted/closed - we
	 * want the abort/close completion to arrive on the same queue
	 * that carried the original I/O's completion.
	 */
	ioreq->eq_idx = sqset->eq_idx;
/* csio_eh_abort_handler() */
	struct csio_lnode *ln = shost_priv(cmnd->device->host);
	...
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	...
	ioreq = (struct csio_ioreq *)cmnd->host_scribble;
	...
	csio_dbg(hw, ...
		 ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
		 cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
	...
	/* Kick off the abort and wait for its completion */
	reinit_completion(&ioreq->cmplobj);
	spin_lock_irq(&hw->lock);
	...
	spin_unlock_irq(&hw->lock);
	...
	if (rv == -EINVAL) {
		...
	}
	...
	wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
	...
	/* FW did not respond to the abort within the wait period */
	if (...) {
		csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
		...
	}
	...
	if (ioreq->nsge > 0)
		...
	...
	spin_lock_irq(&hw->lock);
	...
	spin_unlock_irq(&hw->lock);
	...
	cmnd->result = (DID_ERROR << 16);
	...
	if (host_byte(cmnd->result) == DID_REQUEUE) {
		...
			  cmnd->device->id, cmnd->device->lun,
			  scsi_cmd_to_rq(cmnd)->tag);
		return SUCCESS;
	}
	...
		  cmnd->device->id, cmnd->device->lun,
		  scsi_cmd_to_rq(cmnd)->tag);
	return FAILED;
/*
 * csio_tm_cbfn - TM callback function.
 * ...
 */
	csio_dbg(hw, ...
		 req, req->wr_status);
	/* Cache the FW return status; the ioreq is freed soon after */
	csio_priv(cmnd)->wr_status = req->wr_status;
	...
	/* Check the LUN reset completion status in the FCP response */
	if (req->wr_status == FW_SCSI_RSP_ERR) {
		dma_buf = &req->dma_buf;
		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
		...
		flags = fcp_resp->resp.fr_flags;
		...
			if (rsp_info->rsp_code == FCP_TMF_CMPL)
				csio_priv(cmnd)->wr_status = FW_SUCCESS;

		csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
	}
/* csio_eh_lun_reset_handler() */
	struct csio_lnode *ln = shost_priv(cmnd->device->host);
	...
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	...
	csio_dbg(hw, ...
		 cmnd->device->lun, rn->flowid, rn->scsi_id);
	...
		csio_err(hw,
			 "LUN reset cannot be issued on non-ready"
			 ...,
			 ln->vnp_flowid, cmnd->device->lun);
	...
	/*
	 * If the rnode is not ready, the I-T nexus is down: wait until the
	 * remote node has come back online, or the device loss timer has fired
	 * ...
	 */
	if (fc_remote_port_chkready(rn->rport)) {
		csio_err(hw,
			 "LUN reset cannot be issued on non-ready"
			 ...,
			 rn->flowid, cmnd->device->lun);
		...
	}

	/* Get a free ioreq structure - SM is already set to uninit */
	...
		csio_err(hw, "Out of IO request elements. Active # :%d\n",
			 scsim->stats.n_active);
	...
	sqset = &hw->sqset[ln->portid][smp_processor_id()];
	ioreq->nsge = 0;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;
	...
	cmnd->host_scribble = (unsigned char *)ioreq;
	csio_priv(cmnd)->wr_status = 0;
	...
	csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET;
	ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;

	/*
	 * FW times the LUN reset for ioreq->tmo, so we have to wait a little
	 * longer than that for the FW to return a timed-out reset.
	 */
	count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
	...
	ioreq->io_cbfn = csio_tm_cbfn;

	/* Save off the ioreq info for later use */
	...
	sld.lnode = ioreq->lnode;
	sld.rnode = ioreq->rnode;
	sld.oslun = cmnd->device->lun;

	spin_lock_irqsave(&hw->lock, flags);
	...
	spin_unlock_irqrestore(&hw->lock, flags);
	...
	/* Poll for the cached TM status until the count runs out */
	while (...
	       && count--)
		...

	/* LUN reset timed out */
	if (...) {
		csio_err(hw, ...
			 cmnd->device->id, cmnd->device->lun);

		spin_lock_irq(&hw->lock);
		...
		list_del_init(&ioreq->sm.sm_list);
		spin_unlock_irq(&hw->lock);
		...
	}

	/* LUN reset returned; check the cached status */
	if (csio_priv(cmnd)->wr_status != FW_SUCCESS) {
		csio_err(hw, ...
			 cmnd->device->id, cmnd->device->lun,
			 csio_priv(cmnd)->wr_status);
		...
	}
	...
	/* Gather in-flight I/Os for this LUN and abort them */
	spin_lock_irq(&hw->lock);
	csio_scsi_gather_active_ios(scsim, &sld, &local_q);
	...
	spin_unlock_irq(&hw->lock);
	...
	if (retval != 0) {
		csio_err(hw,
			 "Attempt to abort I/Os during LUN reset of %llu"
			 " returned %d\n", cmnd->device->lun, retval);
		/* Return the I/Os back to active_q */
		spin_lock_irq(&hw->lock);
		list_splice_tail_init(&local_q, &scsim->active_q);
		spin_unlock_irq(&hw->lock);
		...
	}
	...
	csio_info(hw, ...
		  cmnd->device->id, cmnd->device->lun);
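
The polling budget above converts the firmware LUN-reset timeout (seconds) into a number of fixed-interval polls, padded by 10 seconds so the host outlives the firmware timer. A worked sketch of the arithmetic (the constant values here are assumptions, not the driver's):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tmo_s = 30;	/* assumed FW LUN-reset timeout */
	unsigned int poll_ms = 200;	/* assumed polling interval */

	/* Pad by 10 s so the host waits out the FW timer, then poll. */
	unsigned int polls = DIV_ROUND_UP((tmo_s + 10) * 1000, poll_ms);

	printf("%u polls of %u ms (%u ms total)\n",
	       polls, poll_ms, polls * poll_ms);	/* 200 polls, 40000 ms */
	return 0;
}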
/* csio_slave_alloc() */
	...
		return -ENXIO;

	sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
	...

/* csio_slave_destroy() */
	sdev->hostdata = NULL;
	...

/* csio_scan_finished() */
	spin_lock_irq(shost->host_lock);
	if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
		...
	...
	spin_unlock_irq(shost->host_lock);
	/* .this_id from the driver's two SCSI host templates */
	.this_id		= -1,
	...
	.this_id		= -1,
/*
 * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
 * ...
 * @num_buf: Number of buffers.
 * ...
 */
	...
		return -EINVAL;

	INIT_LIST_HEAD(&scm->ddp_freelist);

	/* Round up buf size to the nearest page size */
	buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
	...
		/* On a descriptor allocation failure, report how many
		 * buffers made it onto the freelist so far. */
		...
			 scm->stats.n_free_ddp);
		...
		/* Allocate the coherent DMA buffer itself */
		ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
						     &ddp_desc->paddr,
						     GFP_KERNEL);
		if (!ddp_desc->vaddr) {
			...
		}

		ddp_desc->len = unit_size;
		...
		list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
	...
	/* Error path: release whatever was allocated so far */
	list_for_each(tmp, &scm->ddp_freelist) {
		...
		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
				  ddp_desc->vaddr, ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		...
	}
	scm->stats.n_free_ddp = 0;

	return -ENOMEM;
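
The round-up at the top of the allocator is the standard power-of-two trick; the kernel expresses it with PAGE_MASK, which is ~(PAGE_SIZE - 1). A tiny standalone illustration (4 KiB page assumed):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ	4096u

/* Add PAGE_SZ-1, then clear the low bits (PAGE_SZ is a power of two). */
static uint32_t round_to_page(uint32_t n)
{
	return (n + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

int main(void)
{
	printf("%u -> %u\n", 1u, round_to_page(1));		/* 4096 */
	printf("%u -> %u\n", 4096u, round_to_page(4096));	/* 4096 */
	printf("%u -> %u\n", 4097u, round_to_page(4097));	/* 8192 */
	return 0;
}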
/*
 * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
 * ...
 */
	...
	list_for_each(tmp, &scm->ddp_freelist) {
		...
		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
				  ddp_desc->vaddr, ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		...
	}
	scm->stats.n_free_ddp = 0;
/*
 * csio_scsim_init - Initialize SCSI Module
 * ...
 */
	...
	INIT_LIST_HEAD(&scm->active_q);
	scm->hw = hw;

	scm->proto_cmd_len = sizeof(struct fcp_cmnd);
	scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
	scm->max_sge = CSIO_SCSI_MAX_SGE;

	spin_lock_init(&scm->freelist_lock);

	/* Pre-allocate ioreqs and initialize them */
	INIT_LIST_HEAD(&scm->ioreq_freelist);
	...
		/* On an ioreq allocation failure, report how many made it
		 * onto the freelist so far. */
		...
			 scm->stats.n_free_ioreq);
		...
		/* Allocate the DMA buffer for the response payload */
		dma_buf = &ioreq->dma_buf;
		dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
						&dma_buf->paddr);
		if (!dma_buf->vaddr) {
			...
		}

		dma_buf->len = scm->proto_rsp_len;
		...
		csio_init_state(&ioreq->sm, csio_scsis_uninit);
		INIT_LIST_HEAD(&ioreq->gen_list);
		init_completion(&ioreq->cmplobj);
		...
		list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
	...
	/*
	 * Error path: free up the allocations made so far, since an error
	 * from here means we are returning for good.
	 */
	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		...
		dma_buf = &ioreq->dma_buf;
		dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
			      dma_buf->paddr);
		...
	}

	scm->stats.n_free_ioreq = 0;

	return -ENOMEM;
/* csio_scsim_exit() */
	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		...
		dma_buf = &ioreq->dma_buf;
		dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
			      dma_buf->paddr);
		...
	}

	scm->stats.n_free_ioreq = 0;

	csio_scsi_free_ddp_bufs(scm, scm->hw);