Lines Matching +full:full +full:- +full:frame
1 /*-
38 * @defgroup unsol Unsolicited Frame Handling
49 ocs_node_fcid_display(fc_be24toh((hdr)->s_id), s_id_text, sizeof(s_id_text)); \
50 ocs_log_debug(ocs, "[%06x.%s] %02x/%04x/%04x: " fmt, fc_be24toh((hdr)->d_id), s_id_text, \
51 (hdr)->r_ctl, ocs_be16toh((hdr)->ox_id), ocs_be16toh((hdr)->rx_id), ##__VA_ARGS__); \
74 * @return Returns 0 on success, or a non-zero value on failure.
79 ocs_xport_rq_thread_info_t *thread_data = mythread->arg; in ocs_unsol_rq_thread()
80 ocs_t *ocs = thread_data->ocs; in ocs_unsol_rq_thread()
84 ocs_log_debug(ocs, "%s running\n", mythread->name); in ocs_unsol_rq_thread()
86 seq = ocs_cbuf_get(thread_data->seq_cbuf, 100000); in ocs_unsol_rq_thread()
89 ocs_thread_yield(&thread_data->thread); in ocs_unsol_rq_thread()
94 ocs_unsol_process((ocs_t*)seq->hw->os, seq); in ocs_unsol_rq_thread()
97 if (--yield_count == 0) { in ocs_unsol_rq_thread()
98 ocs_thread_yield(&thread_data->thread); in ocs_unsol_rq_thread()
102 ocs_log_debug(ocs, "%s exiting\n", mythread->name); in ocs_unsol_rq_thread()
103 thread_data->thread_started = FALSE; in ocs_unsol_rq_thread()
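The RQ thread above drains unsolicited sequences from a bounded buffer, yielding when the buffer is empty and again after a fixed burst so one thread cannot monopolize a CPU. A minimal standalone sketch of that pattern, assuming hypothetical queue/yield primitives rather than the driver's ocs_cbuf_*()/ocs_thread_*() API:

#include <stddef.h>

struct seq;                                         /* opaque unsolicited sequence */
struct seq *queue_get(void *q, int timeout_us);     /* hypothetical: NULL on timeout */
void process_seq(struct seq *s);                    /* hypothetical dispatch */
void thread_yield(void);                            /* hypothetical cooperative yield */

static void rq_consumer(void *q, volatile int *keep_running)
{
	int yield_budget = 10000;                   /* re-armed after every voluntary yield */

	while (*keep_running) {
		struct seq *s = queue_get(q, 100000 /* us */);
		if (s == NULL) {
			thread_yield();             /* nothing arrived within the timeout */
			continue;
		}
		process_seq(s);
		if (--yield_budget == 0) {          /* processed a full burst: yield briefly */
			thread_yield();
			yield_budget = 10000;
		}
	}
}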
118 ocs_assert(hio, -1); in ocs_unsol_abort_cb()
119 ocs_assert(arg, -1); in ocs_unsol_abort_cb()
120 ocs_log_debug(ocs, "xri=0x%x tag=0x%x\n", hio->indicator, hio->reqtag); in ocs_unsol_abort_cb()
121 ocs_hw_io_free(&ocs->hw, hio); in ocs_unsol_abort_cb()
134 hw_rc = ocs_hw_io_abort(&ocs->hw, hio, FALSE, in ocs_port_owned_abort()
138 ocs_log_debug(ocs, "already aborted XRI 0x%x\n", hio->indicator); in ocs_port_owned_abort()
141 hio->indicator, hw_rc); in ocs_port_owned_abort()
152 * @param arg Application-specified callback data.
162 ocs_xport_t *xport = ocs->xport; in ocs_unsolicited_cb()
167 if (ocs->rq_threads == 0) { in ocs_unsolicited_cb()
171 fc_header_t *hdr = seq->header->dma.virt; in ocs_unsolicited_cb()
172 uint32_t ox_id = ocs_be16toh(hdr->ox_id); in ocs_unsolicited_cb()
173 uint32_t thr_index = ox_id % ocs->rq_threads; in ocs_unsolicited_cb()
175 rc = ocs_cbuf_put(xport->rq_thread_info[thr_index].seq_cbuf, seq); in ocs_unsolicited_cb()
179 ocs_hw_sequence_free(&ocs->hw, seq); in ocs_unsolicited_cb()
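The distribution rule in ocs_unsolicited_cb() is simply OX_ID modulo the number of RQ threads, which keeps every frame of a given exchange on the same thread. A self-contained illustration over the big-endian OX_ID bytes as they sit in the FC header; this is a sketch, not the driver's fc_header_t accessors:

#include <stdint.h>

static inline uint32_t rq_thread_index(const uint8_t ox_id_be[2], uint32_t num_rq_threads)
{
	uint16_t ox_id = (uint16_t)((ox_id_be[0] << 8) | ox_id_be[1]);  /* ocs_be16toh() equivalent */
	return ox_id % num_rq_threads;      /* caller guarantees num_rq_threads != 0 */
}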
202 uint8_t seq_fcfi = seq->fcfi; in ocs_unsol_process()
205 if (ocs->hw.workaround.override_fcfi) { in ocs_unsol_process()
206 if (ocs->hw.first_domain_idx > -1) { in ocs_unsol_process()
207 seq_fcfi = ocs->hw.first_domain_idx; in ocs_unsol_process()
211 /* Range check seq->fcfi */ in ocs_unsol_process()
212 if (seq_fcfi < ARRAY_SIZE(ocs->xport->fcfi)) { in ocs_unsol_process()
213 xport_fcfi = &ocs->xport->fcfi[seq_fcfi]; in ocs_unsol_process()
216 /* If the transport FCFI entry is NULL, then drop the frame */ in ocs_unsol_process()
218 ocs_log_test(ocs, "FCFI %d is not valid, dropping frame\n", seq->fcfi); in ocs_unsol_process()
219 if (seq->hio != NULL) { in ocs_unsol_process()
220 ocs_port_owned_abort(ocs, seq->hio); in ocs_unsol_process()
223 ocs_hw_sequence_free(&ocs->hw, seq); in ocs_unsol_process()
226 domain = ocs_hw_domain_get(&ocs->hw, seq_fcfi); in ocs_unsol_process()
231 * then add the new frame to the pending list in ocs_unsol_process()
234 xport_fcfi->hold_frames || in ocs_unsol_process()
235 !ocs_list_empty(&xport_fcfi->pend_frames)) { in ocs_unsol_process()
236 ocs_lock(&xport_fcfi->pend_frames_lock); in ocs_unsol_process()
237 ocs_list_add_tail(&xport_fcfi->pend_frames, seq); in ocs_unsol_process()
238 ocs_unlock(&xport_fcfi->pend_frames_lock); in ocs_unsol_process()
246 * We are not holding frames and the pending list is empty, so just process the frame. in ocs_unsol_process()
247 * A non-zero return means the frame was not handled - so clean up in ocs_unsol_process()
250 if (seq->hio != NULL) { in ocs_unsol_process()
251 ocs_port_owned_abort(ocs, seq->hio); in ocs_unsol_process()
253 ocs_hw_sequence_free(&ocs->hw, seq); in ocs_unsol_process()
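The gating logic above can be summarized as a small predicate: once frames are being held for an FCFI, or anything is already queued, every new frame must join the tail of the pending list to preserve ordering; otherwise it may be dispatched immediately. A sketch under those assumptions, with hypothetical bookkeeping fields standing in for the driver's xport_fcfi state:

struct fcfi_state {
	int hold_frames;     /* set while the domain is being torn down or (re)attached */
	int pend_count;      /* frames already sitting on the pending list */
};

enum frame_action { DISPATCH_NOW, QUEUE_PENDING };

static enum frame_action classify_frame(const struct fcfi_state *f, int domain_attached)
{
	/* No domain yet, holding, or something already queued: queue to keep frames in order. */
	if (!domain_attached || f->hold_frames || f->pend_count != 0)
		return QUEUE_PENDING;
	return DISPATCH_NOW;
}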
275 ocs_t *ocs = node->ocs; in ocs_process_node_pending()
280 /* need to check for the hold frames condition after each frame is processed in ocs_process_node_pending()
281 * because any given frame could cause a transition to a state that in ocs_process_node_pending()
288 /* Get next frame/sequence */ in ocs_process_node_pending()
289 ocs_lock(&node->pend_frames_lock); in ocs_process_node_pending()
290 seq = ocs_list_remove_head(&node->pend_frames); in ocs_process_node_pending()
292 pend_frames_processed = node->pend_frames_processed; in ocs_process_node_pending()
293 node->pend_frames_processed = 0; in ocs_process_node_pending()
294 ocs_unlock(&node->pend_frames_lock); in ocs_process_node_pending()
297 node->pend_frames_processed++; in ocs_process_node_pending()
298 ocs_unlock(&node->pend_frames_lock); in ocs_process_node_pending()
300 /* now dispatch frame(s) to dispatch function */ in ocs_process_node_pending()
302 if (seq->hio != NULL) { in ocs_process_node_pending()
303 ocs_port_owned_abort(ocs, seq->hio); in ocs_process_node_pending()
305 ocs_hw_sequence_free(&ocs->hw, seq); in ocs_process_node_pending()
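ocs_process_node_pending() and ocs_domain_process_pending() share the same drain pattern: re-test the hold condition before every removal (a dispatched frame can set it again), pop one entry at a time under the lock, and report how many frames were processed once the list runs dry. A compact sketch with hypothetical list and hold helpers:

#include <stddef.h>

struct pend_list;                                   /* hypothetical locked FIFO of sequences */

void *pend_pop(struct pend_list *pl);               /* hypothetical: NULL when empty */
int   frames_held(void *owner);                     /* hypothetical hold-condition test */
void  dispatch_frame(void *owner, void *frame);     /* hypothetical per-frame dispatch */

static unsigned drain_pending(void *owner, struct pend_list *pl)
{
	unsigned processed = 0;
	void *frame;

	while (!frames_held(owner) && (frame = pend_pop(pl)) != NULL) {
		dispatch_frame(owner, frame);       /* may flip the hold condition */
		processed++;
	}
	return processed;                           /* analogous to pend_frames_processed */
}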
333 ocs_t *ocs = domain->ocs; in ocs_domain_process_pending()
338 ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1); in ocs_domain_process_pending()
339 xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; in ocs_domain_process_pending()
342 /* need to check for the hold frames condition after each frame is processed in ocs_domain_process_pending()
343 * because any given frame could cause a transition to a state that in ocs_domain_process_pending()
350 /* Get next frame/sequence */ in ocs_domain_process_pending()
351 ocs_lock(&xport_fcfi->pend_frames_lock); in ocs_domain_process_pending()
352 seq = ocs_list_remove_head(&xport_fcfi->pend_frames); in ocs_domain_process_pending()
354 pend_frames_processed = xport_fcfi->pend_frames_processed; in ocs_domain_process_pending()
355 xport_fcfi->pend_frames_processed = 0; in ocs_domain_process_pending()
356 ocs_unlock(&xport_fcfi->pend_frames_lock); in ocs_domain_process_pending()
359 xport_fcfi->pend_frames_processed++; in ocs_domain_process_pending()
360 ocs_unlock(&xport_fcfi->pend_frames_lock); in ocs_domain_process_pending()
362 /* now dispatch frame(s) to dispatch function */ in ocs_domain_process_pending()
364 if (seq->hio != NULL) { in ocs_domain_process_pending()
365 ocs_port_owned_abort(ocs, seq->hio); in ocs_domain_process_pending()
367 ocs_hw_sequence_free(&ocs->hw, seq); in ocs_domain_process_pending()
394 ocs_hw_sequence_t *frame; in ocs_purge_pending() local
397 frame = ocs_frame_next(pend_list, list_lock); in ocs_purge_pending()
398 if (frame == NULL) { in ocs_purge_pending()
402 frame_printf(ocs, (fc_header_t*) frame->header->dma.virt, "Discarding held frame\n"); in ocs_purge_pending()
403 if (frame->hio != NULL) { in ocs_purge_pending()
404 ocs_port_owned_abort(ocs, frame->hio); in ocs_purge_pending()
406 ocs_hw_sequence_free(&ocs->hw, frame); in ocs_purge_pending()
428 return ocs_purge_pending(node->ocs, &node->pend_frames, &node->pend_frames_lock); in ocs_node_purge_pending()
447 ocs_t *ocs = domain->ocs; in ocs_domain_purge_pending()
450 ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1); in ocs_domain_purge_pending()
451 xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; in ocs_domain_purge_pending()
452 return ocs_purge_pending(domain->ocs, in ocs_domain_purge_pending()
453 &xport_fcfi->pend_frames, in ocs_domain_purge_pending()
454 &xport_fcfi->pend_frames_lock); in ocs_domain_purge_pending()
461 * @param arg Node for which the pending frame hold condition is
472 return node->hold_frames; in ocs_node_frames_held()
479 * @param arg Domain for which the pending frame hold condition is
490 ocs_t *ocs = domain->ocs; in ocs_domain_frames_held()
494 ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, 1); in ocs_domain_frames_held()
495 xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; in ocs_domain_frames_held()
496 return xport_fcfi->hold_frames; in ocs_domain_frames_held()
515 ocs_t *ocs = domain->ocs; in ocs_domain_hold_frames()
518 ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI); in ocs_domain_hold_frames()
519 xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; in ocs_domain_hold_frames()
520 if (!xport_fcfi->hold_frames) { in ocs_domain_hold_frames()
521 ocs_log_debug(domain->ocs, "hold frames set for FCFI %d\n", in ocs_domain_hold_frames()
522 domain->fcf_indicator); in ocs_domain_hold_frames()
523 xport_fcfi->hold_frames = 1; in ocs_domain_hold_frames()
543 ocs_t *ocs = domain->ocs; in ocs_domain_accept_frames()
546 ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI); in ocs_domain_accept_frames()
547 xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator]; in ocs_domain_accept_frames()
548 if (xport_fcfi->hold_frames == 1) { in ocs_domain_accept_frames()
549 ocs_log_debug(domain->ocs, "hold frames cleared for FCFI %d\n", in ocs_domain_accept_frames()
550 domain->fcf_indicator); in ocs_domain_accept_frames()
552 xport_fcfi->hold_frames = 0; in ocs_domain_accept_frames()
558 * @brief Dispatch unsolicited FC frame.
561 * This function processes an unsolicited FC frame queued at the
567 * @return Returns 0 if frame processed and RX buffers cleaned
568 * up appropriately, -1 if frame not handled.
575 ocs_t *ocs = domain->ocs; in ocs_domain_dispatch_frame()
582 ocs_assert(seq->header, -1); in ocs_domain_dispatch_frame()
583 ocs_assert(seq->header->dma.virt, -1); in ocs_domain_dispatch_frame()
584 ocs_assert(seq->payload->dma.virt, -1); in ocs_domain_dispatch_frame()
586 hdr = seq->header->dma.virt; in ocs_domain_dispatch_frame()
589 s_id = fc_be24toh(hdr->s_id); in ocs_domain_dispatch_frame()
590 d_id = fc_be24toh(hdr->d_id); in ocs_domain_dispatch_frame()
592 sport = domain->sport; in ocs_domain_dispatch_frame()
594 frame_printf(ocs, hdr, "phy sport for FC ID 0x%06x is NULL, dropping frame\n", d_id); in ocs_domain_dispatch_frame()
595 return -1; in ocs_domain_dispatch_frame()
598 if (sport->fc_id != d_id) { in ocs_domain_dispatch_frame()
602 if (hdr->type == FC_TYPE_FCP) { in ocs_domain_dispatch_frame()
603 /* Drop frame */ in ocs_domain_dispatch_frame()
604 ocs_log_warn(ocs, "unsolicited FCP frame with invalid d_id x%x, dropping\n", in ocs_domain_dispatch_frame()
606 return -1; in ocs_domain_dispatch_frame()
609 sport = domain->sport; in ocs_domain_dispatch_frame()
620 * then we can drop the frame in ocs_domain_dispatch_frame()
622 if ((hdr->r_ctl == FC_RCTL_FC4_DATA) && ( in ocs_domain_dispatch_frame()
623 (hdr->info == FC_RCTL_INFO_SOL_DATA) || (hdr->info == FC_RCTL_INFO_SOL_CTRL))) { in ocs_domain_dispatch_frame()
624 ocs_log_debug(ocs, "solicited data/ctrl frame without node, dropping\n"); in ocs_domain_dispatch_frame()
625 return -1; in ocs_domain_dispatch_frame()
630 return -1; in ocs_domain_dispatch_frame()
636 if (node->hold_frames || !ocs_list_empty((&node->pend_frames))) { in ocs_domain_dispatch_frame()
638 frame_printf(ocs, hdr, "Holding frame\n"); in ocs_domain_dispatch_frame()
640 /* add frame to node's pending list */ in ocs_domain_dispatch_frame()
641 ocs_lock(&node->pend_frames_lock); in ocs_domain_dispatch_frame()
642 ocs_list_add_tail(&node->pend_frames, seq); in ocs_domain_dispatch_frame()
643 ocs_unlock(&node->pend_frames_lock); in ocs_domain_dispatch_frame()
648 /* now dispatch frame to the node frame handler */ in ocs_domain_dispatch_frame()
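When no node exists for the frame's S_ID, the dispatcher above only discards frames that can safely be considered stale: FC-4 device data whose information category marks it as solicited data or solicited control. Everything else is allowed to reach or create a node. A standalone sketch of that predicate; the values below are placeholders for the driver's FC_RCTL_*/FC_RCTL_INFO_* constants:

#include <stdbool.h>
#include <stdint.h>

#define EX_RCTL_FC4_DATA   0x00   /* placeholder for FC_RCTL_FC4_DATA        */
#define EX_INFO_SOL_DATA   0x01   /* placeholder for FC_RCTL_INFO_SOL_DATA   */
#define EX_INFO_SOL_CTRL   0x03   /* placeholder for FC_RCTL_INFO_SOL_CTRL   */

static bool drop_frame_without_node(uint8_t r_ctl, uint8_t info)
{
	/* Solicited data/ctrl for an unknown node belongs to a dead exchange: drop it. */
	return r_ctl == EX_RCTL_FC4_DATA &&
	       (info == EX_INFO_SOL_DATA || info == EX_INFO_SOL_CTRL);
}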
654 * @brief Dispatch a frame.
657 * A frame is dispatched from the \c node to the handler.
659 * @param arg Node that originated the frame.
662 * @return Returns 0 if frame processed and RX buffers cleaned
663 * up appropriately, -1 if frame not handled.
669 fc_header_t *hdr = seq->header->dma.virt; in ocs_node_dispatch_frame()
672 int32_t rc = -1; in ocs_node_dispatch_frame()
675 port_id = fc_be24toh(hdr->s_id); in ocs_node_dispatch_frame()
676 ocs_assert(port_id == node->rnode.fc_id, -1); in ocs_node_dispatch_frame()
678 if (fc_be24toh(hdr->f_ctl) & FC_FCTL_END_SEQUENCE) { in ocs_node_dispatch_frame()
680 if (fc_be24toh(hdr->f_ctl) & FC_FCTL_SEQUENCE_INITIATIVE) { in ocs_node_dispatch_frame()
683 switch (hdr->r_ctl) { in ocs_node_dispatch_frame()
691 if ((sit_set) && (hdr->info == FC_INFO_ABTS)) { in ocs_node_dispatch_frame()
699 switch(hdr->type) { in ocs_node_dispatch_frame()
701 if (hdr->info == FC_RCTL_INFO_UNSOL_CMD) { in ocs_node_dispatch_frame()
702 if (node->fcp_enabled) { in ocs_node_dispatch_frame()
712 } else if (hdr->info == FC_RCTL_INFO_SOL_DATA) { in ocs_node_dispatch_frame()
729 node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n", in ocs_node_dispatch_frame()
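Dispatch in ocs_node_dispatch_frame() is gated on two F_CTL bits: End_Sequence (only complete sequences are routed) and Sequence Initiative (SIT), which decides whether an FCP_CMND can be acted on or an ABTS is meaningful. A standalone sketch; the bit positions below follow the FC-FS F_CTL layout and are assumed for illustration, the driver uses its own FC_FCTL_* macros:

#include <stdbool.h>
#include <stdint.h>

#define EX_FCTL_END_SEQUENCE        (1u << 19)   /* assumed End_Sequence position        */
#define EX_FCTL_SEQUENCE_INITIATIVE (1u << 16)   /* assumed Sequence Initiative position */

static bool frame_ends_sequence(uint32_t f_ctl)
{
	return (f_ctl & EX_FCTL_END_SEQUENCE) != 0;
}

static bool frame_transfers_initiative(uint32_t f_ctl)
{
	return (f_ctl & EX_FCTL_SEQUENCE_INITIATIVE) != 0;
}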
748 * @param task_management_flags Task management flags from the FCP_CMND frame.
749 * @param node Node that originated the frame.
750 * @param lun 32-bit LUN from FCP_CMND frame.
771 io->exp_xfer_len = 0; /* BUG 32235 */ in ocs_dispatch_unsolicited_tmf()
775 io->tmf_cmd = tmflist[i].cmd; in ocs_dispatch_unsolicited_tmf()
791 fcp_cmnd_iu_t *cmnd = seq->payload->dma.virt; in ocs_validate_fcp_cmd()
792 exp_payload_len = sizeof(fcp_cmnd_iu_t) - 16 + cmnd->additional_fcp_cdb_length; in ocs_validate_fcp_cmd()
795 * If we received less than FCP_CMND_IU bytes, assume that the frame is in ocs_validate_fcp_cmd()
799 if (seq->payload->dma.len < exp_payload_len) { in ocs_validate_fcp_cmd()
800 fc_header_t *fchdr = seq->header->dma.virt; in ocs_validate_fcp_cmd()
802 ocs_be16toh(fchdr->ox_id), seq->payload->dma.len, in ocs_validate_fcp_cmd()
804 return -1; in ocs_validate_fcp_cmd()
814 io->init_task_tag = ocs_be16toh(fchdr->ox_id); in ocs_populate_io_fcp_cmd()
816 fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl)); in ocs_populate_io_fcp_cmd()
817 fcp_dl += cmnd->additional_fcp_cdb_length; in ocs_populate_io_fcp_cmd()
818 io->exp_xfer_len = ocs_be32toh(*fcp_dl); in ocs_populate_io_fcp_cmd()
819 io->transferred = 0; in ocs_populate_io_fcp_cmd()
821 /* The upper 7 bits of CS_CTL are the frame priority through the SAN. in ocs_populate_io_fcp_cmd()
822 * Our assertion here is that the priority given to a frame containing in ocs_populate_io_fcp_cmd()
826 if (fc_be24toh(fchdr->f_ctl) & FC_FCTL_PRIORITY_ENABLE) { in ocs_populate_io_fcp_cmd()
827 io->cs_ctl = fchdr->cs_ctl; in ocs_populate_io_fcp_cmd()
829 io->cs_ctl = 0; in ocs_populate_io_fcp_cmd()
831 io->seq_init = sit; in ocs_populate_io_fcp_cmd()
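ocs_populate_io_fcp_cmd() recovers FCP_DL by stepping a 32-bit pointer past the additional CDB words; the ADDITIONAL FCP_CDB LENGTH field counts 4-byte words, so the FCP_DL field sits immediately after them. A self-contained equivalent over a raw FCP_CMND payload (layout per FCP: 8-byte LUN, 4 control bytes, 16-byte CDB, additional CDB, then FCP_DL); the offsets are the standard ones, not taken from the driver's fcp_cmnd_iu_t:

#include <stdint.h>

static uint32_t fcp_cmnd_get_dl(const uint8_t *fcp_cmnd, uint8_t additional_cdb_words)
{
	/* 8 (FCP_LUN) + 4 (CRN, attr/priority, TMF, flags/additional-length) + 16 (CDB) = 28 */
	const uint8_t *dl = fcp_cmnd + 28 + 4u * additional_cdb_words;

	/* FCP_DL is big-endian on the wire; assemble it byte by byte (ocs_be32toh() equivalent). */
	return ((uint32_t)dl[0] << 24) | ((uint32_t)dl[1] << 16) |
	       ((uint32_t)dl[2] << 8)  |  (uint32_t)dl[3];
}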
838 switch (cmnd->task_attribute) { in ocs_get_flags_fcp_cmd()
855 flags |= (uint32_t)cmnd->command_priority << OCS_SCSI_PRIORITY_SHIFT; in ocs_get_flags_fcp_cmd()
856 if (cmnd->wrdata) in ocs_get_flags_fcp_cmd()
858 if (cmnd->rddata) in ocs_get_flags_fcp_cmd()
866 * @brief Dispatch unsolicited FCP_CMND frame.
869 * Dispatch unsolicited FCP_CMND frame. RQ Pair mode - always
872 * @param node Node that originated the frame.
875 * @return Returns 0 if frame processed and RX buffers cleaned
876 * up appropriately, -1 if frame not handled and RX buffers need
882 ocs_t *ocs = node->ocs; in ocs_dispatch_fcp_cmd()
883 fc_header_t *fchdr = seq->header->dma.virt; in ocs_dispatch_fcp_cmd()
891 ocs_assert(seq->payload, -1); in ocs_dispatch_fcp_cmd()
892 cmnd = seq->payload->dma.virt; in ocs_dispatch_fcp_cmd()
896 return -1; in ocs_dispatch_fcp_cmd()
899 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun)); in ocs_dispatch_fcp_cmd()
901 return -1; in ocs_dispatch_fcp_cmd()
908 /* If we have SEND_FRAME capability, then use it to send task set full or busy */ in ocs_dispatch_fcp_cmd()
909 rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable); in ocs_dispatch_fcp_cmd()
918 ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id)); in ocs_dispatch_fcp_cmd()
919 return -1; in ocs_dispatch_fcp_cmd()
921 io->hw_priv = seq->hw_priv; in ocs_dispatch_fcp_cmd()
924 io->app_id = 0; in ocs_dispatch_fcp_cmd()
925 df_ctl = fchdr->df_ctl; in ocs_dispatch_fcp_cmd()
938 io->app_id = ocs_be32toh(vhdr->src_vmid); in ocs_dispatch_fcp_cmd()
944 if (cmnd->task_management_flags) { in ocs_dispatch_fcp_cmd()
945 ocs_dispatch_unsolicited_tmf(io, cmnd->task_management_flags, node, lun); in ocs_dispatch_fcp_cmd()
949 /* can return failure for things like task set full and UAs, in ocs_dispatch_fcp_cmd()
950 * no need to treat as a dropped frame if rc != 0 in ocs_dispatch_fcp_cmd()
952 ocs_scsi_recv_cmd(io, lun, cmnd->fcp_cdb, in ocs_dispatch_fcp_cmd()
953 sizeof(cmnd->fcp_cdb) + in ocs_dispatch_fcp_cmd()
954 (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)), in ocs_dispatch_fcp_cmd()
959 ocs_hw_sequence_free(&ocs->hw, seq); in ocs_dispatch_fcp_cmd()
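The command path above reduces to a short decision tree: validate the FCP_CMND, try to allocate a target IO (falling back to a SEND_FRAME task-set-full/busy response when allocation fails and the hardware supports it), then fork on the task management flags. A hedged sketch of that flow with hypothetical helpers standing in for the driver calls:

#include <stddef.h>
#include <stdint.h>

struct tgt_io;                                       /* opaque target IO */
struct tgt_io *io_alloc(void);                       /* hypothetical allocator */
int  hw_send_frame_capable(void);                    /* hypothetical capability query */
int  send_task_set_full_or_busy(void);               /* hypothetical SEND_FRAME fallback */
void dispatch_tmf(struct tgt_io *io, uint8_t tmf);   /* hypothetical TMF path */
void dispatch_scsi_cmd(struct tgt_io *io);           /* hypothetical normal command path */

static int dispatch_fcp_cmnd(uint8_t tmf_flags)
{
	struct tgt_io *io = io_alloc();

	if (io == NULL) {
		/* Out of IO contexts: tell the initiator to back off if we can. */
		if (hw_send_frame_capable())
			return send_task_set_full_or_busy();
		return -1;                           /* caller must clean up the RX buffers */
	}
	if (tmf_flags)
		dispatch_tmf(io, tmf_flags);         /* task management request */
	else
		dispatch_scsi_cmd(io);               /* regular SCSI command */
	return 0;                                    /* sequence is freed after dispatch */
}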
965 * @brief Dispatch unsolicited FCP_CMND frame (auto xfer rdy).
968 * Dispatch unsolicited FCP_CMND frame that is assisted with auto xfer ready.
970 * @param node Node that originated the frame.
973 * @return Returns 0 if frame processed and RX buffers cleaned
974 * up appropriately, -1 if frame not handled and RX buffers need
980 ocs_t *ocs = node->ocs; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
981 fc_header_t *fchdr = seq->header->dma.virt; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
987 ocs_assert(seq->payload, -1); in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
988 cmnd = seq->payload->dma.virt; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
992 return -1; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
996 if (!seq->auto_xrdy) { in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
998 return -1; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1001 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun)); in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1009 /* If we have SEND_FRAME capability, then use it to send task set full or busy */ in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1010 rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable); in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1019 ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id)); in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1020 return -1; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1022 io->hw_priv = seq->hw_priv; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1027 if (cmnd->task_management_flags) { in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1029 ocs_log_err(ocs, "TMF flags set 0x%x\n", cmnd->task_management_flags); in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1031 return -1; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1036 ocs_hw_io_activate_port_owned(&ocs->hw, seq->hio); in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1037 io->hio = seq->hio; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1038 seq->hio->ul_io = io; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1039 io->tgt_task_tag = seq->hio->indicator; in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1042 ocs_scsi_recv_cmd_first_burst(io, lun, cmnd->fcp_cdb, in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1043 sizeof(cmnd->fcp_cdb) + in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1044 (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)), in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1049 ocs_hw_sequence_free(&ocs->hw, seq); in ocs_dispatch_fcp_cmd_auto_xfer_rdy()
1061 * @param node Node that originated the frame.
1064 * @return Returns 0 if frame processed and RX buffers cleaned
1065 * up appropriately, -1 if frame not handled.
1071 ocs_t *ocs = node->ocs; in ocs_dispatch_fcp_data()
1072 ocs_hw_t *hw = &ocs->hw; in ocs_dispatch_fcp_data()
1073 ocs_hw_io_t *hio = seq->hio; in ocs_dispatch_fcp_data()
1077 ocs_assert(seq->payload, -1); in ocs_dispatch_fcp_data()
1078 ocs_assert(hio, -1); in ocs_dispatch_fcp_data()
1080 io = hio->ul_io; in ocs_dispatch_fcp_data()
1083 hio->indicator); in ocs_dispatch_fcp_data()
1084 return -1; in ocs_dispatch_fcp_data()
1091 if (!ocs_hw_is_io_port_owned(hw, seq->hio)) { in ocs_dispatch_fcp_data()
1093 hio->indicator); in ocs_dispatch_fcp_data()
1094 return -1; in ocs_dispatch_fcp_data()
1098 if (seq->status != OCS_HW_UNSOL_SUCCESS) { in ocs_dispatch_fcp_data()
1100 seq->status, hio->indicator); in ocs_dispatch_fcp_data()
1103 * In this case, there is an existing, in-use HW IO that in ocs_dispatch_fcp_data()
1107 ocs_port_owned_abort(ocs, seq->hio); in ocs_dispatch_fcp_data()
1114 return -1; in ocs_dispatch_fcp_data()
1118 io->seq_init = 1; in ocs_dispatch_fcp_data()
1121 fburst[0] = seq->payload->dma; in ocs_dispatch_fcp_data()
1124 io->transferred = seq->payload->dma.len; in ocs_dispatch_fcp_data()
1127 fburst, io->transferred)) { in ocs_dispatch_fcp_data()
1129 hio->indicator, io->init_task_tag); in ocs_dispatch_fcp_data()
1133 ocs_hw_sequence_free(&ocs->hw, seq); in ocs_dispatch_fcp_data()
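For auto-xfer-ready IOs, the unsolicited data that arrives as a first burst is handed to the backend as a one-element buffer array, and the IO's transferred count is seeded with its length before dispatch. A small sketch with a hypothetical DMA-buffer descriptor in place of the driver's ocs_dma_t:

#include <stddef.h>
#include <stdint.h>

struct dma_buf { void *virt; uint64_t phys; size_t len; };   /* hypothetical descriptor */

static size_t seed_first_burst(struct dma_buf fburst_out[1], const struct dma_buf *payload)
{
	fburst_out[0] = *payload;       /* pass the received buffer through untouched */
	return payload->len;            /* becomes the IO's initial transferred count */
}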
1160 * @brief Return next FC frame on node->pend_frames list
1162 * The next FC frame on the node->pend_frames list is returned, or NULL
1168 * @return Returns pointer to the next FC frame, or NULL if the pending frame list
1174 ocs_hw_sequence_t *frame = NULL; in ocs_frame_next() local
1177 frame = ocs_list_remove_head(pend_list); in ocs_frame_next()
1179 return frame; in ocs_frame_next()
1183 * @brief Process send fcp response frame callback
1188 * @param arg Pointer to originator frame sequence.
1198 ocs_hw_t *hw = ctx->hw; in ocs_sframe_common_send_cb()
1201 ocs_hw_reqtag_free(hw, ctx->wqcb); in ocs_sframe_common_send_cb()
1204 ocs_hw_sequence_free(hw, ctx->seq); in ocs_sframe_common_send_cb()
1208 * @brief Send a frame, common code
1210 * A frame is sent using SEND_FRAME; the R_CTL/F_CTL/TYPE may be specified, and the payload is in ocs_sframe_common_send()
1211 * sent as a single frame.
1230 ocs_t *ocs = node->ocs; in ocs_sframe_common_send()
1231 ocs_hw_t *hw = &ocs->hw; in ocs_sframe_common_send()
1233 fc_header_t *behdr = seq->header->dma.virt; in ocs_sframe_common_send()
1235 uint32_t s_id = fc_be24toh(behdr->s_id); in ocs_sframe_common_send()
1236 uint32_t d_id = fc_be24toh(behdr->d_id); in ocs_sframe_common_send()
1237 uint16_t ox_id = ocs_be16toh(behdr->ox_id); in ocs_sframe_common_send()
1238 uint16_t rx_id = ocs_be16toh(behdr->rx_id); in ocs_sframe_common_send()
1241 uint32_t heap_size = seq->payload->dma.size; in ocs_sframe_common_send()
1242 uintptr_t heap_phys_base = seq->payload->dma.phys; in ocs_sframe_common_send()
1243 uint8_t *heap_virt_base = seq->payload->dma.virt; in ocs_sframe_common_send()
1260 * the low 8 bits to hdr->seq_id in ocs_sframe_common_send()
1262 hdr.seq_id = (uint8_t) ocs_atomic_add_return(&hw->send_frame_seq_id, 1); in ocs_sframe_common_send()
1268 /* Allocate and fill in the send frame request context */ in ocs_sframe_common_send()
1271 ocs_assert(heap_offset < heap_size, -1); in ocs_sframe_common_send()
1275 ctx->seq = seq; in ocs_sframe_common_send()
1278 ctx->payload.phys = heap_phys_base + heap_offset; in ocs_sframe_common_send()
1279 ctx->payload.virt = heap_virt_base + heap_offset; in ocs_sframe_common_send()
1280 ctx->payload.size = payload_len; in ocs_sframe_common_send()
1281 ctx->payload.len = payload_len; in ocs_sframe_common_send()
1283 ocs_assert(heap_offset <= heap_size, -1); in ocs_sframe_common_send()
1286 ocs_memcpy(ctx->payload.virt, payload, payload_len); in ocs_sframe_common_send()
1289 rc = ocs_hw_send_frame(&ocs->hw, (void*)&hdr, FC_SOFI3, FC_EOFT, &ctx->payload, ctx, in ocs_sframe_common_send()
1295 return rc ? -1 : 0; in ocs_sframe_common_send()
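The response header built by ocs_sframe_common_send() is derived from the received one: the source and destination IDs are swapped so the frame returns to the originator, the OX_ID/RX_ID pair keeps it within the same exchange, and the SEQ_ID comes from a rolling 8-bit counter. A self-contained sketch over plain host-order fields, not the driver's big-endian fc_header_t:

#include <stdint.h>

struct ex_fc_hdr {
	uint32_t s_id, d_id;            /* 24-bit addresses, host order for this sketch */
	uint16_t ox_id, rx_id;
	uint8_t  seq_id;
	uint8_t  r_ctl, type;
	uint32_t f_ctl;
};

static struct ex_fc_hdr build_reply_hdr(const struct ex_fc_hdr *rx, uint8_t r_ctl,
					uint8_t type, uint32_t f_ctl, uint8_t *seq_counter)
{
	struct ex_fc_hdr tx = {0};

	tx.s_id   = rx->d_id;           /* answer from the address the frame was sent to */
	tx.d_id   = rx->s_id;           /* ...back to the originator */
	tx.ox_id  = rx->ox_id;          /* stay inside the originator's exchange */
	tx.rx_id  = rx->rx_id;
	tx.seq_id = (*seq_counter)++;   /* low 8 bits of a monotonically increasing counter */
	tx.r_ctl  = r_ctl;
	tx.type   = type;
	tx.f_ctl  = f_ctl;
	return tx;
}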
1325 * @brief Send a task set full or busy response
1327 * Return a task set full or busy response using send frame.
1330 * @param seq Pointer to originator frame sequence.
1338 fcp_cmnd_iu_t *fcpcmd = seq->payload->dma.virt; in ocs_sframe_send_task_set_full_or_busy()
1344 fcp_dl_ptr = (uint32_t*)(&(fcpcmd->fcp_cdb_and_dl)); in ocs_sframe_send_task_set_full_or_busy()
1345 fcp_dl_ptr += fcpcmd->additional_fcp_cdb_length; in ocs_sframe_send_task_set_full_or_busy()
1348 /* construct task set full or busy response */ in ocs_sframe_send_task_set_full_or_busy()
1350 ocs_lock(&node->active_ios_lock); in ocs_sframe_send_task_set_full_or_busy()
1351 fcprsp.scsi_status = ocs_list_empty(&node->active_ios) ? SCSI_STATUS_BUSY : SCSI_STATUS_TASK_SET_FULL; in ocs_sframe_send_task_set_full_or_busy()
1352 ocs_unlock(&node->active_ios_lock); in ocs_sframe_send_task_set_full_or_busy()
1356 rc = ocs_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp) - sizeof(fcprsp.data)); in ocs_sframe_send_task_set_full_or_busy()
1358 ocs_log_test(node->ocs, "ocs_sframe_send_fcp_rsp failed: %d\n", rc); in ocs_sframe_send_task_set_full_or_busy()
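The status chosen above depends on whether the node already has active IOs: an empty active-IO list is reported as BUSY, anything else as TASK SET FULL, and the FCP_DL extracted above feeds the response (likely as the residual, since no data is transferred). A minimal sketch; the status values are the standard SCSI codes:

#include <stdint.h>

#define EX_SCSI_STATUS_BUSY           0x08   /* standard SCSI BUSY status        */
#define EX_SCSI_STATUS_TASK_SET_FULL  0x28   /* standard SCSI TASK SET FULL code */

static uint8_t busy_or_task_set_full(unsigned active_io_count)
{
	return active_io_count == 0 ? EX_SCSI_STATUS_BUSY : EX_SCSI_STATUS_TASK_SET_FULL;
}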
1364 * @brief Send BA_ACC using send frame
1369 * @param seq Pointer to originator frame sequence.
1376 fc_header_t *behdr = seq->header->dma.virt; in ocs_sframe_send_bls_acc()
1377 uint16_t ox_id = ocs_be16toh(behdr->ox_id); in ocs_sframe_send_bls_acc()
1378 uint16_t rx_id = ocs_be16toh(behdr->rx_id); in ocs_sframe_send_bls_acc()