Lines Matching +full:rpm +full:- +full:proc
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
6 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
66 #include <sys/proc.h>
102 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
116 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
130 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
139 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
148 /*page_length*/sizeof(struct scsi_caching_page) - 2,
164 /*page_length*/sizeof(struct scsi_caching_page) - 2,
180 /*page_length*/sizeof(struct scsi_control_page) - 2,
192 /*page_length*/sizeof(struct scsi_control_page) - 2,
202 #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4)
224 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
233 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
241 #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4)
297 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
320 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
343 static int worker_threads = -1;
351 &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
384 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
537 switch (io->io_hdr.io_type) { in ctl_be_move_done()
539 io->scsiio.be_move_done(io, samethr); in ctl_be_move_done()
543 io->nvmeio.be_move_done(io, samethr); in ctl_be_move_done()
553 switch (io->io_hdr.io_type) { in ctl_continue_io()
555 io->scsiio.io_cont(io); in ctl_continue_io()
559 io->nvmeio.io_cont(io); in ctl_continue_io()
579 &softc->othersc_pool) != 0) in ctl_ha_init()
582 ctl_pool_free(softc->othersc_pool); in ctl_ha_init()
588 ctl_pool_free(softc->othersc_pool); in ctl_ha_init()
605 ctl_pool_free(softc->othersc_pool); in ctl_ha_shutdown()
608 free(port->port_name, M_CTL); in ctl_ha_shutdown()
627 msg.hdr.original_sc = io->io_hdr.remote_io; in ctl_ha_datamove()
629 msg.hdr.nexus = io->io_hdr.nexus; in ctl_ha_datamove()
630 msg.hdr.status = io->io_hdr.status; in ctl_ha_datamove()
631 msg.dt.flags = io->io_hdr.flags; in ctl_ha_datamove()
644 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { in ctl_ha_datamove()
652 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, in ctl_ha_datamove()
678 msg.dt.kern_sg_entries - sg_entries_sent); in ctl_ha_datamove()
684 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { in ctl_ha_datamove()
692 KASSERT((io->io_hdr.flags & in ctl_ha_datamove()
704 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + in ctl_ha_datamove()
707 io->io_hdr.port_status = 31341; in ctl_ha_datamove()
720 mtx_lock(&lun->lun_lock); in ctl_ha_datamove()
721 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { in ctl_ha_datamove()
723 mtx_unlock(&lun->lun_lock); in ctl_ha_datamove()
724 io->io_hdr.port_status = 31342; in ctl_ha_datamove()
728 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; in ctl_ha_datamove()
729 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; in ctl_ha_datamove()
731 mtx_unlock(&lun->lun_lock); in ctl_ha_datamove()
739 if (io->io_hdr.io_type == CTL_IO_SCSI) { in ctl_ha_done()
742 msg.hdr.original_sc = io->io_hdr.remote_io; in ctl_ha_done()
743 msg.hdr.nexus = io->io_hdr.nexus; in ctl_ha_done()
744 msg.hdr.status = io->io_hdr.status; in ctl_ha_done()
745 msg.scsi.scsi_status = io->scsiio.scsi_status; in ctl_ha_done()
746 msg.scsi.tag_num = io->scsiio.tag_num; in ctl_ha_done()
747 msg.scsi.tag_type = io->scsiio.tag_type; in ctl_ha_done()
748 msg.scsi.sense_len = io->scsiio.sense_len; in ctl_ha_done()
749 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, in ctl_ha_done()
750 io->scsiio.sense_len); in ctl_ha_done()
752 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + in ctl_ha_done()
764 if (msg_info->hdr.original_sc == NULL) { in ctl_isc_handler_finish_xfer()
770 ctsio = &msg_info->hdr.original_sc->scsiio; in ctl_isc_handler_finish_xfer()
771 ctsio->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; in ctl_isc_handler_finish_xfer()
772 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; in ctl_isc_handler_finish_xfer()
773 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; in ctl_isc_handler_finish_xfer()
774 ctsio->io_hdr.status = msg_info->hdr.status; in ctl_isc_handler_finish_xfer()
775 ctsio->scsi_status = msg_info->scsi.scsi_status; in ctl_isc_handler_finish_xfer()
776 ctsio->sense_len = msg_info->scsi.sense_len; in ctl_isc_handler_finish_xfer()
777 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, in ctl_isc_handler_finish_xfer()
778 msg_info->scsi.sense_len); in ctl_isc_handler_finish_xfer()
788 if (msg_info->hdr.serializing_sc == NULL) { in ctl_isc_handler_finish_ser_only()
794 ctsio = &msg_info->hdr.serializing_sc->scsiio; in ctl_isc_handler_finish_ser_only()
795 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; in ctl_isc_handler_finish_ser_only()
802 struct ctl_softc *softc = lun->ctl_softc; in ctl_isc_announce_lun()
807 if (softc->ha_link != CTL_HA_LINK_ONLINE) in ctl_isc_announce_lun()
809 mtx_lock(&lun->lun_lock); in ctl_isc_announce_lun()
810 i = sizeof(msg->lun); in ctl_isc_announce_lun()
811 if (lun->lun_devid) in ctl_isc_announce_lun()
812 i += lun->lun_devid->len; in ctl_isc_announce_lun()
813 i += sizeof(pr_key) * lun->pr_key_count; in ctl_isc_announce_lun()
815 mtx_unlock(&lun->lun_lock); in ctl_isc_announce_lun()
817 mtx_lock(&lun->lun_lock); in ctl_isc_announce_lun()
818 k = sizeof(msg->lun); in ctl_isc_announce_lun()
819 if (lun->lun_devid) in ctl_isc_announce_lun()
820 k += lun->lun_devid->len; in ctl_isc_announce_lun()
821 k += sizeof(pr_key) * lun->pr_key_count; in ctl_isc_announce_lun()
827 bzero(&msg->lun, sizeof(msg->lun)); in ctl_isc_announce_lun()
828 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; in ctl_isc_announce_lun()
829 msg->hdr.nexus.targ_lun = lun->lun; in ctl_isc_announce_lun()
830 msg->hdr.nexus.targ_mapped_lun = lun->lun; in ctl_isc_announce_lun()
831 msg->lun.flags = lun->flags; in ctl_isc_announce_lun()
832 msg->lun.pr_generation = lun->pr_generation; in ctl_isc_announce_lun()
833 msg->lun.pr_res_idx = lun->pr_res_idx; in ctl_isc_announce_lun()
834 msg->lun.pr_res_type = lun->pr_res_type; in ctl_isc_announce_lun()
835 msg->lun.pr_key_count = lun->pr_key_count; in ctl_isc_announce_lun()
837 if (lun->lun_devid) { in ctl_isc_announce_lun()
838 msg->lun.lun_devid_len = lun->lun_devid->len; in ctl_isc_announce_lun()
839 memcpy(&msg->lun.data[i], lun->lun_devid->data, in ctl_isc_announce_lun()
840 msg->lun.lun_devid_len); in ctl_isc_announce_lun()
841 i += msg->lun.lun_devid_len; in ctl_isc_announce_lun()
847 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); in ctl_isc_announce_lun()
850 mtx_unlock(&lun->lun_lock); in ctl_isc_announce_lun()
851 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i, in ctl_isc_announce_lun()
855 if (lun->flags & CTL_LUN_PRIMARY_SC) { in ctl_isc_announce_lun()
857 ctl_isc_announce_mode(lun, -1, in ctl_isc_announce_lun()
858 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, in ctl_isc_announce_lun()
859 lun->mode_pages.index[i].subpage); in ctl_isc_announce_lun()
867 struct ctl_softc *softc = port->ctl_softc; in ctl_isc_announce_port()
871 if (port->targ_port < softc->port_min || in ctl_isc_announce_port()
872 port->targ_port >= softc->port_max || in ctl_isc_announce_port()
873 softc->ha_link != CTL_HA_LINK_ONLINE) in ctl_isc_announce_port()
875 i = sizeof(msg->port) + strlen(port->port_name) + 1; in ctl_isc_announce_port()
876 if (port->lun_map) in ctl_isc_announce_port()
877 i += port->lun_map_size * sizeof(uint32_t); in ctl_isc_announce_port()
878 if (port->port_devid) in ctl_isc_announce_port()
879 i += port->port_devid->len; in ctl_isc_announce_port()
880 if (port->target_devid) in ctl_isc_announce_port()
881 i += port->target_devid->len; in ctl_isc_announce_port()
882 if (port->init_devid) in ctl_isc_announce_port()
883 i += port->init_devid->len; in ctl_isc_announce_port()
885 bzero(&msg->port, sizeof(msg->port)); in ctl_isc_announce_port()
886 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; in ctl_isc_announce_port()
887 msg->hdr.nexus.targ_port = port->targ_port; in ctl_isc_announce_port()
888 msg->port.port_type = port->port_type; in ctl_isc_announce_port()
889 msg->port.physical_port = port->physical_port; in ctl_isc_announce_port()
890 msg->port.virtual_port = port->virtual_port; in ctl_isc_announce_port()
891 msg->port.status = port->status; in ctl_isc_announce_port()
893 msg->port.name_len = sprintf(&msg->port.data[i], in ctl_isc_announce_port()
894 "%d:%s", softc->ha_id, port->port_name) + 1; in ctl_isc_announce_port()
895 i += msg->port.name_len; in ctl_isc_announce_port()
896 if (port->lun_map) { in ctl_isc_announce_port()
897 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); in ctl_isc_announce_port()
898 memcpy(&msg->port.data[i], port->lun_map, in ctl_isc_announce_port()
899 msg->port.lun_map_len); in ctl_isc_announce_port()
900 i += msg->port.lun_map_len; in ctl_isc_announce_port()
902 if (port->port_devid) { in ctl_isc_announce_port()
903 msg->port.port_devid_len = port->port_devid->len; in ctl_isc_announce_port()
904 memcpy(&msg->port.data[i], port->port_devid->data, in ctl_isc_announce_port()
905 msg->port.port_devid_len); in ctl_isc_announce_port()
906 i += msg->port.port_devid_len; in ctl_isc_announce_port()
908 if (port->target_devid) { in ctl_isc_announce_port()
909 msg->port.target_devid_len = port->target_devid->len; in ctl_isc_announce_port()
910 memcpy(&msg->port.data[i], port->target_devid->data, in ctl_isc_announce_port()
911 msg->port.target_devid_len); in ctl_isc_announce_port()
912 i += msg->port.target_devid_len; in ctl_isc_announce_port()
914 if (port->init_devid) { in ctl_isc_announce_port()
915 msg->port.init_devid_len = port->init_devid->len; in ctl_isc_announce_port()
916 memcpy(&msg->port.data[i], port->init_devid->data, in ctl_isc_announce_port()
917 msg->port.init_devid_len); in ctl_isc_announce_port()
918 i += msg->port.init_devid_len; in ctl_isc_announce_port()
920 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, in ctl_isc_announce_port()
928 struct ctl_softc *softc = port->ctl_softc; in ctl_isc_announce_iid()
932 if (port->targ_port < softc->port_min || in ctl_isc_announce_iid()
933 port->targ_port >= softc->port_max || in ctl_isc_announce_iid()
934 softc->ha_link != CTL_HA_LINK_ONLINE) in ctl_isc_announce_iid()
936 mtx_lock(&softc->ctl_lock); in ctl_isc_announce_iid()
937 i = sizeof(msg->iid); in ctl_isc_announce_iid()
939 if (port->wwpn_iid[iid].name) in ctl_isc_announce_iid()
940 l = strlen(port->wwpn_iid[iid].name) + 1; in ctl_isc_announce_iid()
944 mtx_unlock(&softc->ctl_lock); in ctl_isc_announce_iid()
947 bzero(&msg->iid, sizeof(msg->iid)); in ctl_isc_announce_iid()
948 msg->hdr.msg_type = CTL_MSG_IID_SYNC; in ctl_isc_announce_iid()
949 msg->hdr.nexus.targ_port = port->targ_port; in ctl_isc_announce_iid()
950 msg->hdr.nexus.initid = iid; in ctl_isc_announce_iid()
951 msg->iid.in_use = port->wwpn_iid[iid].in_use; in ctl_isc_announce_iid()
952 msg->iid.name_len = l; in ctl_isc_announce_iid()
953 msg->iid.wwpn = port->wwpn_iid[iid].wwpn; in ctl_isc_announce_iid()
954 if (port->wwpn_iid[iid].name) in ctl_isc_announce_iid()
955 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); in ctl_isc_announce_iid()
956 mtx_unlock(&softc->ctl_lock); in ctl_isc_announce_iid()
957 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); in ctl_isc_announce_iid()
965 struct ctl_softc *softc = lun->ctl_softc; in ctl_isc_announce_mode()
969 if (softc->ha_link != CTL_HA_LINK_ONLINE) in ctl_isc_announce_mode()
972 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == in ctl_isc_announce_mode()
973 page && lun->mode_pages.index[i].subpage == subpage) in ctl_isc_announce_mode()
980 if (lun->mode_pages.index[i].page_data == NULL) in ctl_isc_announce_mode()
983 l = sizeof(msg->mode) + lun->mode_pages.index[i].page_len; in ctl_isc_announce_mode()
985 msg->hdr.msg_type = CTL_MSG_MODE_SYNC; in ctl_isc_announce_mode()
986 msg->hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; in ctl_isc_announce_mode()
987 msg->hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; in ctl_isc_announce_mode()
988 msg->hdr.nexus.targ_lun = lun->lun; in ctl_isc_announce_mode()
989 msg->hdr.nexus.targ_mapped_lun = lun->lun; in ctl_isc_announce_mode()
990 msg->mode.page_code = page; in ctl_isc_announce_mode()
991 msg->mode.subpage = subpage; in ctl_isc_announce_mode()
992 msg->mode.page_len = lun->mode_pages.index[i].page_len; in ctl_isc_announce_mode()
993 memcpy(msg->mode.data, lun->mode_pages.index[i].page_data, in ctl_isc_announce_mode()
994 msg->mode.page_len); in ctl_isc_announce_mode()
995 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->mode, l, M_WAITOK); in ctl_isc_announce_mode()
1010 msg.login.ha_mode = softc->ha_mode; in ctl_isc_ha_link_up()
1011 msg.login.ha_id = softc->ha_id; in ctl_isc_ha_link_up()
1018 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_isc_ha_link_up()
1021 if (port->wwpn_iid[i].in_use) in ctl_isc_ha_link_up()
1025 STAILQ_FOREACH(lun, &softc->lun_list, links) in ctl_isc_ha_link_up()
1037 mtx_lock(&softc->ctl_lock); in ctl_isc_ha_link_down()
1038 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_isc_ha_link_down()
1039 mtx_lock(&lun->lun_lock); in ctl_isc_ha_link_down()
1040 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { in ctl_isc_ha_link_down()
1041 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; in ctl_isc_ha_link_down()
1042 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); in ctl_isc_ha_link_down()
1044 mtx_unlock(&lun->lun_lock); in ctl_isc_ha_link_down()
1046 mtx_unlock(&softc->ctl_lock); in ctl_isc_ha_link_down()
1047 io = ctl_alloc_io(softc->othersc_pool); in ctl_isc_ha_link_down()
1048 mtx_lock(&softc->ctl_lock); in ctl_isc_ha_link_down()
1050 io->io_hdr.msg_type = CTL_MSG_FAILOVER; in ctl_isc_ha_link_down()
1051 io->io_hdr.nexus.targ_mapped_lun = lun->lun; in ctl_isc_ha_link_down()
1055 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_isc_ha_link_down()
1056 if (port->targ_port >= softc->port_min && in ctl_isc_ha_link_down()
1057 port->targ_port < softc->port_max) in ctl_isc_ha_link_down()
1059 port->status &= ~CTL_PORT_STATUS_ONLINE; in ctl_isc_ha_link_down()
1061 port->wwpn_iid[i].in_use = 0; in ctl_isc_ha_link_down()
1062 free(port->wwpn_iid[i].name, M_CTL); in ctl_isc_ha_link_down()
1063 port->wwpn_iid[i].name = NULL; in ctl_isc_ha_link_down()
1066 mtx_unlock(&softc->ctl_lock); in ctl_isc_ha_link_down()
1075 if (len < sizeof(msg->ua)) { in ctl_isc_ua()
1077 __func__, len, sizeof(msg->ua)); in ctl_isc_ua()
1082 mtx_lock(&softc->ctl_lock); in ctl_isc_ua()
1083 if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns || in ctl_isc_ua()
1084 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { in ctl_isc_ua()
1085 mtx_unlock(&softc->ctl_lock); in ctl_isc_ua()
1088 mtx_lock(&lun->lun_lock); in ctl_isc_ua()
1089 mtx_unlock(&softc->ctl_lock); in ctl_isc_ua()
1090 if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) in ctl_isc_ua()
1091 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); in ctl_isc_ua()
1092 iid = ctl_get_initindex(&msg->hdr.nexus); in ctl_isc_ua()
1093 if (msg->ua.ua_all) { in ctl_isc_ua()
1094 if (msg->ua.ua_set) in ctl_isc_ua()
1095 ctl_est_ua_all(lun, iid, msg->ua.ua_type); in ctl_isc_ua()
1097 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); in ctl_isc_ua()
1099 if (msg->ua.ua_set) in ctl_isc_ua()
1100 ctl_est_ua(lun, iid, msg->ua.ua_type); in ctl_isc_ua()
1102 ctl_clr_ua(lun, iid, msg->ua.ua_type); in ctl_isc_ua()
1104 mtx_unlock(&lun->lun_lock); in ctl_isc_ua()
1122 i = msg->lun.lun_devid_len + msg->lun.pr_key_count * sizeof(pr_key); in ctl_isc_lun_sync()
1130 targ_lun = msg->hdr.nexus.targ_mapped_lun; in ctl_isc_lun_sync()
1131 mtx_lock(&softc->ctl_lock); in ctl_isc_lun_sync()
1133 (lun = softc->ctl_luns[targ_lun]) == NULL) { in ctl_isc_lun_sync()
1134 mtx_unlock(&softc->ctl_lock); in ctl_isc_lun_sync()
1137 mtx_lock(&lun->lun_lock); in ctl_isc_lun_sync()
1138 mtx_unlock(&softc->ctl_lock); in ctl_isc_lun_sync()
1139 if (lun->flags & CTL_LUN_DISABLED) { in ctl_isc_lun_sync()
1140 mtx_unlock(&lun->lun_lock); in ctl_isc_lun_sync()
1143 i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; in ctl_isc_lun_sync()
1144 if (msg->lun.lun_devid_len != i || (i > 0 && in ctl_isc_lun_sync()
1145 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { in ctl_isc_lun_sync()
1146 mtx_unlock(&lun->lun_lock); in ctl_isc_lun_sync()
1152 oflags = lun->flags; in ctl_isc_lun_sync()
1153 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && in ctl_isc_lun_sync()
1154 (msg->lun.flags & CTL_LUN_DISABLED) == 0) in ctl_isc_lun_sync()
1155 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; in ctl_isc_lun_sync()
1157 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; in ctl_isc_lun_sync()
1158 if (oflags != lun->flags) in ctl_isc_lun_sync()
1159 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); in ctl_isc_lun_sync()
1161 /* If peer is primary and we are not -- use data */ in ctl_isc_lun_sync()
1162 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && in ctl_isc_lun_sync()
1163 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { in ctl_isc_lun_sync()
1164 lun->pr_generation = msg->lun.pr_generation; in ctl_isc_lun_sync()
1165 lun->pr_res_idx = msg->lun.pr_res_idx; in ctl_isc_lun_sync()
1166 lun->pr_res_type = msg->lun.pr_res_type; in ctl_isc_lun_sync()
1167 lun->pr_key_count = msg->lun.pr_key_count; in ctl_isc_lun_sync()
1170 for (k = 0; k < msg->lun.pr_key_count; k++) { in ctl_isc_lun_sync()
1171 memcpy(&pr_key, &msg->lun.data[i], in ctl_isc_lun_sync()
1180 mtx_unlock(&lun->lun_lock); in ctl_isc_lun_sync()
1183 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? in ctl_isc_lun_sync()
1186 /* If we are primary but peer doesn't know -- notify */ in ctl_isc_lun_sync()
1187 if ((lun->flags & CTL_LUN_PRIMARY_SC) && in ctl_isc_lun_sync()
1188 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) in ctl_isc_lun_sync()
1206 i = msg->port.name_len + msg->port.lun_map_len + in ctl_isc_port_sync()
1207 msg->port.port_devid_len + msg->port.target_devid_len + in ctl_isc_port_sync()
1208 msg->port.init_devid_len; in ctl_isc_port_sync()
1216 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; in ctl_isc_port_sync()
1219 msg->hdr.nexus.targ_port)); in ctl_isc_port_sync()
1222 port->frontend = &ha_frontend; in ctl_isc_port_sync()
1223 port->targ_port = msg->hdr.nexus.targ_port; in ctl_isc_port_sync()
1224 port->fe_datamove = ctl_ha_datamove; in ctl_isc_port_sync()
1225 port->fe_done = ctl_ha_done; in ctl_isc_port_sync()
1226 } else if (port->frontend == &ha_frontend) { in ctl_isc_port_sync()
1228 msg->hdr.nexus.targ_port)); in ctl_isc_port_sync()
1232 __func__, msg->hdr.nexus.targ_port); in ctl_isc_port_sync()
1235 port->port_type = msg->port.port_type; in ctl_isc_port_sync()
1236 port->physical_port = msg->port.physical_port; in ctl_isc_port_sync()
1237 port->virtual_port = msg->port.virtual_port; in ctl_isc_port_sync()
1238 port->status = msg->port.status; in ctl_isc_port_sync()
1240 free(port->port_name, M_CTL); in ctl_isc_port_sync()
1241 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, in ctl_isc_port_sync()
1243 i += msg->port.name_len; in ctl_isc_port_sync()
1244 if (msg->port.lun_map_len != 0) { in ctl_isc_port_sync()
1245 if (port->lun_map == NULL || in ctl_isc_port_sync()
1246 port->lun_map_size * sizeof(uint32_t) < in ctl_isc_port_sync()
1247 msg->port.lun_map_len) { in ctl_isc_port_sync()
1248 port->lun_map_size = 0; in ctl_isc_port_sync()
1249 free(port->lun_map, M_CTL); in ctl_isc_port_sync()
1250 port->lun_map = malloc(msg->port.lun_map_len, in ctl_isc_port_sync()
1253 memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); in ctl_isc_port_sync()
1254 port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); in ctl_isc_port_sync()
1255 i += msg->port.lun_map_len; in ctl_isc_port_sync()
1257 port->lun_map_size = 0; in ctl_isc_port_sync()
1258 free(port->lun_map, M_CTL); in ctl_isc_port_sync()
1259 port->lun_map = NULL; in ctl_isc_port_sync()
1261 if (msg->port.port_devid_len != 0) { in ctl_isc_port_sync()
1262 if (port->port_devid == NULL || in ctl_isc_port_sync()
1263 port->port_devid->len < msg->port.port_devid_len) { in ctl_isc_port_sync()
1264 free(port->port_devid, M_CTL); in ctl_isc_port_sync()
1265 port->port_devid = malloc(sizeof(struct ctl_devid) + in ctl_isc_port_sync()
1266 msg->port.port_devid_len, M_CTL, M_WAITOK); in ctl_isc_port_sync()
1268 memcpy(port->port_devid->data, &msg->port.data[i], in ctl_isc_port_sync()
1269 msg->port.port_devid_len); in ctl_isc_port_sync()
1270 port->port_devid->len = msg->port.port_devid_len; in ctl_isc_port_sync()
1271 i += msg->port.port_devid_len; in ctl_isc_port_sync()
1273 free(port->port_devid, M_CTL); in ctl_isc_port_sync()
1274 port->port_devid = NULL; in ctl_isc_port_sync()
1276 if (msg->port.target_devid_len != 0) { in ctl_isc_port_sync()
1277 if (port->target_devid == NULL || in ctl_isc_port_sync()
1278 port->target_devid->len < msg->port.target_devid_len) { in ctl_isc_port_sync()
1279 free(port->target_devid, M_CTL); in ctl_isc_port_sync()
1280 port->target_devid = malloc(sizeof(struct ctl_devid) + in ctl_isc_port_sync()
1281 msg->port.target_devid_len, M_CTL, M_WAITOK); in ctl_isc_port_sync()
1283 memcpy(port->target_devid->data, &msg->port.data[i], in ctl_isc_port_sync()
1284 msg->port.target_devid_len); in ctl_isc_port_sync()
1285 port->target_devid->len = msg->port.target_devid_len; in ctl_isc_port_sync()
1286 i += msg->port.target_devid_len; in ctl_isc_port_sync()
1288 free(port->target_devid, M_CTL); in ctl_isc_port_sync()
1289 port->target_devid = NULL; in ctl_isc_port_sync()
1291 if (msg->port.init_devid_len != 0) { in ctl_isc_port_sync()
1292 if (port->init_devid == NULL || in ctl_isc_port_sync()
1293 port->init_devid->len < msg->port.init_devid_len) { in ctl_isc_port_sync()
1294 free(port->init_devid, M_CTL); in ctl_isc_port_sync()
1295 port->init_devid = malloc(sizeof(struct ctl_devid) + in ctl_isc_port_sync()
1296 msg->port.init_devid_len, M_CTL, M_WAITOK); in ctl_isc_port_sync()
1298 memcpy(port->init_devid->data, &msg->port.data[i], in ctl_isc_port_sync()
1299 msg->port.init_devid_len); in ctl_isc_port_sync()
1300 port->init_devid->len = msg->port.init_devid_len; in ctl_isc_port_sync()
1301 i += msg->port.init_devid_len; in ctl_isc_port_sync()
1303 free(port->init_devid, M_CTL); in ctl_isc_port_sync()
1304 port->init_devid = NULL; in ctl_isc_port_sync()
1312 mtx_lock(&softc->ctl_lock); in ctl_isc_port_sync()
1313 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_isc_port_sync()
1314 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) in ctl_isc_port_sync()
1316 mtx_lock(&lun->lun_lock); in ctl_isc_port_sync()
1317 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); in ctl_isc_port_sync()
1318 mtx_unlock(&lun->lun_lock); in ctl_isc_port_sync()
1320 mtx_unlock(&softc->ctl_lock); in ctl_isc_port_sync()
1335 i = msg->iid.name_len; in ctl_isc_iid_sync()
1343 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; in ctl_isc_iid_sync()
1346 __func__, msg->hdr.nexus.targ_port); in ctl_isc_iid_sync()
1349 iid = msg->hdr.nexus.initid; in ctl_isc_iid_sync()
1350 if (port->wwpn_iid[iid].in_use != 0 && in ctl_isc_iid_sync()
1351 msg->iid.in_use == 0) in ctl_isc_iid_sync()
1353 port->wwpn_iid[iid].in_use = msg->iid.in_use; in ctl_isc_iid_sync()
1354 port->wwpn_iid[iid].wwpn = msg->iid.wwpn; in ctl_isc_iid_sync()
1355 free(port->wwpn_iid[iid].name, M_CTL); in ctl_isc_iid_sync()
1356 if (msg->iid.name_len) { in ctl_isc_iid_sync()
1357 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], in ctl_isc_iid_sync()
1358 msg->iid.name_len, M_CTL); in ctl_isc_iid_sync()
1360 port->wwpn_iid[iid].name = NULL; in ctl_isc_iid_sync()
1367 if (len < sizeof(msg->login)) { in ctl_isc_login()
1369 __func__, len, sizeof(msg->login)); in ctl_isc_login()
1374 if (msg->login.version != CTL_HA_VERSION) { in ctl_isc_login()
1376 msg->login.version, CTL_HA_VERSION); in ctl_isc_login()
1380 if (msg->login.ha_mode != softc->ha_mode) { in ctl_isc_login()
1382 msg->login.ha_mode, softc->ha_mode); in ctl_isc_login()
1386 if (msg->login.ha_id == softc->ha_id) { in ctl_isc_login()
1387 printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id); in ctl_isc_login()
1391 if (msg->login.max_luns != ctl_max_luns || in ctl_isc_login()
1392 msg->login.max_ports != ctl_max_ports || in ctl_isc_login()
1393 msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { in ctl_isc_login()
1413 i = msg->mode.page_len; in ctl_isc_mode_sync()
1421 targ_lun = msg->hdr.nexus.targ_mapped_lun; in ctl_isc_mode_sync()
1422 mtx_lock(&softc->ctl_lock); in ctl_isc_mode_sync()
1424 (lun = softc->ctl_luns[targ_lun]) == NULL) { in ctl_isc_mode_sync()
1425 mtx_unlock(&softc->ctl_lock); in ctl_isc_mode_sync()
1428 mtx_lock(&lun->lun_lock); in ctl_isc_mode_sync()
1429 mtx_unlock(&softc->ctl_lock); in ctl_isc_mode_sync()
1430 if (lun->flags & CTL_LUN_DISABLED) { in ctl_isc_mode_sync()
1431 mtx_unlock(&lun->lun_lock); in ctl_isc_mode_sync()
1435 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == in ctl_isc_mode_sync()
1436 msg->mode.page_code && in ctl_isc_mode_sync()
1437 lun->mode_pages.index[i].subpage == msg->mode.subpage) in ctl_isc_mode_sync()
1441 mtx_unlock(&lun->lun_lock); in ctl_isc_mode_sync()
1444 memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, in ctl_isc_mode_sync()
1445 min(lun->mode_pages.index[i].page_len, msg->mode.page_len)); in ctl_isc_mode_sync()
1446 initidx = ctl_get_initindex(&msg->hdr.nexus); in ctl_isc_mode_sync()
1447 if (initidx != -1) in ctl_isc_mode_sync()
1449 mtx_unlock(&lun->lun_lock); in ctl_isc_mode_sync()
1483 msg->hdr.msg_type, param)); in ctl_isc_event_handler()
1484 switch (msg->hdr.msg_type) { in ctl_isc_event_handler()
1486 io = ctl_alloc_io(softc->othersc_pool); in ctl_isc_event_handler()
1489 io->io_hdr.io_type = CTL_IO_SCSI; in ctl_isc_event_handler()
1490 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; in ctl_isc_event_handler()
1491 io->io_hdr.remote_io = msg->hdr.original_sc; in ctl_isc_event_handler()
1492 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | in ctl_isc_event_handler()
1495 * If we're in serialization-only mode, we don't in ctl_isc_event_handler()
1501 if (softc->ha_mode != CTL_HA_MODE_XFER) in ctl_isc_event_handler()
1502 io->io_hdr.flags |= CTL_FLAG_INT_COPY; in ctl_isc_event_handler()
1503 io->io_hdr.nexus = msg->hdr.nexus; in ctl_isc_event_handler()
1504 io->scsiio.priority = msg->scsi.priority; in ctl_isc_event_handler()
1505 io->scsiio.tag_num = msg->scsi.tag_num; in ctl_isc_event_handler()
1506 io->scsiio.tag_type = msg->scsi.tag_type; in ctl_isc_event_handler()
1508 io->io_hdr.start_time = time_uptime; in ctl_isc_event_handler()
1509 getbinuptime(&io->io_hdr.start_bt); in ctl_isc_event_handler()
1511 io->scsiio.cdb_len = msg->scsi.cdb_len; in ctl_isc_event_handler()
1512 memcpy(io->scsiio.cdb, msg->scsi.cdb, in ctl_isc_event_handler()
1514 if (softc->ha_mode == CTL_HA_MODE_XFER) { in ctl_isc_event_handler()
1517 entry = ctl_get_cmd_entry(&io->scsiio, NULL); in ctl_isc_event_handler()
1518 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; in ctl_isc_event_handler()
1519 io->io_hdr.flags |= in ctl_isc_event_handler()
1520 entry->flags & CTL_FLAG_DATA_MASK; in ctl_isc_event_handler()
1530 io = msg->hdr.original_sc; in ctl_isc_event_handler()
1538 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; in ctl_isc_event_handler()
1539 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; in ctl_isc_event_handler()
1544 io->io_hdr.remote_io = msg->hdr.serializing_sc; in ctl_isc_event_handler()
1545 if (msg->hdr.status == CTL_SUCCESS) in ctl_isc_event_handler()
1546 io->io_hdr.status = msg->hdr.status; in ctl_isc_event_handler()
1548 if (msg->dt.sg_sequence == 0) { in ctl_isc_event_handler()
1550 getbinuptime(&io->io_hdr.dma_start_bt); in ctl_isc_event_handler()
1552 i = msg->dt.kern_sg_entries + in ctl_isc_event_handler()
1553 msg->dt.kern_data_len / in ctl_isc_event_handler()
1558 CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries]; in ctl_isc_event_handler()
1560 io->scsiio.kern_data_ptr = (uint8_t *)sgl; in ctl_isc_event_handler()
1562 io->scsiio.kern_sg_entries = in ctl_isc_event_handler()
1563 msg->dt.kern_sg_entries; in ctl_isc_event_handler()
1564 io->scsiio.rem_sg_entries = in ctl_isc_event_handler()
1565 msg->dt.kern_sg_entries; in ctl_isc_event_handler()
1566 io->scsiio.kern_data_len = in ctl_isc_event_handler()
1567 msg->dt.kern_data_len; in ctl_isc_event_handler()
1568 io->scsiio.kern_total_len = in ctl_isc_event_handler()
1569 msg->dt.kern_total_len; in ctl_isc_event_handler()
1570 io->scsiio.kern_data_resid = in ctl_isc_event_handler()
1571 msg->dt.kern_data_resid; in ctl_isc_event_handler()
1572 io->scsiio.kern_rel_offset = in ctl_isc_event_handler()
1573 msg->dt.kern_rel_offset; in ctl_isc_event_handler()
1574 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; in ctl_isc_event_handler()
1575 io->io_hdr.flags |= msg->dt.flags & in ctl_isc_event_handler()
1579 io->scsiio.kern_data_ptr; in ctl_isc_event_handler()
1581 for (i = msg->dt.sent_sg_entries, j = 0; in ctl_isc_event_handler()
1582 i < (msg->dt.sent_sg_entries + in ctl_isc_event_handler()
1583 msg->dt.cur_sg_entries); i++, j++) { in ctl_isc_event_handler()
1584 sgl[i].addr = msg->dt.sg_list[j].addr; in ctl_isc_event_handler()
1585 sgl[i].len = msg->dt.sg_list[j].len; in ctl_isc_event_handler()
1593 if (msg->dt.sg_last != 0) in ctl_isc_event_handler()
1599 if (msg->hdr.serializing_sc == NULL) { in ctl_isc_event_handler()
1610 io = msg->hdr.serializing_sc; in ctl_isc_event_handler()
1613 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; in ctl_isc_event_handler()
1614 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; in ctl_isc_event_handler()
1615 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; in ctl_isc_event_handler()
1616 io->io_hdr.port_status = msg->scsi.port_status; in ctl_isc_event_handler()
1617 io->scsiio.kern_data_resid = msg->scsi.kern_data_resid; in ctl_isc_event_handler()
1618 if (msg->hdr.status != CTL_STATUS_NONE) { in ctl_isc_event_handler()
1619 io->io_hdr.status = msg->hdr.status; in ctl_isc_event_handler()
1620 io->scsiio.scsi_status = msg->scsi.scsi_status; in ctl_isc_event_handler()
1621 io->scsiio.sense_len = msg->scsi.sense_len; in ctl_isc_event_handler()
1622 memcpy(&io->scsiio.sense_data, in ctl_isc_event_handler()
1623 &msg->scsi.sense_data, in ctl_isc_event_handler()
1624 msg->scsi.sense_len); in ctl_isc_event_handler()
1625 if (msg->hdr.status == CTL_SUCCESS) in ctl_isc_event_handler()
1626 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; in ctl_isc_event_handler()
1634 io = msg->hdr.original_sc; in ctl_isc_event_handler()
1640 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; in ctl_isc_event_handler()
1641 io->io_hdr.msg_type = CTL_MSG_R2R; in ctl_isc_event_handler()
1642 io->io_hdr.remote_io = msg->hdr.serializing_sc; in ctl_isc_event_handler()
1653 if (softc->ha_mode == CTL_HA_MODE_XFER) in ctl_isc_event_handler()
1661 io = msg->hdr.original_sc; in ctl_isc_event_handler()
1673 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; in ctl_isc_event_handler()
1674 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; in ctl_isc_event_handler()
1676 /* io = msg->hdr.serializing_sc; */ in ctl_isc_event_handler()
1677 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; in ctl_isc_event_handler()
1685 softc->othersc_pool); in ctl_isc_event_handler()
1687 taskio->io_hdr.io_type = CTL_IO_TASK; in ctl_isc_event_handler()
1688 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; in ctl_isc_event_handler()
1689 taskio->io_hdr.nexus = msg->hdr.nexus; in ctl_isc_event_handler()
1690 taskio->task_action = msg->task.task_action; in ctl_isc_event_handler()
1691 taskio->tag_num = msg->task.tag_num; in ctl_isc_event_handler()
1692 taskio->tag_type = msg->task.tag_type; in ctl_isc_event_handler()
1694 taskio->io_hdr.start_time = time_uptime; in ctl_isc_event_handler()
1695 getbinuptime(&taskio->io_hdr.start_bt); in ctl_isc_event_handler()
1703 softc->othersc_pool); in ctl_isc_event_handler()
1705 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; in ctl_isc_event_handler()
1706 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; in ctl_isc_event_handler()
1707 presio->io_hdr.nexus = msg->hdr.nexus; in ctl_isc_event_handler()
1708 presio->pr_msg = msg->pr; in ctl_isc_event_handler()
1731 msg->hdr.msg_type); in ctl_isc_event_handler()
1739 softc->ha_link, param); in ctl_isc_event_handler()
1740 if (param == softc->ha_link) in ctl_isc_event_handler()
1742 if (softc->ha_link == CTL_HA_LINK_ONLINE) { in ctl_isc_event_handler()
1743 softc->ha_link = param; in ctl_isc_event_handler()
1746 softc->ha_link = param; in ctl_isc_event_handler()
1747 if (softc->ha_link == CTL_HA_LINK_ONLINE) in ctl_isc_event_handler()
1761 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, in ctl_copy_sense_data()
1762 src->scsi.sense_len); in ctl_copy_sense_data()
1763 dest->scsiio.scsi_status = src->scsi.scsi_status; in ctl_copy_sense_data()
1764 dest->scsiio.sense_len = src->scsi.sense_len; in ctl_copy_sense_data()
1765 dest->io_hdr.status = src->hdr.status; in ctl_copy_sense_data()
1772 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, in ctl_copy_sense_data_back()
1773 src->scsiio.sense_len); in ctl_copy_sense_data_back()
1774 dest->scsi.scsi_status = src->scsiio.scsi_status; in ctl_copy_sense_data_back()
1775 dest->scsi.sense_len = src->scsiio.sense_len; in ctl_copy_sense_data_back()
1776 dest->hdr.status = src->io_hdr.status; in ctl_copy_sense_data_back()
1782 struct ctl_softc *softc = lun->ctl_softc; in ctl_est_ua()
1785 if (initidx < softc->init_min || initidx >= softc->init_max) in ctl_est_ua()
1787 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_est_ua()
1788 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; in ctl_est_ua()
1799 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_est_ua_port()
1800 if (lun->pending_ua[port] == NULL) in ctl_est_ua_port()
1805 lun->pending_ua[port][i] |= ua; in ctl_est_ua_port()
1812 struct ctl_softc *softc = lun->ctl_softc; in ctl_est_ua_all()
1815 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_est_ua_all()
1816 for (i = softc->port_min; i < softc->port_max; i++) in ctl_est_ua_all()
1823 struct ctl_softc *softc = lun->ctl_softc; in ctl_clr_ua()
1826 if (initidx < softc->init_min || initidx >= softc->init_max) in ctl_clr_ua()
1828 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_clr_ua()
1829 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; in ctl_clr_ua()
1838 struct ctl_softc *softc = lun->ctl_softc; in ctl_clr_ua_all()
1841 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_clr_ua_all()
1842 for (i = softc->port_min; i < softc->port_max; i++) { in ctl_clr_ua_all()
1843 if (lun->pending_ua[i] == NULL) in ctl_clr_ua_all()
1848 lun->pending_ua[i][j] &= ~ua; in ctl_clr_ua_all()
1859 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); in ctl_clr_ua_allluns()
1860 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { in ctl_clr_ua_allluns()
1861 mtx_lock(&lun->lun_lock); in ctl_clr_ua_allluns()
1863 mtx_unlock(&lun->lun_lock); in ctl_clr_ua_allluns()
1875 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1; in ctl_ha_role_sysctl()
1877 if ((error != 0) || (req->newptr == NULL)) in ctl_ha_role_sysctl()
1880 mtx_lock(&softc->ctl_lock); in ctl_ha_role_sysctl()
1882 softc->flags |= CTL_FLAG_ACTIVE_SHELF; in ctl_ha_role_sysctl()
1884 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; in ctl_ha_role_sysctl()
1885 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_ha_role_sysctl()
1886 mtx_unlock(&softc->ctl_lock); in ctl_ha_role_sysctl()
1889 ireq.reqdata.modify.lun_id = lun->lun; in ctl_ha_role_sysctl()
1890 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, in ctl_ha_role_sysctl()
1896 mtx_lock(&softc->ctl_lock); in ctl_ha_role_sysctl()
1898 mtx_unlock(&softc->ctl_lock); in ctl_ha_role_sysctl()
1919 error = make_dev_s(&args, &softc->dev, "cam/ctl"); in ctl_init()
1926 sysctl_ctx_init(&softc->sysctl_ctx); in ctl_init()
1927 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, in ctl_init()
1931 if (softc->sysctl_tree == NULL) { in ctl_init()
1933 destroy_dev(softc->dev); in ctl_init()
1939 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); in ctl_init()
1940 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), in ctl_init()
1942 softc->flags = 0; in ctl_init()
1944 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), in ctl_init()
1945 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, in ctl_init()
1946 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); in ctl_init()
1953 softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, in ctl_init()
1955 softc->ctl_lun_mask = malloc(sizeof(uint32_t) * in ctl_init()
1962 softc->ctl_port_mask = malloc(sizeof(uint32_t) * in ctl_init()
1964 softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, in ctl_init()
1972 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), in ctl_init()
1973 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, in ctl_init()
1974 "HA head ID (0 - no HA)"); in ctl_init()
1975 if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { in ctl_init()
1976 softc->flags |= CTL_FLAG_ACTIVE_SHELF; in ctl_init()
1977 softc->is_single = 1; in ctl_init()
1978 softc->port_cnt = ctl_max_ports; in ctl_init()
1979 softc->port_min = 0; in ctl_init()
1981 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; in ctl_init()
1982 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; in ctl_init()
1984 softc->port_max = softc->port_min + softc->port_cnt; in ctl_init()
1985 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; in ctl_init()
1986 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; in ctl_init()
1988 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), in ctl_init()
1989 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, in ctl_init()
1990 "HA link state (0 - offline, 1 - unknown, 2 - online)"); in ctl_init()
1992 STAILQ_INIT(&softc->lun_list); in ctl_init()
1993 STAILQ_INIT(&softc->fe_list); in ctl_init()
1994 STAILQ_INIT(&softc->port_list); in ctl_init()
1995 STAILQ_INIT(&softc->be_list); in ctl_init()
2004 struct ctl_thread *thr = &softc->threads[i]; in ctl_init()
2006 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); in ctl_init()
2007 thr->ctl_softc = softc; in ctl_init()
2008 STAILQ_INIT(&thr->incoming_queue); in ctl_init()
2009 STAILQ_INIT(&thr->rtr_queue); in ctl_init()
2010 STAILQ_INIT(&thr->done_queue); in ctl_init()
2011 STAILQ_INIT(&thr->isc_queue); in ctl_init()
2014 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); in ctl_init()
2021 &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); in ctl_init()
2027 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), in ctl_init()
2032 if (softc->is_single == 0) { in ctl_init()
2034 softc->is_single = 1; in ctl_init()
2045 if (softc->is_single == 0) in ctl_shutdown()
2048 destroy_dev(softc->dev); in ctl_shutdown()
2051 softc->shutdown = 1; in ctl_shutdown()
2053 struct ctl_thread *thr = &softc->threads[i]; in ctl_shutdown()
2054 while (thr->thread != NULL) { in ctl_shutdown()
2056 if (thr->thread != NULL) in ctl_shutdown()
2059 mtx_destroy(&thr->queue_lock); in ctl_shutdown()
2061 while (softc->thresh_thread != NULL) { in ctl_shutdown()
2062 wakeup(softc->thresh_thread); in ctl_shutdown()
2063 if (softc->thresh_thread != NULL) in ctl_shutdown()
2068 uma_zdestroy(softc->io_zone); in ctl_shutdown()
2069 mtx_destroy(&softc->ctl_lock); in ctl_shutdown()
2071 free(softc->ctl_luns, M_DEVBUF); in ctl_shutdown()
2072 free(softc->ctl_lun_mask, M_DEVBUF); in ctl_shutdown()
2073 free(softc->ctl_port_mask, M_DEVBUF); in ctl_shutdown()
2074 free(softc->ctl_ports, M_DEVBUF); in ctl_shutdown()
2076 sysctl_ctx_free(&softc->sysctl_ctx); in ctl_shutdown()
2115 * Returns 0 for success, -1 for failure.
2120 struct ctl_softc *softc = port->ctl_softc; in ctl_remove_initiator()
2123 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); in ctl_remove_initiator()
2128 return (-1); in ctl_remove_initiator()
2131 mtx_lock(&softc->ctl_lock); in ctl_remove_initiator()
2132 last = (--port->wwpn_iid[iid].in_use == 0); in ctl_remove_initiator()
2133 port->wwpn_iid[iid].last_use = time_uptime; in ctl_remove_initiator()
2134 mtx_unlock(&softc->ctl_lock); in ctl_remove_initiator()
2149 struct ctl_softc *softc = port->ctl_softc; in ctl_add_initiator()
2153 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); in ctl_add_initiator()
2159 return (-1); in ctl_add_initiator()
2162 mtx_lock(&softc->ctl_lock); in ctl_add_initiator()
2166 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { in ctl_add_initiator()
2170 if (name != NULL && port->wwpn_iid[i].name != NULL && in ctl_add_initiator()
2171 strcmp(name, port->wwpn_iid[i].name) == 0) { in ctl_add_initiator()
2180 if (port->wwpn_iid[i].in_use == 0 && in ctl_add_initiator()
2181 port->wwpn_iid[i].wwpn == 0 && in ctl_add_initiator()
2182 port->wwpn_iid[i].name == NULL) { in ctl_add_initiator()
2190 best = -1; in ctl_add_initiator()
2193 if (port->wwpn_iid[i].in_use == 0) { in ctl_add_initiator()
2194 if (port->wwpn_iid[i].last_use < best_time) { in ctl_add_initiator()
2196 best_time = port->wwpn_iid[i].last_use; in ctl_add_initiator()
2204 mtx_unlock(&softc->ctl_lock); in ctl_add_initiator()
2206 return (-2); in ctl_add_initiator()
2209 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { in ctl_add_initiator()
2213 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { in ctl_add_initiator()
2216 " again\n", __func__, port->targ_port, in ctl_add_initiator()
2221 if (name != NULL && port->wwpn_iid[iid].name != NULL && in ctl_add_initiator()
2222 strcmp(name, port->wwpn_iid[iid].name) == 0) { in ctl_add_initiator()
2225 " again\n", __func__, port->targ_port, in ctl_add_initiator()
2238 __func__, port->targ_port, iid, wwpn, name, in ctl_add_initiator()
2239 (uintmax_t)port->wwpn_iid[iid].wwpn, in ctl_add_initiator()
2240 port->wwpn_iid[iid].name); in ctl_add_initiator()
2243 free(port->wwpn_iid[iid].name, M_CTL); in ctl_add_initiator()
2244 port->wwpn_iid[iid].name = name; in ctl_add_initiator()
2245 port->wwpn_iid[iid].wwpn = wwpn; in ctl_add_initiator()
2246 port->wwpn_iid[iid].in_use++; in ctl_add_initiator()
2247 mtx_unlock(&softc->ctl_lock); in ctl_add_initiator()
2258 switch (port->port_type) { in ctl_create_iid()
2263 if (port->wwpn_iid[iid].wwpn == 0) in ctl_create_iid()
2266 id->format_protocol = SCSI_PROTO_FC; in ctl_create_iid()
2267 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); in ctl_create_iid()
2274 if (port->wwpn_iid[iid].name == NULL) in ctl_create_iid()
2277 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | in ctl_create_iid()
2279 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; in ctl_create_iid()
2281 scsi_ulto2b(len, id->additional_length); in ctl_create_iid()
2288 if (port->wwpn_iid[iid].wwpn == 0) in ctl_create_iid()
2291 id->format_protocol = SCSI_PROTO_SAS; in ctl_create_iid()
2292 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); in ctl_create_iid()
2300 id->format_protocol = SCSI_PROTO_SPI; in ctl_create_iid()
2301 scsi_ulto2b(iid, id->scsi_addr); in ctl_create_iid()
2302 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); in ctl_create_iid()
2327 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; in ctl_serialize_other_sc_cmd()
2330 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { in ctl_serialize_other_sc_cmd()
2337 mtx_lock(&softc->ctl_lock); in ctl_serialize_other_sc_cmd()
2339 (lun = softc->ctl_luns[targ_lun]) == NULL) { in ctl_serialize_other_sc_cmd()
2340 mtx_unlock(&softc->ctl_lock); in ctl_serialize_other_sc_cmd()
2351 mtx_lock(&lun->lun_lock); in ctl_serialize_other_sc_cmd()
2352 mtx_unlock(&softc->ctl_lock); in ctl_serialize_other_sc_cmd()
2358 if (lun->flags & CTL_LUN_DISABLED) { in ctl_serialize_other_sc_cmd()
2359 mtx_unlock(&lun->lun_lock); in ctl_serialize_other_sc_cmd()
2365 ctsio->seridx = entry->seridx; in ctl_serialize_other_sc_cmd()
2367 mtx_unlock(&lun->lun_lock); in ctl_serialize_other_sc_cmd()
2372 CTL_BACKEND_LUN(ctsio) = lun->be_lun; in ctl_serialize_other_sc_cmd()
2379 if (LIST_EMPTY(&lun->ooa_queue)) in ctl_serialize_other_sc_cmd()
2380 lun->idle_time += getsbinuptime() - lun->last_busy; in ctl_serialize_other_sc_cmd()
2382 LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); in ctl_serialize_other_sc_cmd()
2384 bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); in ctl_serialize_other_sc_cmd()
2388 if (softc->ha_mode == CTL_HA_MODE_XFER) { in ctl_serialize_other_sc_cmd()
2389 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; in ctl_serialize_other_sc_cmd()
2391 mtx_unlock(&lun->lun_lock); in ctl_serialize_other_sc_cmd()
2393 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; in ctl_serialize_other_sc_cmd()
2394 mtx_unlock(&lun->lun_lock); in ctl_serialize_other_sc_cmd()
2397 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; in ctl_serialize_other_sc_cmd()
2405 ctsio->io_hdr.blocker = bio; in ctl_serialize_other_sc_cmd()
2406 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, in ctl_serialize_other_sc_cmd()
2408 mtx_unlock(&lun->lun_lock); in ctl_serialize_other_sc_cmd()
2411 LIST_REMOVE(&ctsio->io_hdr, ooa_links); in ctl_serialize_other_sc_cmd()
2412 mtx_unlock(&lun->lun_lock); in ctl_serialize_other_sc_cmd()
2416 LIST_REMOVE(&ctsio->io_hdr, ooa_links); in ctl_serialize_other_sc_cmd()
2417 mtx_unlock(&lun->lun_lock); in ctl_serialize_other_sc_cmd()
2418 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); in ctl_serialize_other_sc_cmd()
2421 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; in ctl_serialize_other_sc_cmd()
2442 mtx_lock(&lun->lun_lock); in ctl_ioctl_fill_ooa()
2443 ioh = LIST_FIRST(&lun->ooa_queue); in ctl_ioctl_fill_ooa()
2445 mtx_unlock(&lun->lun_lock); in ctl_ioctl_fill_ooa()
2450 for ( ; ioh; ioh = LIST_PREV(ioh, &lun->ooa_queue, ctl_io_hdr, ooa_links)) { in ctl_ioctl_fill_ooa()
2460 if (*cur_fill_num >= ooa_hdr->alloc_num) { in ctl_ioctl_fill_ooa()
2467 entry->tag_num = io->scsiio.tag_num; in ctl_ioctl_fill_ooa()
2468 entry->tag_type = io->scsiio.tag_type; in ctl_ioctl_fill_ooa()
2469 entry->lun_num = lun->lun; in ctl_ioctl_fill_ooa()
2471 entry->start_bt = io->io_hdr.start_bt; in ctl_ioctl_fill_ooa()
2473 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); in ctl_ioctl_fill_ooa()
2474 entry->cdb_len = io->scsiio.cdb_len; in ctl_ioctl_fill_ooa()
2475 if (io->io_hdr.blocker != NULL) in ctl_ioctl_fill_ooa()
2476 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; in ctl_ioctl_fill_ooa()
2478 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) in ctl_ioctl_fill_ooa()
2479 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; in ctl_ioctl_fill_ooa()
2481 if (io->io_hdr.flags & CTL_FLAG_ABORT) in ctl_ioctl_fill_ooa()
2482 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; in ctl_ioctl_fill_ooa()
2484 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) in ctl_ioctl_fill_ooa()
2485 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; in ctl_ioctl_fill_ooa()
2487 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) in ctl_ioctl_fill_ooa()
2488 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; in ctl_ioctl_fill_ooa()
2490 if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) in ctl_ioctl_fill_ooa()
2491 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED; in ctl_ioctl_fill_ooa()
2493 if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) in ctl_ioctl_fill_ooa()
2494 entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT; in ctl_ioctl_fill_ooa()
2497 mtx_unlock(&lun->lun_lock); in ctl_ioctl_fill_ooa()
2540 if (id == NULL || id->len < 4) in ctl_id_sbuf()
2542 desc = (struct scsi_vpd_id_descriptor *)id->data; in ctl_id_sbuf()
2543 switch (desc->id_type & SVPD_ID_TYPE_MASK) { in ctl_id_sbuf()
2556 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { in ctl_id_sbuf()
2558 for (i = 0; i < desc->length; i++) in ctl_id_sbuf()
2559 sbuf_printf(sb, "%02x", desc->identifier[i]); in ctl_id_sbuf()
2562 sbuf_printf(sb, "%.*s", (int)desc->length, in ctl_id_sbuf()
2563 (char *)desc->identifier); in ctl_id_sbuf()
2566 sbuf_cat(sb, (char *)desc->identifier); in ctl_id_sbuf()
2575 struct ctl_softc *softc = dev->si_drv1; in ctl_ioctl()
2594 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
2595 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_ioctl()
2598 if (port->targ_port < softc->port_min || in ctl_ioctl()
2599 port->targ_port >= softc->port_max) in ctl_ioctl()
2604 if ((entry->port_type == CTL_PORT_NONE) in ctl_ioctl()
2605 && (entry->targ_port == port->targ_port)) { in ctl_ioctl()
2613 } else if (entry->port_type & port->port_type) { in ctl_ioctl()
2643 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2645 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
2647 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2649 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
2652 (entry->flags & CTL_PORT_WWNN_VALID) ? in ctl_ioctl()
2653 1 : 0, entry->wwnn, in ctl_ioctl()
2654 (entry->flags & CTL_PORT_WWPN_VALID) ? in ctl_ioctl()
2655 1 : 0, entry->wwpn); in ctl_ioctl()
2660 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2670 if ((ooa_hdr->alloc_len == 0) in ctl_ioctl()
2671 || (ooa_hdr->alloc_num == 0)) { in ctl_ioctl()
2673 "must be non-zero\n", __func__, in ctl_ioctl()
2674 ooa_hdr->alloc_len, ooa_hdr->alloc_num); in ctl_ioctl()
2679 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * in ctl_ioctl()
2683 __func__, ooa_hdr->alloc_len, in ctl_ioctl()
2684 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); in ctl_ioctl()
2689 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); in ctl_ioctl()
2691 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
2692 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && in ctl_ioctl()
2693 (ooa_hdr->lun_num >= ctl_max_luns || in ctl_ioctl()
2694 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { in ctl_ioctl()
2695 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2698 __func__, (uintmax_t)ooa_hdr->lun_num); in ctl_ioctl()
2705 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { in ctl_ioctl()
2706 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_ioctl()
2711 lun = softc->ctl_luns[ooa_hdr->lun_num]; in ctl_ioctl()
2715 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2717 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); in ctl_ioctl()
2718 ooa_hdr->fill_len = ooa_hdr->fill_num * in ctl_ioctl()
2720 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); in ctl_ioctl()
2723 __func__, ooa_hdr->fill_len); in ctl_ioctl()
2726 getbinuptime(&ooa_hdr->cur_bt); in ctl_ioctl()
2728 if (cur_fill_num > ooa_hdr->alloc_num) { in ctl_ioctl()
2729 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; in ctl_ioctl()
2730 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; in ctl_ioctl()
2732 ooa_hdr->dropped_num = 0; in ctl_ioctl()
2733 ooa_hdr->status = CTL_OOA_OK; in ctl_ioctl()
2745 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
2746 if (delay_info->lun_id >= ctl_max_luns || in ctl_ioctl()
2747 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { in ctl_ioctl()
2748 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2749 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; in ctl_ioctl()
2752 mtx_lock(&lun->lun_lock); in ctl_ioctl()
2753 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2754 delay_info->status = CTL_DELAY_STATUS_OK; in ctl_ioctl()
2755 switch (delay_info->delay_type) { in ctl_ioctl()
2760 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; in ctl_ioctl()
2763 switch (delay_info->delay_loc) { in ctl_ioctl()
2765 lun->delay_info.datamove_type = delay_info->delay_type; in ctl_ioctl()
2766 lun->delay_info.datamove_delay = delay_info->delay_secs; in ctl_ioctl()
2769 lun->delay_info.done_type = delay_info->delay_type; in ctl_ioctl()
2770 lun->delay_info.done_delay = delay_info->delay_secs; in ctl_ioctl()
2773 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; in ctl_ioctl()
2776 mtx_unlock(&lun->lun_lock); in ctl_ioctl()
2778 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; in ctl_ioctl()
2791 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
2792 if (err_desc->lun_id >= ctl_max_luns || in ctl_ioctl()
2793 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { in ctl_ioctl()
2794 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2797 __func__, (uintmax_t)err_desc->lun_id); in ctl_ioctl()
2801 mtx_lock(&lun->lun_lock); in ctl_ioctl()
2802 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2813 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); in ctl_ioctl()
2821 new_err_desc->serial = lun->error_serial; in ctl_ioctl()
2822 err_desc->serial = lun->error_serial; in ctl_ioctl()
2823 lun->error_serial++; in ctl_ioctl()
2825 mtx_unlock(&lun->lun_lock); in ctl_ioctl()
2835 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
2836 if (delete_desc->lun_id >= ctl_max_luns || in ctl_ioctl()
2837 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { in ctl_ioctl()
2838 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2840 __func__, (uintmax_t)delete_desc->lun_id); in ctl_ioctl()
2844 mtx_lock(&lun->lun_lock); in ctl_ioctl()
2845 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2846 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { in ctl_ioctl()
2847 if (desc->serial != delete_desc->serial) in ctl_ioctl()
2850 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, in ctl_ioctl()
2855 mtx_unlock(&lun->lun_lock); in ctl_ioctl()
2859 delete_desc->serial, delete_desc->lun_id); in ctl_ioctl()
2870 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
2872 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_ioctl()
2873 mtx_lock(&lun->lun_lock); in ctl_ioctl()
2874 if ((lun->flags & CTL_LUN_DISABLED) != 0) { in ctl_ioctl()
2875 mtx_unlock(&lun->lun_lock); in ctl_ioctl()
2880 if (lun->pr_keys[j] == NULL) in ctl_ioctl()
2883 if (lun->pr_keys[j][k] == 0) in ctl_ioctl()
2886 "%#jx\n", lun->lun, j, k, in ctl_ioctl()
2887 (uintmax_t)lun->pr_keys[j][k]); in ctl_ioctl()
2890 mtx_unlock(&lun->lun_lock); in ctl_ioctl()
2894 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_ioctl()
2896 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, in ctl_ioctl()
2897 port->frontend->name, port->port_type, in ctl_ioctl()
2898 port->physical_port, port->virtual_port, in ctl_ioctl()
2899 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); in ctl_ioctl()
2901 if (port->wwpn_iid[j].in_use == 0 && in ctl_ioctl()
2902 port->wwpn_iid[j].wwpn == 0 && in ctl_ioctl()
2903 port->wwpn_iid[j].name == NULL) in ctl_ioctl()
2907 j, port->wwpn_iid[j].in_use, in ctl_ioctl()
2908 (uintmax_t)port->wwpn_iid[j].wwpn, in ctl_ioctl()
2909 port->wwpn_iid[j].name); in ctl_ioctl()
2913 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
2920 STAILQ_FOREACH(fe, &softc->fe_list, links) { in ctl_ioctl()
2921 printf(" Frontend '%s'\n", fe->name); in ctl_ioctl()
2922 if (fe->fe_dump != NULL) in ctl_ioctl()
2923 fe->fe_dump(); in ctl_ioctl()
2936 tmp_args_nvl = lun_req->args_nvl; in ctl_ioctl()
2938 backend = ctl_backend_find(lun_req->backend); in ctl_ioctl()
2940 lun_req->status = CTL_LUN_ERROR; in ctl_ioctl()
2941 snprintf(lun_req->error_str, in ctl_ioctl()
2942 sizeof(lun_req->error_str), in ctl_ioctl()
2944 lun_req->backend); in ctl_ioctl()
2948 if (lun_req->args != NULL) { in ctl_ioctl()
2949 if (lun_req->args_len > CTL_MAX_ARGS_LEN) { in ctl_ioctl()
2950 lun_req->status = CTL_LUN_ERROR; in ctl_ioctl()
2951 snprintf(lun_req->error_str, sizeof(lun_req->error_str), in ctl_ioctl()
2955 packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); in ctl_ioctl()
2956 if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { in ctl_ioctl()
2958 lun_req->status = CTL_LUN_ERROR; in ctl_ioctl()
2959 snprintf(lun_req->error_str, sizeof(lun_req->error_str), in ctl_ioctl()
2963 lun_req->args_nvl = nvlist_unpack(packed, in ctl_ioctl()
2964 lun_req->args_len, 0); in ctl_ioctl()
2967 if (lun_req->args_nvl == NULL) { in ctl_ioctl()
2968 lun_req->status = CTL_LUN_ERROR; in ctl_ioctl()
2969 snprintf(lun_req->error_str, sizeof(lun_req->error_str), in ctl_ioctl()
2974 lun_req->args_nvl = nvlist_create(0); in ctl_ioctl()
2976 lun_req->result_nvl = NULL; in ctl_ioctl()
2977 retval = backend->ioctl(dev, cmd, addr, flag, td); in ctl_ioctl()
2978 nvlist_destroy(lun_req->args_nvl); in ctl_ioctl()
2979 lun_req->args_nvl = tmp_args_nvl; in ctl_ioctl()
2981 if (lun_req->result_nvl != NULL) { in ctl_ioctl()
2982 if (lun_req->result != NULL) { in ctl_ioctl()
2983 packed = nvlist_pack(lun_req->result_nvl, in ctl_ioctl()
2986 lun_req->status = CTL_LUN_ERROR; in ctl_ioctl()
2987 snprintf(lun_req->error_str, in ctl_ioctl()
2988 sizeof(lun_req->error_str), in ctl_ioctl()
2993 if (packed_len > lun_req->result_len) { in ctl_ioctl()
2994 lun_req->status = CTL_LUN_ERROR; in ctl_ioctl()
2995 snprintf(lun_req->error_str, in ctl_ioctl()
2996 sizeof(lun_req->error_str), in ctl_ioctl()
3002 if (copyout(packed, lun_req->result, packed_len)) { in ctl_ioctl()
3003 lun_req->status = CTL_LUN_ERROR; in ctl_ioctl()
3004 snprintf(lun_req->error_str, in ctl_ioctl()
3005 sizeof(lun_req->error_str), in ctl_ioctl()
3011 lun_req->result_len = packed_len; in ctl_ioctl()
3015 nvlist_destroy(lun_req->result_nvl); in ctl_ioctl()
3030 * of the user's buffer. We could allocate an auto-extending in ctl_ioctl()
3037 * auto-extending buffer. in ctl_ioctl()
3042 * while we're getting the LUN number, backend-specific in ctl_ioctl()
3053 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); in ctl_ioctl()
3055 list->status = CTL_LUN_LIST_ERROR; in ctl_ioctl()
3056 snprintf(list->error_str, sizeof(list->error_str), in ctl_ioctl()
3058 list->alloc_len); in ctl_ioctl()
3064 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
3065 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_ioctl()
3066 mtx_lock(&lun->lun_lock); in ctl_ioctl()
3068 (uintmax_t)lun->lun); in ctl_ioctl()
3079 (lun->backend == NULL) ? "none" : in ctl_ioctl()
3080 lun->backend->name); in ctl_ioctl()
3086 lun->be_lun->lun_type); in ctl_ioctl()
3091 if (lun->backend == NULL) { in ctl_ioctl()
3099 (lun->be_lun->maxlba > 0) ? in ctl_ioctl()
3100 lun->be_lun->maxlba + 1 : 0); in ctl_ioctl()
3106 lun->be_lun->blocksize); in ctl_ioctl()
3117 lun->be_lun->serial_num, in ctl_ioctl()
3118 sizeof(lun->be_lun->serial_num)); in ctl_ioctl()
3134 lun->be_lun->device_id, in ctl_ioctl()
3135 sizeof(lun->be_lun->device_id)); in ctl_ioctl()
3145 if (lun->backend->lun_info != NULL) { in ctl_ioctl()
3146 retval = lun->backend->lun_info(lun->be_lun, sb); in ctl_ioctl()
3152 while ((name = nvlist_next(lun->be_lun->options, &type, in ctl_ioctl()
3158 lun->be_lun->options, name, NULL); in ctl_ioctl()
3170 mtx_unlock(&lun->lun_lock); in ctl_ioctl()
3173 mtx_unlock(&lun->lun_lock); in ctl_ioctl()
3174 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
3180 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; in ctl_ioctl()
3181 snprintf(list->error_str, sizeof(list->error_str), in ctl_ioctl()
3183 list->alloc_len); in ctl_ioctl()
3189 retval = copyout(sbuf_data(sb), list->lun_xml, in ctl_ioctl()
3192 list->fill_len = sbuf_len(sb) + 1; in ctl_ioctl()
3193 list->status = CTL_LUN_LIST_OK; in ctl_ioctl()
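/*
 * The XML LUN list above follows a simple retry convention: the sbuf is
 * fixed at the caller-supplied alloc_len, and if the generated XML did not
 * fit, status is set to CTL_LUN_LIST_NEED_MORE_SPACE so the caller can grow
 * its buffer and reissue the ioctl; on success fill_len reports how many
 * bytes (including the terminating NUL) were copied out.  A hedged sketch of
 * the userland side (the ioctl name and initial buffer size are assumptions,
 * the field names come from the code above):
 *
 *	struct ctl_lun_list list = { 0 };
 *	list.alloc_len = 4096;
 *	do {
 *		list.lun_xml = realloc(list.lun_xml, list.alloc_len);
 *		if (ioctl(fd, CTL_LUN_LIST, &list) != 0)
 *			err(1, "CTL_LUN_LIST");
 *		if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE)
 *			list.alloc_len *= 2;
 *	} while (list.status == CTL_LUN_LIST_NEED_MORE_SPACE);
 */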
3205 ci->status = CTL_ISCSI_ERROR; in ctl_ioctl()
3206 snprintf(ci->error_str, sizeof(ci->error_str), in ctl_ioctl()
3211 retval = fe->ioctl(dev, cmd, addr, flag, td); in ctl_ioctl()
3222 cn->status = CTL_NVMF_ERROR; in ctl_ioctl()
3223 snprintf(cn->error_str, sizeof(cn->error_str), in ctl_ioctl()
3228 retval = fe->ioctl(dev, cmd, addr, flag, td); in ctl_ioctl()
3239 tmp_args_nvl = req->args_nvl; in ctl_ioctl()
3241 fe = ctl_frontend_find(req->driver); in ctl_ioctl()
3243 req->status = CTL_LUN_ERROR; in ctl_ioctl()
3244 snprintf(req->error_str, sizeof(req->error_str), in ctl_ioctl()
3245 "Frontend \"%s\" not found.", req->driver); in ctl_ioctl()
3249 if (req->args != NULL) { in ctl_ioctl()
3250 if (req->args_len > CTL_MAX_ARGS_LEN) { in ctl_ioctl()
3251 req->status = CTL_LUN_ERROR; in ctl_ioctl()
3252 snprintf(req->error_str, sizeof(req->error_str), in ctl_ioctl()
3256 packed = malloc(req->args_len, M_CTL, M_WAITOK); in ctl_ioctl()
3257 if (copyin(req->args, packed, req->args_len) != 0) { in ctl_ioctl()
3259 req->status = CTL_LUN_ERROR; in ctl_ioctl()
3260 snprintf(req->error_str, sizeof(req->error_str), in ctl_ioctl()
3264 req->args_nvl = nvlist_unpack(packed, in ctl_ioctl()
3265 req->args_len, 0); in ctl_ioctl()
3268 if (req->args_nvl == NULL) { in ctl_ioctl()
3269 req->status = CTL_LUN_ERROR; in ctl_ioctl()
3270 snprintf(req->error_str, sizeof(req->error_str), in ctl_ioctl()
3275 req->args_nvl = nvlist_create(0); in ctl_ioctl()
3277 req->result_nvl = NULL; in ctl_ioctl()
3278 if (fe->ioctl) in ctl_ioctl()
3279 retval = fe->ioctl(dev, cmd, addr, flag, td); in ctl_ioctl()
3283 nvlist_destroy(req->args_nvl); in ctl_ioctl()
3284 req->args_nvl = tmp_args_nvl; in ctl_ioctl()
3286 if (req->result_nvl != NULL) { in ctl_ioctl()
3287 if (req->result != NULL) { in ctl_ioctl()
3288 packed = nvlist_pack(req->result_nvl, in ctl_ioctl()
3291 req->status = CTL_LUN_ERROR; in ctl_ioctl()
3292 snprintf(req->error_str, in ctl_ioctl()
3293 sizeof(req->error_str), in ctl_ioctl()
3298 if (packed_len > req->result_len) { in ctl_ioctl()
3299 req->status = CTL_LUN_ERROR; in ctl_ioctl()
3300 snprintf(req->error_str, in ctl_ioctl()
3301 sizeof(req->error_str), in ctl_ioctl()
3307 if (copyout(packed, req->result, packed_len)) { in ctl_ioctl()
3308 req->status = CTL_LUN_ERROR; in ctl_ioctl()
3309 snprintf(req->error_str, in ctl_ioctl()
3310 sizeof(req->error_str), in ctl_ioctl()
3316 req->result_len = packed_len; in ctl_ioctl()
3320 nvlist_destroy(req->result_nvl); in ctl_ioctl()
3335 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); in ctl_ioctl()
3337 list->status = CTL_LUN_LIST_ERROR; in ctl_ioctl()
3338 snprintf(list->error_str, sizeof(list->error_str), in ctl_ioctl()
3340 list->alloc_len); in ctl_ioctl()
3346 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
3347 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_ioctl()
3349 (uintmax_t)port->targ_port); in ctl_ioctl()
3359 "</frontend_type>\n", port->frontend->name); in ctl_ioctl()
3364 port->port_type); in ctl_ioctl()
3369 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); in ctl_ioctl()
3374 port->port_name); in ctl_ioctl()
3379 port->physical_port); in ctl_ioctl()
3384 port->virtual_port); in ctl_ioctl()
3388 if (port->target_devid != NULL) { in ctl_ioctl()
3390 ctl_id_sbuf(port->target_devid, sb); in ctl_ioctl()
3394 if (port->port_devid != NULL) { in ctl_ioctl()
3396 ctl_id_sbuf(port->port_devid, sb); in ctl_ioctl()
3400 if (port->port_info != NULL) { in ctl_ioctl()
3401 retval = port->port_info(port->onoff_arg, sb); in ctl_ioctl()
3407 while ((name = nvlist_next(port->options, &type, in ctl_ioctl()
3412 value = dnvlist_get_string(port->options, in ctl_ioctl()
3421 if (port->lun_map != NULL) { in ctl_ioctl()
3423 for (j = 0; j < port->lun_map_size; j++) { in ctl_ioctl()
3434 if (port->wwpn_iid[j].in_use == 0 || in ctl_ioctl()
3435 (port->wwpn_iid[j].wwpn == 0 && in ctl_ioctl()
3436 port->wwpn_iid[j].name == NULL)) in ctl_ioctl()
3439 if (port->wwpn_iid[j].name != NULL) in ctl_ioctl()
3442 j, port->wwpn_iid[j].name); in ctl_ioctl()
3446 j, port->wwpn_iid[j].wwpn); in ctl_ioctl()
3457 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
3463 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; in ctl_ioctl()
3464 snprintf(list->error_str, sizeof(list->error_str), in ctl_ioctl()
3466 list->alloc_len); in ctl_ioctl()
3472 retval = copyout(sbuf_data(sb), list->lun_xml, in ctl_ioctl()
3475 list->fill_len = sbuf_len(sb) + 1; in ctl_ioctl()
3476 list->status = CTL_LUN_LIST_OK; in ctl_ioctl()
3484 mtx_lock(&softc->ctl_lock); in ctl_ioctl()
3485 if (lm->port < softc->port_min || in ctl_ioctl()
3486 lm->port >= softc->port_max || in ctl_ioctl()
3487 (port = softc->ctl_ports[lm->port]) == NULL) { in ctl_ioctl()
3488 mtx_unlock(&softc->ctl_lock); in ctl_ioctl()
3491 if (port->status & CTL_PORT_STATUS_ONLINE) { in ctl_ioctl()
3492 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_ioctl()
3493 if (ctl_lun_map_to_port(port, lun->lun) == in ctl_ioctl()
3496 mtx_lock(&lun->lun_lock); in ctl_ioctl()
3497 ctl_est_ua_port(lun, lm->port, -1, in ctl_ioctl()
3499 mtx_unlock(&lun->lun_lock); in ctl_ioctl()
3502 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps in ctl_ioctl()
3503 if (lm->plun != UINT32_MAX) { in ctl_ioctl()
3504 if (lm->lun == UINT32_MAX) in ctl_ioctl()
3505 retval = ctl_lun_map_unset(port, lm->plun); in ctl_ioctl()
3506 else if (lm->lun < ctl_max_luns && in ctl_ioctl()
3507 softc->ctl_luns[lm->lun] != NULL) in ctl_ioctl()
3508 retval = ctl_lun_map_set(port, lm->plun, lm->lun); in ctl_ioctl()
3512 if (lm->lun == UINT32_MAX) in ctl_ioctl()
3517 if (port->status & CTL_PORT_STATUS_ONLINE) in ctl_ioctl()
3530 stats->status = CTL_SS_OK; in ctl_ioctl()
3531 stats->fill_len = 0; in ctl_ioctl()
3532 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_ioctl()
3533 if (lun->lun < stats->first_item) in ctl_ioctl()
3535 if (stats->fill_len + sizeof(lun->stats) > in ctl_ioctl()
3536 stats->alloc_len) { in ctl_ioctl()
3537 stats->status = CTL_SS_NEED_MORE_SPACE; in ctl_ioctl()
3540 retval = copyout(&lun->stats, &stats->stats[i++], in ctl_ioctl()
3541 sizeof(lun->stats)); in ctl_ioctl()
3544 stats->fill_len += sizeof(lun->stats); in ctl_ioctl()
3546 stats->num_items = softc->num_luns; in ctl_ioctl()
3547 stats->flags = CTL_STATS_FLAG_NONE; in ctl_ioctl()
3549 stats->flags |= CTL_STATS_FLAG_TIME_VALID; in ctl_ioctl()
3551 getnanouptime(&stats->timestamp); in ctl_ioctl()
3563 stats->status = CTL_SS_OK; in ctl_ioctl()
3564 stats->fill_len = 0; in ctl_ioctl()
3565 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_ioctl()
3566 if (port->targ_port < stats->first_item) in ctl_ioctl()
3568 if (stats->fill_len + sizeof(port->stats) > in ctl_ioctl()
3569 stats->alloc_len) { in ctl_ioctl()
3570 stats->status = CTL_SS_NEED_MORE_SPACE; in ctl_ioctl()
3573 retval = copyout(&port->stats, &stats->stats[i++], in ctl_ioctl()
3574 sizeof(port->stats)); in ctl_ioctl()
3577 stats->fill_len += sizeof(port->stats); in ctl_ioctl()
3579 stats->num_items = softc->num_ports; in ctl_ioctl()
3580 stats->flags = CTL_STATS_FLAG_NONE; in ctl_ioctl()
3582 stats->flags |= CTL_STATS_FLAG_TIME_VALID; in ctl_ioctl()
3584 getnanouptime(&stats->timestamp); in ctl_ioctl()
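/*
 * Both statistics handlers above use the same windowed-copyout scheme:
 * entries below first_item are skipped, entries are copied out until adding
 * another stats record would exceed alloc_len (at which point status becomes
 * CTL_SS_NEED_MORE_SPACE), fill_len reports how many bytes were actually
 * produced, and num_items tells the caller the total count so it can size or
 * re-window its next request.  CTL_STATS_FLAG_TIME_VALID is only OR-ed into
 * flags under a condition elided above, advertising whether the time-based
 * fields of the copied records are being collected, and getnanouptime()
 * stamps when the snapshot was taken.
 */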
3603 STAILQ_FOREACH(backend, &softc->be_list, links) { in ctl_ioctl()
3604 if (backend->type == type) { in ctl_ioctl()
3615 retval = backend->ioctl(dev, cmd, addr, flag, td); in ctl_ioctl()
3627 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); in ctl_get_initindex()
3633 struct ctl_softc *softc = port->ctl_softc; in ctl_lun_map_init()
3638 if (port->lun_map == NULL || port->lun_map_size < size) { in ctl_lun_map_init()
3639 port->lun_map_size = 0; in ctl_lun_map_init()
3640 free(port->lun_map, M_CTL); in ctl_lun_map_init()
3641 port->lun_map = malloc(size * sizeof(uint32_t), in ctl_lun_map_init()
3644 if (port->lun_map == NULL) in ctl_lun_map_init()
3647 port->lun_map[i] = UINT32_MAX; in ctl_lun_map_init()
3648 port->lun_map_size = size; in ctl_lun_map_init()
3649 if (port->status & CTL_PORT_STATUS_ONLINE) { in ctl_lun_map_init()
3650 if (port->lun_disable != NULL) { in ctl_lun_map_init()
3651 STAILQ_FOREACH(lun, &softc->lun_list, links) in ctl_lun_map_init()
3652 port->lun_disable(port->targ_lun_arg, lun->lun); in ctl_lun_map_init()
3662 struct ctl_softc *softc = port->ctl_softc; in ctl_lun_map_deinit()
3665 if (port->lun_map == NULL) in ctl_lun_map_deinit()
3667 port->lun_map_size = 0; in ctl_lun_map_deinit()
3668 free(port->lun_map, M_CTL); in ctl_lun_map_deinit()
3669 port->lun_map = NULL; in ctl_lun_map_deinit()
3670 if (port->status & CTL_PORT_STATUS_ONLINE) { in ctl_lun_map_deinit()
3671 if (port->lun_enable != NULL) { in ctl_lun_map_deinit()
3672 STAILQ_FOREACH(lun, &softc->lun_list, links) in ctl_lun_map_deinit()
3673 port->lun_enable(port->targ_lun_arg, lun->lun); in ctl_lun_map_deinit()
3686 if (port->lun_map == NULL) { in ctl_lun_map_set()
3691 if (plun >= port->lun_map_size) in ctl_lun_map_set()
3693 old = port->lun_map[plun]; in ctl_lun_map_set()
3694 port->lun_map[plun] = glun; in ctl_lun_map_set()
3695 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { in ctl_lun_map_set()
3696 if (port->lun_enable != NULL) in ctl_lun_map_set()
3697 port->lun_enable(port->targ_lun_arg, plun); in ctl_lun_map_set()
3708 if (port->lun_map == NULL || plun >= port->lun_map_size) in ctl_lun_map_unset()
3710 old = port->lun_map[plun]; in ctl_lun_map_unset()
3711 port->lun_map[plun] = UINT32_MAX; in ctl_lun_map_unset()
3712 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { in ctl_lun_map_unset()
3713 if (port->lun_disable != NULL) in ctl_lun_map_unset()
3714 port->lun_disable(port->targ_lun_arg, plun); in ctl_lun_map_unset()
3726 if (port->lun_map == NULL) in ctl_lun_map_from_port()
3728 	if (lun_id >= port->lun_map_size)	 in ctl_lun_map_from_port()
3730 return (port->lun_map[lun_id]); in ctl_lun_map_from_port()
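/*
 * The per-port LUN map used above and below is a flat array of lun_map_size
 * uint32_t entries, indexed by the LUN number the initiator sees on that
 * port; each entry holds the global CTL LUN number, or UINT32_MAX for "not
 * mapped" (ctl_lun_map_init() above fills new maps with UINT32_MAX).
 * ctl_lun_map_from_port() is therefore an O(1) array lookup, with a NULL map
 * meaning the port exposes global LUN numbers unchanged, while
 * ctl_lun_map_to_port() below has to do a linear reverse scan to find which
 * port-visible LUN, if any, a given global LUN is published as.
 * ctl_lun_map_set()/ctl_lun_map_unset() also call the frontend's
 * lun_enable/lun_disable hooks when a mapping appears or disappears on an
 * online port.
 */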
3740 if (port->lun_map == NULL) in ctl_lun_map_to_port()
3742 for (i = 0; i < port->lun_map_size; i++) { in ctl_lun_map_to_port()
3743 if (port->lun_map[i] == lun_id) in ctl_lun_map_to_port()
3819 return (-1); in ctl_ffz()
3831 return (-1); in ctl_set_mask()
3847 return (-1); in ctl_clear_mask()
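/*
 * ctl_ffz(), ctl_set_mask(), ctl_clear_mask() and ctl_is_set() operate on
 * plain uint32_t bitmask arrays (one bit per item, 32 items per word) and
 * return -1 when the requested bit or free slot does not exist.  Their main
 * user here is softc->ctl_lun_mask in ctl_add_lun()/ctl_free_lun() below: a
 * find-first-zero scan picks the lowest free LUN number when the backend did
 * not request a specific one, the bit stays set while the LUN exists, and it
 * is cleared again when the LUN goes away, so LUN numbers are recycled
 * lowest-first.
 */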
3873 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; in ctl_get_prkey()
3884 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; in ctl_clr_prkey()
3897 if (lun->pr_keys[i] != NULL) in ctl_alloc_prkey()
3899 mtx_unlock(&lun->lun_lock); in ctl_alloc_prkey()
3902 mtx_lock(&lun->lun_lock); in ctl_alloc_prkey()
3903 if (lun->pr_keys[i] == NULL) in ctl_alloc_prkey()
3904 lun->pr_keys[i] = p; in ctl_alloc_prkey()
3914 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; in ctl_set_prkey()
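/*
 * Persistent reservation keys are kept as a two-level structure: lun->pr_keys
 * is an array of ctl_max_ports pointers, each lazily allocated by
 * ctl_alloc_prkey() above when first needed and holding one 64-bit key per
 * initiator slot on that port.  The residx used by these helpers is the flat
 * initiator index from ctl_get_initindex() above, i.e.
 * initid + targ_port * CTL_MAX_INIT_PER_PORT, so the port slot is
 * residx / CTL_MAX_INIT_PER_PORT and the key slot within it is
 * residx % CTL_MAX_INIT_PER_PORT.  A NULL second level simply reads back as
 * "no key registered" without spending memory on idle ports.
 */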
3934 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); in ctl_pool_create()
3935 pool->ctl_softc = ctl_softc; in ctl_pool_create()
3937 pool->zone = uma_zsecond_create(pool->name, NULL, in ctl_pool_create()
3938 NULL, NULL, NULL, ctl_softc->io_zone); in ctl_pool_create()
3939 /* uma_prealloc(pool->zone, total_ctl_io); */ in ctl_pool_create()
3941 pool->zone = ctl_softc->io_zone; in ctl_pool_create()
3956 uma_zdestroy(pool->zone); in ctl_pool_free()
3967 io = uma_zalloc(pool->zone, M_WAITOK); in ctl_alloc_io()
3969 io->io_hdr.pool = pool_ref; in ctl_alloc_io()
3970 CTL_SOFTC(io) = pool->ctl_softc; in ctl_alloc_io()
3971 TAILQ_INIT(&io->io_hdr.blocked_queue); in ctl_alloc_io()
3982 io = uma_zalloc(pool->zone, M_NOWAIT); in ctl_alloc_io_nowait()
3984 io->io_hdr.pool = pool_ref; in ctl_alloc_io_nowait()
3985 CTL_SOFTC(io) = pool->ctl_softc; in ctl_alloc_io_nowait()
3986 TAILQ_INIT(&io->io_hdr.blocked_queue); in ctl_alloc_io_nowait()
3999 pool = (struct ctl_io_pool *)io->io_hdr.pool; in ctl_free_io()
4000 uma_zfree(pool->zone, io); in ctl_free_io()
4014 pool = io->io_hdr.pool; in ctl_zero_io()
4016 io->io_hdr.pool = pool; in ctl_zero_io()
4017 CTL_SOFTC(io) = pool->ctl_softc; in ctl_zero_io()
4018 TAILQ_INIT(&io->io_hdr.blocked_queue); in ctl_zero_io()
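/*
 * I/O allocation above is a thin wrapper around UMA: ctl_pool_create() either
 * layers a named secondary zone ("CTL IO <name>") on top of the shared
 * softc->io_zone or, in the fallback path, reuses io_zone directly, and
 * ctl_alloc_io()/ctl_alloc_io_nowait() differ only in M_WAITOK vs. M_NOWAIT.
 * Every allocation (and ctl_zero_io(), which wipes a recycled request)
 * re-establishes the few fields that must survive: the pool back-pointer in
 * io_hdr.pool, the softc pointer, and an empty blocked_queue.
 */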
4055 return (-1); in ctl_expand_number()
4060 return (-1); in ctl_expand_number()
4078 memcpy(&lun->mode_pages.index, page_index_template, in ctl_init_page_index()
4082 page_index = &lun->mode_pages.index[i]; in ctl_init_page_index()
4083 if (lun->be_lun->lun_type == T_DIRECT && in ctl_init_page_index()
4084 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) in ctl_init_page_index()
4086 if (lun->be_lun->lun_type == T_PROCESSOR && in ctl_init_page_index()
4087 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) in ctl_init_page_index()
4089 if (lun->be_lun->lun_type == T_CDROM && in ctl_init_page_index()
4090 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) in ctl_init_page_index()
4093 page_code = page_index->page_code & SMPH_PC_MASK; in ctl_init_page_index()
4096 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, in ctl_init_page_index()
4098 page_index->subpage, page_code)); in ctl_init_page_index()
4099 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], in ctl_init_page_index()
4102 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], in ctl_init_page_index()
4105 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], in ctl_init_page_index()
4108 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4111 page_index->page_data = in ctl_init_page_index()
4112 (uint8_t *)lun->mode_pages.rw_er_page; in ctl_init_page_index()
4116 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, in ctl_init_page_index()
4118 page_index->subpage, page_code)); in ctl_init_page_index()
4119 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], in ctl_init_page_index()
4122 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], in ctl_init_page_index()
4125 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], in ctl_init_page_index()
4128 memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4131 page_index->page_data = in ctl_init_page_index()
4132 (uint8_t *)lun->mode_pages.verify_er_page; in ctl_init_page_index()
4138 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, in ctl_init_page_index()
4140 page_index->subpage, page_code)); in ctl_init_page_index()
4141 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], in ctl_init_page_index()
4144 memcpy(&lun->mode_pages.caching_page[ in ctl_init_page_index()
4147 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4150 caching_page = &lun->mode_pages.caching_page[ in ctl_init_page_index()
4152 value = dnvlist_get_string(lun->be_lun->options, in ctl_init_page_index()
4155 caching_page->flags1 &= ~SCP_WCE; in ctl_init_page_index()
4156 value = dnvlist_get_string(lun->be_lun->options, in ctl_init_page_index()
4159 caching_page->flags1 |= SCP_RCD; in ctl_init_page_index()
4160 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], in ctl_init_page_index()
4161 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4163 page_index->page_data = in ctl_init_page_index()
4164 (uint8_t *)lun->mode_pages.caching_page; in ctl_init_page_index()
4168 switch (page_index->subpage) { in ctl_init_page_index()
4172 memcpy(&lun->mode_pages.control_page[ in ctl_init_page_index()
4176 memcpy(&lun->mode_pages.control_page[ in ctl_init_page_index()
4180 memcpy(&lun->mode_pages.control_page[ in ctl_init_page_index()
4184 control_page = &lun->mode_pages.control_page[ in ctl_init_page_index()
4186 value = dnvlist_get_string(lun->be_lun->options, in ctl_init_page_index()
4190 control_page->queue_flags &= in ctl_init_page_index()
4192 control_page->queue_flags |= in ctl_init_page_index()
4195 memcpy(&lun->mode_pages.control_page[ in ctl_init_page_index()
4197 &lun->mode_pages.control_page[ in ctl_init_page_index()
4200 page_index->page_data = in ctl_init_page_index()
4201 (uint8_t *)lun->mode_pages.control_page; in ctl_init_page_index()
4205 memcpy(&lun->mode_pages.control_ext_page[ in ctl_init_page_index()
4209 memcpy(&lun->mode_pages.control_ext_page[ in ctl_init_page_index()
4213 memcpy(&lun->mode_pages.control_ext_page[ in ctl_init_page_index()
4217 memcpy(&lun->mode_pages.control_ext_page[ in ctl_init_page_index()
4219 &lun->mode_pages.control_ext_page[ in ctl_init_page_index()
4222 page_index->page_data = in ctl_init_page_index()
4223 (uint8_t *)lun->mode_pages.control_ext_page; in ctl_init_page_index()
4227 page_index->subpage, page_code); in ctl_init_page_index()
4232 switch (page_index->subpage) { in ctl_init_page_index()
4234 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], in ctl_init_page_index()
4237 memcpy(&lun->mode_pages.ie_page[ in ctl_init_page_index()
4240 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], in ctl_init_page_index()
4243 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4246 page_index->page_data = in ctl_init_page_index()
4247 (uint8_t *)lun->mode_pages.ie_page; in ctl_init_page_index()
4252 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], in ctl_init_page_index()
4255 memcpy(&lun->mode_pages.lbp_page[ in ctl_init_page_index()
4258 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4261 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; in ctl_init_page_index()
4262 value = dnvlist_get_string(lun->be_lun->options, in ctl_init_page_index()
4263 "avail-threshold", NULL); in ctl_init_page_index()
4266 page->descr[0].flags |= SLBPPD_ENABLED | in ctl_init_page_index()
4268 if (lun->be_lun->blocksize) in ctl_init_page_index()
4269 ival /= lun->be_lun->blocksize; in ctl_init_page_index()
4273 page->descr[0].count); in ctl_init_page_index()
4275 value = dnvlist_get_string(lun->be_lun->options, in ctl_init_page_index()
4276 "used-threshold", NULL); in ctl_init_page_index()
4279 page->descr[1].flags |= SLBPPD_ENABLED | in ctl_init_page_index()
4281 if (lun->be_lun->blocksize) in ctl_init_page_index()
4282 ival /= lun->be_lun->blocksize; in ctl_init_page_index()
4286 page->descr[1].count); in ctl_init_page_index()
4288 value = dnvlist_get_string(lun->be_lun->options, in ctl_init_page_index()
4289 "pool-avail-threshold", NULL); in ctl_init_page_index()
4292 page->descr[2].flags |= SLBPPD_ENABLED | in ctl_init_page_index()
4294 if (lun->be_lun->blocksize) in ctl_init_page_index()
4295 ival /= lun->be_lun->blocksize; in ctl_init_page_index()
4299 page->descr[2].count); in ctl_init_page_index()
4301 value = dnvlist_get_string(lun->be_lun->options, in ctl_init_page_index()
4302 "pool-used-threshold", NULL); in ctl_init_page_index()
4305 page->descr[3].flags |= SLBPPD_ENABLED | in ctl_init_page_index()
4307 if (lun->be_lun->blocksize) in ctl_init_page_index()
4308 ival /= lun->be_lun->blocksize; in ctl_init_page_index()
4312 page->descr[3].count); in ctl_init_page_index()
4314 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], in ctl_init_page_index()
4315 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4317 page_index->page_data = in ctl_init_page_index()
4318 (uint8_t *)lun->mode_pages.lbp_page; in ctl_init_page_index()
4323 page_index->subpage, page_code); in ctl_init_page_index()
4328 KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, in ctl_init_page_index()
4330 page_index->subpage, page_code)); in ctl_init_page_index()
4331 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], in ctl_init_page_index()
4334 memcpy(&lun->mode_pages.cddvd_page[ in ctl_init_page_index()
4337 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4340 memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], in ctl_init_page_index()
4341 &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], in ctl_init_page_index()
4343 page_index->page_data = in ctl_init_page_index()
4344 (uint8_t *)lun->mode_pages.cddvd_page; in ctl_init_page_index()
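/*
 * Each supported mode page is kept as four consecutive images indexed by
 * CTL_PAGE_CURRENT / CTL_PAGE_CHANGEABLE / CTL_PAGE_DEFAULT / CTL_PAGE_SAVED;
 * page_index->page_data points at the first image and handlers step by
 * page_index->page_len to reach a particular one (see
 * ctl_default_page_handler() below).  The memcpy()s above seed the images
 * from the static templates; for pages with tunable behavior the SAVED image
 * is then adjusted from be_lun->options (for example the caching page's
 * write/read cache options or the LBP threshold options), and CURRENT starts
 * as a copy of SAVED.  Pages irrelevant to the LUN type are skipped entirely
 * by the CTL_PAGE_FLAG_DIRECT/_PROC/_CDROM filters at the top of the loop.
 */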
4361 memcpy(&lun->log_pages.index, log_page_index_template, in ctl_init_log_page_index()
4364 prev = -1; in ctl_init_log_page_index()
4366 page_index = &lun->log_pages.index[i]; in ctl_init_log_page_index()
4367 if (lun->be_lun->lun_type == T_DIRECT && in ctl_init_log_page_index()
4368 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) in ctl_init_log_page_index()
4370 if (lun->be_lun->lun_type == T_PROCESSOR && in ctl_init_log_page_index()
4371 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) in ctl_init_log_page_index()
4373 if (lun->be_lun->lun_type == T_CDROM && in ctl_init_log_page_index()
4374 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) in ctl_init_log_page_index()
4377 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && in ctl_init_log_page_index()
4378 lun->backend->lun_attr == NULL) in ctl_init_log_page_index()
4381 if (page_index->page_code != prev) { in ctl_init_log_page_index()
4382 lun->log_pages.pages_page[j] = page_index->page_code; in ctl_init_log_page_index()
4383 prev = page_index->page_code; in ctl_init_log_page_index()
4386 lun->log_pages.subpages_page[k*2] = page_index->page_code; in ctl_init_log_page_index()
4387 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; in ctl_init_log_page_index()
4390 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; in ctl_init_log_page_index()
4391 lun->log_pages.index[0].page_len = j; in ctl_init_log_page_index()
4392 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; in ctl_init_log_page_index()
4393 lun->log_pages.index[1].page_len = k * 2; in ctl_init_log_page_index()
4394 lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; in ctl_init_log_page_index()
4395 lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); in ctl_init_log_page_index()
4396 lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; in ctl_init_log_page_index()
4397 lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; in ctl_init_log_page_index()
4398 lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.stat_page; in ctl_init_log_page_index()
4399 lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); in ctl_init_log_page_index()
4400 lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; in ctl_init_log_page_index()
4401 lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); in ctl_init_log_page_index()
4419 while (str[i] == '-') /* Skip dashes in UUIDs. */ in hex2bin()
4423 c -= '0'; in hex2bin()
4425 c -= isupper(c) ? 'A' - 10 : 'a' - 10; in hex2bin()
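/*
 * hex2bin() is the small helper that turns the ASCII "eui", "naa" and "uuid"
 * LUN options into binary designator bytes: dashes are skipped so a standard
 * UUID string can be passed verbatim, each remaining character is converted
 * with the usual '0'..'9' / 'a'..'f' / 'A'..'F' offsets, and the return value
 * is the number of bytes actually decoded.  The callers in ctl_add_lun()
 * below round that count up to a legal designator length (8, 12 or 16 bytes
 * for EUI-64, 8 or 16 for NAA) and shrink the preallocated device-ID buffer
 * by whatever was not used.
 */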
4441 * Returns 0 for success, non-zero (errno) for failure.
4455 * We support only Direct Access, CD-ROM or Processor LUN types. in ctl_add_lun()
4457 switch (be_lun->lun_type) { in ctl_add_lun()
4469 lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * in ctl_add_lun()
4471 lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, in ctl_add_lun()
4473 lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, in ctl_add_lun()
4478 strnlen(be_lun->device_id, CTL_DEVID_LEN)); in ctl_add_lun()
4481 scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); in ctl_add_lun()
4486 eui = dnvlist_get_string(be_lun->options, "eui", NULL); in ctl_add_lun()
4490 naa = dnvlist_get_string(be_lun->options, "naa", NULL); in ctl_add_lun()
4494 uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); in ctl_add_lun()
4498 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, in ctl_add_lun()
4500 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; in ctl_add_lun()
4501 desc->proto_codeset = SVPD_ID_CODESET_ASCII; in ctl_add_lun()
4502 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; in ctl_add_lun()
4503 desc->length = idlen1; in ctl_add_lun()
4504 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; in ctl_add_lun()
4505 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); in ctl_add_lun()
4506 if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { in ctl_add_lun()
4507 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); in ctl_add_lun()
4509 strncpy(t10id->vendor, vendor, in ctl_add_lun()
4510 min(sizeof(t10id->vendor), strlen(vendor))); in ctl_add_lun()
4512 strncpy((char *)t10id->vendor_spec_id, in ctl_add_lun()
4513 (char *)be_lun->device_id, devidlen); in ctl_add_lun()
4515 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + in ctl_add_lun()
4516 desc->length); in ctl_add_lun()
4517 desc->proto_codeset = SVPD_ID_CODESET_UTF8; in ctl_add_lun()
4518 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | in ctl_add_lun()
4520 desc->length = idlen2; in ctl_add_lun()
4521 strlcpy(desc->identifier, scsiname, idlen2); in ctl_add_lun()
4524 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + in ctl_add_lun()
4525 desc->length); in ctl_add_lun()
4526 desc->proto_codeset = SVPD_ID_CODESET_BINARY; in ctl_add_lun()
4527 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | in ctl_add_lun()
4529 desc->length = hex2bin(eui, desc->identifier, 16); in ctl_add_lun()
4530 desc->length = desc->length > 12 ? 16 : in ctl_add_lun()
4531 (desc->length > 8 ? 12 : 8); in ctl_add_lun()
4532 len -= 16 - desc->length; in ctl_add_lun()
4535 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + in ctl_add_lun()
4536 desc->length); in ctl_add_lun()
4537 desc->proto_codeset = SVPD_ID_CODESET_BINARY; in ctl_add_lun()
4538 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | in ctl_add_lun()
4540 desc->length = hex2bin(naa, desc->identifier, 16); in ctl_add_lun()
4541 desc->length = desc->length > 8 ? 16 : 8; in ctl_add_lun()
4542 len -= 16 - desc->length; in ctl_add_lun()
4545 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + in ctl_add_lun()
4546 desc->length); in ctl_add_lun()
4547 desc->proto_codeset = SVPD_ID_CODESET_BINARY; in ctl_add_lun()
4548 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | in ctl_add_lun()
4550 desc->identifier[0] = 0x10; in ctl_add_lun()
4551 hex2bin(uuid, &desc->identifier[2], 16); in ctl_add_lun()
4552 desc->length = 18; in ctl_add_lun()
4554 lun->lun_devid->len = len; in ctl_add_lun()
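/*
 * The block above builds the LUN's VPD device-identification descriptor
 * chain in a single allocation: a mandatory T10 vendor-ID designator (ASCII
 * vendor name, space padded, followed by the backend's device_id), then
 * optional designators appended back to back for a SCSI name string
 * ("scsiname" option, UTF-8), an EUI-64 and an NAA identifier (both decoded
 * from hex via hex2bin() with the length rounded to a legal size and the
 * running total shrunk accordingly), and an 18-byte UUID designator whose
 * first identifier byte (0x10) tags the format and whose 16 UUID bytes start
 * at offset 2.  lun_devid->len records the final, possibly shrunken, length
 * of the whole chain.
 */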
4556 mtx_lock(&ctl_softc->ctl_lock); in ctl_add_lun()
4561 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { in ctl_add_lun()
4562 if ((be_lun->req_lun_id > (ctl_max_luns - 1)) in ctl_add_lun()
4563 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { in ctl_add_lun()
4564 mtx_unlock(&ctl_softc->ctl_lock); in ctl_add_lun()
4565 if (be_lun->req_lun_id > (ctl_max_luns - 1)) { in ctl_add_lun()
4567 "than ctl_max_luns - 1 (%d)\n", in ctl_add_lun()
4568 be_lun->req_lun_id, ctl_max_luns - 1); in ctl_add_lun()
4575 "in use\n", be_lun->req_lun_id); in ctl_add_lun()
4578 free(lun->lun_devid, M_CTL); in ctl_add_lun()
4582 lun_number = be_lun->req_lun_id; in ctl_add_lun()
4584 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); in ctl_add_lun()
4585 if (lun_number == -1) { in ctl_add_lun()
4586 mtx_unlock(&ctl_softc->ctl_lock); in ctl_add_lun()
4591 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); in ctl_add_lun()
4592 mtx_unlock(&ctl_softc->ctl_lock); in ctl_add_lun()
4594 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); in ctl_add_lun()
4595 lun->lun = lun_number; in ctl_add_lun()
4596 lun->be_lun = be_lun; in ctl_add_lun()
4601 lun->flags |= CTL_LUN_DISABLED; in ctl_add_lun()
4602 lun->backend = be_lun->be; in ctl_add_lun()
4603 be_lun->ctl_lun = lun; in ctl_add_lun()
4604 be_lun->lun_id = lun_number; in ctl_add_lun()
4605 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) in ctl_add_lun()
4606 lun->flags |= CTL_LUN_EJECTED; in ctl_add_lun()
4607 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) in ctl_add_lun()
4608 lun->flags |= CTL_LUN_NO_MEDIA; in ctl_add_lun()
4609 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) in ctl_add_lun()
4610 lun->flags |= CTL_LUN_STOPPED; in ctl_add_lun()
4612 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) in ctl_add_lun()
4613 lun->flags |= CTL_LUN_PRIMARY_SC; in ctl_add_lun()
4615 value = dnvlist_get_string(be_lun->options, "removable", NULL); in ctl_add_lun()
4618 lun->flags |= CTL_LUN_REMOVABLE; in ctl_add_lun()
4619 } else if (be_lun->lun_type == T_CDROM) in ctl_add_lun()
4620 lun->flags |= CTL_LUN_REMOVABLE; in ctl_add_lun()
4622 lun->ctl_softc = ctl_softc; in ctl_add_lun()
4624 lun->last_busy = getsbinuptime(); in ctl_add_lun()
4626 LIST_INIT(&lun->ooa_queue); in ctl_add_lun()
4627 STAILQ_INIT(&lun->error_list); in ctl_add_lun()
4628 lun->ie_reported = 1; in ctl_add_lun()
4629 callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); in ctl_add_lun()
4631 if (lun->flags & CTL_LUN_REMOVABLE) { in ctl_add_lun()
4632 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, in ctl_add_lun()
4643 lun->stats.item = lun_number; in ctl_add_lun()
4649 mtx_lock(&ctl_softc->ctl_lock); in ctl_add_lun()
4650 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { in ctl_add_lun()
4651 mtx_lock(&nlun->lun_lock); in ctl_add_lun()
4652 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); in ctl_add_lun()
4653 mtx_unlock(&nlun->lun_lock); in ctl_add_lun()
4655 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); in ctl_add_lun()
4656 ctl_softc->ctl_luns[lun_number] = lun; in ctl_add_lun()
4657 ctl_softc->num_luns++; in ctl_add_lun()
4658 mtx_unlock(&ctl_softc->ctl_lock); in ctl_add_lun()
4665 mtx_lock(&ctl_softc->ctl_lock); in ctl_add_lun()
4666 STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); in ctl_add_lun()
4667 ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); in ctl_add_lun()
4668 ctl_softc->ctl_luns[lun_number] = NULL; in ctl_add_lun()
4669 ctl_softc->num_luns--; in ctl_add_lun()
4670 mtx_unlock(&ctl_softc->ctl_lock); in ctl_add_lun()
4671 free(lun->lun_devid, M_CTL); in ctl_add_lun()
4685 struct ctl_softc *softc = lun->ctl_softc; in ctl_free_lun()
4689 KASSERT(LIST_EMPTY(&lun->ooa_queue), in ctl_free_lun()
4692 mtx_lock(&softc->ctl_lock); in ctl_free_lun()
4693 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); in ctl_free_lun()
4694 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); in ctl_free_lun()
4695 softc->ctl_luns[lun->lun] = NULL; in ctl_free_lun()
4696 softc->num_luns--; in ctl_free_lun()
4697 STAILQ_FOREACH(nlun, &softc->lun_list, links) { in ctl_free_lun()
4698 mtx_lock(&nlun->lun_lock); in ctl_free_lun()
4699 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); in ctl_free_lun()
4700 mtx_unlock(&nlun->lun_lock); in ctl_free_lun()
4702 mtx_unlock(&softc->ctl_lock); in ctl_free_lun()
4707 lun->be_lun->lun_shutdown(lun->be_lun); in ctl_free_lun()
4709 lun->ie_reportcnt = UINT32_MAX; in ctl_free_lun()
4710 callout_drain(&lun->ie_callout); in ctl_free_lun()
4712 mtx_destroy(&lun->lun_lock); in ctl_free_lun()
4713 free(lun->lun_devid, M_CTL); in ctl_free_lun()
4715 free(lun->pending_ua[i], M_CTL); in ctl_free_lun()
4716 free(lun->pending_ua, M_DEVBUF); in ctl_free_lun()
4718 free(lun->pr_keys[i], M_CTL); in ctl_free_lun()
4719 free(lun->pr_keys, M_DEVBUF); in ctl_free_lun()
4720 free(lun->write_buffer, M_CTL); in ctl_free_lun()
4721 free(lun->prevent, M_CTL); in ctl_free_lun()
4734 softc = lun->ctl_softc; in ctl_enable_lun()
4736 mtx_lock(&softc->ctl_lock); in ctl_enable_lun()
4737 mtx_lock(&lun->lun_lock); in ctl_enable_lun()
4738 KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, in ctl_enable_lun()
4740 lun->flags &= ~CTL_LUN_DISABLED; in ctl_enable_lun()
4741 mtx_unlock(&lun->lun_lock); in ctl_enable_lun()
4743 STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { in ctl_enable_lun()
4744 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || in ctl_enable_lun()
4745 port->lun_map != NULL || port->lun_enable == NULL) in ctl_enable_lun()
4753 mtx_unlock(&softc->ctl_lock); in ctl_enable_lun()
4754 retval = port->lun_enable(port->targ_lun_arg, lun->lun); in ctl_enable_lun()
4755 mtx_lock(&softc->ctl_lock); in ctl_enable_lun()
4759 __func__, port->port_name, port->targ_port, in ctl_enable_lun()
4760 retval, (intmax_t)lun->lun); in ctl_enable_lun()
4764 mtx_unlock(&softc->ctl_lock); in ctl_enable_lun()
4777 softc = lun->ctl_softc; in ctl_disable_lun()
4779 mtx_lock(&softc->ctl_lock); in ctl_disable_lun()
4780 mtx_lock(&lun->lun_lock); in ctl_disable_lun()
4781 KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, in ctl_disable_lun()
4783 lun->flags |= CTL_LUN_DISABLED; in ctl_disable_lun()
4784 mtx_unlock(&lun->lun_lock); in ctl_disable_lun()
4786 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_disable_lun()
4787 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || in ctl_disable_lun()
4788 port->lun_map != NULL || port->lun_disable == NULL) in ctl_disable_lun()
4798 mtx_unlock(&softc->ctl_lock); in ctl_disable_lun()
4799 retval = port->lun_disable(port->targ_lun_arg, lun->lun); in ctl_disable_lun()
4800 mtx_lock(&softc->ctl_lock); in ctl_disable_lun()
4804 __func__, port->port_name, port->targ_port, in ctl_disable_lun()
4805 retval, (intmax_t)lun->lun); in ctl_disable_lun()
4809 mtx_unlock(&softc->ctl_lock); in ctl_disable_lun()
4818 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_start_lun()
4820 mtx_lock(&lun->lun_lock); in ctl_start_lun()
4821 lun->flags &= ~CTL_LUN_STOPPED; in ctl_start_lun()
4822 mtx_unlock(&lun->lun_lock); in ctl_start_lun()
4829 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_stop_lun()
4831 mtx_lock(&lun->lun_lock); in ctl_stop_lun()
4832 lun->flags |= CTL_LUN_STOPPED; in ctl_stop_lun()
4833 mtx_unlock(&lun->lun_lock); in ctl_stop_lun()
4840 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_lun_no_media()
4842 mtx_lock(&lun->lun_lock); in ctl_lun_no_media()
4843 lun->flags |= CTL_LUN_NO_MEDIA; in ctl_lun_no_media()
4844 mtx_unlock(&lun->lun_lock); in ctl_lun_no_media()
4851 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_lun_has_media()
4854 mtx_lock(&lun->lun_lock); in ctl_lun_has_media()
4855 lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); in ctl_lun_has_media()
4856 if (lun->flags & CTL_LUN_REMOVABLE) in ctl_lun_has_media()
4857 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); in ctl_lun_has_media()
4858 mtx_unlock(&lun->lun_lock); in ctl_lun_has_media()
4859 if ((lun->flags & CTL_LUN_REMOVABLE) && in ctl_lun_has_media()
4860 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { in ctl_lun_has_media()
4863 msg.hdr.nexus.initid = -1; in ctl_lun_has_media()
4864 msg.hdr.nexus.targ_port = -1; in ctl_lun_has_media()
4865 msg.hdr.nexus.targ_lun = lun->lun; in ctl_lun_has_media()
4866 msg.hdr.nexus.targ_mapped_lun = lun->lun; in ctl_lun_has_media()
4879 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_lun_ejected()
4881 mtx_lock(&lun->lun_lock); in ctl_lun_ejected()
4882 lun->flags |= CTL_LUN_EJECTED; in ctl_lun_ejected()
4883 mtx_unlock(&lun->lun_lock); in ctl_lun_ejected()
4890 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_lun_primary()
4892 mtx_lock(&lun->lun_lock); in ctl_lun_primary()
4893 lun->flags |= CTL_LUN_PRIMARY_SC; in ctl_lun_primary()
4894 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); in ctl_lun_primary()
4895 mtx_unlock(&lun->lun_lock); in ctl_lun_primary()
4903 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_lun_secondary()
4905 mtx_lock(&lun->lun_lock); in ctl_lun_secondary()
4906 lun->flags &= ~CTL_LUN_PRIMARY_SC; in ctl_lun_secondary()
4907 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); in ctl_lun_secondary()
4908 mtx_unlock(&lun->lun_lock); in ctl_lun_secondary()
4916 * Returns 0 for success, non-zero (errno) for failure.
4924 lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_remove_lun()
4928 mtx_lock(&lun->lun_lock); in ctl_remove_lun()
4929 lun->flags |= CTL_LUN_INVALID; in ctl_remove_lun()
4936 if (LIST_EMPTY(&lun->ooa_queue)) { in ctl_remove_lun()
4937 mtx_unlock(&lun->lun_lock); in ctl_remove_lun()
4940 mtx_unlock(&lun->lun_lock); in ctl_remove_lun()
4948 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_lun_capacity_changed()
4951 mtx_lock(&lun->lun_lock); in ctl_lun_capacity_changed()
4952 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); in ctl_lun_capacity_changed()
4953 mtx_unlock(&lun->lun_lock); in ctl_lun_capacity_changed()
4954 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { in ctl_lun_capacity_changed()
4958 msg.hdr.nexus.initid = -1; in ctl_lun_capacity_changed()
4959 msg.hdr.nexus.targ_port = -1; in ctl_lun_capacity_changed()
4960 msg.hdr.nexus.targ_lun = lun->lun; in ctl_lun_capacity_changed()
4961 msg.hdr.nexus.targ_mapped_lun = lun->lun; in ctl_lun_capacity_changed()
4974 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_lun_nsdata_ids()
4977 if (lun->lun_devid == NULL) in ctl_lun_nsdata_ids()
4981 lun->lun_devid->data, lun->lun_devid->len, scsi_devid_is_lun_naa); in ctl_lun_nsdata_ids()
4983 if (idd->length == 16) { in ctl_lun_nsdata_ids()
4984 memcpy(nsdata->nguid, idd->identifier, 16); in ctl_lun_nsdata_ids()
4987 if (idd->length == 8) { in ctl_lun_nsdata_ids()
4988 memcpy(nsdata->eui64, idd->identifier, 8); in ctl_lun_nsdata_ids()
4994 lun->lun_devid->data, lun->lun_devid->len, scsi_devid_is_lun_eui64); in ctl_lun_nsdata_ids()
4996 if (idd->length == 8) { in ctl_lun_nsdata_ids()
4997 memcpy(nsdata->eui64, idd->identifier, 8); in ctl_lun_nsdata_ids()
5006 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; in ctl_lun_nvme_ids()
5012 if (lun->lun_devid == NULL) in ctl_lun_nvme_ids()
5016 lun->lun_devid->data, lun->lun_devid->len, scsi_devid_is_lun_naa); in ctl_lun_nvme_ids()
5018 lun->lun_devid->data, lun->lun_devid->len, scsi_devid_is_lun_eui64); in ctl_lun_nvme_ids()
5020 lun->lun_devid->data, lun->lun_devid->len, scsi_devid_is_lun_uuid); in ctl_lun_nvme_ids()
5025 if ((naa != NULL && naa->length == 8) || eui64 != NULL) { in ctl_lun_nvme_ids()
5029 if (naa != NULL && naa->length == 8) in ctl_lun_nvme_ids()
5030 memcpy(p, naa->identifier, 8); in ctl_lun_nvme_ids()
5032 memcpy(p, eui64->identifier, 8); in ctl_lun_nvme_ids()
5037 if (naa != NULL && naa->length == 16) { in ctl_lun_nvme_ids()
5041 memcpy(p, naa->identifier, 16); in ctl_lun_nvme_ids()
5048 *p++ = uuid->length; in ctl_lun_nvme_ids()
5050 memcpy(p, uuid->identifier, uuid->length); in ctl_lun_nvme_ids()
5051 p += uuid->length; in ctl_lun_nvme_ids()
5068 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || in ctl_config_move_done()
5069 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && in ctl_config_move_done()
5070 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || in ctl_config_move_done()
5071 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { in ctl_config_move_done()
5077 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) in ctl_config_move_done()
5085 * - call ctl_scsiio() again? We don't do this for data in ctl_config_move_done()
5092 * - Call some other function once the data is in? in ctl_config_move_done()
5099 switch (io->io_hdr.io_type) { in ctl_config_move_done()
5101 retval = ctl_scsiio(&io->scsiio); in ctl_config_move_done()
5105 retval = ctl_nvmeio(&io->nvmeio); in ctl_config_move_done()
5129 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && in ctl_data_submit_done()
5130 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && in ctl_data_submit_done()
5131 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || in ctl_data_submit_done()
5132 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { in ctl_data_submit_done()
5156 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && in ctl_config_write_done()
5157 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && in ctl_config_write_done()
5158 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || in ctl_config_write_done()
5159 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { in ctl_config_write_done()
5168 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) in ctl_config_write_done()
5183 	 * If there was some error, we are done -- skip the data transfer.	 in ctl_config_read_done()
5185 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || in ctl_config_read_done()
5186 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && in ctl_config_read_done()
5187 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { in ctl_config_read_done()
5188 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) in ctl_config_read_done()
5203 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { in ctl_config_read_done()
5222 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_scsi_release()
5232 mtx_lock(&lun->lun_lock); in ctl_scsi_release()
5241 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) in ctl_scsi_release()
5242 lun->flags &= ~CTL_LUN_RESERVED; in ctl_scsi_release()
5244 mtx_unlock(&lun->lun_lock); in ctl_scsi_release()
5259 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_scsi_reserve()
5269 mtx_lock(&lun->lun_lock); in ctl_scsi_reserve()
5270 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { in ctl_scsi_reserve()
5275 /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ in ctl_scsi_reserve()
5276 if (lun->flags & CTL_LUN_PR_RESERVED) { in ctl_scsi_reserve()
5281 lun->flags |= CTL_LUN_RESERVED; in ctl_scsi_reserve()
5282 lun->res_idx = residx; in ctl_scsi_reserve()
5286 mtx_unlock(&lun->lun_lock); in ctl_scsi_reserve()
5300 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; in ctl_start_stop()
5302 if ((cdb->how & SSS_PC_MASK) == 0) { in ctl_start_stop()
5303 if ((lun->flags & CTL_LUN_PR_RESERVED) && in ctl_start_stop()
5304 (cdb->how & SSS_START) == 0) { in ctl_start_stop()
5307 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_start_stop()
5309 (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { in ctl_start_stop()
5316 if ((cdb->how & SSS_LOEJ) && in ctl_start_stop()
5317 (lun->flags & CTL_LUN_REMOVABLE) == 0) { in ctl_start_stop()
5328 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && in ctl_start_stop()
5329 lun->prevent_count > 0) { in ctl_start_stop()
5332 /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? in ctl_start_stop()
5340 retval = lun->backend->config_write((union ctl_io *)ctsio); in ctl_start_stop()
5354 cdb = (struct scsi_prevent *)ctsio->cdb; in ctl_prevent_allow()
5356 if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { in ctl_prevent_allow()
5362 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_prevent_allow()
5363 mtx_lock(&lun->lun_lock); in ctl_prevent_allow()
5364 if ((cdb->how & PR_PREVENT) && in ctl_prevent_allow()
5365 ctl_is_set(lun->prevent, initidx) == 0) { in ctl_prevent_allow()
5366 ctl_set_mask(lun->prevent, initidx); in ctl_prevent_allow()
5367 lun->prevent_count++; in ctl_prevent_allow()
5368 } else if ((cdb->how & PR_PREVENT) == 0 && in ctl_prevent_allow()
5369 ctl_is_set(lun->prevent, initidx)) { in ctl_prevent_allow()
5370 ctl_clear_mask(lun->prevent, initidx); in ctl_prevent_allow()
5371 lun->prevent_count--; in ctl_prevent_allow()
5373 mtx_unlock(&lun->lun_lock); in ctl_prevent_allow()
5374 retval = lun->backend->config_write((union ctl_io *)ctsio); in ctl_prevent_allow()
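/*
 * Medium-removal prevention is tracked with the same bitmask helpers used
 * elsewhere: lun->prevent (allocated in ctl_add_lun() above for removable
 * LUNs, one bit per initiator rounded up to whole 32-bit words) records
 * which initiators currently have PREVENT in effect, and lun->prevent_count
 * is the number of set bits.  PREVENT ALLOW MEDIUM REMOVAL above only flips
 * the calling initiator's own bit and adjusts the count, and
 * ctl_start_stop() above refuses a START STOP UNIT eject (LOEJ) while
 * prevent_count is non-zero.
 */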
5398 switch (ctsio->cdb[0]) { in ctl_sync_cache()
5401 cdb = (struct scsi_sync_cache *)ctsio->cdb; in ctl_sync_cache()
5403 starting_lba = scsi_4btoul(cdb->begin_lba); in ctl_sync_cache()
5404 block_count = scsi_2btoul(cdb->lb_count); in ctl_sync_cache()
5405 byte2 = cdb->byte2; in ctl_sync_cache()
5410 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; in ctl_sync_cache()
5412 starting_lba = scsi_8btou64(cdb->begin_lba); in ctl_sync_cache()
5413 block_count = scsi_4btoul(cdb->lb_count); in ctl_sync_cache()
5414 byte2 = cdb->byte2; in ctl_sync_cache()
5430 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { in ctl_sync_cache()
5432 MAX(starting_lba, lun->be_lun->maxlba + 1)); in ctl_sync_cache()
5437 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_sync_cache()
5438 lbalen->lba = starting_lba; in ctl_sync_cache()
5439 lbalen->len = block_count; in ctl_sync_cache()
5440 lbalen->flags = byte2; in ctl_sync_cache()
5441 retval = lun->backend->config_write((union ctl_io *)ctsio); in ctl_sync_cache()
5455 cdb = (struct scsi_format *)ctsio->cdb; in ctl_format()
5458 if (cdb->byte2 & SF_FMTDATA) { in ctl_format()
5459 if (cdb->byte2 & SF_LONGLIST) in ctl_format()
5465 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) in ctl_format()
5467 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); in ctl_format()
5468 ctsio->kern_data_len = length; in ctl_format()
5469 ctsio->kern_total_len = length; in ctl_format()
5470 ctsio->kern_rel_offset = 0; in ctl_format()
5471 ctsio->kern_sg_entries = 0; in ctl_format()
5472 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_format()
5473 ctsio->be_move_done = ctl_config_move_done; in ctl_format()
5481 if (cdb->byte2 & SF_FMTDATA) { in ctl_format()
5482 if (cdb->byte2 & SF_LONGLIST) { in ctl_format()
5486 ctsio->kern_data_ptr; in ctl_format()
5488 defect_list_len = scsi_4btoul(header->defect_list_len); in ctl_format()
5502 ctsio->kern_data_ptr; in ctl_format()
5504 defect_list_len = scsi_2btoul(header->defect_list_len); in ctl_format()
5520 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { in ctl_format()
5521 free(ctsio->kern_data_ptr, M_CTL); in ctl_format()
5522 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; in ctl_format()
5541 switch (ctsio->cdb[0]) { in ctl_read_buffer()
5545 cdb = (struct scsi_read_buffer *)ctsio->cdb; in ctl_read_buffer()
5546 buffer_offset = scsi_3btoul(cdb->offset); in ctl_read_buffer()
5547 len = scsi_3btoul(cdb->length); in ctl_read_buffer()
5548 byte2 = cdb->byte2; in ctl_read_buffer()
5554 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; in ctl_read_buffer()
5555 buffer_offset = scsi_8btou64(cdb->offset); in ctl_read_buffer()
5556 len = scsi_4btoul(cdb->length); in ctl_read_buffer()
5557 byte2 = cdb->byte2; in ctl_read_buffer()
5581 ctsio->kern_data_ptr = descr; in ctl_read_buffer()
5584 ctsio->kern_data_ptr = echo_descr; in ctl_read_buffer()
5587 if (lun->write_buffer == NULL) { in ctl_read_buffer()
5588 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, in ctl_read_buffer()
5591 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; in ctl_read_buffer()
5593 ctsio->kern_data_len = len; in ctl_read_buffer()
5594 ctsio->kern_total_len = len; in ctl_read_buffer()
5595 ctsio->kern_rel_offset = 0; in ctl_read_buffer()
5596 ctsio->kern_sg_entries = 0; in ctl_read_buffer()
5598 ctsio->be_move_done = ctl_config_move_done; in ctl_read_buffer()
5612 cdb = (struct scsi_write_buffer *)ctsio->cdb; in ctl_write_buffer()
5614 len = scsi_3btoul(cdb->length); in ctl_write_buffer()
5615 buffer_offset = scsi_3btoul(cdb->offset); in ctl_write_buffer()
5628 if (lun->write_buffer == NULL) { in ctl_write_buffer()
5629 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, in ctl_write_buffer()
5640 if (ctsio->kern_data_ptr == NULL) { in ctl_write_buffer()
5641 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; in ctl_write_buffer()
5642 ctsio->kern_data_len = len; in ctl_write_buffer()
5643 ctsio->kern_total_len = len; in ctl_write_buffer()
5644 ctsio->kern_rel_offset = 0; in ctl_write_buffer()
5645 ctsio->kern_sg_entries = 0; in ctl_write_buffer()
5646 ctsio->be_move_done = ctl_config_move_done; in ctl_write_buffer()
5667 ctsio = &io->scsiio; in ctl_write_same_cont()
5668 ctsio->io_hdr.status = CTL_STATUS_NONE; in ctl_write_same_cont()
5670 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_write_same_cont()
5671 lbalen->lba += lbalen->len; in ctl_write_same_cont()
5672 if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { in ctl_write_same_cont()
5673 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; in ctl_write_same_cont()
5674 lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; in ctl_write_same_cont()
5678 retval = lun->backend->config_write((union ctl_io *)ctsio); in ctl_write_same_cont()
5695 switch (ctsio->cdb[0]) { in ctl_write_same()
5699 cdb = (struct scsi_write_same_10 *)ctsio->cdb; in ctl_write_same()
5701 lba = scsi_4btoul(cdb->addr); in ctl_write_same()
5702 num_blocks = scsi_2btoul(cdb->length); in ctl_write_same()
5703 byte2 = cdb->byte2; in ctl_write_same()
5709 cdb = (struct scsi_write_same_16 *)ctsio->cdb; in ctl_write_same()
5711 lba = scsi_8btou64(cdb->addr); in ctl_write_same()
5712 num_blocks = scsi_4btoul(cdb->length); in ctl_write_same()
5713 byte2 = cdb->byte2; in ctl_write_same()
5738 * check is to catch wrap-around problems. If the lba + num blocks in ctl_write_same()
5742 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_write_same()
5745 MAX(lba, lun->be_lun->maxlba + 1)); in ctl_write_same()
5753 val = dnvlist_get_string(lun->be_lun->options, in ctl_write_same()
5757 if ((lun->be_lun->maxlba + 1) - lba > ival) { in ctl_write_same()
5760 /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 7 : 10, in ctl_write_same()
5765 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { in ctl_write_same()
5766 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; in ctl_write_same()
5767 ctsio->io_cont = ctl_write_same_cont; in ctl_write_same()
5770 num_blocks = (lun->be_lun->maxlba + 1) - lba; in ctl_write_same()
5773 len = lun->be_lun->blocksize; in ctl_write_same()
5780 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { in ctl_write_same()
5781 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); in ctl_write_same()
5782 ctsio->kern_data_len = len; in ctl_write_same()
5783 ctsio->kern_total_len = len; in ctl_write_same()
5784 ctsio->kern_rel_offset = 0; in ctl_write_same()
5785 ctsio->kern_sg_entries = 0; in ctl_write_same()
5786 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_write_same()
5787 ctsio->be_move_done = ctl_config_move_done; in ctl_write_same()
5793 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_write_same()
5794 lbalen->lba = lba; in ctl_write_same()
5795 lbalen->len = num_blocks; in ctl_write_same()
5796 lbalen->flags = byte2; in ctl_write_same()
5797 retval = lun->backend->config_write((union ctl_io *)ctsio); in ctl_write_same()
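/*
 * ctl_write_same() copes with ranges that do not fit the 32-bit length the
 * backend works with (which can happen when WRITE SAME is asked to run to
 * the last logical block of a large LUN) by chunking: CTL_FLAG_IO_CONT is
 * set and io_cont points at ctl_write_same_cont(), so after each backend
 * config_write completes the continuation advances lbalen->lba past the
 * chunk just written, clamps the final chunk to what remains before
 * maxlba + 1, and clears IO_CONT on the last piece.  Only a single block of
 * data (len = blocksize) is ever carried in kern_data_ptr; the backend
 * replicates it across the whole LBA range described in the
 * CTL_PRIV_LBA_LEN private area.
 */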
5817 cdb = (struct scsi_unmap *)ctsio->cdb; in ctl_unmap()
5818 len = scsi_2btoul(cdb->length); in ctl_unmap()
5819 byte2 = cdb->byte2; in ctl_unmap()
5825 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { in ctl_unmap()
5826 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); in ctl_unmap()
5827 ctsio->kern_data_len = len; in ctl_unmap()
5828 ctsio->kern_total_len = len; in ctl_unmap()
5829 ctsio->kern_rel_offset = 0; in ctl_unmap()
5830 ctsio->kern_sg_entries = 0; in ctl_unmap()
5831 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_unmap()
5832 ctsio->be_move_done = ctl_config_move_done; in ctl_unmap()
5838 len = ctsio->kern_total_len - ctsio->kern_data_resid; in ctl_unmap()
5839 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; in ctl_unmap()
5841 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || in ctl_unmap()
5842 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || in ctl_unmap()
5843 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { in ctl_unmap()
5852 len = scsi_2btoul(hdr->desc_length); in ctl_unmap()
5858 lba = scsi_8btou64(range->lba); in ctl_unmap()
5859 num_blocks = scsi_4btoul(range->length); in ctl_unmap()
5860 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_unmap()
5863 MAX(lba, lun->be_lun->maxlba + 1)); in ctl_unmap()
5875 len = (uint8_t *)endnz - (uint8_t *)buf; in ctl_unmap()
5881 mtx_lock(&lun->lun_lock); in ctl_unmap()
5883 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_unmap()
5884 ptrlen->ptr = (void *)buf; in ctl_unmap()
5885 ptrlen->len = len; in ctl_unmap()
5886 ptrlen->flags = byte2; in ctl_unmap()
5888 mtx_unlock(&lun->lun_lock); in ctl_unmap()
5890 retval = lun->backend->config_write((union ctl_io *)ctsio); in ctl_unmap()
5894 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { in ctl_unmap()
5895 free(ctsio->kern_data_ptr, M_CTL); in ctl_unmap()
5896 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; in ctl_unmap()
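/*
 * The UNMAP parameter list fetched above is a scsi_unmap_header followed by
 * an array of LBA/length block descriptors.  ctl_unmap() cross-checks the
 * received length against both length fields in the header, validates each
 * descriptor's LBA/length pair against maxlba, drops any trailing
 * zero-length descriptors (endnz marks the end of the last non-empty one),
 * and then hands the remaining descriptor array to the backend through the
 * CTL_PRIV_LBA_LEN private area as a ptr/len/flags triple rather than a
 * single LBA range.
 */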
5911 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_default_page_handler()
5914 current_cp = (page_index->page_data + (page_index->page_len * in ctl_default_page_handler()
5917 mtx_lock(&lun->lun_lock); in ctl_default_page_handler()
5918 if (memcmp(current_cp, page_ptr, page_index->page_len)) { in ctl_default_page_handler()
5919 memcpy(current_cp, page_ptr, page_index->page_len); in ctl_default_page_handler()
5924 mtx_unlock(&lun->lun_lock); in ctl_default_page_handler()
5927 ctl_get_initindex(&ctsio->io_hdr.nexus), in ctl_default_page_handler()
5928 page_index->page_code, page_index->subpage); in ctl_default_page_handler()
5939 if (lun->ie_asc == 0) in ctl_ie_timer()
5942 if (lun->MODE_IE.mrie == SIEP_MRIE_UA) in ctl_ie_timer()
5943 ctl_est_ua_all(lun, -1, CTL_UA_IE); in ctl_ie_timer()
5945 lun->ie_reported = 0; in ctl_ie_timer()
5947 if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { in ctl_ie_timer()
5948 lun->ie_reportcnt++; in ctl_ie_timer()
5949 t = scsi_4btoul(lun->MODE_IE.interval_timer); in ctl_ie_timer()
5952 callout_schedule_sbt(&lun->ie_callout, SBT_1S / 10 * t, in ctl_ie_timer()
5968 mtx_lock(&lun->lun_lock); in ctl_ie_page_handler()
5969 if (pg->info_flags & SIEP_FLAGS_TEST) { in ctl_ie_page_handler()
5970 lun->ie_asc = 0x5d; in ctl_ie_page_handler()
5971 lun->ie_ascq = 0xff; in ctl_ie_page_handler()
5972 if (pg->mrie == SIEP_MRIE_UA) { in ctl_ie_page_handler()
5973 ctl_est_ua_all(lun, -1, CTL_UA_IE); in ctl_ie_page_handler()
5974 lun->ie_reported = 1; in ctl_ie_page_handler()
5976 ctl_clr_ua_all(lun, -1, CTL_UA_IE); in ctl_ie_page_handler()
5977 lun->ie_reported = -1; in ctl_ie_page_handler()
5979 lun->ie_reportcnt = 1; in ctl_ie_page_handler()
5980 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { in ctl_ie_page_handler()
5981 lun->ie_reportcnt++; in ctl_ie_page_handler()
5982 t = scsi_4btoul(pg->interval_timer); in ctl_ie_page_handler()
5985 callout_reset_sbt(&lun->ie_callout, SBT_1S / 10 * t, in ctl_ie_page_handler()
5989 lun->ie_asc = 0; in ctl_ie_page_handler()
5990 lun->ie_ascq = 0; in ctl_ie_page_handler()
5991 lun->ie_reported = 1; in ctl_ie_page_handler()
5992 ctl_clr_ua_all(lun, -1, CTL_UA_IE); in ctl_ie_page_handler()
5993 lun->ie_reportcnt = UINT32_MAX; in ctl_ie_page_handler()
5994 callout_stop(&lun->ie_callout); in ctl_ie_page_handler()
5996 mtx_unlock(&lun->lun_lock); in ctl_ie_page_handler()
6014 ctsio = &io->scsiio; in ctl_do_mode_select()
6019 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; in ctl_do_mode_select()
6020 len_left = &modepage_info->header.len_left; in ctl_do_mode_select()
6021 len_used = &modepage_info->header.len_used; in ctl_do_mode_select()
6026 (ctsio->kern_data_ptr + *len_used); in ctl_do_mode_select()
6029 free(ctsio->kern_data_ptr, M_CTL); in ctl_do_mode_select()
6034 free(ctsio->kern_data_ptr, M_CTL); in ctl_do_mode_select()
6039 } else if ((page_header->page_code & SMPH_SPF) in ctl_do_mode_select()
6041 free(ctsio->kern_data_ptr, M_CTL); in ctl_do_mode_select()
6051 page_index = &lun->mode_pages.index[i]; in ctl_do_mode_select()
6052 if (lun->be_lun->lun_type == T_DIRECT && in ctl_do_mode_select()
6053 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) in ctl_do_mode_select()
6055 if (lun->be_lun->lun_type == T_PROCESSOR && in ctl_do_mode_select()
6056 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) in ctl_do_mode_select()
6058 if (lun->be_lun->lun_type == T_CDROM && in ctl_do_mode_select()
6059 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) in ctl_do_mode_select()
6062 if ((page_index->page_code & SMPH_PC_MASK) != in ctl_do_mode_select()
6063 (page_header->page_code & SMPH_PC_MASK)) in ctl_do_mode_select()
6070 if (((page_index->page_code & SMPH_SPF) == 0) in ctl_do_mode_select()
6071 && ((page_header->page_code & SMPH_SPF) == 0)) { in ctl_do_mode_select()
6072 page_len = page_header->page_length; in ctl_do_mode_select()
6080 if ((page_index->page_code & SMPH_SPF) in ctl_do_mode_select()
6081 && (page_header->page_code & SMPH_SPF)) { in ctl_do_mode_select()
6085 if (page_index->subpage == sph->subpage) { in ctl_do_mode_select()
6086 page_len = scsi_2btoul(sph->page_length); in ctl_do_mode_select()
6097 || (page_index->select_handler == NULL)) { in ctl_do_mode_select()
6104 free(ctsio->kern_data_ptr, M_CTL); in ctl_do_mode_select()
6109 if (page_index->page_code & SMPH_SPF) { in ctl_do_mode_select()
6122 if (page_len != page_index->page_len - page_len_offset - page_len_size) { in ctl_do_mode_select()
6129 free(ctsio->kern_data_ptr, M_CTL); in ctl_do_mode_select()
6133 if (*len_left < page_index->page_len) { in ctl_do_mode_select()
6134 free(ctsio->kern_data_ptr, M_CTL); in ctl_do_mode_select()
6144 for (i = 0; i < page_index->page_len; i++) { in ctl_do_mode_select()
6150 change_mask = page_index->page_data + in ctl_do_mode_select()
6151 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; in ctl_do_mode_select()
6152 current_byte = page_index->page_data + in ctl_do_mode_select()
6153 (page_index->page_len * CTL_PAGE_CURRENT) + i; in ctl_do_mode_select()
6167 for (j = 7; j >= 0; j--) { in ctl_do_mode_select()
6180 free(ctsio->kern_data_ptr, M_CTL); in ctl_do_mode_select()
6190 *len_left -= page_index->page_len; in ctl_do_mode_select()
6191 *len_used += page_index->page_len; in ctl_do_mode_select()
6193 retval = page_index->select_handler(ctsio, page_index, in ctl_do_mode_select()
6214 free(ctsio->kern_data_ptr, M_CTL); in ctl_do_mode_select()
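/*
 * ctl_do_mode_select() above is the continuation routine that walks the
 * MODE SELECT parameter list one page at a time: it matches each incoming
 * page header against lun->mode_pages.index[] (honoring the LUN type and
 * the SPF/subpage format), validates the encoded page length, rejects
 * attempts to modify bits that are zero in the CHANGEABLE copy of the
 * page, and only then calls the page's select_handler before advancing
 * len_left/len_used to the next page.
 */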
6231 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_mode_select()
6232 switch (ctsio->cdb[0]) { in ctl_mode_select()
6236 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; in ctl_mode_select()
6238 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; in ctl_mode_select()
6239 param_len = cdb->length; in ctl_mode_select()
6246 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; in ctl_mode_select()
6248 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; in ctl_mode_select()
6249 param_len = scsi_2btoul(cdb->length); in ctl_mode_select()
6270 mtx_lock(&lun->lun_lock); in ctl_mode_select()
6272 mtx_unlock(&lun->lun_lock); in ctl_mode_select()
6274 ctl_isc_announce_mode(lun, -1, in ctl_mode_select()
6275 lun->mode_pages.index[i].page_code & SMPH_PC_MASK, in ctl_mode_select()
6276 lun->mode_pages.index[i].subpage); in ctl_mode_select()
6284 * From SPC-3: in ctl_mode_select()
6285 * "A parameter list length of zero indicates that the Data-Out Buffer in ctl_mode_select()
6310 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { in ctl_mode_select()
6311 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); in ctl_mode_select()
6312 ctsio->kern_data_len = param_len; in ctl_mode_select()
6313 ctsio->kern_total_len = param_len; in ctl_mode_select()
6314 ctsio->kern_rel_offset = 0; in ctl_mode_select()
6315 ctsio->kern_sg_entries = 0; in ctl_mode_select()
6316 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_mode_select()
6317 ctsio->be_move_done = ctl_config_move_done; in ctl_mode_select()
6323 switch (ctsio->cdb[0]) { in ctl_mode_select()
6327 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; in ctl_mode_select()
6328 bd_len = mh6->blk_desc_len; in ctl_mode_select()
6334 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; in ctl_mode_select()
6335 bd_len = scsi_2btoul(mh10->blk_desc_len); in ctl_mode_select()
6339 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); in ctl_mode_select()
6343 free(ctsio->kern_data_ptr, M_CTL); in ctl_mode_select()
6355 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; in ctl_mode_select()
6356 ctsio->io_cont = ctl_do_mode_select; in ctl_mode_select()
6359 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; in ctl_mode_select()
6361 modepage_info->header.len_left = param_len - header_size - bd_len; in ctl_mode_select()
6362 modepage_info->header.len_used = header_size + bd_len; in ctl_mode_select()
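/*
 * ctl_mode_select() parses the MODE SELECT(6)/(10) CDB, allocates a
 * buffer for the parameter list if one has not been supplied yet, skips
 * the mode header and any block descriptors, and then chains to
 * ctl_do_mode_select() via CTL_FLAG_IO_CONT/io_cont once the data-out
 * phase completes.  When the RTD (revert to defaults) bit is set, the
 * pages are reset under the LUN lock and re-announced through
 * ctl_isc_announce_mode().
 */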
6380 switch (ctsio->cdb[0]) { in ctl_mode_sense()
6384 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; in ctl_mode_sense()
6387 if (cdb->byte2 & SMS_DBD) in ctl_mode_sense()
6393 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; in ctl_mode_sense()
6394 page_code = cdb->page & SMS_PAGE_CODE; in ctl_mode_sense()
6395 subpage = cdb->subpage; in ctl_mode_sense()
6396 alloc_len = cdb->length; in ctl_mode_sense()
6402 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; in ctl_mode_sense()
6405 if (cdb->byte2 & SMS_DBD) { in ctl_mode_sense()
6407 } else if (lun->be_lun->lun_type == T_DIRECT) { in ctl_mode_sense()
6408 if (cdb->byte2 & SMS10_LLBAA) { in ctl_mode_sense()
6417 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; in ctl_mode_sense()
6418 page_code = cdb->page & SMS_PAGE_CODE; in ctl_mode_sense()
6419 subpage = cdb->subpage; in ctl_mode_sense()
6420 alloc_len = scsi_2btoul(cdb->length); in ctl_mode_sense()
6443 * reserved according to SPC-3. in ctl_mode_sense()
6458 page_index = &lun->mode_pages.index[i]; in ctl_mode_sense()
6461 if (lun->be_lun->lun_type == T_DIRECT && in ctl_mode_sense()
6462 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) in ctl_mode_sense()
6464 if (lun->be_lun->lun_type == T_PROCESSOR && in ctl_mode_sense()
6465 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) in ctl_mode_sense()
6467 if (lun->be_lun->lun_type == T_CDROM && in ctl_mode_sense()
6468 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) in ctl_mode_sense()
6475 if ((page_index->subpage != 0) in ctl_mode_sense()
6479 page_len += page_index->page_len; in ctl_mode_sense()
6489 page_index = &lun->mode_pages.index[i]; in ctl_mode_sense()
6492 if (lun->be_lun->lun_type == T_DIRECT && in ctl_mode_sense()
6493 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) in ctl_mode_sense()
6495 if (lun->be_lun->lun_type == T_PROCESSOR && in ctl_mode_sense()
6496 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) in ctl_mode_sense()
6498 if (lun->be_lun->lun_type == T_CDROM && in ctl_mode_sense()
6499 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) in ctl_mode_sense()
6503 if ((page_index->page_code & SMPH_PC_MASK) != page_code) in ctl_mode_sense()
6507 if ((page_index->subpage != subpage) in ctl_mode_sense()
6511 page_len += page_index->page_len; in ctl_mode_sense()
6530 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_mode_sense()
6531 ctsio->kern_sg_entries = 0; in ctl_mode_sense()
6532 ctsio->kern_rel_offset = 0; in ctl_mode_sense()
6533 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_mode_sense()
6534 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_mode_sense()
6536 switch (ctsio->cdb[0]) { in ctl_mode_sense()
6540 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; in ctl_mode_sense()
6542 header->datalen = MIN(total_len - 1, 254); in ctl_mode_sense()
6543 if (lun->be_lun->lun_type == T_DIRECT) { in ctl_mode_sense()
6544 header->dev_specific = 0x10; /* DPOFUA */ in ctl_mode_sense()
6545 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || in ctl_mode_sense()
6546 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) in ctl_mode_sense()
6547 header->dev_specific |= 0x80; /* WP */ in ctl_mode_sense()
6549 header->block_descr_len = bd_len; in ctl_mode_sense()
6557 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; in ctl_mode_sense()
6559 datalen = MIN(total_len - 2, 65533); in ctl_mode_sense()
6560 scsi_ulto2b(datalen, header->datalen); in ctl_mode_sense()
6561 if (lun->be_lun->lun_type == T_DIRECT) { in ctl_mode_sense()
6562 header->dev_specific = 0x10; /* DPOFUA */ in ctl_mode_sense()
6563 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || in ctl_mode_sense()
6564 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) in ctl_mode_sense()
6565 header->dev_specific |= 0x80; /* WP */ in ctl_mode_sense()
6568 header->flags |= SMH_LONGLBA; in ctl_mode_sense()
6569 scsi_ulto2b(bd_len, header->block_descr_len); in ctl_mode_sense()
6574 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); in ctl_mode_sense()
6582 if (lun->be_lun->lun_type == T_DIRECT) { in ctl_mode_sense()
6585 if (lun->be_lun->maxlba != 0) in ctl_mode_sense()
6586 scsi_u64to8b(lun->be_lun->maxlba + 1, in ctl_mode_sense()
6587 bd->num_blocks); in ctl_mode_sense()
6588 scsi_ulto4b(lun->be_lun->blocksize, in ctl_mode_sense()
6589 bd->block_len); in ctl_mode_sense()
6592 if (lun->be_lun->maxlba != 0) in ctl_mode_sense()
6593 scsi_ulto4b(MIN(lun->be_lun->maxlba+1, in ctl_mode_sense()
6594 UINT32_MAX), bd->num_blocks); in ctl_mode_sense()
6595 scsi_ulto3b(lun->be_lun->blocksize, in ctl_mode_sense()
6596 bd->block_len); in ctl_mode_sense()
6600 scsi_ulto3b(0, bd->block_len); in ctl_mode_sense()
6612 page_index = &lun->mode_pages.index[i]; in ctl_mode_sense()
6613 if (lun->be_lun->lun_type == T_DIRECT && in ctl_mode_sense()
6614 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) in ctl_mode_sense()
6616 if (lun->be_lun->lun_type == T_PROCESSOR && in ctl_mode_sense()
6617 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) in ctl_mode_sense()
6619 if (lun->be_lun->lun_type == T_CDROM && in ctl_mode_sense()
6620 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) in ctl_mode_sense()
6629 if ((page_index->subpage != 0) in ctl_mode_sense()
6637 if (page_index->sense_handler != NULL) in ctl_mode_sense()
6638 			page_index->sense_handler(ctsio, page_index, pc); in ctl_mode_sense()
6640 memcpy(ctsio->kern_data_ptr + data_used, in ctl_mode_sense()
6641 page_index->page_data + in ctl_mode_sense()
6642 (page_index->page_len * pc), in ctl_mode_sense()
6643 page_index->page_len); in ctl_mode_sense()
6644 data_used += page_index->page_len; in ctl_mode_sense()
6656 page_index = &lun->mode_pages.index[i]; in ctl_mode_sense()
6659 if ((page_index->page_code & SMPH_PC_MASK) != page_code) in ctl_mode_sense()
6663 if ((page_index->subpage != subpage) in ctl_mode_sense()
6668 if (lun->be_lun->lun_type == T_DIRECT && in ctl_mode_sense()
6669 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) in ctl_mode_sense()
6671 if (lun->be_lun->lun_type == T_PROCESSOR && in ctl_mode_sense()
6672 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) in ctl_mode_sense()
6674 if (lun->be_lun->lun_type == T_CDROM && in ctl_mode_sense()
6675 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) in ctl_mode_sense()
6682 if (page_index->sense_handler != NULL) in ctl_mode_sense()
6683 			page_index->sense_handler(ctsio, page_index, pc); in ctl_mode_sense()
6685 memcpy(ctsio->kern_data_ptr + data_used, in ctl_mode_sense()
6686 page_index->page_data + in ctl_mode_sense()
6687 (page_index->page_len * pc), in ctl_mode_sense()
6688 page_index->page_len); in ctl_mode_sense()
6689 data_used += page_index->page_len; in ctl_mode_sense()
6696 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_mode_sense()
6697 ctsio->be_move_done = ctl_config_move_done; in ctl_mode_sense()
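/*
 * MODE SENSE(6)/(10) above builds the reply in three steps: the mode
 * header (DPOFUA always set for direct-access LUNs, WP added for
 * read-only or software-write-protected LUNs, e.g. 0x10 | 0x80 == 0x90),
 * an optional short or long block descriptor carrying capacity and block
 * size, and then either every applicable page or just the requested
 * page/subpage, each rendered by its sense_handler when one exists.
 *
 * As a hypothetical initiator-side sketch (not part of this driver), a
 * MODE SENSE(10) for the current values of the caching page would use
 *     uint8_t cdb[10] = { 0x5a, 0x00, 0x08, 0x00, 0, 0, 0, 0x00, 0xff, 0 };
 * i.e. opcode, flags, PC=current | page 0x08, subpage 0, reserved bytes,
 * a 255-byte allocation length and the control byte; this code answers
 * it with the header, block descriptor and page data assembled above.
 */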
6711 data = (struct scsi_log_temperature *)page_index->page_data; in ctl_temp_log_sense_handler()
6713 scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); in ctl_temp_log_sense_handler()
6714 data->hdr.param_control = SLP_LBIN; in ctl_temp_log_sense_handler()
6715 data->hdr.param_len = sizeof(struct scsi_log_temperature) - in ctl_temp_log_sense_handler()
6717 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", in ctl_temp_log_sense_handler()
6719 data->temperature = strtol(value, NULL, 0); in ctl_temp_log_sense_handler()
6721 data->temperature = 0xff; in ctl_temp_log_sense_handler()
6724 scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); in ctl_temp_log_sense_handler()
6725 data->hdr.param_control = SLP_LBIN; in ctl_temp_log_sense_handler()
6726 data->hdr.param_len = sizeof(struct scsi_log_temperature) - in ctl_temp_log_sense_handler()
6728 if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", in ctl_temp_log_sense_handler()
6730 data->temperature = strtol(value, NULL, 0); in ctl_temp_log_sense_handler()
6732 data->temperature = 0xff; in ctl_temp_log_sense_handler()
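/*
 * The temperature log page handler above fills the TEMPERATURE and
 * REFERENCE TEMPERATURE parameters from the "temperature" and
 * "reftemperature" LUN options when they are configured, and falls back
 * to 0xff when no value is available.
 */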
6746 data = page_index->page_data; in ctl_lbp_log_sense_handler()
6748 if (lun->backend->lun_attr != NULL && in ctl_lbp_log_sense_handler()
6749 (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) in ctl_lbp_log_sense_handler()
6752 scsi_ulto2b(0x0001, phdr->param_code); in ctl_lbp_log_sense_handler()
6753 phdr->param_control = SLP_LBIN | SLP_LP; in ctl_lbp_log_sense_handler()
6754 phdr->param_len = 8; in ctl_lbp_log_sense_handler()
6757 data[4] = 0x02; /* per-pool */ in ctl_lbp_log_sense_handler()
6758 data += phdr->param_len; in ctl_lbp_log_sense_handler()
6761 if (lun->backend->lun_attr != NULL && in ctl_lbp_log_sense_handler()
6762 (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) in ctl_lbp_log_sense_handler()
6765 scsi_ulto2b(0x0002, phdr->param_code); in ctl_lbp_log_sense_handler()
6766 phdr->param_control = SLP_LBIN | SLP_LP; in ctl_lbp_log_sense_handler()
6767 phdr->param_len = 8; in ctl_lbp_log_sense_handler()
6770 data[4] = 0x01; /* per-LUN */ in ctl_lbp_log_sense_handler()
6771 data += phdr->param_len; in ctl_lbp_log_sense_handler()
6774 if (lun->backend->lun_attr != NULL && in ctl_lbp_log_sense_handler()
6775 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) in ctl_lbp_log_sense_handler()
6778 scsi_ulto2b(0x00f1, phdr->param_code); in ctl_lbp_log_sense_handler()
6779 phdr->param_control = SLP_LBIN | SLP_LP; in ctl_lbp_log_sense_handler()
6780 phdr->param_len = 8; in ctl_lbp_log_sense_handler()
6783 data[4] = 0x02; /* per-pool */ in ctl_lbp_log_sense_handler()
6784 data += phdr->param_len; in ctl_lbp_log_sense_handler()
6787 if (lun->backend->lun_attr != NULL && in ctl_lbp_log_sense_handler()
6788 (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) in ctl_lbp_log_sense_handler()
6791 scsi_ulto2b(0x00f2, phdr->param_code); in ctl_lbp_log_sense_handler()
6792 phdr->param_control = SLP_LBIN | SLP_LP; in ctl_lbp_log_sense_handler()
6793 phdr->param_len = 8; in ctl_lbp_log_sense_handler()
6796 data[4] = 0x02; /* per-pool */ in ctl_lbp_log_sense_handler()
6797 data += phdr->param_len; in ctl_lbp_log_sense_handler()
6800 page_index->page_len = data - page_index->page_data; in ctl_lbp_log_sense_handler()
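/*
 * The logical block provisioning log page above emits up to four
 * parameters -- available blocks (0x0001), used blocks (0x0002) and the
 * pool-wide variants (0x00f1/0x00f2) -- but only for attributes the
 * backend exposes through its lun_attr() callback; page_len is then
 * trimmed to the number of bytes actually generated.
 */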
6813 data = (struct stat_page *)page_index->page_data; in ctl_sap_log_sense_handler()
6815 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); in ctl_sap_log_sense_handler()
6816 data->sap.hdr.param_control = SLP_LBIN; in ctl_sap_log_sense_handler()
6817 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - in ctl_sap_log_sense_handler()
6819 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], in ctl_sap_log_sense_handler()
6820 data->sap.read_num); in ctl_sap_log_sense_handler()
6821 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], in ctl_sap_log_sense_handler()
6822 data->sap.write_num); in ctl_sap_log_sense_handler()
6823 if (lun->be_lun->blocksize > 0) { in ctl_sap_log_sense_handler()
6824 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / in ctl_sap_log_sense_handler()
6825 lun->be_lun->blocksize, data->sap.recvieved_lba); in ctl_sap_log_sense_handler()
6826 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / in ctl_sap_log_sense_handler()
6827 lun->be_lun->blocksize, data->sap.transmitted_lba); in ctl_sap_log_sense_handler()
6829 t = &lun->stats.time[CTL_STATS_READ]; in ctl_sap_log_sense_handler()
6830 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), in ctl_sap_log_sense_handler()
6831 data->sap.read_int); in ctl_sap_log_sense_handler()
6832 t = &lun->stats.time[CTL_STATS_WRITE]; in ctl_sap_log_sense_handler()
6833 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), in ctl_sap_log_sense_handler()
6834 data->sap.write_int); in ctl_sap_log_sense_handler()
6835 scsi_u64to8b(0, data->sap.weighted_num); in ctl_sap_log_sense_handler()
6836 scsi_u64to8b(0, data->sap.weighted_int); in ctl_sap_log_sense_handler()
6837 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); in ctl_sap_log_sense_handler()
6838 data->it.hdr.param_control = SLP_LBIN; in ctl_sap_log_sense_handler()
6839 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - in ctl_sap_log_sense_handler()
6842 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); in ctl_sap_log_sense_handler()
6844 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); in ctl_sap_log_sense_handler()
6845 data->it.hdr.param_control = SLP_LBIN; in ctl_sap_log_sense_handler()
6846 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - in ctl_sap_log_sense_handler()
6848 scsi_ulto4b(3, data->ti.exponent); in ctl_sap_log_sense_handler()
6849 scsi_ulto4b(1, data->ti.integer); in ctl_sap_log_sense_handler()
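/*
 * The statistics-and-performance handler above converts per-LUN counters
 * into the SLP_SAP, SLP_IT and SLP_TI log parameters: byte counters are
 * turned into logical-block counts using the LUN block size, and the
 * accumulated bintimes become milliseconds (seconds * 1000 plus
 * frac / (UINT64_MAX / 1000) for the fractional part).
 */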
6862 data = (struct scsi_log_informational_exceptions *)page_index->page_data; in ctl_ie_log_sense_handler()
6864 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); in ctl_ie_log_sense_handler()
6865 data->hdr.param_control = SLP_LBIN; in ctl_ie_log_sense_handler()
6866 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - in ctl_ie_log_sense_handler()
6868 data->ie_asc = lun->ie_asc; in ctl_ie_log_sense_handler()
6869 data->ie_ascq = lun->ie_ascq; in ctl_ie_log_sense_handler()
6870 if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", in ctl_ie_log_sense_handler()
6872 data->temperature = strtol(value, NULL, 0); in ctl_ie_log_sense_handler()
6874 data->temperature = 0xff; in ctl_ie_log_sense_handler()
6890 cdb = (struct scsi_log_sense *)ctsio->cdb; in ctl_log_sense()
6891 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; in ctl_log_sense()
6892 page_code = cdb->page & SLS_PAGE_CODE; in ctl_log_sense()
6893 subpage = cdb->subpage; in ctl_log_sense()
6894 alloc_len = scsi_2btoul(cdb->length); in ctl_log_sense()
6898 page_index = &lun->log_pages.index[i]; in ctl_log_sense()
6901 if ((page_index->page_code & SL_PAGE_CODE) != page_code) in ctl_log_sense()
6905 if (page_index->subpage != subpage) in ctl_log_sense()
6921 total_len = sizeof(struct scsi_log_header) + page_index->page_len; in ctl_log_sense()
6923 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_log_sense()
6924 ctsio->kern_sg_entries = 0; in ctl_log_sense()
6925 ctsio->kern_rel_offset = 0; in ctl_log_sense()
6926 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_log_sense()
6927 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_log_sense()
6929 header = (struct scsi_log_header *)ctsio->kern_data_ptr; in ctl_log_sense()
6930 header->page = page_index->page_code; in ctl_log_sense()
6931 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) in ctl_log_sense()
6932 header->page |= SL_DS; in ctl_log_sense()
6933 if (page_index->subpage) { in ctl_log_sense()
6934 header->page |= SL_SPF; in ctl_log_sense()
6935 header->subpage = page_index->subpage; in ctl_log_sense()
6937 scsi_ulto2b(page_index->page_len, header->datalen); in ctl_log_sense()
6943 if (page_index->sense_handler != NULL) in ctl_log_sense()
6944 page_index->sense_handler(ctsio, page_index, pc); in ctl_log_sense()
6946 memcpy(header + 1, page_index->page_data, page_index->page_len); in ctl_log_sense()
6949 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_log_sense()
6950 ctsio->be_move_done = ctl_config_move_done; in ctl_log_sense()
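/*
 * ctl_log_sense() looks the requested page/subpage up in
 * lun->log_pages.index[], sizes the buffer for the log header plus that
 * page, fills in the header (SL_DS for the LBP page, SL_SPF/subpage when
 * a subpage was addressed), runs the page's sense_handler if it has one,
 * and copies the prepared page_data right after the header.
 */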
6965 cdb = (struct scsi_read_capacity *)ctsio->cdb; in ctl_read_capacity()
6967 lba = scsi_4btoul(cdb->addr); in ctl_read_capacity()
6968 if (((cdb->pmi & SRC_PMI) == 0) in ctl_read_capacity()
6980 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); in ctl_read_capacity()
6981 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; in ctl_read_capacity()
6982 ctsio->kern_data_len = sizeof(*data); in ctl_read_capacity()
6983 ctsio->kern_total_len = sizeof(*data); in ctl_read_capacity()
6984 ctsio->kern_rel_offset = 0; in ctl_read_capacity()
6985 ctsio->kern_sg_entries = 0; in ctl_read_capacity()
6992 if (lun->be_lun->maxlba > 0xfffffffe) in ctl_read_capacity()
6993 scsi_ulto4b(0xffffffff, data->addr); in ctl_read_capacity()
6995 scsi_ulto4b(lun->be_lun->maxlba, data->addr); in ctl_read_capacity()
7000 scsi_ulto4b(lun->be_lun->blocksize, data->length); in ctl_read_capacity()
7003 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_read_capacity()
7004 ctsio->be_move_done = ctl_config_move_done; in ctl_read_capacity()
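/*
 * READ CAPACITY(10) above reports the last LBA and the block size.  When
 * the last LBA does not fit in 32 bits the address is clamped to
 * 0xffffffff, which signals the initiator to retry with READ
 * CAPACITY(16) to obtain the full 64-bit value.
 */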
7020 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; in ctl_read_capacity_16()
7022 alloc_len = scsi_4btoul(cdb->alloc_len); in ctl_read_capacity_16()
7023 lba = scsi_8btou64(cdb->addr); in ctl_read_capacity_16()
7025 if ((cdb->reladr & SRC16_PMI) in ctl_read_capacity_16()
7037 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); in ctl_read_capacity_16()
7038 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; in ctl_read_capacity_16()
7039 ctsio->kern_rel_offset = 0; in ctl_read_capacity_16()
7040 ctsio->kern_sg_entries = 0; in ctl_read_capacity_16()
7041 ctsio->kern_data_len = min(sizeof(*data), alloc_len); in ctl_read_capacity_16()
7042 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_read_capacity_16()
7044 scsi_u64to8b(lun->be_lun->maxlba, data->addr); in ctl_read_capacity_16()
7046 scsi_ulto4b(lun->be_lun->blocksize, data->length); in ctl_read_capacity_16()
7047 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; in ctl_read_capacity_16()
7048 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); in ctl_read_capacity_16()
7049 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) in ctl_read_capacity_16()
7050 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; in ctl_read_capacity_16()
7053 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_read_capacity_16()
7054 ctsio->be_move_done = ctl_config_move_done; in ctl_read_capacity_16()
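/*
 * The 16-byte variant above additionally carries the physical block
 * exponent and lowest-aligned-LBA fields and advertises LBPME | LBPRZ
 * when the LUN supports UNMAP, which is how initiators discover thin
 * provisioning.
 */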
7072 cdb = (struct scsi_get_lba_status *)ctsio->cdb; in ctl_get_lba_status()
7073 lba = scsi_8btou64(cdb->addr); in ctl_get_lba_status()
7074 alloc_len = scsi_4btoul(cdb->alloc_len); in ctl_get_lba_status()
7076 if (lba > lun->be_lun->maxlba) { in ctl_get_lba_status()
7082 total_len = sizeof(*data) + sizeof(data->descr[0]); in ctl_get_lba_status()
7083 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_get_lba_status()
7084 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; in ctl_get_lba_status()
7085 ctsio->kern_rel_offset = 0; in ctl_get_lba_status()
7086 ctsio->kern_sg_entries = 0; in ctl_get_lba_status()
7087 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_get_lba_status()
7088 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_get_lba_status()
7091 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); in ctl_get_lba_status()
7092 scsi_u64to8b(lba, data->descr[0].addr); in ctl_get_lba_status()
7093 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), in ctl_get_lba_status()
7094 data->descr[0].length); in ctl_get_lba_status()
7095 data->descr[0].status = 0; /* Mapped or unknown. */ in ctl_get_lba_status()
7098 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_get_lba_status()
7099 ctsio->be_move_done = ctl_config_move_done; in ctl_get_lba_status()
7101 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_get_lba_status()
7102 lbalen->lba = lba; in ctl_get_lba_status()
7103 lbalen->len = total_len; in ctl_get_lba_status()
7104 lbalen->flags = 0; in ctl_get_lba_status()
7105 retval = lun->backend->config_read((union ctl_io *)ctsio); in ctl_get_lba_status()
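/*
 * GET LBA STATUS above always prepares a single descriptor covering the
 * range from the requested LBA to the end of the LUN, marked as
 * mapped/unknown, and then hands the request to the backend's
 * config_read() method for completion.
 */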
7121 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { in ctl_read_defect()
7122 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; in ctl_read_defect()
7123 format = ccb10->format; in ctl_read_defect()
7124 alloc_len = scsi_2btoul(ccb10->alloc_length); in ctl_read_defect()
7127 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; in ctl_read_defect()
7128 format = ccb12->format; in ctl_read_defect()
7129 alloc_len = scsi_4btoul(ccb12->alloc_length); in ctl_read_defect()
7138 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_read_defect()
7139 ctsio->kern_rel_offset = 0; in ctl_read_defect()
7140 ctsio->kern_sg_entries = 0; in ctl_read_defect()
7141 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_read_defect()
7142 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_read_defect()
7144 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { in ctl_read_defect()
7146 ctsio->kern_data_ptr; in ctl_read_defect()
7147 data10->format = format; in ctl_read_defect()
7148 scsi_ulto2b(0, data10->length); in ctl_read_defect()
7151 ctsio->kern_data_ptr; in ctl_read_defect()
7152 data12->format = format; in ctl_read_defect()
7153 scsi_ulto2b(0, data12->generation); in ctl_read_defect()
7154 scsi_ulto4b(0, data12->length); in ctl_read_defect()
7158 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_read_defect()
7159 ctsio->be_move_done = ctl_config_move_done; in ctl_read_defect()
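/*
 * READ DEFECT DATA(10)/(12) above is effectively a stub: it echoes the
 * requested defect list format back with a defect list length of zero,
 * i.e. the virtual LUN never reports any defects.
 */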
7176 cdb = (struct scsi_report_ident_info *)ctsio->cdb; in ctl_report_ident_info()
7180 switch (cdb->type) { in ctl_report_ident_info()
7182 oii = dnvlist_get_string(lun->be_lun->options, in ctl_report_ident_info()
7188 otii = dnvlist_get_string(lun->be_lun->options, in ctl_report_ident_info()
7191 		len = strlen(otii) + 1;	/* NUL-terminated */ in ctl_report_ident_info()
7207 alloc_len = scsi_4btoul(cdb->length); in ctl_report_ident_info()
7209 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_report_ident_info()
7210 ctsio->kern_sg_entries = 0; in ctl_report_ident_info()
7211 ctsio->kern_rel_offset = 0; in ctl_report_ident_info()
7212 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_report_ident_info()
7213 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_report_ident_info()
7215 rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; in ctl_report_ident_info()
7216 switch (cdb->type) { in ctl_report_ident_info()
7231 riid_ptr->type = RII_LUII; in ctl_report_ident_info()
7232 scsi_ulto2b(0xffff, riid_ptr->length); in ctl_report_ident_info()
7234 riid_ptr->type = RII_LUTII; in ctl_report_ident_info()
7235 scsi_ulto2b(0xffff, riid_ptr->length); in ctl_report_ident_info()
7237 scsi_ulto2b(len, rii_ptr->length); in ctl_report_ident_info()
7240 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_report_ident_info()
7241 ctsio->be_move_done = ctl_config_move_done; in ctl_report_ident_info()
7262 cdb = (struct scsi_maintenance_in *)ctsio->cdb; in ctl_report_tagret_port_groups()
7265 switch (cdb->byte2 & STG_PDF_MASK) { in ctl_report_tagret_port_groups()
7284 shared_group = (softc->is_single != 0); in ctl_report_tagret_port_groups()
7285 mtx_lock(&softc->ctl_lock); in ctl_report_tagret_port_groups()
7286 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_report_tagret_port_groups()
7287 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) in ctl_report_tagret_port_groups()
7289 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) in ctl_report_tagret_port_groups()
7292 if (port->status & CTL_PORT_STATUS_HA_SHARED) in ctl_report_tagret_port_groups()
7295 mtx_unlock(&softc->ctl_lock); in ctl_report_tagret_port_groups()
7296 num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES; in ctl_report_tagret_port_groups()
7306 alloc_len = scsi_4btoul(cdb->length); in ctl_report_tagret_port_groups()
7308 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_report_tagret_port_groups()
7309 ctsio->kern_sg_entries = 0; in ctl_report_tagret_port_groups()
7310 ctsio->kern_rel_offset = 0; in ctl_report_tagret_port_groups()
7311 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_report_tagret_port_groups()
7312 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_report_tagret_port_groups()
7316 ctsio->kern_data_ptr; in ctl_report_tagret_port_groups()
7317 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); in ctl_report_tagret_port_groups()
7318 rtg_ext_ptr->format_type = 0x10; in ctl_report_tagret_port_groups()
7319 rtg_ext_ptr->implicit_transition_time = 0; in ctl_report_tagret_port_groups()
7320 tpg_desc = &rtg_ext_ptr->groups[0]; in ctl_report_tagret_port_groups()
7323 ctsio->kern_data_ptr; in ctl_report_tagret_port_groups()
7324 scsi_ulto4b(total_len - 4, rtg_ptr->length); in ctl_report_tagret_port_groups()
7325 tpg_desc = &rtg_ptr->groups[0]; in ctl_report_tagret_port_groups()
7328 mtx_lock(&softc->ctl_lock); in ctl_report_tagret_port_groups()
7329 pg = softc->port_min / softc->port_cnt; in ctl_report_tagret_port_groups()
7330 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { in ctl_report_tagret_port_groups()
7332 if (softc->ha_link == CTL_HA_LINK_OFFLINE) in ctl_report_tagret_port_groups()
7334 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) in ctl_report_tagret_port_groups()
7336 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) in ctl_report_tagret_port_groups()
7340 if (lun->flags & CTL_LUN_PRIMARY_SC) { in ctl_report_tagret_port_groups()
7348 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { in ctl_report_tagret_port_groups()
7351 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { in ctl_report_tagret_port_groups()
7359 tpg_desc->pref_state = ts; in ctl_report_tagret_port_groups()
7360 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | in ctl_report_tagret_port_groups()
7362 scsi_ulto2b(1, tpg_desc->target_port_group); in ctl_report_tagret_port_groups()
7363 tpg_desc->status = TPG_IMPLICIT; in ctl_report_tagret_port_groups()
7365 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_report_tagret_port_groups()
7366 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) in ctl_report_tagret_port_groups()
7368 if (!softc->is_single && in ctl_report_tagret_port_groups()
7369 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) in ctl_report_tagret_port_groups()
7371 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) in ctl_report_tagret_port_groups()
7373 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. in ctl_report_tagret_port_groups()
7377 tpg_desc->target_port_count = pc; in ctl_report_tagret_port_groups()
7379 &tpg_desc->descriptors[pc]; in ctl_report_tagret_port_groups()
7382 tpg_desc->pref_state = (g == pg) ? ts : os; in ctl_report_tagret_port_groups()
7383 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | in ctl_report_tagret_port_groups()
7385 scsi_ulto2b(2 + g, tpg_desc->target_port_group); in ctl_report_tagret_port_groups()
7386 tpg_desc->status = TPG_IMPLICIT; in ctl_report_tagret_port_groups()
7388 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_report_tagret_port_groups()
7389 if (port->targ_port < g * softc->port_cnt || in ctl_report_tagret_port_groups()
7390 port->targ_port >= (g + 1) * softc->port_cnt) in ctl_report_tagret_port_groups()
7392 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) in ctl_report_tagret_port_groups()
7394 if (port->status & CTL_PORT_STATUS_HA_SHARED) in ctl_report_tagret_port_groups()
7396 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) in ctl_report_tagret_port_groups()
7398 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. in ctl_report_tagret_port_groups()
7402 tpg_desc->target_port_count = pc; in ctl_report_tagret_port_groups()
7404 &tpg_desc->descriptors[pc]; in ctl_report_tagret_port_groups()
7406 mtx_unlock(&softc->ctl_lock); in ctl_report_tagret_port_groups()
7409 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_report_tagret_port_groups()
7410 ctsio->be_move_done = ctl_config_move_done; in ctl_report_tagret_port_groups()
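/*
 * REPORT TARGET PORT GROUPS above publishes one group for HA-shared
 * ports plus one group per shelf when HA is configured.  Each group's
 * asymmetric access state is derived from whether this node (or its
 * peer) is primary for the LUN and from the state of the HA link, and
 * the extended header format is used when the initiator requests it via
 * the PARAMETER DATA FORMAT field.
 */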
7430 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; in ctl_report_supported_opcodes()
7433 opcode = cdb->requested_opcode; in ctl_report_supported_opcodes()
7434 service_action = scsi_2btoul(cdb->requested_service_action); in ctl_report_supported_opcodes()
7435 switch (cdb->options & RSO_OPTIONS_MASK) { in ctl_report_supported_opcodes()
7440 if (entry->flags & CTL_CMD_FLAG_SA5) { in ctl_report_supported_opcodes()
7443 entry->execute)[j]; in ctl_report_supported_opcodes()
7445 lun->be_lun->lun_type, sentry)) in ctl_report_supported_opcodes()
7449 if (ctl_cmd_applicable(lun->be_lun->lun_type, in ctl_report_supported_opcodes()
7493 alloc_len = scsi_4btoul(cdb->length); in ctl_report_supported_opcodes()
7495 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_report_supported_opcodes()
7496 ctsio->kern_sg_entries = 0; in ctl_report_supported_opcodes()
7497 ctsio->kern_rel_offset = 0; in ctl_report_supported_opcodes()
7498 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_report_supported_opcodes()
7499 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_report_supported_opcodes()
7501 switch (cdb->options & RSO_OPTIONS_MASK) { in ctl_report_supported_opcodes()
7504 ctsio->kern_data_ptr; in ctl_report_supported_opcodes()
7508 if (entry->flags & CTL_CMD_FLAG_SA5) { in ctl_report_supported_opcodes()
7511 entry->execute)[j]; in ctl_report_supported_opcodes()
7513 lun->be_lun->lun_type, sentry)) in ctl_report_supported_opcodes()
7515 descr = &all->descr[num++]; in ctl_report_supported_opcodes()
7516 descr->opcode = i; in ctl_report_supported_opcodes()
7517 scsi_ulto2b(j, descr->service_action); in ctl_report_supported_opcodes()
7518 descr->flags = RSO_SERVACTV; in ctl_report_supported_opcodes()
7519 scsi_ulto2b(sentry->length, in ctl_report_supported_opcodes()
7520 descr->cdb_length); in ctl_report_supported_opcodes()
7523 if (!ctl_cmd_applicable(lun->be_lun->lun_type, in ctl_report_supported_opcodes()
7526 descr = &all->descr[num++]; in ctl_report_supported_opcodes()
7527 descr->opcode = i; in ctl_report_supported_opcodes()
7528 scsi_ulto2b(0, descr->service_action); in ctl_report_supported_opcodes()
7529 descr->flags = 0; in ctl_report_supported_opcodes()
7530 scsi_ulto2b(entry->length, descr->cdb_length); in ctl_report_supported_opcodes()
7535 all->length); in ctl_report_supported_opcodes()
7539 ctsio->kern_data_ptr; in ctl_report_supported_opcodes()
7544 ctsio->kern_data_ptr; in ctl_report_supported_opcodes()
7547 entry->execute)[service_action]; in ctl_report_supported_opcodes()
7549 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { in ctl_report_supported_opcodes()
7550 one->support = 3; in ctl_report_supported_opcodes()
7551 scsi_ulto2b(entry->length, one->cdb_length); in ctl_report_supported_opcodes()
7552 one->cdb_usage[0] = opcode; in ctl_report_supported_opcodes()
7553 memcpy(&one->cdb_usage[1], entry->usage, in ctl_report_supported_opcodes()
7554 entry->length - 1); in ctl_report_supported_opcodes()
7556 one->support = 1; in ctl_report_supported_opcodes()
7560 ctsio->kern_data_ptr; in ctl_report_supported_opcodes()
7562 if (entry->flags & CTL_CMD_FLAG_SA5) { in ctl_report_supported_opcodes()
7564 entry->execute)[service_action]; in ctl_report_supported_opcodes()
7566 one->support = 1; in ctl_report_supported_opcodes()
7573 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_report_supported_opcodes()
7574 ctsio->be_move_done = ctl_config_move_done; in ctl_report_supported_opcodes()
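/*
 * REPORT SUPPORTED OPERATION CODES above handles both the "all commands"
 * format, walking the opcode table and emitting one descriptor per
 * opcode (or per service action for CTL_CMD_FLAG_SA5 opcodes), and the
 * "one command" formats, reporting SUPPORT=3 plus the CDB usage map for
 * commands the LUN type implements and SUPPORT=1 for those it does not.
 */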
7589 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; in ctl_report_supported_tmf()
7593 if (cdb->options & RST_REPD) in ctl_report_supported_tmf()
7597 alloc_len = scsi_4btoul(cdb->length); in ctl_report_supported_tmf()
7599 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_report_supported_tmf()
7600 ctsio->kern_sg_entries = 0; in ctl_report_supported_tmf()
7601 ctsio->kern_rel_offset = 0; in ctl_report_supported_tmf()
7602 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_report_supported_tmf()
7603 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_report_supported_tmf()
7605 data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; in ctl_report_supported_tmf()
7606 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | in ctl_report_supported_tmf()
7608 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; in ctl_report_supported_tmf()
7609 data->length = total_len - 4; in ctl_report_supported_tmf()
7612 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_report_supported_tmf()
7613 ctsio->be_move_done = ctl_config_move_done; in ctl_report_supported_tmf()
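/*
 * REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS above advertises a fixed
 * capability set (abort task/task set, clear task set, LUN reset, query
 * task and the related query bits); the extended format is returned when
 * the initiator sets REPD in the CDB.
 */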
7630 cdb = (struct scsi_report_timestamp *)ctsio->cdb; in ctl_report_timestamp()
7635 alloc_len = scsi_4btoul(cdb->length); in ctl_report_timestamp()
7637 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_report_timestamp()
7638 ctsio->kern_sg_entries = 0; in ctl_report_timestamp()
7639 ctsio->kern_rel_offset = 0; in ctl_report_timestamp()
7640 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_report_timestamp()
7641 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_report_timestamp()
7643 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; in ctl_report_timestamp()
7644 scsi_ulto2b(sizeof(*data) - 2, data->length); in ctl_report_timestamp()
7645 data->origin = RTS_ORIG_OUTSIDE; in ctl_report_timestamp()
7648 scsi_ulto4b(timestamp >> 16, data->timestamp); in ctl_report_timestamp()
7649 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); in ctl_report_timestamp()
7652 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_report_timestamp()
7653 ctsio->be_move_done = ctl_config_move_done; in ctl_report_timestamp()
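/*
 * REPORT TIMESTAMP above returns a 48-bit millisecond timestamp with the
 * origin field set to RTS_ORIG_OUTSIDE: the high 32 bits land in the
 * first four bytes and the low 16 bits in the last two, so a value of
 * 0x0123456789ab would be stored as 01 23 45 67 89 ab.
 */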
7670 cdb = (struct scsi_per_res_in *)ctsio->cdb; in ctl_persistent_reserve_in()
7672 alloc_len = scsi_2btoul(cdb->length); in ctl_persistent_reserve_in()
7675 mtx_lock(&lun->lun_lock); in ctl_persistent_reserve_in()
7676 switch (cdb->action) { in ctl_persistent_reserve_in()
7679 lun->pr_key_count * in ctl_persistent_reserve_in()
7683 if (lun->flags & CTL_LUN_PR_RESERVED) in ctl_persistent_reserve_in()
7694 lun->pr_key_count; in ctl_persistent_reserve_in()
7697 panic("%s: Invalid PR type %#x", __func__, cdb->action); in ctl_persistent_reserve_in()
7699 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_in()
7701 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); in ctl_persistent_reserve_in()
7702 ctsio->kern_rel_offset = 0; in ctl_persistent_reserve_in()
7703 ctsio->kern_sg_entries = 0; in ctl_persistent_reserve_in()
7704 ctsio->kern_data_len = min(total_len, alloc_len); in ctl_persistent_reserve_in()
7705 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_persistent_reserve_in()
7707 mtx_lock(&lun->lun_lock); in ctl_persistent_reserve_in()
7708 switch (cdb->action) { in ctl_persistent_reserve_in()
7713 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; in ctl_persistent_reserve_in()
7723 (lun->pr_key_count * in ctl_persistent_reserve_in()
7725 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_in()
7726 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_in()
7732 scsi_ulto4b(lun->pr_generation, res_keys->header.generation); in ctl_persistent_reserve_in()
7735 lun->pr_key_count, res_keys->header.length); in ctl_persistent_reserve_in()
7742 * We used lun->pr_key_count to calculate the in ctl_persistent_reserve_in()
7748 if (key_count >= lun->pr_key_count) { in ctl_persistent_reserve_in()
7752 scsi_u64to8b(key, res_keys->keys[key_count].key); in ctl_persistent_reserve_in()
7761 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; in ctl_persistent_reserve_in()
7763 scsi_ulto4b(lun->pr_generation, res->header.generation); in ctl_persistent_reserve_in()
7765 if (lun->flags & CTL_LUN_PR_RESERVED) in ctl_persistent_reserve_in()
7769 res->header.length); in ctl_persistent_reserve_in()
7773 scsi_ulto4b(0, res->header.length); in ctl_persistent_reserve_in()
7785 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_in()
7786 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_in()
7802 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { in ctl_persistent_reserve_in()
7803 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), in ctl_persistent_reserve_in()
7804 res->data.reservation); in ctl_persistent_reserve_in()
7806 res->data.scopetype = lun->pr_res_type; in ctl_persistent_reserve_in()
7814 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; in ctl_persistent_reserve_in()
7815 scsi_ulto2b(sizeof(*res_cap), res_cap->length); in ctl_persistent_reserve_in()
7816 res_cap->flags1 = SPRI_CRH; in ctl_persistent_reserve_in()
7817 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; in ctl_persistent_reserve_in()
7824 scsi_ulto2b(type_mask, res_cap->type_mask); in ctl_persistent_reserve_in()
7833 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; in ctl_persistent_reserve_in()
7844 lun->pr_key_count)){ in ctl_persistent_reserve_in()
7845 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_in()
7846 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_in()
7852 scsi_ulto4b(lun->pr_generation, res_status->header.generation); in ctl_persistent_reserve_in()
7854 res_desc = &res_status->desc[0]; in ctl_persistent_reserve_in()
7859 scsi_u64to8b(key, res_desc->res_key.key); in ctl_persistent_reserve_in()
7860 if ((lun->flags & CTL_LUN_PR_RESERVED) && in ctl_persistent_reserve_in()
7861 (lun->pr_res_idx == i || in ctl_persistent_reserve_in()
7862 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { in ctl_persistent_reserve_in()
7863 res_desc->flags = SPRI_FULL_R_HOLDER; in ctl_persistent_reserve_in()
7864 res_desc->scopetype = lun->pr_res_type; in ctl_persistent_reserve_in()
7867 res_desc->rel_trgt_port_id); in ctl_persistent_reserve_in()
7869 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; in ctl_persistent_reserve_in()
7873 res_desc->transport_id); in ctl_persistent_reserve_in()
7874 scsi_ulto4b(len, res_desc->additional_length); in ctl_persistent_reserve_in()
7876 &res_desc->transport_id[len]; in ctl_persistent_reserve_in()
7878 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], in ctl_persistent_reserve_in()
7879 res_status->header.length); in ctl_persistent_reserve_in()
7883 panic("%s: Invalid PR type %#x", __func__, cdb->action); in ctl_persistent_reserve_in()
7885 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_in()
7888 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_persistent_reserve_in()
7889 ctsio->be_move_done = ctl_config_move_done; in ctl_persistent_reserve_in()
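/*
 * PERSISTENT RESERVE IN above implements READ KEYS, READ RESERVATION,
 * REPORT CAPABILITIES and READ FULL STATUS.  Because the LUN lock is
 * dropped between sizing the reply and filling it in, the handler
 * re-checks the registration count afterwards and, if it grew, frees the
 * buffer and retries with a larger allocation.
 */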
7895 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
7907 mtx_lock(&lun->lun_lock); in ctl_pro_preempt()
7909 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { in ctl_pro_preempt()
7911 if ((cdb->scope_type & SPR_SCOPE_MASK) != in ctl_pro_preempt()
7913 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
7925 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
7947 lun->pr_key_count = 1; in ctl_pro_preempt()
7948 lun->pr_res_type = type; in ctl_pro_preempt()
7949 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && in ctl_pro_preempt()
7950 lun->pr_res_type != SPR_TYPE_EX_AC_AR) in ctl_pro_preempt()
7951 lun->pr_res_idx = residx; in ctl_pro_preempt()
7952 lun->pr_generation++; in ctl_pro_preempt()
7953 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
7956 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_pro_preempt()
7959 persis_io.pr.pr_info.residx = lun->pr_res_idx; in ctl_pro_preempt()
7962 param->serv_act_res_key, in ctl_pro_preempt()
7963 sizeof(param->serv_act_res_key)); in ctl_pro_preempt()
7968 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
7969 free(ctsio->kern_data_ptr, M_CTL); in ctl_pro_preempt()
7979 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS in ctl_pro_preempt()
7980 || !(lun->flags & CTL_LUN_PR_RESERVED)) { in ctl_pro_preempt()
7994 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
7995 free(ctsio->kern_data_ptr, M_CTL); in ctl_pro_preempt()
8012 lun->pr_key_count--; in ctl_pro_preempt()
8016 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
8017 free(ctsio->kern_data_ptr, M_CTL); in ctl_pro_preempt()
8022 lun->pr_generation++; in ctl_pro_preempt()
8023 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
8026 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_pro_preempt()
8029 persis_io.pr.pr_info.residx = lun->pr_res_idx; in ctl_pro_preempt()
8032 param->serv_act_res_key, in ctl_pro_preempt()
8033 sizeof(param->serv_act_res_key)); in ctl_pro_preempt()
8039 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { in ctl_pro_preempt()
8041 if ((cdb->scope_type & SPR_SCOPE_MASK) != in ctl_pro_preempt()
8043 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
8055 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
8086 lun->pr_key_count--; in ctl_pro_preempt()
8088 } else if (type != lun->pr_res_type && in ctl_pro_preempt()
8089 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || in ctl_pro_preempt()
8090 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { in ctl_pro_preempt()
8094 lun->pr_res_type = type; in ctl_pro_preempt()
8095 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && in ctl_pro_preempt()
8096 lun->pr_res_type != SPR_TYPE_EX_AC_AR) in ctl_pro_preempt()
8097 lun->pr_res_idx = residx; in ctl_pro_preempt()
8099 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; in ctl_pro_preempt()
8100 lun->pr_generation++; in ctl_pro_preempt()
8101 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
8103 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_pro_preempt()
8106 persis_io.pr.pr_info.residx = lun->pr_res_idx; in ctl_pro_preempt()
8109 param->serv_act_res_key, in ctl_pro_preempt()
8110 sizeof(param->serv_act_res_key)); in ctl_pro_preempt()
8126 lun->pr_key_count--; in ctl_pro_preempt()
8131 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
8132 free(ctsio->kern_data_ptr, M_CTL); in ctl_pro_preempt()
8137 lun->pr_generation++; in ctl_pro_preempt()
8138 mtx_unlock(&lun->lun_lock); in ctl_pro_preempt()
8140 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_pro_preempt()
8143 persis_io.pr.pr_info.residx = lun->pr_res_idx; in ctl_pro_preempt()
8146 param->serv_act_res_key, in ctl_pro_preempt()
8147 sizeof(param->serv_act_res_key)); in ctl_pro_preempt()
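/*
 * ctl_pro_preempt() above handles PREEMPT (and PREEMPT AND ABORT):
 * depending on whether the reservation is held by all registrants and
 * whether the service action reservation key names the current holder,
 * it removes the preempted registrations, possibly moves the reservation
 * to the preempting initiator, bumps pr_generation and forwards the
 * change to the HA peer; invalid combinations fail the command instead.
 */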
8161 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); in ctl_pro_preempt_other()
8163 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS in ctl_pro_preempt_other()
8164 || lun->pr_res_idx == CTL_PR_NO_RESERVATION in ctl_pro_preempt_other()
8165 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { in ctl_pro_preempt_other()
8172 if (i == msg->pr.pr_info.residx || in ctl_pro_preempt_other()
8180 lun->pr_key_count = 1; in ctl_pro_preempt_other()
8181 lun->pr_res_type = msg->pr.pr_info.res_type; in ctl_pro_preempt_other()
8182 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && in ctl_pro_preempt_other()
8183 lun->pr_res_type != SPR_TYPE_EX_AC_AR) in ctl_pro_preempt_other()
8184 lun->pr_res_idx = msg->pr.pr_info.residx; in ctl_pro_preempt_other()
8191 lun->pr_key_count--; in ctl_pro_preempt_other()
8197 if (i == msg->pr.pr_info.residx || in ctl_pro_preempt_other()
8203 lun->pr_key_count--; in ctl_pro_preempt_other()
8205 } else if (msg->pr.pr_info.res_type != lun->pr_res_type in ctl_pro_preempt_other()
8206 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || in ctl_pro_preempt_other()
8207 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { in ctl_pro_preempt_other()
8211 lun->pr_res_type = msg->pr.pr_info.res_type; in ctl_pro_preempt_other()
8212 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && in ctl_pro_preempt_other()
8213 lun->pr_res_type != SPR_TYPE_EX_AC_AR) in ctl_pro_preempt_other()
8214 lun->pr_res_idx = msg->pr.pr_info.residx; in ctl_pro_preempt_other()
8216 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; in ctl_pro_preempt_other()
8218 lun->pr_generation++; in ctl_pro_preempt_other()
8239 cdb = (struct scsi_per_res_out *)ctsio->cdb; in ctl_persistent_reserve_out()
8243 * We only support whole-LUN scope. The scope & type are ignored for in ctl_persistent_reserve_out()
8248 type = cdb->scope_type & SPR_TYPE_MASK; in ctl_persistent_reserve_out()
8249 if ((cdb->action == SPRO_RESERVE) in ctl_persistent_reserve_out()
8250 || (cdb->action == SPRO_RELEASE)) { in ctl_persistent_reserve_out()
8251 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { in ctl_persistent_reserve_out()
8274 param_len = scsi_4btoul(cdb->length); in ctl_persistent_reserve_out()
8288 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { in ctl_persistent_reserve_out()
8289 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); in ctl_persistent_reserve_out()
8290 ctsio->kern_data_len = param_len; in ctl_persistent_reserve_out()
8291 ctsio->kern_total_len = param_len; in ctl_persistent_reserve_out()
8292 ctsio->kern_rel_offset = 0; in ctl_persistent_reserve_out()
8293 ctsio->kern_sg_entries = 0; in ctl_persistent_reserve_out()
8294 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_persistent_reserve_out()
8295 ctsio->be_move_done = ctl_config_move_done; in ctl_persistent_reserve_out()
8301 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; in ctl_persistent_reserve_out()
8303 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_persistent_reserve_out()
8304 res_key = scsi_8btou64(param->res_key.key); in ctl_persistent_reserve_out()
8305 sa_res_key = scsi_8btou64(param->serv_act_res_key); in ctl_persistent_reserve_out()
8311 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { in ctl_persistent_reserve_out()
8312 mtx_lock(&lun->lun_lock); in ctl_persistent_reserve_out()
8320 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8321 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_out()
8326 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { in ctl_persistent_reserve_out()
8330 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8331 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_out()
8340 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8341 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_out()
8346 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8349 switch (cdb->action & SPRO_ACTION_MASK) { in ctl_persistent_reserve_out()
8357 if ((param->flags & SPR_SPEC_I_PT) in ctl_persistent_reserve_out()
8358 || (param->flags & SPR_ALL_TG_PT) in ctl_persistent_reserve_out()
8359 || (param->flags & SPR_APTPL)) { in ctl_persistent_reserve_out()
8362 if (param->flags & SPR_APTPL) in ctl_persistent_reserve_out()
8364 else if (param->flags & SPR_ALL_TG_PT) in ctl_persistent_reserve_out()
8369 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_out()
8380 mtx_lock(&lun->lun_lock); in ctl_persistent_reserve_out()
8388 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) in ctl_persistent_reserve_out()
8389 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO in ctl_persistent_reserve_out()
8391 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8396 lun->pr_key_count--; in ctl_persistent_reserve_out()
8398 if (residx == lun->pr_res_idx) { in ctl_persistent_reserve_out()
8399 lun->flags &= ~CTL_LUN_PR_RESERVED; in ctl_persistent_reserve_out()
8400 lun->pr_res_idx = CTL_PR_NO_RESERVATION; in ctl_persistent_reserve_out()
8402 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || in ctl_persistent_reserve_out()
8403 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && in ctl_persistent_reserve_out()
8404 lun->pr_key_count) { in ctl_persistent_reserve_out()
8413 for (i = softc->init_min; i < softc->init_max; i++){ in ctl_persistent_reserve_out()
8420 lun->pr_res_type = 0; in ctl_persistent_reserve_out()
8421 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { in ctl_persistent_reserve_out()
8422 			if (lun->pr_key_count == 0) { in ctl_persistent_reserve_out()
8423 lun->flags &= ~CTL_LUN_PR_RESERVED; in ctl_persistent_reserve_out()
8424 lun->pr_res_type = 0; in ctl_persistent_reserve_out()
8425 lun->pr_res_idx = CTL_PR_NO_RESERVATION; in ctl_persistent_reserve_out()
8428 lun->pr_generation++; in ctl_persistent_reserve_out()
8429 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8431 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_persistent_reserve_out()
8444 lun->pr_key_count++; in ctl_persistent_reserve_out()
8446 lun->pr_generation++; in ctl_persistent_reserve_out()
8447 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8449 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_persistent_reserve_out()
8454 param->serv_act_res_key, in ctl_persistent_reserve_out()
8455 sizeof(param->serv_act_res_key)); in ctl_persistent_reserve_out()
8463 mtx_lock(&lun->lun_lock); in ctl_persistent_reserve_out()
8464 if (lun->flags & CTL_LUN_PR_RESERVED) { in ctl_persistent_reserve_out()
8470 if ((lun->pr_res_idx != residx in ctl_persistent_reserve_out()
8471 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) in ctl_persistent_reserve_out()
8472 || lun->pr_res_type != type) { in ctl_persistent_reserve_out()
8473 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8474 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_out()
8479 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8487 lun->pr_res_idx = residx; /* Res holder */ in ctl_persistent_reserve_out()
8489 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; in ctl_persistent_reserve_out()
8491 lun->flags |= CTL_LUN_PR_RESERVED; in ctl_persistent_reserve_out()
8492 lun->pr_res_type = type; in ctl_persistent_reserve_out()
8494 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8497 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_persistent_reserve_out()
8500 persis_io.pr.pr_info.residx = lun->pr_res_idx; in ctl_persistent_reserve_out()
8508 mtx_lock(&lun->lun_lock); in ctl_persistent_reserve_out()
8509 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { in ctl_persistent_reserve_out()
8511 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8517 if (lun->pr_res_idx != residx in ctl_persistent_reserve_out()
8518 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { in ctl_persistent_reserve_out()
8523 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8527 if (lun->pr_res_type != type) { in ctl_persistent_reserve_out()
8528 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8529 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_out()
8536 lun->flags &= ~CTL_LUN_PR_RESERVED; in ctl_persistent_reserve_out()
8537 lun->pr_res_idx = CTL_PR_NO_RESERVATION; in ctl_persistent_reserve_out()
8538 lun->pr_res_type = 0; in ctl_persistent_reserve_out()
8545 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { in ctl_persistent_reserve_out()
8546 for (i = softc->init_min; i < softc->init_max; i++) { in ctl_persistent_reserve_out()
8552 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8555 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_persistent_reserve_out()
8565 mtx_lock(&lun->lun_lock); in ctl_persistent_reserve_out()
8566 lun->flags &= ~CTL_LUN_PR_RESERVED; in ctl_persistent_reserve_out()
8567 lun->pr_res_type = 0; in ctl_persistent_reserve_out()
8568 lun->pr_key_count = 0; in ctl_persistent_reserve_out()
8569 lun->pr_res_idx = CTL_PR_NO_RESERVATION; in ctl_persistent_reserve_out()
8577 lun->pr_generation++; in ctl_persistent_reserve_out()
8578 mtx_unlock(&lun->lun_lock); in ctl_persistent_reserve_out()
8580 persis_io.hdr.nexus = ctsio->io_hdr.nexus; in ctl_persistent_reserve_out()
8598 panic("%s: Invalid PR type %#x", __func__, cdb->action); in ctl_persistent_reserve_out()
8602 free(ctsio->kern_data_ptr, M_CTL); in ctl_persistent_reserve_out()
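/*
 * PERSISTENT RESERVE OUT above validates the scope/type and the
 * parameter list length, fetches the parameter data, and then dispatches
 * on the service action: REGISTER / REGISTER AND IGNORE EXISTING KEY
 * maintain the key table, RESERVE / RELEASE acquire and drop the
 * reservation, CLEAR wipes all registrations, and PREEMPT is delegated
 * to ctl_pro_preempt(); each mutating path bumps pr_generation and
 * notifies the HA peer.
 */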
8619 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; in ctl_hndl_per_res_out_on_other_sc()
8624 targ_lun = msg->hdr.nexus.targ_mapped_lun; in ctl_hndl_per_res_out_on_other_sc()
8625 mtx_lock(&softc->ctl_lock); in ctl_hndl_per_res_out_on_other_sc()
8627 (lun = softc->ctl_luns[targ_lun]) == NULL) { in ctl_hndl_per_res_out_on_other_sc()
8628 mtx_unlock(&softc->ctl_lock); in ctl_hndl_per_res_out_on_other_sc()
8631 mtx_lock(&lun->lun_lock); in ctl_hndl_per_res_out_on_other_sc()
8632 mtx_unlock(&softc->ctl_lock); in ctl_hndl_per_res_out_on_other_sc()
8633 if (lun->flags & CTL_LUN_DISABLED) { in ctl_hndl_per_res_out_on_other_sc()
8634 mtx_unlock(&lun->lun_lock); in ctl_hndl_per_res_out_on_other_sc()
8637 residx = ctl_get_initindex(&msg->hdr.nexus); in ctl_hndl_per_res_out_on_other_sc()
8638 switch(msg->pr.pr_info.action) { in ctl_hndl_per_res_out_on_other_sc()
8640 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); in ctl_hndl_per_res_out_on_other_sc()
8641 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) in ctl_hndl_per_res_out_on_other_sc()
8642 lun->pr_key_count++; in ctl_hndl_per_res_out_on_other_sc()
8643 ctl_set_prkey(lun, msg->pr.pr_info.residx, in ctl_hndl_per_res_out_on_other_sc()
8644 scsi_8btou64(msg->pr.pr_info.sa_res_key)); in ctl_hndl_per_res_out_on_other_sc()
8645 lun->pr_generation++; in ctl_hndl_per_res_out_on_other_sc()
8649 ctl_clr_prkey(lun, msg->pr.pr_info.residx); in ctl_hndl_per_res_out_on_other_sc()
8650 lun->pr_key_count--; in ctl_hndl_per_res_out_on_other_sc()
8654 if (msg->pr.pr_info.residx == lun->pr_res_idx) { in ctl_hndl_per_res_out_on_other_sc()
8655 lun->flags &= ~CTL_LUN_PR_RESERVED; in ctl_hndl_per_res_out_on_other_sc()
8656 lun->pr_res_idx = CTL_PR_NO_RESERVATION; in ctl_hndl_per_res_out_on_other_sc()
8658 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || in ctl_hndl_per_res_out_on_other_sc()
8659 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && in ctl_hndl_per_res_out_on_other_sc()
8660 lun->pr_key_count) { in ctl_hndl_per_res_out_on_other_sc()
8669 for (i = softc->init_min; i < softc->init_max; i++) { in ctl_hndl_per_res_out_on_other_sc()
8676 lun->pr_res_type = 0; in ctl_hndl_per_res_out_on_other_sc()
8677 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { in ctl_hndl_per_res_out_on_other_sc()
8678 			if (lun->pr_key_count == 0) { in ctl_hndl_per_res_out_on_other_sc()
8679 lun->flags &= ~CTL_LUN_PR_RESERVED; in ctl_hndl_per_res_out_on_other_sc()
8680 lun->pr_res_type = 0; in ctl_hndl_per_res_out_on_other_sc()
8681 lun->pr_res_idx = CTL_PR_NO_RESERVATION; in ctl_hndl_per_res_out_on_other_sc()
8684 lun->pr_generation++; in ctl_hndl_per_res_out_on_other_sc()
8688 lun->flags |= CTL_LUN_PR_RESERVED; in ctl_hndl_per_res_out_on_other_sc()
8689 lun->pr_res_type = msg->pr.pr_info.res_type; in ctl_hndl_per_res_out_on_other_sc()
8690 lun->pr_res_idx = msg->pr.pr_info.residx; in ctl_hndl_per_res_out_on_other_sc()
8699 if (lun->pr_res_type != SPR_TYPE_EX_AC && in ctl_hndl_per_res_out_on_other_sc()
8700 lun->pr_res_type != SPR_TYPE_WR_EX && in ctl_hndl_per_res_out_on_other_sc()
8701 (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { in ctl_hndl_per_res_out_on_other_sc()
8702 for (i = softc->init_min; i < softc->init_max; i++) { in ctl_hndl_per_res_out_on_other_sc()
8709 lun->flags &= ~CTL_LUN_PR_RESERVED; in ctl_hndl_per_res_out_on_other_sc()
8710 lun->pr_res_idx = CTL_PR_NO_RESERVATION; in ctl_hndl_per_res_out_on_other_sc()
8711 lun->pr_res_type = 0; in ctl_hndl_per_res_out_on_other_sc()
8718 lun->flags &= ~CTL_LUN_PR_RESERVED; in ctl_hndl_per_res_out_on_other_sc()
8719 lun->pr_res_type = 0; in ctl_hndl_per_res_out_on_other_sc()
8720 lun->pr_key_count = 0; in ctl_hndl_per_res_out_on_other_sc()
8721 lun->pr_res_idx = CTL_PR_NO_RESERVATION; in ctl_hndl_per_res_out_on_other_sc()
8729 lun->pr_generation++; in ctl_hndl_per_res_out_on_other_sc()
8733 mtx_unlock(&lun->lun_lock); in ctl_hndl_per_res_out_on_other_sc()
8746 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); in ctl_read_write()
8749 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 in ctl_read_write()
8750 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; in ctl_read_write()
8751 switch (ctsio->cdb[0]) { in ctl_read_write()
8756 cdb = (struct scsi_rw_6 *)ctsio->cdb; in ctl_read_write()
8758 lba = scsi_3btoul(cdb->addr); in ctl_read_write()
8761 num_blocks = cdb->length; in ctl_read_write()
8763 * This is correct according to SBC-2. in ctl_read_write()
8773 cdb = (struct scsi_rw_10 *)ctsio->cdb; in ctl_read_write()
8774 if (cdb->byte2 & SRW10_FUA) in ctl_read_write()
8776 if (cdb->byte2 & SRW10_DPO) in ctl_read_write()
8778 lba = scsi_4btoul(cdb->addr); in ctl_read_write()
8779 num_blocks = scsi_2btoul(cdb->length); in ctl_read_write()
8785 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; in ctl_read_write()
8787 if (cdb->byte2 & SWV_DPO) in ctl_read_write()
8789 lba = scsi_4btoul(cdb->addr); in ctl_read_write()
8790 num_blocks = scsi_2btoul(cdb->length); in ctl_read_write()
8797 cdb = (struct scsi_rw_12 *)ctsio->cdb; in ctl_read_write()
8798 if (cdb->byte2 & SRW12_FUA) in ctl_read_write()
8800 if (cdb->byte2 & SRW12_DPO) in ctl_read_write()
8802 lba = scsi_4btoul(cdb->addr); in ctl_read_write()
8803 num_blocks = scsi_4btoul(cdb->length); in ctl_read_write()
8809 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; in ctl_read_write()
8811 if (cdb->byte2 & SWV_DPO) in ctl_read_write()
8813 lba = scsi_4btoul(cdb->addr); in ctl_read_write()
8814 num_blocks = scsi_4btoul(cdb->length); in ctl_read_write()
8821 cdb = (struct scsi_rw_16 *)ctsio->cdb; in ctl_read_write()
8822 if (cdb->byte2 & SRW12_FUA) in ctl_read_write()
8824 if (cdb->byte2 & SRW12_DPO) in ctl_read_write()
8826 lba = scsi_8btou64(cdb->addr); in ctl_read_write()
8827 num_blocks = scsi_4btoul(cdb->length); in ctl_read_write()
8833 if (lun->be_lun->atomicblock == 0) { in ctl_read_write()
8839 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; in ctl_read_write()
8840 if (cdb->byte2 & SRW12_FUA) in ctl_read_write()
8842 if (cdb->byte2 & SRW12_DPO) in ctl_read_write()
8844 lba = scsi_8btou64(cdb->addr); in ctl_read_write()
8845 num_blocks = scsi_2btoul(cdb->length); in ctl_read_write()
8846 if (num_blocks > lun->be_lun->atomicblock) { in ctl_read_write()
8858 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; in ctl_read_write()
8860 if (cdb->byte2 & SWV_DPO) in ctl_read_write()
8862 lba = scsi_8btou64(cdb->addr); in ctl_read_write()
8863 num_blocks = scsi_4btoul(cdb->length); in ctl_read_write()
8880 * check is to catch wrap-around problems. If the lba + num blocks in ctl_read_write()
8884 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_read_write()
8887 MAX(lba, lun->be_lun->maxlba + 1)); in ctl_read_write()
8893 * According to SBC-3, a transfer length of 0 is not an error. in ctl_read_write()
8905 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) in ctl_read_write()
8908 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) in ctl_read_write()
8913 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_read_write()
8914 lbalen->lba = lba; in ctl_read_write()
8915 lbalen->len = num_blocks; in ctl_read_write()
8916 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; in ctl_read_write()
8918 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; in ctl_read_write()
8919 ctsio->kern_rel_offset = 0; in ctl_read_write()
8923 retval = lun->backend->data_submit((union ctl_io *)ctsio); in ctl_read_write()
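
The bounds test applied after the CDB decoding above rejects a transfer that either runs past the last LBA of the LUN or wraps the 64-bit address space. A minimal standalone sketch of that check (names are illustrative, not the CTL structures):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Sketch only: an I/O of num_blocks starting at lba is acceptable
	 * when it ends at or before maxlba and lba + num_blocks does not
	 * wrap around the 64-bit space.
	 */
	static bool
	lba_range_ok(uint64_t lba, uint64_t num_blocks, uint64_t maxlba)
	{

		if (lba + num_blocks > maxlba + 1)
			return (false);		/* past the end of the LUN */
		if (lba + num_blocks < lba)
			return (false);		/* 64-bit wrap-around */
		return (true);
	}
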
8937 ctsio = &io->scsiio; in ctl_cnw_cont()
8938 ctsio->io_hdr.status = CTL_STATUS_NONE; in ctl_cnw_cont()
8939 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; in ctl_cnw_cont()
8941 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_cnw_cont()
8942 lbalen->flags &= ~CTL_LLF_COMPARE; in ctl_cnw_cont()
8943 lbalen->flags |= CTL_LLF_WRITE; in ctl_cnw_cont()
8946 retval = lun->backend->data_submit((union ctl_io *)ctsio); in ctl_cnw_cont()
8959 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); in ctl_cnw()
8962 switch (ctsio->cdb[0]) { in ctl_cnw()
8966 cdb = (struct scsi_compare_and_write *)ctsio->cdb; in ctl_cnw()
8967 if (cdb->byte2 & SRW10_FUA) in ctl_cnw()
8969 if (cdb->byte2 & SRW10_DPO) in ctl_cnw()
8971 lba = scsi_8btou64(cdb->addr); in ctl_cnw()
8972 num_blocks = cdb->length; in ctl_cnw()
8989 * check is to catch wrap-around problems. If the lba + num blocks in ctl_cnw()
8993 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_cnw()
8996 MAX(lba, lun->be_lun->maxlba + 1)); in ctl_cnw()
9002 * According to SBC-3, a transfer length of 0 is not an error. in ctl_cnw()
9011 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) in ctl_cnw()
9014 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; in ctl_cnw()
9015 ctsio->kern_rel_offset = 0; in ctl_cnw()
9022 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; in ctl_cnw()
9023 ctsio->io_cont = ctl_cnw_cont; in ctl_cnw()
9026 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_cnw()
9027 lbalen->lba = lba; in ctl_cnw()
9028 lbalen->len = num_blocks; in ctl_cnw()
9029 lbalen->flags = CTL_LLF_COMPARE | flags; in ctl_cnw()
9032 retval = lun->backend->data_submit((union ctl_io *)ctsio); in ctl_cnw()
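
COMPARE AND WRITE runs in two passes: the first data_submit() carries CTL_LLF_COMPARE, and io_cont is pointed at ctl_cnw_cont() so that, after the compare pass finishes, the same range is resubmitted with CTL_LLF_WRITE. A hedged sketch of that continuation shape, using illustrative types rather than the CTL ones:

	#include <stddef.h>

	enum cnw_phase { CNW_COMPARE, CNW_WRITE };

	struct cnw_io {
		enum cnw_phase	 phase;
		void		(*io_cont)(struct cnw_io *);
	};

	static void cnw_submit(struct cnw_io *io);	/* stand-in for data_submit */

	/* Second pass: the compare matched, so rewrite the same range. */
	static void
	cnw_cont(struct cnw_io *io)
	{

		io->phase = CNW_WRITE;
		io->io_cont = NULL;
		cnw_submit(io);
	}

	/* First pass: submit the compare and arrange the continuation. */
	static void
	cnw_start(struct cnw_io *io)
	{

		io->phase = CNW_COMPARE;
		io->io_cont = cnw_cont;
		cnw_submit(io);
	}

	static void
	cnw_submit(struct cnw_io *io)
	{

		/* Stand-in backend: on completion, run any continuation. */
		if (io->io_cont != NULL)
			io->io_cont(io);
	}
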
9046 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); in ctl_verify()
9050 switch (ctsio->cdb[0]) { in ctl_verify()
9054 cdb = (struct scsi_verify_10 *)ctsio->cdb; in ctl_verify()
9055 if (cdb->byte2 & SVFY_BYTCHK) in ctl_verify()
9057 if (cdb->byte2 & SVFY_DPO) in ctl_verify()
9059 lba = scsi_4btoul(cdb->addr); in ctl_verify()
9060 num_blocks = scsi_2btoul(cdb->length); in ctl_verify()
9066 cdb = (struct scsi_verify_12 *)ctsio->cdb; in ctl_verify()
9067 if (cdb->byte2 & SVFY_BYTCHK) in ctl_verify()
9069 if (cdb->byte2 & SVFY_DPO) in ctl_verify()
9071 lba = scsi_4btoul(cdb->addr); in ctl_verify()
9072 num_blocks = scsi_4btoul(cdb->length); in ctl_verify()
9078 cdb = (struct scsi_rw_16 *)ctsio->cdb; in ctl_verify()
9079 if (cdb->byte2 & SVFY_BYTCHK) in ctl_verify()
9081 if (cdb->byte2 & SVFY_DPO) in ctl_verify()
9083 lba = scsi_8btou64(cdb->addr); in ctl_verify()
9084 num_blocks = scsi_4btoul(cdb->length); in ctl_verify()
9099 * check is to catch wrap-around problems. If the lba + num blocks in ctl_verify()
9103 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_verify()
9106 MAX(lba, lun->be_lun->maxlba + 1)); in ctl_verify()
9112 * According to SBC-3, a transfer length of 0 is not an error. in ctl_verify()
9121 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_verify()
9122 lbalen->lba = lba; in ctl_verify()
9123 lbalen->len = num_blocks; in ctl_verify()
9125 lbalen->flags = CTL_LLF_COMPARE | flags; in ctl_verify()
9126 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; in ctl_verify()
9128 lbalen->flags = CTL_LLF_VERIFY | flags; in ctl_verify()
9129 ctsio->kern_total_len = 0; in ctl_verify()
9131 ctsio->kern_rel_offset = 0; in ctl_verify()
9134 retval = lun->backend->data_submit((union ctl_io *)ctsio); in ctl_verify()
9151 cdb = (struct scsi_report_luns *)ctsio->cdb; in ctl_report_luns()
9156 num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns; in ctl_report_luns()
9157 mtx_lock(&softc->ctl_lock); in ctl_report_luns()
9162 mtx_unlock(&softc->ctl_lock); in ctl_report_luns()
9164 switch (cdb->select_report) { in ctl_report_luns()
9186 alloc_len = scsi_4btoul(cdb->length); in ctl_report_luns()
9190 * we reject the request (per SPC-3 rev 14, section 6.21). in ctl_report_luns()
9207 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); in ctl_report_luns()
9208 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; in ctl_report_luns()
9209 ctsio->kern_sg_entries = 0; in ctl_report_luns()
9211 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_report_luns()
9213 mtx_lock(&softc->ctl_lock); in ctl_report_luns()
9220 lun = softc->ctl_luns[lun_id]; in ctl_report_luns()
9224 be64enc(lun_data->luns[num_filled++].lundata, in ctl_report_luns()
9228 * According to SPC-3, rev 14 section 6.21: in ctl_report_luns()
9244 mtx_lock(&lun->lun_lock); in ctl_report_luns()
9246 mtx_unlock(&lun->lun_lock); in ctl_report_luns()
9249 mtx_unlock(&softc->ctl_lock); in ctl_report_luns()
9257 ctsio->kern_rel_offset = 0; in ctl_report_luns()
9258 ctsio->kern_sg_entries = 0; in ctl_report_luns()
9259 ctsio->kern_data_len = min(lun_datalen, alloc_len); in ctl_report_luns()
9260 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_report_luns()
9269 scsi_ulto4b(lun_datalen - 8, lun_data->length); in ctl_report_luns()
9276 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_report_luns()
9277 ctsio->be_move_done = ctl_config_move_done; in ctl_report_luns()
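
The response built above is an 8-byte header followed by one 8-byte entry per reported LUN, and the LUN LIST LENGTH field counts only the entries, which is why lun_datalen - 8 is stored. A rough sketch of that layout for small LUN numbers (assuming simple peripheral-device addressing; the real code encodes each LUN into lundata with be64enc()):

	#include <stdint.h>
	#include <string.h>

	/*
	 * Sketch only: REPORT LUNS parameter data = 8-byte header followed
	 * by 8 bytes per LUN.  This illustrative helper handles only LUN
	 * numbers below 256.
	 */
	static size_t
	report_luns_sketch(uint8_t *buf, size_t buflen, const uint8_t *luns,
	    int nluns)
	{
		size_t list_len = 8 * (size_t)nluns;

		if (buflen < 8 + list_len)
			return (0);
		memset(buf, 0, 8 + list_len);
		buf[0] = list_len >> 24;	/* LUN LIST LENGTH, big-endian */
		buf[1] = list_len >> 16;
		buf[2] = list_len >> 8;
		buf[3] = list_len;
		for (int i = 0; i < nluns; i++)
			buf[8 + 8 * i + 1] = luns[i];	/* entry byte 1: LUN */
		return (8 + list_len);
	}
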
9296 cdb = (struct scsi_request_sense *)ctsio->cdb; in ctl_request_sense()
9303 if (cdb->byte2 & SRS_DESC) in ctl_request_sense()
9308 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); in ctl_request_sense()
9309 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; in ctl_request_sense()
9310 ctsio->kern_sg_entries = 0; in ctl_request_sense()
9311 ctsio->kern_rel_offset = 0; in ctl_request_sense()
9312 ctsio->kern_data_len = ctsio->kern_total_len = in ctl_request_sense()
9313 MIN(cdb->length, sizeof(*sense_ptr)); in ctl_request_sense()
9319 ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && in ctl_request_sense()
9320 softc->ha_link < CTL_HA_LINK_UNKNOWN)) { in ctl_request_sense()
9332 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_request_sense()
9337 mtx_lock(&lun->lun_lock); in ctl_request_sense()
9338 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; in ctl_request_sense()
9341 if (ps != NULL && ps->error_code != 0) { in ctl_request_sense()
9358 * for some reason we'll just copy it out as-is. in ctl_request_sense()
9371 ps->error_code = 0; in ctl_request_sense()
9383 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { in ctl_request_sense()
9384 asc = lun->ie_asc; in ctl_request_sense()
9385 ascq = lun->ie_ascq; in ctl_request_sense()
9394 mtx_unlock(&lun->lun_lock); in ctl_request_sense()
9402 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_request_sense()
9403 ctsio->be_move_done = ctl_config_move_done; in ctl_request_sense()
9433 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_supported()
9434 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_supported()
9435 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_supported()
9436 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_supported()
9437 ctsio->kern_data_len = min(sup_page_size, alloc_len); in ctl_inquiry_evpd_supported()
9438 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_supported()
9446 pages->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_supported()
9447 lun->be_lun->lun_type; in ctl_inquiry_evpd_supported()
9449 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_supported()
9453 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; in ctl_inquiry_evpd_supported()
9455 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; in ctl_inquiry_evpd_supported()
9457 pages->page_list[p++] = SVPD_DEVICE_ID; in ctl_inquiry_evpd_supported()
9459 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; in ctl_inquiry_evpd_supported()
9461 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; in ctl_inquiry_evpd_supported()
9463 pages->page_list[p++] = SVPD_SCSI_PORTS; in ctl_inquiry_evpd_supported()
9464 /* Third-party Copy */ in ctl_inquiry_evpd_supported()
9465 pages->page_list[p++] = SVPD_SCSI_TPC; in ctl_inquiry_evpd_supported()
9467 pages->page_list[p++] = SVPD_SCSI_SFS; in ctl_inquiry_evpd_supported()
9468 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { in ctl_inquiry_evpd_supported()
9470 pages->page_list[p++] = SVPD_BLOCK_LIMITS; in ctl_inquiry_evpd_supported()
9472 pages->page_list[p++] = SVPD_BDC; in ctl_inquiry_evpd_supported()
9474 pages->page_list[p++] = SVPD_LBP; in ctl_inquiry_evpd_supported()
9476 pages->length = p; in ctl_inquiry_evpd_supported()
9479 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_supported()
9480 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_supported()
9496 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_serial()
9497 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_serial()
9498 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_serial()
9499 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_serial()
9500 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_inquiry_evpd_serial()
9501 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_serial()
9509 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_serial()
9510 lun->be_lun->lun_type; in ctl_inquiry_evpd_serial()
9512 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_serial()
9514 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; in ctl_inquiry_evpd_serial()
9515 sn_ptr->length = CTL_SN_LEN; in ctl_inquiry_evpd_serial()
9521 strncpy((char *)sn_ptr->serial_num, in ctl_inquiry_evpd_serial()
9522 (char *)lun->be_lun->serial_num, CTL_SN_LEN); in ctl_inquiry_evpd_serial()
9524 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); in ctl_inquiry_evpd_serial()
9527 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_serial()
9528 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_serial()
9544 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_eid()
9545 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_eid()
9546 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_eid()
9547 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_eid()
9548 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_inquiry_evpd_eid()
9549 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_eid()
9556 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_eid()
9557 lun->be_lun->lun_type; in ctl_inquiry_evpd_eid()
9559 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_eid()
9560 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; in ctl_inquiry_evpd_eid()
9561 scsi_ulto2b(data_len - 4, eid_ptr->page_length); in ctl_inquiry_evpd_eid()
9565 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; in ctl_inquiry_evpd_eid()
9569 eid_ptr->flags3 = SVPD_EID_V_SUP; in ctl_inquiry_evpd_eid()
9574 * it to that nexus once. This bit is required as of SPC-4. in ctl_inquiry_evpd_eid()
9576 eid_ptr->flags4 = SVPD_EID_LUICLR; in ctl_inquiry_evpd_eid()
9581 eid_ptr->flags5 = SVPD_EID_RTD_SUP; in ctl_inquiry_evpd_eid()
9587 * likely. This can be set to a maximum of 252 according to SPC-4, in ctl_inquiry_evpd_eid()
9591 eid_ptr->max_sense_length = 0; in ctl_inquiry_evpd_eid()
9594 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_eid()
9595 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_eid()
9610 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_mpp()
9611 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_mpp()
9612 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_mpp()
9613 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_mpp()
9614 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_inquiry_evpd_mpp()
9615 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_mpp()
9622 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_mpp()
9623 lun->be_lun->lun_type; in ctl_inquiry_evpd_mpp()
9625 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_mpp()
9626 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; in ctl_inquiry_evpd_mpp()
9627 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); in ctl_inquiry_evpd_mpp()
9628 mpp_ptr->descr[0].page_code = 0x3f; in ctl_inquiry_evpd_mpp()
9629 mpp_ptr->descr[0].subpage_code = 0xff; in ctl_inquiry_evpd_mpp()
9630 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; in ctl_inquiry_evpd_mpp()
9633 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_mpp()
9634 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_mpp()
9658 if (lun && lun->lun_devid) in ctl_inquiry_evpd_devid()
9659 data_len += lun->lun_devid->len; in ctl_inquiry_evpd_devid()
9660 if (port && port->port_devid) in ctl_inquiry_evpd_devid()
9661 data_len += port->port_devid->len; in ctl_inquiry_evpd_devid()
9662 if (port && port->target_devid) in ctl_inquiry_evpd_devid()
9663 data_len += port->target_devid->len; in ctl_inquiry_evpd_devid()
9665 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_devid()
9666 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_devid()
9667 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_devid()
9668 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_devid()
9669 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_devid()
9670 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_inquiry_evpd_devid()
9671 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_devid()
9678 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_devid()
9679 lun->be_lun->lun_type; in ctl_inquiry_evpd_devid()
9681 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_devid()
9682 devid_ptr->page_code = SVPD_DEVICE_ID; in ctl_inquiry_evpd_devid()
9683 scsi_ulto2b(data_len - 4, devid_ptr->length); in ctl_inquiry_evpd_devid()
9685 if (port && port->port_type == CTL_PORT_FC) in ctl_inquiry_evpd_devid()
9687 else if (port && port->port_type == CTL_PORT_SAS) in ctl_inquiry_evpd_devid()
9689 else if (port && port->port_type == CTL_PORT_ISCSI) in ctl_inquiry_evpd_devid()
9693 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; in ctl_inquiry_evpd_devid()
9697 * per-LUN identifier. in ctl_inquiry_evpd_devid()
9699 if (lun && lun->lun_devid) { in ctl_inquiry_evpd_devid()
9700 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); in ctl_inquiry_evpd_devid()
9702 lun->lun_devid->len); in ctl_inquiry_evpd_devid()
9708 if (port && port->port_devid) { in ctl_inquiry_evpd_devid()
9709 memcpy(desc, port->port_devid->data, port->port_devid->len); in ctl_inquiry_evpd_devid()
9711 port->port_devid->len); in ctl_inquiry_evpd_devid()
9717 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; in ctl_inquiry_evpd_devid()
9718 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | in ctl_inquiry_evpd_devid()
9720 desc->length = 4; in ctl_inquiry_evpd_devid()
9721 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); in ctl_inquiry_evpd_devid()
9722 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + in ctl_inquiry_evpd_devid()
9728 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; in ctl_inquiry_evpd_devid()
9729 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | in ctl_inquiry_evpd_devid()
9731 desc->length = 4; in ctl_inquiry_evpd_devid()
9732 if (softc->is_single || in ctl_inquiry_evpd_devid()
9733 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) in ctl_inquiry_evpd_devid()
9736 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; in ctl_inquiry_evpd_devid()
9737 scsi_ulto2b(g, &desc->identifier[2]); in ctl_inquiry_evpd_devid()
9738 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + in ctl_inquiry_evpd_devid()
9744 if (port && port->target_devid) { in ctl_inquiry_evpd_devid()
9745 memcpy(desc, port->target_devid->data, port->target_devid->len); in ctl_inquiry_evpd_devid()
9749 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_devid()
9750 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_devid()
9769 mtx_lock(&softc->ctl_lock); in ctl_inquiry_evpd_scsi_ports()
9770 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_inquiry_evpd_scsi_ports()
9771 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) in ctl_inquiry_evpd_scsi_ports()
9774 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) in ctl_inquiry_evpd_scsi_ports()
9777 if (port->init_devid) in ctl_inquiry_evpd_scsi_ports()
9778 iid_len += port->init_devid->len; in ctl_inquiry_evpd_scsi_ports()
9779 if (port->port_devid) in ctl_inquiry_evpd_scsi_ports()
9780 id_len += port->port_devid->len; in ctl_inquiry_evpd_scsi_ports()
9782 mtx_unlock(&softc->ctl_lock); in ctl_inquiry_evpd_scsi_ports()
9787 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_scsi_ports()
9788 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_scsi_ports()
9789 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_scsi_ports()
9790 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_scsi_ports()
9791 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_scsi_ports()
9792 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_inquiry_evpd_scsi_ports()
9793 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_scsi_ports()
9801 sp->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_scsi_ports()
9802 lun->be_lun->lun_type; in ctl_inquiry_evpd_scsi_ports()
9804 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_scsi_ports()
9806 sp->page_code = SVPD_SCSI_PORTS; in ctl_inquiry_evpd_scsi_ports()
9807 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), in ctl_inquiry_evpd_scsi_ports()
9808 sp->page_length); in ctl_inquiry_evpd_scsi_ports()
9809 pd = &sp->design[0]; in ctl_inquiry_evpd_scsi_ports()
9811 mtx_lock(&softc->ctl_lock); in ctl_inquiry_evpd_scsi_ports()
9812 STAILQ_FOREACH(port, &softc->port_list, links) { in ctl_inquiry_evpd_scsi_ports()
9813 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) in ctl_inquiry_evpd_scsi_ports()
9816 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) in ctl_inquiry_evpd_scsi_ports()
9818 scsi_ulto2b(port->targ_port, pd->relative_port_id); in ctl_inquiry_evpd_scsi_ports()
9819 if (port->init_devid) { in ctl_inquiry_evpd_scsi_ports()
9820 iid_len = port->init_devid->len; in ctl_inquiry_evpd_scsi_ports()
9821 memcpy(pd->initiator_transportid, in ctl_inquiry_evpd_scsi_ports()
9822 port->init_devid->data, port->init_devid->len); in ctl_inquiry_evpd_scsi_ports()
9825 scsi_ulto2b(iid_len, pd->initiator_transportid_length); in ctl_inquiry_evpd_scsi_ports()
9827 (&pd->initiator_transportid[iid_len]); in ctl_inquiry_evpd_scsi_ports()
9828 if (port->port_devid) { in ctl_inquiry_evpd_scsi_ports()
9829 id_len = port->port_devid->len; in ctl_inquiry_evpd_scsi_ports()
9830 memcpy(pdc->target_port_descriptors, in ctl_inquiry_evpd_scsi_ports()
9831 port->port_devid->data, port->port_devid->len); in ctl_inquiry_evpd_scsi_ports()
9834 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); in ctl_inquiry_evpd_scsi_ports()
9836 ((uint8_t *)pdc->target_port_descriptors + id_len); in ctl_inquiry_evpd_scsi_ports()
9838 mtx_unlock(&softc->ctl_lock); in ctl_inquiry_evpd_scsi_ports()
9841 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_scsi_ports()
9842 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_scsi_ports()
9855 ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_sfs()
9856 sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_sfs()
9857 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_sfs()
9858 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_sfs()
9859 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_sfs()
9860 ctsio->kern_data_len = min(sfs_page_size, alloc_len); in ctl_inquiry_evpd_sfs()
9861 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_sfs()
9869 sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_sfs()
9870 lun->be_lun->lun_type; in ctl_inquiry_evpd_sfs()
9872 sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_sfs()
9874 sfs_ptr->page_code = SVPD_SCSI_SFS; in ctl_inquiry_evpd_sfs()
9877 scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); in ctl_inquiry_evpd_sfs()
9878 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { in ctl_inquiry_evpd_sfs()
9880 scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); in ctl_inquiry_evpd_sfs()
9882 scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); in ctl_inquiry_evpd_sfs()
9883 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { in ctl_inquiry_evpd_sfs()
9885 scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); in ctl_inquiry_evpd_sfs()
9888 //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); in ctl_inquiry_evpd_sfs()
9890 scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); in ctl_inquiry_evpd_sfs()
9893 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_sfs()
9894 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_sfs()
9907 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_block_limits()
9908 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_block_limits()
9909 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_block_limits()
9910 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_block_limits()
9911 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_block_limits()
9912 ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); in ctl_inquiry_evpd_block_limits()
9913 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_block_limits()
9921 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_block_limits()
9922 lun->be_lun->lun_type; in ctl_inquiry_evpd_block_limits()
9924 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_block_limits()
9926 bl_ptr->page_code = SVPD_BLOCK_LIMITS; in ctl_inquiry_evpd_block_limits()
9927 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); in ctl_inquiry_evpd_block_limits()
9928 bl_ptr->max_cmp_write_len = 0xff; in ctl_inquiry_evpd_block_limits()
9929 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); in ctl_inquiry_evpd_block_limits()
9931 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); in ctl_inquiry_evpd_block_limits()
9932 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { in ctl_inquiry_evpd_block_limits()
9934 val = dnvlist_get_string(lun->be_lun->options, in ctl_inquiry_evpd_block_limits()
9938 scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); in ctl_inquiry_evpd_block_limits()
9940 val = dnvlist_get_string(lun->be_lun->options, in ctl_inquiry_evpd_block_limits()
9944 scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); in ctl_inquiry_evpd_block_limits()
9945 if (lun->be_lun->ublockexp != 0) { in ctl_inquiry_evpd_block_limits()
9946 scsi_ulto4b((1 << lun->be_lun->ublockexp), in ctl_inquiry_evpd_block_limits()
9947 bl_ptr->opt_unmap_grain); in ctl_inquiry_evpd_block_limits()
9948 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, in ctl_inquiry_evpd_block_limits()
9949 bl_ptr->unmap_grain_align); in ctl_inquiry_evpd_block_limits()
9952 scsi_ulto4b(lun->be_lun->atomicblock, in ctl_inquiry_evpd_block_limits()
9953 bl_ptr->max_atomic_transfer_length); in ctl_inquiry_evpd_block_limits()
9954 scsi_ulto4b(0, bl_ptr->atomic_alignment); in ctl_inquiry_evpd_block_limits()
9955 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); in ctl_inquiry_evpd_block_limits()
9956 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); in ctl_inquiry_evpd_block_limits()
9957 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); in ctl_inquiry_evpd_block_limits()
9959 val = dnvlist_get_string(lun->be_lun->options, in ctl_inquiry_evpd_block_limits()
9963 scsi_u64to8b(ival, bl_ptr->max_write_same_length); in ctl_inquiry_evpd_block_limits()
9964 if (lun->be_lun->maxlba + 1 > ival) in ctl_inquiry_evpd_block_limits()
9965 bl_ptr->flags |= SVPD_BL_WSNZ; in ctl_inquiry_evpd_block_limits()
9969 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_block_limits()
9970 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_block_limits()
9983 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_bdc()
9984 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_bdc()
9985 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_bdc()
9986 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_bdc()
9987 ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); in ctl_inquiry_evpd_bdc()
9988 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_bdc()
9996 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_bdc()
9997 lun->be_lun->lun_type; in ctl_inquiry_evpd_bdc()
9999 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_bdc()
10000 bdc_ptr->page_code = SVPD_BDC; in ctl_inquiry_evpd_bdc()
10001 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); in ctl_inquiry_evpd_bdc()
10003 (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) in ctl_inquiry_evpd_bdc()
10007 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); in ctl_inquiry_evpd_bdc()
10009 (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) in ctl_inquiry_evpd_bdc()
10013 bdc_ptr->wab_wac_ff = (i & 0x0f); in ctl_inquiry_evpd_bdc()
10014 bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; in ctl_inquiry_evpd_bdc()
10017 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_bdc()
10018 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_bdc()
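
The Block Device Characteristics page above fills MEDIUM ROTATION RATE from the LUN's "rpm" option when one is set (in SBC, 0x0001 means a non-rotating / solid-state medium and 0x0000 means not reported) and takes the low nibble of "formfactor" for the NOMINAL FORM FACTOR field. A hedged userland-style sketch of that translation, with strtoul standing in for the kernel option parsing:

	#include <stdint.h>
	#include <stdlib.h>

	/* Sketch only: map optional option strings onto VPD 0xB1 fields. */
	static void
	bdc_fields_from_options(const char *rpm, const char *formfactor,
	    uint16_t *rotation_rate, uint8_t *form_factor_nibble)
	{

		*rotation_rate = 0;		/* 0x0000: not reported */
		*form_factor_nibble = 0;
		if (rpm != NULL)
			*rotation_rate = (uint16_t)strtoul(rpm, NULL, 0);
		if (formfactor != NULL)
			*form_factor_nibble =
			    (uint8_t)(strtoul(formfactor, NULL, 0) & 0x0f);
	}
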
10030 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_evpd_lbp()
10031 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; in ctl_inquiry_evpd_lbp()
10032 ctsio->kern_sg_entries = 0; in ctl_inquiry_evpd_lbp()
10033 ctsio->kern_rel_offset = 0; in ctl_inquiry_evpd_lbp()
10034 ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); in ctl_inquiry_evpd_lbp()
10035 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_evpd_lbp()
10043 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_evpd_lbp()
10044 lun->be_lun->lun_type; in ctl_inquiry_evpd_lbp()
10046 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; in ctl_inquiry_evpd_lbp()
10048 lbp_ptr->page_code = SVPD_LBP; in ctl_inquiry_evpd_lbp()
10049 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); in ctl_inquiry_evpd_lbp()
10050 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; in ctl_inquiry_evpd_lbp()
10051 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { in ctl_inquiry_evpd_lbp()
10052 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | in ctl_inquiry_evpd_lbp()
10054 value = dnvlist_get_string(lun->be_lun->options, in ctl_inquiry_evpd_lbp()
10058 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; in ctl_inquiry_evpd_lbp()
10060 lbp_ptr->prov_type = SVPD_LBP_THIN; in ctl_inquiry_evpd_lbp()
10062 lbp_ptr->prov_type = SVPD_LBP_THIN; in ctl_inquiry_evpd_lbp()
10066 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_evpd_lbp()
10067 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_evpd_lbp()
10082 cdb = (struct scsi_inquiry *)ctsio->cdb; in ctl_inquiry_evpd()
10083 alloc_len = scsi_2btoul(cdb->length); in ctl_inquiry_evpd()
10085 switch (cdb->page_code) { in ctl_inquiry_evpd()
10111 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) in ctl_inquiry_evpd()
10116 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) in ctl_inquiry_evpd()
10121 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) in ctl_inquiry_evpd()
10156 port_type = port->port_type; in ctl_inquiry_std()
10160 cdb = (struct scsi_inquiry *)ctsio->cdb; in ctl_inquiry_std()
10161 alloc_len = scsi_2btoul(cdb->length); in ctl_inquiry_std()
10169 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_inquiry_std()
10170 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; in ctl_inquiry_std()
10171 ctsio->kern_sg_entries = 0; in ctl_inquiry_std()
10172 ctsio->kern_rel_offset = 0; in ctl_inquiry_std()
10173 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_inquiry_std()
10174 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_inquiry_std()
10177 if ((lun->flags & CTL_LUN_PRIMARY_SC) || in ctl_inquiry_std()
10178 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { in ctl_inquiry_std()
10179 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | in ctl_inquiry_std()
10180 lun->be_lun->lun_type; in ctl_inquiry_std()
10182 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | in ctl_inquiry_std()
10183 lun->be_lun->lun_type; in ctl_inquiry_std()
10185 if (lun->flags & CTL_LUN_REMOVABLE) in ctl_inquiry_std()
10186 inq_ptr->dev_qual2 |= SID_RMB; in ctl_inquiry_std()
10188 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; in ctl_inquiry_std()
10191 inq_ptr->version = SCSI_REV_SPC5; in ctl_inquiry_std()
10194 * According to SAM-3, even if a device only supports a single in ctl_inquiry_std()
10202 * standard INQUIRY data (see SPC-2) when any logical unit number in ctl_inquiry_std()
10203 * format described in this standard is used. Non-hierarchical in ctl_inquiry_std()
10208 * The response format is 2, per SPC-3. in ctl_inquiry_std()
10210 inq_ptr->response_format = SID_HiSup | 2; in ctl_inquiry_std()
10212 inq_ptr->additional_length = data_len - in ctl_inquiry_std()
10215 inq_ptr->additional_length)); in ctl_inquiry_std()
10217 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; in ctl_inquiry_std()
10219 inq_ptr->spc2_flags = SPC2_SID_ADDR16; in ctl_inquiry_std()
10220 inq_ptr->spc2_flags |= SPC2_SID_MultiP; in ctl_inquiry_std()
10221 inq_ptr->flags = SID_CmdQue; in ctl_inquiry_std()
10223 inq_ptr->flags |= SID_WBus16 | SID_Sync; in ctl_inquiry_std()
10226 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. in ctl_inquiry_std()
10230 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, in ctl_inquiry_std()
10232 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); in ctl_inquiry_std()
10234 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); in ctl_inquiry_std()
10235 strncpy(inq_ptr->vendor, val, in ctl_inquiry_std()
10236 min(sizeof(inq_ptr->vendor), strlen(val))); in ctl_inquiry_std()
10239 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, in ctl_inquiry_std()
10240 sizeof(inq_ptr->product)); in ctl_inquiry_std()
10241 } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", in ctl_inquiry_std()
10243 switch (lun->be_lun->lun_type) { in ctl_inquiry_std()
10245 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, in ctl_inquiry_std()
10246 sizeof(inq_ptr->product)); in ctl_inquiry_std()
10249 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, in ctl_inquiry_std()
10250 sizeof(inq_ptr->product)); in ctl_inquiry_std()
10253 strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, in ctl_inquiry_std()
10254 sizeof(inq_ptr->product)); in ctl_inquiry_std()
10257 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, in ctl_inquiry_std()
10258 sizeof(inq_ptr->product)); in ctl_inquiry_std()
10262 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); in ctl_inquiry_std()
10263 strncpy(inq_ptr->product, val, in ctl_inquiry_std()
10264 min(sizeof(inq_ptr->product), strlen(val))); in ctl_inquiry_std()
10271 if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, in ctl_inquiry_std()
10273 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); in ctl_inquiry_std()
10275 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); in ctl_inquiry_std()
10276 strncpy(inq_ptr->revision, val, in ctl_inquiry_std()
10277 min(sizeof(inq_ptr->revision), strlen(val))); in ctl_inquiry_std()
10287 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | in ctl_inquiry_std()
10290 /* SAM-6 (no version claimed) */ in ctl_inquiry_std()
10291 scsi_ulto2b(0x00C0, inq_ptr->version1); in ctl_inquiry_std()
10292 /* SPC-5 (no version claimed) */ in ctl_inquiry_std()
10293 scsi_ulto2b(0x05C0, inq_ptr->version2); in ctl_inquiry_std()
10295 /* FCP-2 ANSI INCITS.350:2003 */ in ctl_inquiry_std()
10296 scsi_ulto2b(0x0917, inq_ptr->version3); in ctl_inquiry_std()
10298 /* SPI-4 ANSI INCITS.362:200x */ in ctl_inquiry_std()
10299 scsi_ulto2b(0x0B56, inq_ptr->version3); in ctl_inquiry_std()
10302 scsi_ulto2b(0x0960, inq_ptr->version3); in ctl_inquiry_std()
10305 scsi_ulto2b(0x0BE0, inq_ptr->version3); in ctl_inquiry_std()
10307 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ in ctl_inquiry_std()
10308 scsi_ulto2b(0x1730, inq_ptr->version3); in ctl_inquiry_std()
10312 /* SBC-4 (no version claimed) */ in ctl_inquiry_std()
10313 scsi_ulto2b(0x0600, inq_ptr->version4); in ctl_inquiry_std()
10315 switch (lun->be_lun->lun_type) { in ctl_inquiry_std()
10317 /* SBC-4 (no version claimed) */ in ctl_inquiry_std()
10318 scsi_ulto2b(0x0600, inq_ptr->version4); in ctl_inquiry_std()
10323 /* MMC-6 (no version claimed) */ in ctl_inquiry_std()
10324 scsi_ulto2b(0x04E0, inq_ptr->version4); in ctl_inquiry_std()
10332 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_inquiry_std()
10333 ctsio->be_move_done = ctl_config_move_done; in ctl_inquiry_std()
10346 cdb = (struct scsi_inquiry *)ctsio->cdb; in ctl_inquiry()
10347 if (cdb->byte2 & SI_EVPD) in ctl_inquiry()
10349 else if (cdb->page_code == 0) in ctl_inquiry()
10375 cdb = (struct scsi_get_config *)ctsio->cdb; in ctl_get_config()
10376 rt = (cdb->rt & SGC_RT_MASK); in ctl_get_config()
10377 starting = scsi_2btoul(cdb->starting_feature); in ctl_get_config()
10378 alloc_len = scsi_2btoul(cdb->length); in ctl_get_config()
10393 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_get_config()
10394 ctsio->kern_sg_entries = 0; in ctl_get_config()
10395 ctsio->kern_rel_offset = 0; in ctl_get_config()
10397 hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; in ctl_get_config()
10398 if (lun->flags & CTL_LUN_NO_MEDIA) in ctl_get_config()
10399 scsi_ulto2b(0x0000, hdr->current_profile); in ctl_get_config()
10401 scsi_ulto2b(0x0010, hdr->current_profile); in ctl_get_config()
10430 scsi_ulto2b(0x0000, feature->feature_code); in ctl_get_config()
10431 feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; in ctl_get_config()
10432 feature->add_length = 8; in ctl_get_config()
10433 scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ in ctl_get_config()
10434 feature->feature_data[2] = 0x00; in ctl_get_config()
10435 scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ in ctl_get_config()
10436 feature->feature_data[6] = 0x01; in ctl_get_config()
10438 &feature->feature_data[feature->add_length]; in ctl_get_config()
10441 scsi_ulto2b(0x0001, feature->feature_code); in ctl_get_config()
10442 feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; in ctl_get_config()
10443 feature->add_length = 8; in ctl_get_config()
10444 scsi_ulto4b(0x00000000, &feature->feature_data[0]); in ctl_get_config()
10445 feature->feature_data[4] = 0x03; in ctl_get_config()
10447 &feature->feature_data[feature->add_length]; in ctl_get_config()
10450 scsi_ulto2b(0x0002, feature->feature_code); in ctl_get_config()
10451 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; in ctl_get_config()
10452 feature->add_length = 4; in ctl_get_config()
10453 feature->feature_data[0] = 0x02; in ctl_get_config()
10455 &feature->feature_data[feature->add_length]; in ctl_get_config()
10458 scsi_ulto2b(0x0003, feature->feature_code); in ctl_get_config()
10459 feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; in ctl_get_config()
10460 feature->add_length = 4; in ctl_get_config()
10461 feature->feature_data[0] = 0x39; in ctl_get_config()
10463 &feature->feature_data[feature->add_length]; in ctl_get_config()
10465 if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) in ctl_get_config()
10469 scsi_ulto2b(0x0010, feature->feature_code); in ctl_get_config()
10470 feature->flags = 0x00; in ctl_get_config()
10471 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) in ctl_get_config()
10472 feature->flags |= SGC_F_CURRENT; in ctl_get_config()
10473 feature->add_length = 8; in ctl_get_config()
10474 scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); in ctl_get_config()
10475 scsi_ulto2b(1, &feature->feature_data[4]); in ctl_get_config()
10476 feature->feature_data[6] = 0x00; in ctl_get_config()
10478 &feature->feature_data[feature->add_length]; in ctl_get_config()
10480 f1d: /* Multi-Read */ in ctl_get_config()
10481 scsi_ulto2b(0x001D, feature->feature_code); in ctl_get_config()
10482 feature->flags = 0x00; in ctl_get_config()
10483 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) in ctl_get_config()
10484 feature->flags |= SGC_F_CURRENT; in ctl_get_config()
10485 feature->add_length = 0; in ctl_get_config()
10487 &feature->feature_data[feature->add_length]; in ctl_get_config()
10490 scsi_ulto2b(0x001E, feature->feature_code); in ctl_get_config()
10491 feature->flags = 0x00; in ctl_get_config()
10492 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) in ctl_get_config()
10493 feature->flags |= SGC_F_CURRENT; in ctl_get_config()
10494 feature->add_length = 4; in ctl_get_config()
10495 feature->feature_data[0] = 0x00; in ctl_get_config()
10497 &feature->feature_data[feature->add_length]; in ctl_get_config()
10500 scsi_ulto2b(0x001F, feature->feature_code); in ctl_get_config()
10501 feature->flags = 0x08; in ctl_get_config()
10502 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) in ctl_get_config()
10503 feature->flags |= SGC_F_CURRENT; in ctl_get_config()
10504 feature->add_length = 4; in ctl_get_config()
10505 feature->feature_data[0] = 0x01; in ctl_get_config()
10506 feature->feature_data[2] = 0x03; in ctl_get_config()
10508 &feature->feature_data[feature->add_length]; in ctl_get_config()
10511 scsi_ulto2b(0x002A, feature->feature_code); in ctl_get_config()
10512 feature->flags = 0x04; in ctl_get_config()
10513 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) in ctl_get_config()
10514 feature->flags |= SGC_F_CURRENT; in ctl_get_config()
10515 feature->add_length = 4; in ctl_get_config()
10516 feature->feature_data[0] = 0x00; in ctl_get_config()
10517 feature->feature_data[1] = 0x00; in ctl_get_config()
10519 &feature->feature_data[feature->add_length]; in ctl_get_config()
10522 scsi_ulto2b(0x002B, feature->feature_code); in ctl_get_config()
10523 feature->flags = 0x00; in ctl_get_config()
10524 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) in ctl_get_config()
10525 feature->flags |= SGC_F_CURRENT; in ctl_get_config()
10526 feature->add_length = 4; in ctl_get_config()
10527 feature->feature_data[0] = 0x00; in ctl_get_config()
10529 &feature->feature_data[feature->add_length]; in ctl_get_config()
10532 scsi_ulto2b(0x003A, feature->feature_code); in ctl_get_config()
10533 feature->flags = 0x00; in ctl_get_config()
10534 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) in ctl_get_config()
10535 feature->flags |= SGC_F_CURRENT; in ctl_get_config()
10536 feature->add_length = 4; in ctl_get_config()
10537 feature->feature_data[0] = 0x00; in ctl_get_config()
10538 feature->feature_data[1] = 0x00; in ctl_get_config()
10540 &feature->feature_data[feature->add_length]; in ctl_get_config()
10543 scsi_ulto2b(0x003B, feature->feature_code); in ctl_get_config()
10544 feature->flags = 0x00; in ctl_get_config()
10545 if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) in ctl_get_config()
10546 feature->flags |= SGC_F_CURRENT; in ctl_get_config()
10547 feature->add_length = 4; in ctl_get_config()
10548 feature->feature_data[0] = 0x00; in ctl_get_config()
10550 &feature->feature_data[feature->add_length]; in ctl_get_config()
10553 data_len = (uint8_t *)feature - (uint8_t *)hdr; in ctl_get_config()
10556 if (scsi_2btoul(feature->feature_code) == starting) in ctl_get_config()
10558 &feature->feature_data[feature->add_length]; in ctl_get_config()
10559 data_len = (uint8_t *)feature - (uint8_t *)hdr; in ctl_get_config()
10561 scsi_ulto4b(data_len - 4, hdr->data_length); in ctl_get_config()
10562 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_get_config()
10563 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_get_config()
10566 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_get_config()
10567 ctsio->be_move_done = ctl_config_move_done; in ctl_get_config()
10579 cdb = (struct scsi_get_event_status *)ctsio->cdb; in ctl_get_event_status()
10580 if ((cdb->byte2 & SGESN_POLLED) == 0) { in ctl_get_event_status()
10586 alloc_len = scsi_2btoul(cdb->length); in ctl_get_event_status()
10589 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_get_event_status()
10590 ctsio->kern_sg_entries = 0; in ctl_get_event_status()
10591 ctsio->kern_rel_offset = 0; in ctl_get_event_status()
10592 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_get_event_status()
10593 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_get_event_status()
10595 hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; in ctl_get_event_status()
10596 scsi_ulto2b(0, hdr->descr_length); in ctl_get_event_status()
10597 hdr->nea_class = SGESN_NEA; in ctl_get_event_status()
10598 hdr->supported_class = 0; in ctl_get_event_status()
10601 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_get_event_status()
10602 ctsio->be_move_done = ctl_config_move_done; in ctl_get_event_status()
10614 cdb = (struct scsi_mechanism_status *)ctsio->cdb; in ctl_mechanism_status()
10615 alloc_len = scsi_2btoul(cdb->length); in ctl_mechanism_status()
10618 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_mechanism_status()
10619 ctsio->kern_sg_entries = 0; in ctl_mechanism_status()
10620 ctsio->kern_rel_offset = 0; in ctl_mechanism_status()
10621 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_mechanism_status()
10622 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_mechanism_status()
10624 hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; in ctl_mechanism_status()
10625 hdr->state1 = 0x00; in ctl_mechanism_status()
10626 hdr->state2 = 0xe0; in ctl_mechanism_status()
10627 scsi_ulto3b(0, hdr->lba); in ctl_mechanism_status()
10628 hdr->slots_num = 0; in ctl_mechanism_status()
10629 scsi_ulto2b(0, hdr->slots_length); in ctl_mechanism_status()
10632 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_mechanism_status()
10633 ctsio->be_move_done = ctl_config_move_done; in ctl_mechanism_status()
10659 cdb = (struct scsi_read_toc *)ctsio->cdb; in ctl_read_toc()
10660 msf = (cdb->byte2 & CD_MSF) != 0; in ctl_read_toc()
10661 format = cdb->format; in ctl_read_toc()
10662 alloc_len = scsi_2btoul(cdb->data_len); in ctl_read_toc()
10669 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); in ctl_read_toc()
10670 ctsio->kern_sg_entries = 0; in ctl_read_toc()
10671 ctsio->kern_rel_offset = 0; in ctl_read_toc()
10672 ctsio->kern_data_len = min(data_len, alloc_len); in ctl_read_toc()
10673 ctsio->kern_total_len = ctsio->kern_data_len; in ctl_read_toc()
10675 hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; in ctl_read_toc()
10677 scsi_ulto2b(0x12, hdr->data_length); in ctl_read_toc()
10678 hdr->first = 1; in ctl_read_toc()
10679 hdr->last = 1; in ctl_read_toc()
10681 descr->addr_ctl = 0x14; in ctl_read_toc()
10682 descr->track_number = 1; in ctl_read_toc()
10684 ctl_ultomsf(0, descr->track_start); in ctl_read_toc()
10686 scsi_ulto4b(0, descr->track_start); in ctl_read_toc()
10688 descr->addr_ctl = 0x14; in ctl_read_toc()
10689 descr->track_number = 0xaa; in ctl_read_toc()
10691 ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); in ctl_read_toc()
10693 scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); in ctl_read_toc()
10695 scsi_ulto2b(0x0a, hdr->data_length); in ctl_read_toc()
10696 hdr->first = 1; in ctl_read_toc()
10697 hdr->last = 1; in ctl_read_toc()
10699 descr->addr_ctl = 0x14; in ctl_read_toc()
10700 descr->track_number = 1; in ctl_read_toc()
10702 ctl_ultomsf(0, descr->track_start); in ctl_read_toc()
10704 scsi_ulto4b(0, descr->track_start); in ctl_read_toc()
10708 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_read_toc()
10709 ctsio->be_move_done = ctl_config_move_done; in ctl_read_toc()
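
When the MSF bit is set, each track start above is reported in minute/second/frame form rather than as a logical block address; CD addressing uses 75 frames per second plus a 150-frame (two second) lead-in offset. A small sketch of that conversion (the in-kernel helper is ctl_ultomsf(); the exact body below is an assumption based on the usual convention):

	#include <stdint.h>

	/* Sketch only: convert an absolute LBA to the 4-byte MSF form. */
	static void
	lba_to_msf_sketch(uint32_t lba, uint8_t msf[4])
	{

		lba += 150;			/* 2-second lead-in offset */
		msf[0] = 0;			/* reserved */
		msf[1] = lba / (75 * 60);	/* minutes */
		msf[2] = (lba / 75) % 60;	/* seconds */
		msf[3] = lba % 75;		/* frames */
	}
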
10722 switch (ctnio->cmd.opc) { in ctl_nvme_get_lba_len()
10729 *lba = (uint64_t)le32toh(ctnio->cmd.cdw11) << 32 | in ctl_nvme_get_lba_len()
10730 le32toh(ctnio->cmd.cdw10); in ctl_nvme_get_lba_len()
10731 *len = (le32toh(ctnio->cmd.cdw12) & 0xffff) + 1; in ctl_nvme_get_lba_len()
10743 return ((le32toh(ctnio->cmd.cdw12) & (1U << 30)) != 0); in ctl_nvme_fua()
10757 MPASS(ctnio->cmd.opc == NVME_OPC_IDENTIFY); in ctl_nvme_identify()
10765 ctnio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); in ctl_nvme_identify()
10766 ctnio->kern_data_len = len; in ctl_nvme_identify()
10767 ctnio->kern_total_len = len; in ctl_nvme_identify()
10768 ctnio->kern_rel_offset = 0; in ctl_nvme_identify()
10769 ctnio->kern_sg_entries = 0; in ctl_nvme_identify()
10772 ctnio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_nvme_identify()
10773 ctnio->be_move_done = ctl_config_move_done; in ctl_nvme_identify()
10779 cns = le32toh(ctnio->cmd.cdw10) & 0xff; in ctl_nvme_identify()
10782 memset(ctnio->kern_data_ptr, 0, len); in ctl_nvme_identify()
10792 retval = lun->backend->config_read((union ctl_io *)ctnio); in ctl_nvme_identify()
10805 MPASS(ctnio->cmd.opc == NVME_OPC_FLUSH); in ctl_nvme_flush()
10811 retval = lun->backend->config_write((union ctl_io *)ctnio); in ctl_nvme_flush()
10827 ctnio->cmd.opc)); in ctl_nvme_read_write()
10830 MPASS(ctnio->cmd.opc == NVME_OPC_WRITE || in ctl_nvme_read_write()
10831 ctnio->cmd.opc == NVME_OPC_READ); in ctl_nvme_read_write()
10834 isread = ctnio->cmd.opc == NVME_OPC_READ; in ctl_nvme_read_write()
10839 * check is to catch wrap-around problems. If the lba + num blocks in ctl_nvme_read_write()
10843 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_nvme_read_write()
10864 &ctnio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_nvme_read_write()
10865 lbalen->lba = lba; in ctl_nvme_read_write()
10866 lbalen->len = num_blocks; in ctl_nvme_read_write()
10867 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; in ctl_nvme_read_write()
10869 ctnio->kern_total_len = num_blocks * lun->be_lun->blocksize; in ctl_nvme_read_write()
10870 ctnio->kern_rel_offset = 0; in ctl_nvme_read_write()
10874 retval = lun->backend->data_submit((union ctl_io *)ctnio); in ctl_nvme_read_write()
10890 MPASS(ctnio->cmd.opc == NVME_OPC_WRITE_UNCORRECTABLE); in ctl_nvme_write_uncorrectable()
10896 * check is to catch wrap-around problems. If the lba + num blocks in ctl_nvme_write_uncorrectable()
10900 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_nvme_write_uncorrectable()
10908 &ctnio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_nvme_write_uncorrectable()
10909 lbalen->lba = lba; in ctl_nvme_write_uncorrectable()
10910 lbalen->len = num_blocks; in ctl_nvme_write_uncorrectable()
10911 lbalen->flags = 0; in ctl_nvme_write_uncorrectable()
10912 retval = lun->backend->config_write((union ctl_io *)ctnio); in ctl_nvme_write_uncorrectable()
10930 MPASS(ctnio->cmd.opc == NVME_OPC_COMPARE); in ctl_nvme_compare()
10939 * check is to catch wrap-around problems. If the lba + num blocks in ctl_nvme_compare()
10943 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_nvme_compare()
10951 &ctnio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_nvme_compare()
10952 lbalen->lba = lba; in ctl_nvme_compare()
10953 lbalen->len = num_blocks; in ctl_nvme_compare()
10954 lbalen->flags = CTL_LLF_COMPARE | flags; in ctl_nvme_compare()
10955 ctnio->kern_total_len = num_blocks * lun->be_lun->blocksize; in ctl_nvme_compare()
10956 ctnio->kern_rel_offset = 0; in ctl_nvme_compare()
10959 retval = lun->backend->data_submit((union ctl_io *)ctnio); in ctl_nvme_compare()
10975 MPASS(ctnio->cmd.opc == NVME_OPC_WRITE_ZEROES); in ctl_nvme_write_zeroes()
10981 * check is to catch wrap-around problems. If the lba + num blocks in ctl_nvme_write_zeroes()
10985 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_nvme_write_zeroes()
10993 &ctnio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_nvme_write_zeroes()
10994 lbalen->lba = lba; in ctl_nvme_write_zeroes()
10995 lbalen->len = num_blocks; in ctl_nvme_write_zeroes()
10996 lbalen->flags = 0; in ctl_nvme_write_zeroes()
10997 retval = lun->backend->config_write((union ctl_io *)ctnio); in ctl_nvme_write_zeroes()
11015 MPASS(ctnio->cmd.opc == NVME_OPC_DATASET_MANAGEMENT); in ctl_nvme_dataset_management()
11017 ranges = le32toh(ctnio->cmd.cdw10) & 0xff; in ctl_nvme_dataset_management()
11024 if ((ctnio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { in ctl_nvme_dataset_management()
11025 ctnio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); in ctl_nvme_dataset_management()
11026 ctnio->kern_data_len = len; in ctl_nvme_dataset_management()
11027 ctnio->kern_total_len = len; in ctl_nvme_dataset_management()
11028 ctnio->kern_rel_offset = 0; in ctl_nvme_dataset_management()
11029 ctnio->kern_sg_entries = 0; in ctl_nvme_dataset_management()
11030 ctnio->io_hdr.flags |= CTL_FLAG_ALLOCATED; in ctl_nvme_dataset_management()
11031 ctnio->be_move_done = ctl_config_move_done; in ctl_nvme_dataset_management()
11040 if (ctnio->kern_sg_entries > 0 || in ctl_nvme_dataset_management()
11041 ctnio->kern_total_len - ctnio->kern_data_resid != len) in ctl_nvme_dataset_management()
11047 r = (struct nvme_dsm_range *)ctnio->kern_data_ptr; in ctl_nvme_dataset_management()
11051 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_nvme_dataset_management()
11060 retval = lun->backend->config_write((union ctl_io *)ctnio); in ctl_nvme_dataset_management()
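
DATASET MANAGEMENT carries its deallocate ranges in the data buffer, sixteen bytes per range (context attributes, a length in logical blocks, and a starting LBA); the code above walks those entries and applies the same bounds/wrap test as the read/write path before handing the request to the backend. A minimal sketch of that per-range validation (illustrative struct, not the driver's nvme_dsm_range):

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch of one 16-byte Dataset Management range entry. */
	struct dsm_range_sketch {
		uint32_t	attributes;
		uint32_t	length;		/* logical blocks */
		uint64_t	starting_lba;
	};

	/* Reject the whole request if any range is out of bounds or wraps. */
	static bool
	dsm_ranges_ok(const struct dsm_range_sketch *r, int nranges,
	    uint64_t maxlba)
	{

		for (int i = 0; i < nranges; i++) {
			uint64_t lba = r[i].starting_lba;
			uint64_t len = r[i].length;

			if (lba + len > maxlba + 1 || lba + len < lba)
				return (false);
		}
		return (true);
	}
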
11077 MPASS(ctnio->cmd.opc == NVME_OPC_VERIFY); in ctl_nvme_verify()
11086 * check is to catch wrap-around problems. If the lba + num blocks in ctl_nvme_verify()
11090 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) in ctl_nvme_verify()
11098 &ctnio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_nvme_verify()
11099 lbalen->lba = lba; in ctl_nvme_verify()
11100 lbalen->len = num_blocks; in ctl_nvme_verify()
11101 lbalen->flags = CTL_LLF_VERIFY | flags; in ctl_nvme_verify()
11102 ctnio->kern_total_len = 0; in ctl_nvme_verify()
11103 ctnio->kern_rel_offset = 0; in ctl_nvme_verify()
11106 retval = lun->backend->data_submit((union ctl_io *)ctnio); in ctl_nvme_verify()
11115 switch (ctnio->io_hdr.io_type) { in ctl_nvme_get_cmd_entry()
11117 entry = &nvme_nvm_cmd_table[ctnio->cmd.opc]; in ctl_nvme_get_cmd_entry()
11120 entry = &nvme_admin_cmd_table[ctnio->cmd.opc]; in ctl_nvme_get_cmd_entry()
11134 if (entry->execute == NULL) { in ctl_nvme_validate_command()
11141 switch (NVMEV(NVME_CMD_FUSE, ctnio->cmd.fuse)) { in ctl_nvme_validate_command()
11145 if (ctnio->io_hdr.io_type != CTL_IO_NVME || in ctl_nvme_validate_command()
11146 ctnio->cmd.opc != NVME_OPC_COMPARE) { in ctl_nvme_validate_command()
11153 if (ctnio->io_hdr.io_type != CTL_IO_NVME || in ctl_nvme_validate_command()
11154 ctnio->cmd.opc != NVME_OPC_COMPARE) { in ctl_nvme_validate_command()
11179 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_nvmeio_lun_check()
11181 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { in ctl_nvmeio_lun_check()
11182 if ((lun->flags & (CTL_LUN_EJECTED | CTL_LUN_NO_MEDIA | in ctl_nvmeio_lun_check()
11195 * - pending_io is generally either incoming, or on the blocked queue
11196 * - starting I/O is the I/O we want to start the check with.
11206 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_nvme_check_ooa()
11215 if (__predict_false(pending_io->io_hdr.flags & CTL_FLAG_ABORT)) in ctl_nvme_check_ooa()
11226 * - the first half of a fused command is not enqueued to rtr in ctl_nvme_check_ooa()
11228 * - the second half of a fused command blocks on the first in ctl_nvme_check_ooa()
11230 * - subsequent commands block on the second half of the in ctl_nvme_check_ooa()
11239 NVMEV(NVME_CMD_FUSE, ooa_io->nvmeio.cmd.fuse) == NVME_FUSE_FIRST) { in ctl_nvme_check_ooa()
11244 if (NVMEV(NVME_CMD_FUSE, pending_io->nvmeio.cmd.fuse) == in ctl_nvme_check_ooa()
11254 ctl_nvme_set_missing_fused_command(&ooa_io->nvmeio); in ctl_nvme_check_ooa()
11257 switch (NVMEV(NVME_CMD_FUSE, pending_io->nvmeio.cmd.fuse)) { in ctl_nvme_check_ooa()
11263 ctl_nvme_set_missing_fused_command(&pending_io->nvmeio); in ctl_nvme_check_ooa()
11274 ooa_io = (union ctl_io *)LIST_NEXT(&ooa_io->io_hdr, ooa_links)) { in ctl_nvme_check_ooa()
11275 if (NVMEV(NVME_CMD_FUSE, ooa_io->nvmeio.cmd.fuse) == in ctl_nvme_check_ooa()
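The fused-command comments in ctl_nvme_check_ooa() above describe three ordering rules. Below is a standalone model of just the two pairwise rules (the second half waits for its first half; any later command waits behind an outstanding second half). The rule that a first half is withheld from the run queue until its partner arrives is a release decision rather than a pairwise check, so it is not modeled. The enum and function names are illustrative only, not the ctl_io machinery.

/* Standalone sketch of the fused-command blocking rules; not ctl.c code. */
#include <stdbool.h>
#include <stdio.h>

enum fuse { FUSE_NONE, FUSE_FIRST, FUSE_SECOND };

/* Must 'pending' wait behind the older, still-outstanding 'older'? */
static bool
fused_blocks(enum fuse pending, enum fuse older)
{
	if (older == FUSE_FIRST)		/* second half waits for first */
		return (pending == FUSE_SECOND);
	if (older == FUSE_SECOND)		/* later commands wait behind second */
		return (true);
	return (false);
}

int
main(void)
{
	printf("%d\n", fused_blocks(FUSE_SECOND, FUSE_FIRST));	/* 1 */
	printf("%d\n", fused_blocks(FUSE_NONE, FUSE_SECOND));	/* 1 */
	printf("%d\n", fused_blocks(FUSE_NONE, FUSE_NONE));	/* 0 */
	return (0);
}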
11296 targ_lun = ctnio->io_hdr.nexus.targ_mapped_lun; in ctl_nvmeio_precheck()
11298 lun = softc->ctl_luns[targ_lun]; in ctl_nvmeio_precheck()
11305 mtx_lock(&lun->lun_lock); in ctl_nvmeio_precheck()
11306 if (lun->flags & CTL_LUN_DISABLED) { in ctl_nvmeio_precheck()
11307 mtx_unlock(&lun->lun_lock); in ctl_nvmeio_precheck()
11313 CTL_BACKEND_LUN(ctnio) = lun->be_lun; in ctl_nvmeio_precheck()
11320 if (LIST_EMPTY(&lun->ooa_queue)) in ctl_nvmeio_precheck()
11321 lun->idle_time += getsbinuptime() - lun->last_busy; in ctl_nvmeio_precheck()
11323 LIST_INSERT_HEAD(&lun->ooa_queue, &ctnio->io_hdr, ooa_links); in ctl_nvmeio_precheck()
11330 mtx_unlock(&lun->lun_lock); in ctl_nvmeio_precheck()
11334 ctnio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; in ctl_nvmeio_precheck()
11335 ctnio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; in ctl_nvmeio_precheck()
11339 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { in ctl_nvmeio_precheck()
11340 ctnio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; in ctl_nvmeio_precheck()
11353 if (lun->be_lun->lun_type != T_DIRECT) { in ctl_nvmeio_precheck()
11354 mtx_unlock(&lun->lun_lock); in ctl_nvmeio_precheck()
11362 mtx_unlock(&lun->lun_lock); in ctl_nvmeio_precheck()
11367 bio = (union ctl_io *)LIST_NEXT(&ctnio->io_hdr, ooa_links); in ctl_nvmeio_precheck()
11371 ctnio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; in ctl_nvmeio_precheck()
11372 mtx_unlock(&lun->lun_lock); in ctl_nvmeio_precheck()
11377 ctnio->io_hdr.blocker = bio; in ctl_nvmeio_precheck()
11378 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctnio->io_hdr, in ctl_nvmeio_precheck()
11382 bio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; in ctl_nvmeio_precheck()
11383 mtx_unlock(&lun->lun_lock); in ctl_nvmeio_precheck()
11387 mtx_unlock(&lun->lun_lock); in ctl_nvmeio_precheck()
11390 ctnio->io_hdr.blocker = bio; in ctl_nvmeio_precheck()
11391 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctnio->io_hdr, in ctl_nvmeio_precheck()
11393 mtx_unlock(&lun->lun_lock); in ctl_nvmeio_precheck()
11409 ctnio->io_hdr.io_type == CTL_IO_NVME ? "nvm" : "admin", in ctl_nvmeio()
11410 ctnio->cmd.opc)); in ctl_nvmeio()
11419 if (ctnio->io_hdr.flags & CTL_FLAG_ABORT) { in ctl_nvmeio()
11428 retval = entry->execute(ctnio); in ctl_nvmeio()
11442 switch (io->scsiio.cdb[0]) { in ctl_get_lba_len()
11446 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; in ctl_get_lba_len()
11448 *lba = scsi_8btou64(cdb->addr); in ctl_get_lba_len()
11449 *len = cdb->length; in ctl_get_lba_len()
11456 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; in ctl_get_lba_len()
11458 *lba = scsi_3btoul(cdb->addr); in ctl_get_lba_len()
11461 *len = cdb->length; in ctl_get_lba_len()
11468 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; in ctl_get_lba_len()
11470 *lba = scsi_4btoul(cdb->addr); in ctl_get_lba_len()
11471 *len = scsi_2btoul(cdb->length); in ctl_get_lba_len()
11477 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; in ctl_get_lba_len()
11479 *lba = scsi_4btoul(cdb->addr); in ctl_get_lba_len()
11480 *len = scsi_2btoul(cdb->length); in ctl_get_lba_len()
11487 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; in ctl_get_lba_len()
11489 *lba = scsi_4btoul(cdb->addr); in ctl_get_lba_len()
11490 *len = scsi_4btoul(cdb->length); in ctl_get_lba_len()
11496 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; in ctl_get_lba_len()
11498 *lba = scsi_4btoul(cdb->addr); in ctl_get_lba_len()
11499 *len = scsi_4btoul(cdb->length); in ctl_get_lba_len()
11506 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; in ctl_get_lba_len()
11508 *lba = scsi_8btou64(cdb->addr); in ctl_get_lba_len()
11509 *len = scsi_4btoul(cdb->length); in ctl_get_lba_len()
11515 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; in ctl_get_lba_len()
11517 *lba = scsi_8btou64(cdb->addr); in ctl_get_lba_len()
11518 *len = scsi_2btoul(cdb->length); in ctl_get_lba_len()
11524 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; in ctl_get_lba_len()
11526 *lba = scsi_8btou64(cdb->addr); in ctl_get_lba_len()
11527 *len = scsi_4btoul(cdb->length); in ctl_get_lba_len()
11533 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; in ctl_get_lba_len()
11535 *lba = scsi_4btoul(cdb->addr); in ctl_get_lba_len()
11536 *len = scsi_2btoul(cdb->length); in ctl_get_lba_len()
11542 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; in ctl_get_lba_len()
11544 *lba = scsi_8btou64(cdb->addr); in ctl_get_lba_len()
11545 *len = scsi_4btoul(cdb->length); in ctl_get_lba_len()
11551 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; in ctl_get_lba_len()
11553 *lba = scsi_4btoul(cdb->addr); in ctl_get_lba_len()
11554 *len = scsi_2btoul(cdb->length); in ctl_get_lba_len()
11560 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; in ctl_get_lba_len()
11562 *lba = scsi_4btoul(cdb->addr); in ctl_get_lba_len()
11563 *len = scsi_4btoul(cdb->length); in ctl_get_lba_len()
11569 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; in ctl_get_lba_len()
11571 *lba = scsi_8btou64(cdb->addr); in ctl_get_lba_len()
11572 *len = scsi_4btoul(cdb->length); in ctl_get_lba_len()
11583 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; in ctl_get_lba_len()
11584 *lba = scsi_8btou64(cdb->addr); in ctl_get_lba_len()
11603 endlba1 = lba1 + len1 - (seq ? 0 : 1); in ctl_extent_check_lba()
11604 endlba2 = lba2 + len2 - 1; in ctl_extent_check_lba()
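The endlba arithmetic above reduces to a closed-interval overlap test: extents [lba1, lba1+len1-1] and [lba2, lba2+len2-1] collide when each one starts at or before the other ends, and the (seq ? 0 : 1) term widens the first extent by one block so that a strictly adjacent follow-on I/O is also caught. A minimal userland sketch of that test follows; extents_collide() and its boolean return convention are illustrative, not ctl.c code.

/* Minimal sketch of the overlap test above; not part of ctl.c. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
extents_collide(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
	/* 'seq' widens the first extent by one block to also catch adjacency. */
	uint64_t endlba1 = lba1 + len1 - (seq ? 0 : 1);
	uint64_t endlba2 = lba2 + len2 - 1;

	return (endlba1 >= lba2 && endlba2 >= lba1);
}

int
main(void)
{
	printf("%d\n", extents_collide(0, 8, 4, 8, false));	/* 1: overlap */
	printf("%d\n", extents_collide(0, 8, 8, 8, false));	/* 0: adjacent */
	printf("%d\n", extents_collide(0, 8, 8, 8, true));	/* 1: adjacent, seq */
	return (0);
}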
11622 /* If not UNMAP -- go other way. */ in ctl_extent_check_unmap()
11623 if (io->scsiio.cdb[0] != UNMAP) in ctl_extent_check_unmap()
11626 /* If UNMAP without data -- block and wait for data. */ in ctl_extent_check_unmap()
11628 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; in ctl_extent_check_unmap()
11629 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || in ctl_extent_check_unmap()
11630 ptrlen->ptr == NULL) in ctl_extent_check_unmap()
11633 /* UNMAP with data -- check for collision. */ in ctl_extent_check_unmap()
11634 buf = (struct scsi_unmap_desc *)ptrlen->ptr; in ctl_extent_check_unmap()
11635 end = buf + ptrlen->len / sizeof(*buf); in ctl_extent_check_unmap()
11637 lba = scsi_8btou64(range->lba); in ctl_extent_check_unmap()
11638 len = scsi_4btoul(range->length); in ctl_extent_check_unmap()
11662 if (seq && (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)) in ctl_extent_check()
11674 if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) in ctl_seq_check()
11697 if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) in ctl_check_for_blockage()
11698 && __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) in ctl_check_for_blockage()
11699 && ((pending_io->io_hdr.nexus.targ_port == in ctl_check_for_blockage()
11700 ooa_io->io_hdr.nexus.targ_port) in ctl_check_for_blockage()
11701 && (pending_io->io_hdr.nexus.initid == in ctl_check_for_blockage()
11702 ooa_io->io_hdr.nexus.initid)) in ctl_check_for_blockage()
11703 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | in ctl_check_for_blockage()
11718 if (__predict_true(pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) in ctl_check_for_blockage()
11719 && __predict_true(ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) in ctl_check_for_blockage()
11720 && __predict_false(pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) in ctl_check_for_blockage()
11721 && ((pending_io->io_hdr.nexus.targ_port == in ctl_check_for_blockage()
11722 ooa_io->io_hdr.nexus.targ_port) in ctl_check_for_blockage()
11723 && (pending_io->io_hdr.nexus.initid == in ctl_check_for_blockage()
11724 ooa_io->io_hdr.nexus.initid)) in ctl_check_for_blockage()
11725 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | in ctl_check_for_blockage()
11730 * If we get a head of queue tag, SAM-3 says that we should in ctl_check_for_blockage()
11743 if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) in ctl_check_for_blockage()
11751 if (__predict_false(ooa_io->scsiio.tag_type == CTL_TAG_ORDERED) || in ctl_check_for_blockage()
11752 __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) in ctl_check_for_blockage()
11756 if (__predict_false(ooa_io->scsiio.seridx == CTL_SERIDX_INVLD)) in ctl_check_for_blockage()
11759 switch (serialize_row[ooa_io->scsiio.seridx]) { in ctl_check_for_blockage()
11761 if (lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) in ctl_check_for_blockage()
11767 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == in ctl_check_for_blockage()
11773 (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); in ctl_check_for_blockage()
11775 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == in ctl_check_for_blockage()
11789 * - pending_io is generally either incoming, or on the blocked queue
11790 * - starting I/O is the I/O we want to start the check with.
11802 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_check_ooa()
11809 if (__predict_false(pending_io->io_hdr.flags & CTL_FLAG_ABORT)) in ctl_check_ooa()
11817 if ((pending_io->scsiio.tag_type == CTL_TAG_ORDERED) && in ctl_check_ooa()
11821 serialize_row = ctl_serialize_table[pending_io->scsiio.seridx]; in ctl_check_ooa()
11830 ooa_io = (union ctl_io *)LIST_NEXT(&ooa_io->io_hdr, ooa_links)) { in ctl_check_ooa()
11853 struct ctl_softc *softc = lun->ctl_softc; in ctl_scsi_try_unblock_io()
11861 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_scsi_try_unblock_io()
11863 if (io->io_hdr.blocker == NULL) in ctl_scsi_try_unblock_io()
11866 obio = bio = io->io_hdr.blocker; in ctl_scsi_try_unblock_io()
11868 bio = (union ctl_io *)LIST_NEXT(&bio->io_hdr, ooa_links); in ctl_scsi_try_unblock_io()
11873 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, in ctl_scsi_try_unblock_io()
11874 &io->io_hdr, blocked_links); in ctl_scsi_try_unblock_io()
11875 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, in ctl_scsi_try_unblock_io()
11876 &io->io_hdr, blocked_links); in ctl_scsi_try_unblock_io()
11877 io->io_hdr.blocker = bio; in ctl_scsi_try_unblock_io()
11883 TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); in ctl_scsi_try_unblock_io()
11884 io->io_hdr.blocker = NULL; in ctl_scsi_try_unblock_io()
11891 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && in ctl_scsi_try_unblock_io()
11892 (softc->ha_mode != CTL_HA_MODE_XFER)) { in ctl_scsi_try_unblock_io()
11893 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; in ctl_scsi_try_unblock_io()
11894 msg_info.hdr.original_sc = io->io_hdr.remote_io; in ctl_scsi_try_unblock_io()
11907 entry = ctl_get_cmd_entry(&io->scsiio, NULL); in ctl_scsi_try_unblock_io()
11908 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { in ctl_scsi_try_unblock_io()
11913 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; in ctl_scsi_try_unblock_io()
11919 ctl_set_overlapped_cmd(&io->scsiio); in ctl_scsi_try_unblock_io()
11922 ctl_set_overlapped_tag(&io->scsiio, in ctl_scsi_try_unblock_io()
11923 io->scsiio.tag_num & 0xff); in ctl_scsi_try_unblock_io()
11926 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && in ctl_scsi_try_unblock_io()
11927 (softc->ha_mode != CTL_HA_MODE_XFER)) { in ctl_scsi_try_unblock_io()
11929 LIST_REMOVE(&io->io_hdr, ooa_links); in ctl_scsi_try_unblock_io()
11932 msg_info.hdr.original_sc = io->io_hdr.remote_io; in ctl_scsi_try_unblock_io()
11954 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_nvme_try_unblock_io()
11956 if (io->io_hdr.blocker == NULL) in ctl_nvme_try_unblock_io()
11964 bio = io->io_hdr.blocker; in ctl_nvme_try_unblock_io()
11965 if (NVMEV(NVME_CMD_FUSE, io->nvmeio.cmd.fuse) == NVME_FUSE_SECOND) { in ctl_nvme_try_unblock_io()
11967 (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue)); in ctl_nvme_try_unblock_io()
11968 MPASS(TAILQ_NEXT(&io->io_hdr, blocked_links) == NULL); in ctl_nvme_try_unblock_io()
11970 TAILQ_REMOVE(&bio->io_hdr.blocked_queue, &io->io_hdr, in ctl_nvme_try_unblock_io()
11972 if (bio->io_hdr.status != CTL_SUCCESS) { in ctl_nvme_try_unblock_io()
11973 ctl_nvme_set_failed_fused_command(&io->nvmeio); in ctl_nvme_try_unblock_io()
11982 MPASS(NVMEV(NVME_CMD_FUSE, bio->nvmeio.cmd.fuse) == in ctl_nvme_try_unblock_io()
11984 TAILQ_REMOVE(&bio->io_hdr.blocked_queue, &io->io_hdr, in ctl_nvme_try_unblock_io()
11988 entry = ctl_nvme_get_cmd_entry(&io->nvmeio); in ctl_nvme_try_unblock_io()
11989 if (ctl_nvmeio_lun_check(lun, entry, &io->nvmeio) != 0) { in ctl_nvme_try_unblock_io()
11994 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; in ctl_nvme_try_unblock_io()
12001 switch (io->io_hdr.io_type) { in ctl_try_unblock_io()
12025 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_try_unblock_others()
12027 for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue); in ctl_try_unblock_others()
12029 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links); in ctl_try_unblock_others()
12031 KASSERT(io->io_hdr.blocker != NULL, in ctl_try_unblock_others()
12035 KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue), in ctl_try_unblock_others()
12055 struct ctl_softc *softc = lun->ctl_softc; in ctl_scsiio_lun_check()
12061 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_scsiio_lun_check()
12067 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { in ctl_scsiio_lun_check()
12068 if (softc->ha_link == CTL_HA_LINK_OFFLINE && in ctl_scsiio_lun_check()
12069 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { in ctl_scsiio_lun_check()
12074 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && in ctl_scsiio_lun_check()
12075 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { in ctl_scsiio_lun_check()
12080 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && in ctl_scsiio_lun_check()
12081 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { in ctl_scsiio_lun_check()
12088 if (softc->ha_mode == CTL_HA_MODE_XFER) in ctl_scsiio_lun_check()
12092 if (entry->pattern & CTL_LUN_PAT_WRITE) { in ctl_scsiio_lun_check()
12093 if (lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { in ctl_scsiio_lun_check()
12098 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { in ctl_scsiio_lun_check()
12112 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_scsiio_lun_check()
12113 if ((lun->flags & CTL_LUN_RESERVED) in ctl_scsiio_lun_check()
12114 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { in ctl_scsiio_lun_check()
12115 if (lun->res_idx != residx) { in ctl_scsiio_lun_check()
12122 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || in ctl_scsiio_lun_check()
12123 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { in ctl_scsiio_lun_check()
12125 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && in ctl_scsiio_lun_check()
12126 (lun->pr_res_type == SPR_TYPE_WR_EX || in ctl_scsiio_lun_check()
12127 lun->pr_res_type == SPR_TYPE_WR_EX_RO || in ctl_scsiio_lun_check()
12128 lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { in ctl_scsiio_lun_check()
12137 (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { in ctl_scsiio_lun_check()
12144 if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { in ctl_scsiio_lun_check()
12145 if (lun->flags & CTL_LUN_EJECTED) in ctl_scsiio_lun_check()
12147 else if (lun->flags & CTL_LUN_NO_MEDIA) { in ctl_scsiio_lun_check()
12148 if (lun->flags & CTL_LUN_REMOVABLE) in ctl_scsiio_lun_check()
12152 } else if (lun->flags & CTL_LUN_STOPPED) in ctl_scsiio_lun_check()
12169 ctl_set_busy(&io->scsiio); in ctl_failover_io()
12181 targ_lun = rio->io_hdr.nexus.targ_mapped_lun; in ctl_failover_lun()
12185 mtx_lock(&softc->ctl_lock); in ctl_failover_lun()
12187 (lun = softc->ctl_luns[targ_lun]) == NULL) { in ctl_failover_lun()
12188 mtx_unlock(&softc->ctl_lock); in ctl_failover_lun()
12191 mtx_lock(&lun->lun_lock); in ctl_failover_lun()
12192 mtx_unlock(&softc->ctl_lock); in ctl_failover_lun()
12193 if (lun->flags & CTL_LUN_DISABLED) { in ctl_failover_lun()
12194 mtx_unlock(&lun->lun_lock); in ctl_failover_lun()
12198 if (softc->ha_mode == CTL_HA_MODE_XFER) { in ctl_failover_lun()
12199 LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { in ctl_failover_lun()
12201 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { in ctl_failover_lun()
12202 if (io->flags & CTL_FLAG_IO_ACTIVE) { in ctl_failover_lun()
12203 io->flags |= CTL_FLAG_ABORT | in ctl_failover_lun()
12208 io->msg_type = CTL_MSG_DATAMOVE_DONE; in ctl_failover_lun()
12209 io->flags &= ~CTL_FLAG_DMA_INPROG; in ctl_failover_lun()
12210 io->flags |= CTL_FLAG_IO_ACTIVE; in ctl_failover_lun()
12211 io->port_status = 31340; in ctl_failover_lun()
12216 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { in ctl_failover_lun()
12217 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; in ctl_failover_lun()
12218 if (io->flags & CTL_FLAG_IO_ACTIVE) { in ctl_failover_lun()
12219 io->flags |= CTL_FLAG_FAILOVER; in ctl_failover_lun()
12221 ctl_set_busy(&((union ctl_io *)io)-> in ctl_failover_lun()
12228 LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { in ctl_failover_lun()
12230 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { in ctl_failover_lun()
12231 if (io->blocker != NULL) { in ctl_failover_lun()
12232 TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, in ctl_failover_lun()
12234 io->blocker = NULL; in ctl_failover_lun()
12242 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { in ctl_failover_lun()
12243 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; in ctl_failover_lun()
12244 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { in ctl_failover_lun()
12245 ctl_set_busy(&((union ctl_io *)io)-> in ctl_failover_lun()
12252 mtx_unlock(&lun->lun_lock); in ctl_failover_lun()
12265 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; in ctl_scsiio_precheck()
12267 lun = softc->ctl_luns[targ_lun]; in ctl_scsiio_precheck()
12274 mtx_lock(&lun->lun_lock); in ctl_scsiio_precheck()
12275 if (lun->flags & CTL_LUN_DISABLED) { in ctl_scsiio_precheck()
12276 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12282 CTL_BACKEND_LUN(ctsio) = lun->be_lun; in ctl_scsiio_precheck()
12289 if (LIST_EMPTY(&lun->ooa_queue)) in ctl_scsiio_precheck()
12290 lun->idle_time += getsbinuptime() - lun->last_busy; in ctl_scsiio_precheck()
12292 LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); in ctl_scsiio_precheck()
12299 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12303 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; in ctl_scsiio_precheck()
12304 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; in ctl_scsiio_precheck()
12314 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { in ctl_scsiio_precheck()
12315 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; in ctl_scsiio_precheck()
12329 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { in ctl_scsiio_precheck()
12330 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12337 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); in ctl_scsiio_precheck()
12345 if (ctsio->cdb[0] != REQUEST_SENSE) { in ctl_scsiio_precheck()
12348 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; in ctl_scsiio_precheck()
12360 * According to SAM-3, section 5.3.2, the order that things get in ctl_scsiio_precheck()
12375 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { in ctl_scsiio_precheck()
12379 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, in ctl_scsiio_precheck()
12382 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12383 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; in ctl_scsiio_precheck()
12384 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; in ctl_scsiio_precheck()
12385 ctsio->sense_len = sense_len; in ctl_scsiio_precheck()
12392 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12406 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && in ctl_scsiio_precheck()
12407 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && in ctl_scsiio_precheck()
12408 (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { in ctl_scsiio_precheck()
12412 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; in ctl_scsiio_precheck()
12413 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; in ctl_scsiio_precheck()
12414 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12419 msg_info.hdr.nexus = ctsio->io_hdr.nexus; in ctl_scsiio_precheck()
12420 msg_info.scsi.tag_num = ctsio->tag_num; in ctl_scsiio_precheck()
12421 msg_info.scsi.tag_type = ctsio->tag_type; in ctl_scsiio_precheck()
12422 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); in ctl_scsiio_precheck()
12423 msg_info.scsi.cdb_len = ctsio->cdb_len; in ctl_scsiio_precheck()
12424 msg_info.scsi.priority = ctsio->priority; in ctl_scsiio_precheck()
12427 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), in ctl_scsiio_precheck()
12429 ctsio->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; in ctl_scsiio_precheck()
12430 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; in ctl_scsiio_precheck()
12438 bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); in ctl_scsiio_precheck()
12442 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; in ctl_scsiio_precheck()
12443 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12447 ctsio->io_hdr.blocker = bio; in ctl_scsiio_precheck()
12448 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, in ctl_scsiio_precheck()
12450 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12453 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12458 mtx_unlock(&lun->lun_lock); in ctl_scsiio_precheck()
12459 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); in ctl_scsiio_precheck()
12473 entry = &ctl_cmd_table[ctsio->cdb[0]]; in ctl_get_cmd_entry()
12475 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); in ctl_get_cmd_entry()
12476 if (entry->flags & CTL_CMD_FLAG_SA5) { in ctl_get_cmd_entry()
12477 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; in ctl_get_cmd_entry()
12479 entry->execute)[service_action]; in ctl_get_cmd_entry()
12492 ctsio->seridx = entry->seridx; in ctl_validate_command()
12493 if (entry->execute == NULL) { in ctl_validate_command()
12506 KASSERT(entry->length > 0, in ctl_validate_command()
12508 ctsio->cdb[0], ctsio->cdb[1])); in ctl_validate_command()
12509 for (i = 1; i < entry->length; i++) { in ctl_validate_command()
12510 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; in ctl_validate_command()
12518 /*bit*/ fls(diff) - 1); in ctl_validate_command()
12531 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) in ctl_cmd_applicable()
12535 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) in ctl_cmd_applicable()
12539 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) in ctl_cmd_applicable()
12556 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); in ctl_scsiio()
12564 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { in ctl_scsiio()
12573 retval = entry->execute(ctsio); in ctl_scsiio()
12588 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { in ctl_target_reset()
12591 msg_info.hdr.nexus = io->io_hdr.nexus; in ctl_target_reset()
12592 msg_info.task.task_action = io->taskio.task_action; in ctl_target_reset()
12600 initidx = ctl_get_initindex(&io->io_hdr.nexus); in ctl_target_reset()
12601 if (io->taskio.task_action == CTL_TASK_TARGET_RESET) in ctl_target_reset()
12605 mtx_lock(&softc->ctl_lock); in ctl_target_reset()
12606 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_target_reset()
12608 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) in ctl_target_reset()
12612 mtx_unlock(&softc->ctl_lock); in ctl_target_reset()
12613 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; in ctl_target_reset()
12621 * SAM-3 is vague on this point. It does say that a unit attention should
12625 * is the relevant text, from SAM-3 rev 8:
12644 mtx_lock(&lun->lun_lock); in ctl_do_lun_reset()
12646 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { in ctl_do_lun_reset()
12647 xioh->flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; in ctl_do_lun_reset()
12652 free(lun->pending_sense[i], M_CTL); in ctl_do_lun_reset()
12653 lun->pending_sense[i] = NULL; in ctl_do_lun_reset()
12656 lun->flags &= ~CTL_LUN_RESERVED; in ctl_do_lun_reset()
12658 if (lun->prevent) { in ctl_do_lun_reset()
12660 ctl_clear_mask(lun->prevent, i); in ctl_do_lun_reset()
12661 lun->prevent_count = 0; in ctl_do_lun_reset()
12664 ctl_tpc_lun_clear(lun, -1); in ctl_do_lun_reset()
12669 ctl_est_ua_all(lun, -1, ua_type); in ctl_do_lun_reset()
12671 mtx_unlock(&lun->lun_lock); in ctl_do_lun_reset()
12681 targ_lun = io->io_hdr.nexus.targ_mapped_lun; in ctl_lun_reset()
12682 initidx = ctl_get_initindex(&io->io_hdr.nexus); in ctl_lun_reset()
12683 mtx_lock(&softc->ctl_lock); in ctl_lun_reset()
12685 (lun = softc->ctl_luns[targ_lun]) == NULL) { in ctl_lun_reset()
12686 mtx_unlock(&softc->ctl_lock); in ctl_lun_reset()
12687 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; in ctl_lun_reset()
12691 mtx_unlock(&softc->ctl_lock); in ctl_lun_reset()
12692 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; in ctl_lun_reset()
12694 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { in ctl_lun_reset()
12698 msg_info.hdr.nexus = io->io_hdr.nexus; in ctl_lun_reset()
12714 mtx_assert(&lun->lun_lock, MA_OWNED); in ctl_abort_tasks_lun()
12723 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { in ctl_abort_tasks_lun()
12727 targ_port == xioh->nexus.targ_port) && in ctl_abort_tasks_lun()
12729 init_id == xioh->nexus.initid)) { in ctl_abort_tasks_lun()
12730 if (targ_port != xioh->nexus.targ_port || in ctl_abort_tasks_lun()
12731 init_id != xioh->nexus.initid) in ctl_abort_tasks_lun()
12732 xioh->flags |= CTL_FLAG_ABORT_STATUS; in ctl_abort_tasks_lun()
12733 xioh->flags |= CTL_FLAG_ABORT; in ctl_abort_tasks_lun()
12734 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { in ctl_abort_tasks_lun()
12738 msg_info.hdr.nexus = xioh->nexus; in ctl_abort_tasks_lun()
12740 msg_info.task.tag_num = xio->scsiio.tag_num; in ctl_abort_tasks_lun()
12741 msg_info.task.tag_type = xio->scsiio.tag_type; in ctl_abort_tasks_lun()
12763 targ_lun = io->io_hdr.nexus.targ_mapped_lun; in ctl_abort_task_set()
12764 mtx_lock(&softc->ctl_lock); in ctl_abort_task_set()
12766 (lun = softc->ctl_luns[targ_lun]) == NULL) { in ctl_abort_task_set()
12767 mtx_unlock(&softc->ctl_lock); in ctl_abort_task_set()
12768 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; in ctl_abort_task_set()
12772 mtx_lock(&lun->lun_lock); in ctl_abort_task_set()
12773 mtx_unlock(&softc->ctl_lock); in ctl_abort_task_set()
12774 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { in ctl_abort_task_set()
12775 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, in ctl_abort_task_set()
12776 io->io_hdr.nexus.initid, in ctl_abort_task_set()
12777 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); in ctl_abort_task_set()
12780 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); in ctl_abort_task_set()
12782 mtx_unlock(&lun->lun_lock); in ctl_abort_task_set()
12783 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; in ctl_abort_task_set()
12797 mtx_lock(&softc->ctl_lock); in ctl_i_t_nexus_loss()
12798 STAILQ_FOREACH(lun, &softc->lun_list, links) { in ctl_i_t_nexus_loss()
12799 mtx_lock(&lun->lun_lock); in ctl_i_t_nexus_loss()
12803 ps = lun->pending_sense[p]; in ctl_i_t_nexus_loss()
12807 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) in ctl_i_t_nexus_loss()
12808 lun->flags &= ~CTL_LUN_RESERVED; in ctl_i_t_nexus_loss()
12810 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { in ctl_i_t_nexus_loss()
12811 ctl_clear_mask(lun->prevent, initidx); in ctl_i_t_nexus_loss()
12812 lun->prevent_count--; in ctl_i_t_nexus_loss()
12818 mtx_unlock(&lun->lun_lock); in ctl_i_t_nexus_loss()
12820 mtx_unlock(&softc->ctl_lock); in ctl_i_t_nexus_loss()
12829 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { in ctl_i_t_nexus_reset()
12832 msg_info.hdr.nexus = io->io_hdr.nexus; in ctl_i_t_nexus_reset()
12841 initidx = ctl_get_initindex(&io->io_hdr.nexus); in ctl_i_t_nexus_reset()
12843 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; in ctl_i_t_nexus_reset()
12858 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12859 mtx_lock(&softc->ctl_lock);
12861 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12862 mtx_unlock(&softc->ctl_lock);
12863 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12867 mtx_lock(&lun->lun_lock);
12868 mtx_unlock(&softc->ctl_lock);
12876 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) {
12880 if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port)
12881 || (xioh->nexus.initid != io->io_hdr.nexus.initid)
12882 || (xioh->flags & CTL_FLAG_ABORT))
12894 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
12895 && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
12896 || (xio->scsiio.tag_num == io->taskio.tag_num)) {
12905 if (xio->scsiio.tag_num == io->taskio.tag_num) {
12907 xioh->flags |= CTL_FLAG_ABORT;
12908 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
12909 !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12912 msg_info.hdr.nexus = io->io_hdr.nexus;
12914 msg_info.task.tag_num = io->taskio.tag_num;
12915 msg_info.task.tag_type = io->taskio.tag_type;
12925 mtx_unlock(&lun->lun_lock);
12926 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12939 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12940 mtx_lock(&softc->ctl_lock);
12942 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12943 mtx_unlock(&softc->ctl_lock);
12944 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12947 mtx_lock(&lun->lun_lock);
12948 mtx_unlock(&softc->ctl_lock);
12949 LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) {
12953 if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port)
12954 || (xioh->nexus.initid != io->io_hdr.nexus.initid)
12955 || (xioh->flags & CTL_FLAG_ABORT))
12958 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
12963 mtx_unlock(&lun->lun_lock);
12965 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12967 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12979 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12980 mtx_lock(&softc->ctl_lock);
12982 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12983 mtx_unlock(&softc->ctl_lock);
12984 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12987 mtx_lock(&lun->lun_lock);
12988 mtx_unlock(&softc->ctl_lock);
12989 initidx = ctl_get_initindex(&io->io_hdr.nexus);
12990 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
12991 mtx_unlock(&lun->lun_lock);
12993 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12995 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
13005 KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
13006 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type));
13007 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
13008 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
13009 switch (io->taskio.task_action) {
13044 __func__, io->taskio.task_action);
13048 io->io_hdr.status = CTL_SUCCESS;
13050 io->io_hdr.status = CTL_ERROR;
13068 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
13069 switch (io->io_hdr.msg_type) {
13071 ctl_serialize_other_sc_cmd(&io->scsiio);
13074 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
13076 (lun = softc->ctl_luns[targ_lun]) == NULL) {
13080 mtx_lock(&lun->lun_lock);
13081 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
13082 mtx_unlock(&lun->lun_lock);
13086 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
13087 mtx_unlock(&lun->lun_lock);
13091 if (softc->ha_mode == CTL_HA_MODE_XFER) {
13096 (lun = softc->ctl_luns[targ_lun]) == NULL) {
13100 mtx_lock(&lun->lun_lock);
13102 LIST_REMOVE(&io->io_hdr, ooa_links);
13103 mtx_unlock(&lun->lun_lock);
13125 __func__, io->io_hdr.msg_type);
13142 pattern = desc->error_pattern;
13157 filtered_pattern = entry->pattern & pattern;
13185 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
13186 desc->lba_range.len, FALSE);
13205 mtx_assert(&lun->lun_lock, MA_OWNED);
13207 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
13213 pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
13217 switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
13219 ctl_set_aborted(&io->scsiio);
13222 ctl_set_medium_error(&io->scsiio,
13223 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) !=
13229 ctl_set_ua(&io->scsiio, 0x29, 0x00);
13237 bcopy(&desc->custom_sense, &io->scsiio.sense_data,
13238 MIN(sizeof(desc->custom_sense),
13239 sizeof(io->scsiio.sense_data)));
13240 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
13241 io->scsiio.sense_len = SSD_FULL_SIZE;
13242 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
13251 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
13255 * By default, each error injection action is a one-shot
13257 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
13260 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
13285 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
13286 bintime_add(&io->io_hdr.dma_bt, &cur_bt);
13288 io->io_hdr.num_dmas++;
13290 if ((io->io_hdr.port_status != 0) &&
13291 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
13292 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
13293 switch (io->io_hdr.io_type) {
13295 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
13296 /*retry_count*/ io->io_hdr.port_status);
13300 if (io->io_hdr.flags & CTL_FLAG_ABORT)
13301 ctl_nvme_set_command_aborted(&io->nvmeio);
13303 ctl_nvme_set_data_transfer_error(&io->nvmeio);
13309 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
13310 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
13311 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
13312 switch (io->io_hdr.io_type) {
13314 ctl_set_invalid_field_ciu(&io->scsiio);
13318 ctl_nvme_set_data_transfer_error(&io->nvmeio);
13340 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
13348 getbinuptime(&io->io_hdr.dma_start_bt);
13352 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
13353 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
13359 && (lun->delay_info.datamove_delay > 0)) {
13360 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
13361 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
13362 callout_reset(&io->io_hdr.delay_callout,
13363 lun->delay_info.datamove_delay * hz,
13365 if (lun->delay_info.datamove_type ==
13367 lun->delay_info.datamove_delay = 0;
13377 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
13378 switch (io->io_hdr.io_type) {
13381 io->scsiio.tag_num, io->io_hdr.nexus.initid,
13382 io->io_hdr.nexus.targ_port,
13383 io->io_hdr.nexus.targ_lun);
13388 le16toh(io->nvmeio.cmd.cid),
13389 io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port,
13390 io->io_hdr.nexus.targ_lun);
13395 io->io_hdr.port_status = 31337;
13408 fe_datamove = CTL_PORT(io)->fe_datamove;
13425 msg.hdr.serializing_sc = io->io_hdr.remote_io;
13426 msg.hdr.nexus = io->io_hdr.nexus;
13427 msg.hdr.status = io->io_hdr.status;
13428 msg.scsi.kern_data_resid = io->scsiio.kern_data_resid;
13429 msg.scsi.tag_num = io->scsiio.tag_num;
13430 msg.scsi.tag_type = io->scsiio.tag_type;
13431 msg.scsi.scsi_status = io->scsiio.scsi_status;
13432 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
13433 io->scsiio.sense_len);
13434 msg.scsi.sense_len = io->scsiio.sense_len;
13435 msg.scsi.port_status = io->io_hdr.port_status;
13436 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
13437 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
13442 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
13447 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
13448 bintime_add(&io->io_hdr.dma_bt, &cur_bt);
13450 io->io_hdr.num_dmas++;
13463 io = rq->context;
13466 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
13468 rq->ret);
13469 ctl_set_internal_failure(&io->scsiio,
13471 /*retry_count*/ rq->ret);
13476 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13512 * - Get the data from the host/HBA into local memory.
13513 * - DMA memory from the local controller to the remote controller.
13514 * - Send status back to the remote controller.
13522 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io);
13528 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
13530 fe_datamove = CTL_PORT(io)->fe_datamove;
13541 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
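The three write-side steps listed above (pull the data locally, push it to the peer controller, return status) are driven by chaining completion callbacks, which is what the be_move_done assignment in the lines above does. The toy below only shows that chaining shape with placeholder stage functions; none of these names exist in ctl.c.

/* Toy callback chain mirroring the three steps above; placeholder names. */
#include <stdio.h>

struct xfer;
typedef void (*done_fn)(struct xfer *);

struct xfer {
	const char *tag;
	done_fn done;		/* stage to run when the current one completes */
};

static void
send_status_to_peer(struct xfer *x)
{
	printf("%s: status returned to peer controller\n", x->tag);
}

static void
push_buffer_to_peer(struct xfer *x)
{
	printf("%s: local buffer transferred to peer controller\n", x->tag);
	x->done = send_status_to_peer;
	x->done(x);
}

static void
pull_data_from_host(struct xfer *x)
{
	printf("%s: data gathered from the host/HBA into local memory\n", x->tag);
	x->done = push_buffer_to_peer;
	x->done(x);
}

int
main(void)
{
	struct xfer x = { "write", pull_data_from_host };

	x.done(&x);
	return (0);
}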
13562 io = rq->context;
13565 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
13567 rq->ret);
13568 ctl_set_internal_failure(&io->scsiio,
13570 /*retry_count*/ rq->ret);
13576 io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io);
13582 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
13586 fe_datamove = CTL_PORT(io)->fe_datamove;
13602 len_to_go = io->scsiio.kern_data_len;
13615 len_to_go -= local_sglist[i].len;
13621 io->scsiio.kern_sg_entries = i;
13645 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
13646 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS))
13647 ctl_set_busy(&io->scsiio);
13649 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
13650 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
13682 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
13686 rq->command = command;
13687 rq->context = io;
13695 cur_len = MIN(local_sglist[i].len - local_used,
13696 remote_sglist[j].len - remote_used);
13697 rq->size = cur_len;
13704 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
13706 rq->local = vtophys(tmp_ptr);
13708 rq->local = tmp_ptr;
13710 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
13712 rq->local = tmp_ptr;
13717 rq->remote = tmp_ptr;
13719 rq->callback = NULL;
13734 if (total_used >= io->scsiio.kern_data_len)
13735 rq->callback = callback;
13742 rq->ret = isc_ret;
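The loop above advances a local and a remote scatter/gather list in lockstep, transferring MIN(local remaining, remote remaining) per request and stepping past whichever segment is exhausted. A standalone sketch of that two-cursor walk follows, with plain arrays in place of the ctl_sg_entry lists and a printf in place of the HA DMA request.

/* Standalone sketch of the lockstep SG walk above; arrays stand in for
 * the ctl_sg_entry lists and a printf stands in for the DMA request. */
#include <stdio.h>

int
main(void)
{
	size_t local[]  = { 4096, 4096, 8192 };		/* local segment lengths  */
	size_t remote[] = { 6144, 10240 };		/* remote segment lengths */
	size_t total = 16384;				/* kern_data_len          */
	size_t i = 0, j = 0, local_used = 0, remote_used = 0, total_used = 0;

	while (total_used < total) {
		size_t cur_len = local[i] - local_used;

		if (remote[j] - remote_used < cur_len)
			cur_len = remote[j] - remote_used;
		printf("xfer %zu bytes (local seg %zu, remote seg %zu)\n",
		    cur_len, i, j);
		local_used += cur_len;
		remote_used += cur_len;
		total_used += cur_len;
		if (local_used == local[i]) {		/* local segment done  */
			i++;
			local_used = 0;
		}
		if (remote_used == remote[j]) {		/* remote segment done */
			j++;
			remote_used = 0;
		}
	}
	return (0);
}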
13792 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
13794 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
13805 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
13807 io->scsiio.tag_num, io->io_hdr.nexus.initid,
13808 io->io_hdr.nexus.targ_port,
13809 io->io_hdr.nexus.targ_lun);
13810 io->io_hdr.port_status = 31338;
13815 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
13817 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
13820 io->io_hdr.port_status = 31339;
13835 fe_done = port->fe_done;
13838 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
13843 ctl_scsi_path_string(&io->io_hdr, path_str, sizeof(path_str));
13849 (intmax_t)time_uptime - io->io_hdr.start_time);
13855 switch (io->io_hdr.io_type) {
13867 __func__, io->io_hdr.io_type);
13872 io->io_hdr.nexus.targ_mapped_lun));
13876 mtx_lock(&lun->lun_lock);
13883 if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
13884 io->io_hdr.status == CTL_SUCCESS &&
13885 (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
13886 uint8_t mrie = lun->MODE_IE.mrie;
13887 uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
13888 (lun->MODE_VER.byte3 & SMS_VER_PER));
13894 (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
13896 ctl_set_sense(&io->scsiio,
13900 /*asc*/ lun->ie_asc,
13901 /*ascq*/ lun->ie_ascq,
13903 lun->ie_reported = 1;
13905 } else if (lun->ie_reported < 0)
13906 lun->ie_reported = 0;
13912 if (!STAILQ_EMPTY(&lun->error_list) &&
13913 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
13914 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
13923 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
13924 (io->io_hdr.io_type == CTL_IO_SCSI ||
13925 io->io_hdr.io_type == CTL_IO_NVME ||
13926 io->io_hdr.io_type == CTL_IO_NVME_ADMIN)) {
13932 bintime_sub(&bt, &io->io_hdr.start_bt);
13934 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13937 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13943 lun->stats.bytes[type] += ctl_kern_total_len(io);
13944 lun->stats.operations[type] ++;
13945 lun->stats.dmas[type] += io->io_hdr.num_dmas;
13947 bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
13948 bintime_add(&lun->stats.time[type], &bt);
13951 mtx_lock(&port->port_lock);
13952 port->stats.bytes[type] += ctl_kern_total_len(io);
13953 port->stats.operations[type] ++;
13954 port->stats.dmas[type] += io->io_hdr.num_dmas;
13956 bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
13957 bintime_add(&port->stats.time[type], &bt);
13959 mtx_unlock(&port->port_lock);
13972 LIST_REMOVE(&io->io_hdr, ooa_links);
13974 if (LIST_EMPTY(&lun->ooa_queue))
13975 lun->last_busy = getsbinuptime();
13982 if ((lun->flags & CTL_LUN_INVALID)
13983 && LIST_EMPTY(&lun->ooa_queue)) {
13984 mtx_unlock(&lun->lun_lock);
13987 mtx_unlock(&lun->lun_lock);
13996 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
13997 switch (io->io_hdr.io_type) {
13999 ctl_set_task_aborted(&io->scsiio);
14003 ctl_nvme_set_command_aborted(&io->nvmeio);
14013 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
14022 if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
14023 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
14026 msg.hdr.serializing_sc = io->io_hdr.remote_io;
14027 msg.hdr.nexus = io->io_hdr.nexus;
14029 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
14052 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
14057 * a per-LUN queue initially). That is so that we can handle
14062 mtx_lock(&softc->ctl_lock);
14064 (lun = softc->ctl_luns[targ_lun]) == NULL) {
14065 mtx_unlock(&softc->ctl_lock);
14068 mtx_lock(&lun->lun_lock);
14069 mtx_unlock(&softc->ctl_lock);
14071 initidx = ctl_get_initindex(&io->io_hdr.nexus);
14073 if (lun->pending_sense[p] == NULL) {
14074 lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
14077 if ((ps = lun->pending_sense[p]) != NULL) {
14080 memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
14082 mtx_unlock(&lun->lun_lock);
14098 switch (io->io_hdr.io_type) {
14101 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
14105 io->nvmeio.cmd.opc));
14109 io->nvmeio.cmd.opc));
14116 io->io_hdr.start_time = time_uptime;
14117 getbinuptime(&io->io_hdr.start_bt);
14120 /* Map FE-specific LUN ID into global one. */
14121 io->io_hdr.nexus.targ_mapped_lun =
14122 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
14124 switch (io->io_hdr.io_type) {
14134 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
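The "Map FE-specific LUN ID into global one" step in ctl_queue() and ctl_run() goes through ctl_lun_map_from_port(). The sketch below assumes the simplest reading of that call: a port with no map passes the LUN through unchanged, a mapped port returns the table entry, and anything outside the table comes back as UINT32_MAX (the invalid value checked elsewhere in this listing). The struct layout and behavior here are an illustration, not the real ctl_port.

/* Sketch of a per-port LUN map lookup; layout and behavior are assumed. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_port {
	uint32_t *lun_map;	/* NULL: no translation for this port */
	uint32_t  lun_map_size;
};

static uint32_t
lun_map_from_port(const struct fake_port *port, uint32_t fe_lun)
{
	if (port->lun_map == NULL)
		return (fe_lun);			/* pass-through */
	if (fe_lun >= port->lun_map_size)
		return (UINT32_MAX);			/* outside the map */
	return (port->lun_map[fe_lun]);
}

int
main(void)
{
	uint32_t map[2] = { 7, UINT32_MAX };
	struct fake_port plain = { NULL, 0 };
	struct fake_port mapped = { map, 2 };

	printf("%" PRIu32 "\n", lun_map_from_port(&plain, 3));	 /* 3 */
	printf("%" PRIu32 "\n", lun_map_from_port(&mapped, 0)); /* 7 */
	printf("%" PRIu32 "\n", lun_map_from_port(&mapped, 5)); /* 4294967295 */
	return (0);
}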
14146 CTL_DEBUG_PRINT(("ctl_run cdb[0]=%02X\n", io->scsiio.cdb[0]));
14149 io->io_hdr.start_time = time_uptime;
14150 getbinuptime(&io->io_hdr.start_bt);
14153 /* Map FE-specific LUN ID into global one. */
14154 io->io_hdr.nexus.targ_mapped_lun =
14155 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
14157 switch (io->io_hdr.io_type) {
14161 ctl_scsiio_precheck(&io->scsiio);
14172 ctl_nvmeio_precheck(&io->nvmeio);
14175 printf("ctl_run: unknown I/O type %d\n", io->io_hdr.io_type);
14199 if (!TAILQ_EMPTY(&io->io_hdr.blocked_queue)) {
14200 mtx_lock(&lun->lun_lock);
14201 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
14203 mtx_unlock(&lun->lun_lock);
14205 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
14216 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
14217 switch (io->io_hdr.io_type) {
14224 io->io_hdr.io_type,
14225 io->io_hdr.msg_type,
14226 io->scsiio.cdb[0],
14227 io->io_hdr.nexus.initid,
14228 io->io_hdr.nexus.targ_port,
14229 io->io_hdr.nexus.targ_lun,
14230 (io->io_hdr.io_type == CTL_IO_TASK) ?
14231 io->taskio.tag_num :
14232 io->scsiio.tag_num,
14233 io->io_hdr.flags,
14234 io->io_hdr.status);
14242 io->io_hdr.io_type,
14243 io->io_hdr.msg_type,
14244 io->nvmeio.cmd.opc,
14245 io->io_hdr.nexus.initid,
14246 io->io_hdr.nexus.targ_port,
14247 io->io_hdr.nexus.targ_lun,
14248 io->nvmeio.cmd.cid,
14249 io->io_hdr.flags,
14250 io->io_hdr.status);
14256 io->io_hdr.io_type,
14257 io->io_hdr.msg_type,
14258 io->io_hdr.nexus.initid,
14259 io->io_hdr.nexus.targ_port,
14260 io->io_hdr.nexus.targ_lun,
14261 io->io_hdr.flags,
14262 io->io_hdr.status);
14266 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
14273 if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
14277 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
14278 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
14283 && (lun->delay_info.done_delay > 0)) {
14284 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
14285 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
14286 callout_reset(&io->io_hdr.delay_callout,
14287 lun->delay_info.done_delay * hz,
14289 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
14290 lun->delay_info.done_delay = 0;
14303 struct ctl_softc *softc = thr->ctl_softc;
14309 sched_prio(curthread, PUSER - 1);
14312 while (!softc->shutdown) {
14315 * - ISC
14316 * - done queue (to free up resources, unblock other commands)
14317 * - incoming queue
14318 * - RtR queue
14323 mtx_lock(&thr->queue_lock);
14324 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
14326 STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
14327 mtx_unlock(&thr->queue_lock);
14331 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
14333 STAILQ_REMOVE_HEAD(&thr->done_queue, links);
14335 mtx_unlock(&thr->queue_lock);
14339 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
14341 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
14342 mtx_unlock(&thr->queue_lock);
14343 switch (io->io_hdr.io_type) {
14348 ctl_scsiio_precheck(&io->scsiio);
14352 ctl_nvmeio_precheck(&io->nvmeio);
14359 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
14361 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
14362 mtx_unlock(&thr->queue_lock);
14363 switch (io->io_hdr.io_type) {
14365 retval = ctl_scsiio(&io->scsiio);
14371 retval = ctl_nvmeio(&io->nvmeio);
14382 mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0);
14384 thr->thread = NULL;
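The queue priority spelled out in the ctl_work_thread() comment above (ISC, then done, then incoming, then RtR) amounts to servicing the highest-priority non-empty queue, re-checking from the top after each entry, and sleeping only when everything is empty. A standalone sketch of that drain discipline follows, with plain counters standing in for the STAILQs and no queue_lock.

/* Standalone sketch of the queue-priority polling loop described above;
 * counters stand in for the ISC/done/incoming/RtR STAILQs. */
#include <stdio.h>

enum { Q_ISC, Q_DONE, Q_INCOMING, Q_RTR, Q_COUNT };

static const char *qname[Q_COUNT] = { "isc", "done", "incoming", "rtr" };

int
main(void)
{
	int pending[Q_COUNT] = { 1, 2, 1, 3 };	/* fake backlog per queue */
	int q, busy;

	do {
		busy = 0;
		for (q = 0; q < Q_COUNT; q++) {
			if (pending[q] > 0) {
				/* Highest-priority non-empty queue wins. */
				printf("servicing one %s entry\n", qname[q]);
				pending[q]--;
				busy = 1;
				break;		/* re-check from the top */
			}
		}
	} while (busy);		/* a real thread would sleep here instead */
	return (0);
}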
14401 sched_prio(curthread, PUSER - 1);
14404 while (!softc->shutdown) {
14405 mtx_lock(&softc->ctl_lock);
14406 STAILQ_FOREACH(lun, &softc->lun_list, links) {
14407 if ((lun->flags & CTL_LUN_DISABLED) ||
14408 (lun->flags & CTL_LUN_NO_MEDIA) ||
14409 lun->backend->lun_attr == NULL)
14411 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
14412 softc->ha_mode == CTL_HA_MODE_XFER)
14414 if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
14417 page = &lun->MODE_LBP;
14419 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
14421 thres = scsi_4btoul(page->descr[i].count);
14423 switch (page->descr[i].resource) {
14439 mtx_unlock(&softc->ctl_lock); // XXX
14440 val = lun->backend->lun_attr(lun->be_lun, attr);
14441 mtx_lock(&softc->ctl_lock);
14444 if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
14452 mtx_lock(&lun->lun_lock);
14454 scsi_u64to8b((uint8_t *)&page->descr[i] -
14455 (uint8_t *)page, lun->ua_tpt_info);
14456 if (lun->lasttpt == 0 ||
14457 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
14458 lun->lasttpt = time_uptime;
14459 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
14464 lun->lasttpt = 0;
14465 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
14466 set = -1;
14468 mtx_unlock(&lun->lun_lock);
14470 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
14474 msg.hdr.nexus.initid = -1;
14475 msg.hdr.nexus.targ_port = -1;
14476 msg.hdr.nexus.targ_lun = lun->lun;
14477 msg.hdr.nexus.targ_mapped_lun = lun->lun;
14481 memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
14482 mtx_unlock(&softc->ctl_lock); // XXX
14485 mtx_lock(&softc->ctl_lock);
14488 mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
14489 PDROP, "-", CTL_LBP_PERIOD * hz);
14491 softc->thresh_thread = NULL;
14502 idx = (io->io_hdr.nexus.targ_port * 127 +
14503 io->io_hdr.nexus.initid) % worker_threads;
14504 thr = &softc->threads[idx];
14505 mtx_lock(&thr->queue_lock);
14506 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
14507 mtx_unlock(&thr->queue_lock);
14517 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
14518 mtx_lock(&thr->queue_lock);
14519 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
14520 mtx_unlock(&thr->queue_lock);
14530 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
14531 mtx_lock(&thr->queue_lock);
14532 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
14533 mtx_unlock(&thr->queue_lock);
14543 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
14544 mtx_lock(&thr->queue_lock);
14545 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
14546 mtx_unlock(&thr->queue_lock);