Lines Matching +full:test2 +full:. +full:good
4 * Copyright (c) 2003 Silicon Graphics International Corp.
5 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
6 * All rights reserved.
11 * 1. Redistributions of source code must retain the above copyright
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * binary redistribution.
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * POSSIBILITY OF SUCH DAMAGES.
33 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_io.h#5 $
36 * CAM Target Layer data movement structures/interface.
38 * Author: Ken Merry <ken@FreeBSD.org>
45 #include <stdbool.h>
48 #include <sys/queue.h>
49 #include <cam/scsi/scsi_all.h>
50 #include <dev/nvme/nvme.h>
56 * and/or done stage.
64 * Uncomment this next line to enable the CTL I/O delay feature. You
65 * can delay I/O at two different points -- datamove and done. This is
67 * timeout), and for determining how long a host's timeout is.
85 * WARNING: Keep the data in/out/none flags where they are. They're used
86 * in conjunction with ctl_cmd_flags. See comment above ctl_cmd_flags
87 * definition in ctl_private.h.
155 * Number of CTL private areas.
170 #define CTL_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0])
171 #define CTL_SOFTC(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1])
172 #define CTL_BACKEND_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0])
174 ctl_ports[(io)->io_hdr.nexus.targ_port])
178 * ever reach backends, so we can reuse the backend's private storage.
180 #define CTL_RSGL(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[0])
181 #define CTL_LSGL(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[1])
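The macros above hide the ctl_private[] indexing behind named per-I/O accessors. A minimal sketch of how backend code might pull these pointers out of an I/O follows; the function name is illustrative, and the pointed-to types (struct ctl_softc, struct ctl_lun, struct ctl_be_lun) are the usual CTL types but are assumptions as far as this listing goes.

/*
 * Sketch only: fetch per-I/O private pointers via the accessor macros.
 * The function name is illustrative; the pointed-to types are assumed.
 */
static void
example_backend_dispatch(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);	/* global CTL state */
	struct ctl_lun *lun = CTL_LUN(io);		/* addressed LUN */
	struct ctl_be_lun *be_lun = CTL_BACKEND_LUN(io);/* backend's LUN */

	(void)softc; (void)lun; (void)be_lun;
	/* ... backend-specific handling of 'io' ... */
}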
217 CTL_MSG_UA, /* Set/clear UA on secondary. */
218 CTL_MSG_PORT_SYNC, /* Information about port. */
219 CTL_MSG_LUN_SYNC, /* Information about LUN. */
220 CTL_MSG_IID_SYNC, /* Information about initiator. */
221 CTL_MSG_LOGIN, /* Information about HA peer. */
222 CTL_MSG_MODE_SYNC, /* Mode page current content. */
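These message types drive the HA link between the two storage controllers (SCs). A sketch of a receive-side dispatch over the types listed above; the union name ctl_ha_msg and the msg->hdr.msg_type field follow common CTL usage but are assumptions here, and the case bodies are placeholders.

/*
 * Sketch only: dispatch an incoming HA message by type.  The message
 * layout (msg->hdr.msg_type) is assumed; the case bodies are comments.
 */
static void
example_ha_receive(union ctl_ha_msg *msg)
{
	switch (msg->hdr.msg_type) {
	case CTL_MSG_UA:
		/* Set or clear a unit attention on this (secondary) SC. */
		break;
	case CTL_MSG_PORT_SYNC:
		/* Update our view of the peer's port configuration. */
		break;
	case CTL_MSG_LOGIN:
		/* Record the peer's identity for this HA session. */
		break;
	default:
		/* LUN_SYNC, IID_SYNC, MODE_SYNC, ... handled similarly. */
		break;
	}
}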
230 ctl_io_type io_type; /* task I/O, SCSI I/O, etc. */
236 uint32_t port_status; /* trans status, set by PORT, 0 = good */
274 * SCSI passthrough I/O structure for the CAM Target Layer. Note
276 * used in the CTL implementation; e.g., timeout and retries won't be
277 * used.
280 * structure.
287 * doesn't modify or use them.
296 * by kern_data_ptr. 0 means there is no list, just a data pointer.
300 uint32_t rem_sg_entries; /* Unused. */
303 * The data pointer or a pointer to the scatter/gather list.
308 * Length of the data buffer or scatter/gather list. It's also
310 * i.e., the number of bytes expected to be transferred by the current
311 * invocation of the frontend's datamove() callback. It's always
312 * less than or equal to kern_total_len.
318 * SCSI command, as decoded from the SCSI CDB.
323 * Amount of data left after the current data transfer.
330 * datamove() invocations.
337 uint8_t seridx; /* Serialization index. */
340 ctl_tag_type tag_type; /* simple, ordered, head of queue, etc. */
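Taken together, the field comments above define the contract between a backend and the frontend's datamove() path. A minimal sketch of a backend describing one contiguous buffer for a SCSI I/O, assuming the convention stated above that kern_sg_entries == 0 means kern_data_ptr points at the data itself; the function and variable names are illustrative.

/*
 * Sketch only: describe a single contiguous buffer for this command.
 * kern_rel_offset/kern_data_resid then advance as successive
 * datamove() invocations complete (see the field comments above).
 */
static void
example_describe_buffer(struct ctl_scsiio *ctsio, uint8_t *buf, uint32_t len)
{
	ctsio->kern_data_ptr = buf;	/* plain pointer, no S/G list */
	ctsio->kern_sg_entries = 0;	/* 0 => kern_data_ptr is the data */
	ctsio->kern_data_len = len;	/* bytes for the next datamove() */
	ctsio->kern_total_len = len;	/* bytes for the whole command */
	ctsio->kern_rel_offset = 0;	/* nothing transferred yet */
}

struct ctl_nvmeio carries the same kern_* fields (see below), which is what makes the type-agnostic helpers near the end of this header possible.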
374 * Task management I/O structure. Aborts, bus resets, etc., are sent using
375 * this structure.
378 * structure.
382 ctl_task_type task_action; /* Target Reset, Abort, etc. */
384 ctl_tag_type tag_type; /* simple, ordered, etc. */
385 uint8_t task_status; /* Complete, Succeeded, etc. */
390 * NVME passthrough I/O structure for the CAM Target Layer. Note that
391 * this structure is used for both I/O and admin commands.
394 * structure.
401 * doesn't modify or use them.
410 * by kern_data_ptr. 0 means there is no list, just a data pointer.
415 * The data pointer or a pointer to the scatter/gather list.
420 * Length of the data buffer or scatter/gather list. It's also
422 * i.e., the number of bytes expected to be transferred by the current
423 * invocation of the frontend's datamove() callback. It's always
424 * less than or equal to kern_total_len.
430 * NVMe command, as decoded from the NVMe SQE.
435 * Amount of data left after the current data transfer.
442 * datamove() invocations.
456 * HA link messages.
461 * Used for CTL_MSG_LOGIN.
484 * to the other SC which it must also act on.
487 * structure.
508 * Used for CTL_MSG_PERS_ACTION.
516 * Used for CTL_MSG_UA.
528 * structure, because we can't pass data by reference in between controllers.
530 * kern_data_ptr field. So kern_sg_entries here will always be non-zero,
531 * even if there is only one entry.
533 * Used for CTL_MSG_DATAMOVE.
555 * and CTL_MSG_DATAMOVE_DONE.
560 ctl_tag_type tag_type; /* simple, ordered, etc. */
567 0 = good */
573 * Used for CTL_MSG_MANAGE_TASKS.
577 ctl_task_type task_action; /* Target Reset, Abort, etc. */
579 ctl_tag_type tag_type; /* simple, ordered, etc. */
583 * Used for CTL_MSG_PORT_SYNC.
600 * Used for CTL_MSG_LUN_SYNC.
619 * Used for CTL_MSG_IID_SYNC.
630 * Used for CTL_MSG_MODE_SYNC.
664 struct ctl_prio presio; /* update persistent reservation info on other SC */
669 KASSERT((io)->io_hdr.io_type == CTL_IO_##_1, \
670 ("%s: unexpected I/O type %x", __func__, (io)->io_hdr.io_type))
673 KASSERT((io)->io_hdr.io_type == CTL_IO_##_1 || \
674 (io)->io_hdr.io_type == CTL_IO_##_2, \
675 ("%s: unexpected I/O type %x", __func__, (io)->io_hdr.io_type))
677 #define _CTL_IO_ASSERT_MACRO(io, _1, _2, NAME, ...) \
680 #define CTL_IO_ASSERT(...) \
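The KASSERT fragments above are the expansion targets of a variadic-macro dispatch: CTL_IO_ASSERT() selects a one-type or a two-type assertion depending on how many type names it receives, and _CTL_IO_ASSERT_MACRO picks the helper by counting arguments. The surrounding #define lines are elided from this listing, so the following is a self-contained sketch of the same trick using illustrative EX_ names rather than the header's actual definitions (the empty __VA_ARGS__ in the two-argument case relies on the compilers FreeBSD uses accepting trailing empty variadic arguments).

/*
 * Sketch only: the same argument-count dispatch, with EX_ names.
 */
#define	EX_IO_ASSERT_1(io, _1)						\
	KASSERT((io)->io_hdr.io_type == CTL_IO_##_1,			\
	    ("%s: unexpected I/O type %x", __func__, (io)->io_hdr.io_type))
#define	EX_IO_ASSERT_2(io, _1, _2)					\
	KASSERT((io)->io_hdr.io_type == CTL_IO_##_1 ||			\
	    (io)->io_hdr.io_type == CTL_IO_##_2,			\
	    ("%s: unexpected I/O type %x", __func__, (io)->io_hdr.io_type))
/* Expands to its 4th argument: the _2 variant for 3 args, _1 for 2. */
#define	_EX_IO_ASSERT_MACRO(io, _1, _2, NAME, ...) NAME
#define	EX_IO_ASSERT(...)						\
	_EX_IO_ASSERT_MACRO(__VA_ARGS__, EX_IO_ASSERT_2,		\
	    EX_IO_ASSERT_1)(__VA_ARGS__)

/* Usage (the names after 'io' are pasted onto CTL_IO_):		*/
/*   EX_IO_ASSERT(io, SCSI);        io must be CTL_IO_SCSI		*/
/*   EX_IO_ASSERT(io, SCSI, NVME);  io must be SCSI or NVMe		*/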
687 switch (io->io_hdr.io_type) {
689 return (io->scsiio.kern_sg_entries);
692 return (io->nvmeio.kern_sg_entries);
701 switch (io->io_hdr.io_type) {
703 return (io->scsiio.kern_data_ptr);
706 return (io->nvmeio.kern_data_ptr);
715 switch (io->io_hdr.io_type) {
717 return (io->scsiio.kern_data_len);
720 return (io->nvmeio.kern_data_len);
729 switch (io->io_hdr.io_type) {
731 return (io->scsiio.kern_total_len);
734 return (io->nvmeio.kern_total_len);
743 switch (io->io_hdr.io_type) {
745 return (io->scsiio.kern_data_resid);
748 return (io->nvmeio.kern_data_resid);
757 switch (io->io_hdr.io_type) {
759 return (io->scsiio.kern_rel_offset);
762 return (io->nvmeio.kern_rel_offset);
771 switch (io->io_hdr.io_type) {
773 io->scsiio.kern_rel_offset += offset;
777 io->nvmeio.kern_rel_offset += offset;
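The switch bodies above belong to type-independent read accessors whose names and signatures are elided from this listing. Below is a sketch of the same io_type dispatch used to answer a derived question; the function name is illustrative, and the CTL_IO_NVME/CTL_IO_NVME_ADMIN case labels and __assert_unreachable() follow usual CTL/FreeBSD conventions rather than being shown here.

/*
 * Sketch only: bytes of the command's data at or beyond the current
 * datamove() offset (kern_total_len - kern_rel_offset).
 */
static inline uint32_t
example_kern_bytes_left(union ctl_io *io)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		return (io->scsiio.kern_total_len -
		    io->scsiio.kern_rel_offset);
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		return (io->nvmeio.kern_total_len -
		    io->nvmeio.kern_rel_offset);
	default:
		__assert_unreachable();
	}
}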
787 switch (io->io_hdr.io_type) {
789 io->scsiio.kern_sg_entries = kern_sg_entries;
793 io->nvmeio.kern_sg_entries = kern_sg_entries;
803 switch (io->io_hdr.io_type) {
805 io->scsiio.kern_data_ptr = kern_data_ptr;
809 io->nvmeio.kern_data_ptr = kern_data_ptr;
819 switch (io->io_hdr.io_type) {
821 io->scsiio.kern_data_len = kern_data_len;
825 io->nvmeio.kern_data_len = kern_data_len;
835 switch (io->io_hdr.io_type) {
837 io->scsiio.kern_total_len = kern_total_len;
841 io->nvmeio.kern_total_len = kern_total_len;
851 switch (io->io_hdr.io_type) {
853 io->scsiio.kern_data_resid = kern_data_resid;
857 io->nvmeio.kern_data_resid = kern_data_resid;
867 switch (io->io_hdr.io_type) {
869 io->scsiio.kern_rel_offset = kern_rel_offset;
873 io->nvmeio.kern_rel_offset = kern_rel_offset;
883 switch (io->io_hdr.io_type) {
885 io->scsiio.be_move_done = be_move_done;
889 io->nvmeio.be_move_done = be_move_done;
899 switch (io->io_hdr.io_type) {
901 io->scsiio.io_cont = io_cont;
905 io->nvmeio.io_cont = io_cont;
915 switch (io->io_hdr.io_type) {
917 io->scsiio.kern_data_ref = kern_data_ref;
921 io->nvmeio.kern_data_ref = kern_data_ref;
931 switch (io->io_hdr.io_type) {
933 io->scsiio.kern_data_arg = kern_data_arg;
937 io->nvmeio.kern_data_arg = kern_data_arg;
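The write accessors above let common code describe a transfer without caring whether the I/O is SCSI or NVMe. A sketch of how a backend might use them; the setter names are inferred from the switch bodies (the actual declarations are elided from this listing) and should be treated as assumptions, as should the helper's own name.

/*
 * Sketch only: describe one contiguous buffer through the (assumed)
 * type-agnostic setters, mirroring the SCSI-only example earlier.
 */
static void
example_set_single_buffer(union ctl_io *io, uint8_t *buf, uint32_t len)
{
	ctl_set_kern_data_ptr(io, buf);		/* assumed setter name */
	ctl_set_kern_sg_entries(io, 0);		/* assumed setter name */
	ctl_set_kern_data_len(io, len);		/* assumed setter name */
	ctl_set_kern_total_len(io, len);	/* assumed setter name */
	ctl_set_kern_rel_offset(io, 0);		/* assumed setter name */
}

Whatever the real names are, the value of the pattern is that a single code path serves both struct ctl_scsiio and struct ctl_nvmeio, with the io_type switch confined to this header.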