/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __TARGET_CORE_USER_H
#define __TARGET_CORE_USER_H

/* This header is also used by userspace applications */

#include <linux/types.h>
#include <linux/uio.h>

#define TCMU_VERSION "2.0"

/**
 * DOC: Ring Design
 * Ring Design
 * -----------
 *
 * The mmap()ed area is divided into three parts:
 * 1) The mailbox (struct tcmu_mailbox, below);
 * 2) The command ring;
 * 3) Everything beyond the command ring (data).
 *
 * The mailbox tells userspace the offset of the command ring from the
 * start of the shared memory region, and how big the command ring is.
 *
 * The kernel passes SCSI commands to userspace by putting a struct
 * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
 * userspace via UIO's interrupt mechanism.
 *
 * tcmu_cmd_entry contains a header. If the header type is PAD,
 * userspace should skip hdr->length bytes (mod cmdr_size) to find the
 * next cmd_entry.
 *
 * Otherwise, the entry contains offsets into the mmap()ed area for the
 * cdb and data buffers -- the latter accessible via the iov array.
 * iov addresses are also offsets into the shared area.
 *
 * When userspace has completed handling the command, it sets
 * entry->rsp.scsi_status, fills in rsp.sense_buffer if appropriate,
 * and sets mailbox->cmd_tail equal to the old cmd_tail plus
 * hdr->length, mod cmdr_size. If cmd_tail still doesn't equal cmd_head,
 * it should process the next entry the same way, and so on.
 */
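/*
 * As a rough illustration of the design above (not part of the UAPI), a
 * userspace consumer loop might look like the sketch below, assuming the
 * device has been mmap()ed to "map" and that "handle_cmd" is an
 * application-provided handler which fills in entry->rsp; both names are
 * hypothetical, and memory barriers plus the UIO wakeup/completion
 * signalling are omitted:
 *
 *	struct tcmu_mailbox *mb = map;
 *	struct tcmu_cmd_entry *ent;
 *
 *	while (mb->cmd_tail != mb->cmd_head) {
 *		ent = (void *)((char *)map + mb->cmdr_off + mb->cmd_tail);
 *
 *		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD)
 *			handle_cmd(map, ent);
 *
 *		mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *			       mb->cmdr_size;
 *	}
 *
 * PAD entries carry no response and are simply stepped over by the same
 * length update.
 */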

#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
#define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */
#define TCMU_MAILBOX_FLAG_CAP_KEEP_BUF (1 << 3) /* Keep buf after cmd completion */

struct tcmu_mailbox {
	__u16 version;
	__u16 flags;
	__u32 cmdr_off;
	__u32 cmdr_size;

	__u32 cmd_head;

	/* Updated by user. On its own cacheline */
	__u32 cmd_tail __attribute__((__aligned__(ALIGN_SIZE)));

} __packed;

enum tcmu_opcode {
	TCMU_OP_PAD = 0,
	TCMU_OP_CMD,
	TCMU_OP_TMR,
};

/*
 * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
 */
struct tcmu_cmd_entry_hdr {
	__u32 len_op;
	__u16 cmd_id;
	__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
#define TCMU_UFLAG_READ_LEN   0x2
#define TCMU_UFLAG_KEEP_BUF   0x4
	__u8 uflags;

} __packed;

#define TCMU_OP_MASK 0x7

static inline enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op)
{
	return len_op & TCMU_OP_MASK;
}

static inline void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op)
{
	*len_op &= ~TCMU_OP_MASK;
	*len_op |= (op & TCMU_OP_MASK);
}

static inline __u32 tcmu_hdr_get_len(__u32 len_op)
{
	return len_op & ~TCMU_OP_MASK;
}

static inline void tcmu_hdr_set_len(__u32 *len_op, __u32 len)
{
	*len_op &= TCMU_OP_MASK;
	*len_op |= len;
}
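/*
 * Illustration only: decoding the header of a ring entry "ent" with the
 * helpers above.
 *
 *	enum tcmu_opcode op = tcmu_hdr_get_op(ent->hdr.len_op);
 *	__u32 len = tcmu_hdr_get_len(ent->hdr.len_op);
 *
 * "len" is the 8-byte-aligned size of the whole entry, i.e. the amount
 * by which cmd_tail advances (mod cmdr_size) once the entry is consumed.
 */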

/* Currently the same as SCSI_SENSE_BUFFERSIZE */
#define TCMU_SENSE_BUFFERSIZE 96

struct tcmu_cmd_entry {
	struct tcmu_cmd_entry_hdr hdr;

	union {
		struct {
			__u32 iov_cnt;
			__u32 iov_bidi_cnt;
			__u32 iov_dif_cnt;
			__u64 cdb_off;
			__u64 __pad1;
			__u64 __pad2;
			__DECLARE_FLEX_ARRAY(struct iovec, iov);
		} req;
		struct {
			__u8 scsi_status;
			__u8 __pad1;
			__u16 __pad2;
			__u32 read_len;
			char sense_buffer[TCMU_SENSE_BUFFERSIZE];
		} rsp;
	};

} __packed;
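/*
 * Illustration only (not part of the UAPI): resolving the request
 * offsets of a TCMU_OP_CMD entry, with "map" again being the start of
 * the mmap()ed region.
 *
 *	__u8 *cdb = (__u8 *)map + ent->req.cdb_off;
 *	char *data = (char *)map + (unsigned long)ent->req.iov[0].iov_base;
 *	__kernel_size_t data_len = ent->req.iov[0].iov_len;
 *
 * iov_base holds an offset into the shared area rather than a pointer,
 * so it must be rebased onto "map" before use; a command's data may span
 * several iovs (iov_cnt of them).
 */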

struct tcmu_tmr_entry {
	struct tcmu_cmd_entry_hdr hdr;

#define TCMU_TMR_UNKNOWN		0
#define TCMU_TMR_ABORT_TASK		1
#define TCMU_TMR_ABORT_TASK_SET		2
#define TCMU_TMR_CLEAR_ACA		3
#define TCMU_TMR_CLEAR_TASK_SET		4
#define TCMU_TMR_LUN_RESET		5
#define TCMU_TMR_TARGET_WARM_RESET	6
#define TCMU_TMR_TARGET_COLD_RESET	7
/* Pseudo reset due to received PR OUT */
#define TCMU_TMR_LUN_RESET_PRO		128
	__u8 tmr_type;

	__u8 __pad1;
	__u16 __pad2;
	__u32 cmd_cnt;
	__u64 __pad3;
	__u64 __pad4;
	__u16 cmd_ids[];
} __packed;
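/*
 * Illustration only: if TMR notifications are in use (see
 * TCMU_MAILBOX_FLAG_CAP_TMR), a TCMU_OP_TMR entry names the affected
 * commands by cmd_id; "abort_cmd" is a hypothetical per-command handler.
 *
 *	struct tcmu_tmr_entry *tmr = (struct tcmu_tmr_entry *)ent;
 *	__u32 i;
 *
 *	for (i = 0; i < tmr->cmd_cnt; i++)
 *		abort_cmd(tmr->cmd_ids[i]);
 */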

#define TCMU_OP_ALIGN_SIZE sizeof(__u64)

enum tcmu_genl_cmd {
	TCMU_CMD_UNSPEC,
	TCMU_CMD_ADDED_DEVICE,
	TCMU_CMD_REMOVED_DEVICE,
	TCMU_CMD_RECONFIG_DEVICE,
	TCMU_CMD_ADDED_DEVICE_DONE,
	TCMU_CMD_REMOVED_DEVICE_DONE,
	TCMU_CMD_RECONFIG_DEVICE_DONE,
	TCMU_CMD_SET_FEATURES,
	__TCMU_CMD_MAX,
};
#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)

enum tcmu_genl_attr {
	TCMU_ATTR_UNSPEC,
	TCMU_ATTR_DEVICE,
	TCMU_ATTR_MINOR,
	TCMU_ATTR_PAD,
	TCMU_ATTR_DEV_CFG,
	TCMU_ATTR_DEV_SIZE,
	TCMU_ATTR_WRITECACHE,
	TCMU_ATTR_CMD_STATUS,
	TCMU_ATTR_DEVICE_ID,
	TCMU_ATTR_SUPP_KERN_CMD_REPLY,
	__TCMU_ATTR_MAX,
};
#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)

#endif