/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_io.h#5 $
 */
/*
 * CAM Target Layer data movement structures/interface.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#ifndef	_CTL_IO_H_
#define	_CTL_IO_H_

#ifndef _KERNEL
#include <stdbool.h>
#endif

#include <sys/queue.h>
#include <cam/scsi/scsi_all.h>
#include <dev/nvme/nvme.h>

#define	CTL_MAX_CDBLEN	32
/*
 * Comment out the next line to disable printing out times for I/Os
 * that take longer than CTL_TIME_IO_DEFAULT_SECS seconds to get to the
 * datamove and/or done stage.
 */
#define	CTL_TIME_IO
#ifdef  CTL_TIME_IO
#define	CTL_TIME_IO_DEFAULT_SECS	90
#endif

/*
 * Uncomment this next line to enable the CTL I/O delay feature.  You
 * can delay I/O at two different points -- datamove and done.  This is
 * useful for diagnosing abort conditions (for hosts that send an abort on a
 * timeout), and for determining how long a host's timeout is.
 */
//#define	CTL_IO_DELAY

typedef enum {
	CTL_STATUS_NONE,	/* No status */
	CTL_SUCCESS,		/* Transaction completed successfully */
	CTL_CMD_TIMEOUT,	/* Command timed out, shouldn't happen here */
	CTL_SEL_TIMEOUT,	/* Selection timeout, shouldn't happen here */
	CTL_ERROR,		/* General CTL error XXX expand on this? */
	CTL_SCSI_ERROR,		/* SCSI error, look at status byte/sense data */
	CTL_NVME_ERROR,		/* NVMe error, look at NVMe completion */
	CTL_CMD_ABORTED,	/* Command aborted, don't return status */
	CTL_STATUS_MASK = 0xfff,/* Mask off any status flags */
	CTL_AUTOSENSE = 0x1000	/* Autosense performed */
} ctl_io_status;
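
/*
 * Example (editorial, not from the original header): status values are
 * kept in the low bits while flags like CTL_AUTOSENSE live above them,
 * so callers mask with CTL_STATUS_MASK before comparing.  The
 * handle_sense() helper below is hypothetical.
 */
#if 0
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
		if (io->io_hdr.status & CTL_AUTOSENSE)
			handle_sense(&io->scsiio.sense_data);
	}
#endif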

/*
 * WARNING:  Keep the data in/out/none flags where they are.  They're used
 * in conjunction with ctl_cmd_flags.  See comment above ctl_cmd_flags
 * definition in ctl_private.h.
 */
typedef enum {
	CTL_FLAG_NONE		= 0x00000000,	/* no flags */
	CTL_FLAG_DATA_IN	= 0x00000001,	/* DATA IN */
	CTL_FLAG_DATA_OUT	= 0x00000002,	/* DATA OUT */
	CTL_FLAG_DATA_NONE	= 0x00000003,	/* no data */
	CTL_FLAG_DATA_MASK	= 0x00000003,
	CTL_FLAG_USER_TAG	= 0x00000020,	/* userland provides tag */
	CTL_FLAG_USER_REQ	= 0x00000040,	/* request came from userland */
	CTL_FLAG_ALLOCATED	= 0x00000100,	/* data space allocated */
	CTL_FLAG_ABORT_STATUS	= 0x00000400,	/* return TASK ABORTED status */
	CTL_FLAG_ABORT		= 0x00000800,	/* this I/O should be aborted */
	CTL_FLAG_DMA_INPROG	= 0x00001000,	/* DMA in progress */
	CTL_FLAG_DELAY_DONE	= 0x00004000,	/* delay injection done */
	CTL_FLAG_INT_COPY	= 0x00008000,	/* internal copy, no done call*/
	CTL_FLAG_SENT_2OTHER_SC	= 0x00010000,
	CTL_FLAG_FROM_OTHER_SC	= 0x00020000,
	CTL_FLAG_IS_WAS_ON_RTR  = 0x00040000,	/* Don't rerun cmd on failover*/
	CTL_FLAG_BUS_ADDR	= 0x00080000,	/* ctl_sglist contains BUS
						   addresses, not virtual ones*/
	CTL_FLAG_IO_CONT	= 0x00100000,	/* Continue I/O instead of
						   completing */
#if 0
	CTL_FLAG_ALREADY_DONE	= 0x00200000,	/* I/O already completed */
#endif
	CTL_FLAG_NO_DATAMOVE	= 0x00400000,
	CTL_FLAG_DMA_QUEUED	= 0x00800000,	/* DMA queued but not started*/
	CTL_FLAG_STATUS_QUEUED	= 0x01000000,	/* Status queued but not sent*/

	CTL_FLAG_FAILOVER	= 0x04000000,	/* Killed by a failover */
	CTL_FLAG_IO_ACTIVE	= 0x08000000,	/* I/O active on this SC */
	CTL_FLAG_STATUS_SENT	= 0x10000000,	/* Status sent by datamove */
	CTL_FLAG_SERSEQ_DONE	= 0x20000000	/* All storage I/O started */
} ctl_io_flags;

struct ctl_lba_len {
	uint64_t lba;
	uint32_t len;
};

struct ctl_lba_len_flags {
	uint64_t lba;
	uint32_t len;
	uint32_t flags;
#define CTL_LLF_FUA	0x04000000
#define CTL_LLF_DPO	0x08000000
#define CTL_LLF_READ	0x10000000
#define CTL_LLF_WRITE	0x20000000
#define CTL_LLF_VERIFY	0x40000000
#define CTL_LLF_COMPARE	0x80000000
};

struct ctl_ptr_len_flags {
	uint8_t		*ptr;
	uint32_t	len;
	uint32_t	flags;
};

union ctl_priv {
	uint8_t		bytes[sizeof(uint64_t) * 2];
	uint64_t	integer;
	uint64_t	integers[2];
	void		*ptr;
	void		*ptrs[2];
};

/*
 * Number of CTL private areas.
 */
#define	CTL_NUM_PRIV	6

/*
 * Which private area are we using for a particular piece of data?
 */
#define	CTL_PRIV_LUN		0	/* CTL LUN pointer goes here */
#define	CTL_PRIV_LBA_LEN	1	/* Decoded LBA/len for read/write*/
#define	CTL_PRIV_MODEPAGE	1	/* Modepage info for config write */
#define	CTL_PRIV_BACKEND	2	/* Reserved for block, RAIDCore */
#define	CTL_PRIV_BACKEND_LUN	3	/* Backend LUN pointer */
#define	CTL_PRIV_FRONTEND	4	/* Frontend storage */
#define	CTL_PRIV_FRONTEND2	5	/* Another frontend storage */
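
/*
 * Example (editorial, not from the original header): a sketch of how a
 * private area is typically used; CTL's read/write path stashes the
 * decoded LBA/length in the CTL_PRIV_LBA_LEN slot roughly like this.
 * "lba" and "num_blocks" are hypothetical locals.
 */
#if 0
	struct ctl_lba_len_flags *lbalen;

	lbalen = (struct ctl_lba_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = CTL_LLF_READ;
#endif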

#define CTL_LUN(io)	((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0])
#define CTL_SOFTC(io)	((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1])
#define CTL_BACKEND_LUN(io)	((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0])
#define CTL_PORT(io)	(((struct ctl_softc *)CTL_SOFTC(io))->	\
    ctl_ports[(io)->io_hdr.nexus.targ_port])
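
/*
 * Example (editorial, not from the original header): the accessor
 * macros above hide the private-area indexing.  struct ctl_lun and
 * struct ctl_port are defined elsewhere in CTL.
 */
#if 0
	struct ctl_lun *lun = CTL_LUN(io);
	struct ctl_port *port = CTL_PORT(io);
#endif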

/*
 * These are used only on Originating SC in XFER mode, where requests don't
 * ever reach backends, so we can reuse backend's private storage.
 */
#define CTL_RSGL(io)	((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[0])
#define CTL_LSGL(io)	((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[1])
#define CTL_RSGLT(io)	((struct ctl_sg_entry *)CTL_RSGL(io))
#define CTL_LSGLT(io)	((struct ctl_sg_entry *)CTL_LSGL(io))

#define CTL_INVALID_PORTNAME 0xFF
#define CTL_UNMAPPED_IID     0xFF

struct ctl_sg_entry {
	void	*addr;
	size_t	len;
};

typedef enum {
	CTL_IO_NONE,
	CTL_IO_SCSI,
	CTL_IO_TASK,
	CTL_IO_NVME,
	CTL_IO_NVME_ADMIN,
} ctl_io_type;

struct ctl_nexus {
	uint32_t initid;		/* Initiator ID */
	uint32_t targ_port;		/* Target port, filled in by PORT */
	uint32_t targ_lun;		/* Destination lun */
	uint32_t targ_mapped_lun;	/* Destination lun CTL-wide */
};

typedef enum {
	CTL_MSG_SERIALIZE,
	CTL_MSG_R2R,
	CTL_MSG_FINISH_IO,
	CTL_MSG_BAD_JUJU,
	CTL_MSG_MANAGE_TASKS,
	CTL_MSG_PERS_ACTION,
	CTL_MSG_DATAMOVE,
	CTL_MSG_DATAMOVE_DONE,
	CTL_MSG_UA,			/* Set/clear UA on secondary. */
	CTL_MSG_PORT_SYNC,		/* Information about port. */
	CTL_MSG_LUN_SYNC,		/* Information about LUN. */
	CTL_MSG_IID_SYNC,		/* Information about initiator. */
	CTL_MSG_LOGIN,			/* Information about HA peer. */
	CTL_MSG_MODE_SYNC,		/* Mode page current content. */
	CTL_MSG_FAILOVER		/* Fake, never sent through the wire */
} ctl_msg_type;

struct ctl_scsiio;

struct ctl_io_hdr {
	uint32_t	  version;	/* interface version XXX */
	ctl_io_type	  io_type;	/* task I/O, SCSI I/O, etc. */
	ctl_msg_type	  msg_type;
	struct ctl_nexus  nexus;	/* Initiator, port, target, lun */
	uint32_t	  iid_indx;	/* the index into the iid mapping */
	uint32_t	  flags;	/* transaction flags */
	uint32_t	  status;	/* transaction status */
	uint32_t	  port_status;	/* trans status, set by PORT, 0 = good*/
	uint32_t	  timeout;	/* timeout in ms */
	uint32_t	  retries;	/* retry count */
#ifdef CTL_IO_DELAY
	struct callout	  delay_callout;
#endif /* CTL_IO_DELAY */
#ifdef CTL_TIME_IO
	time_t		  start_time;	/* I/O start time */
	struct bintime	  start_bt;	/* Timer start ticks */
	struct bintime	  dma_start_bt;	/* DMA start ticks */
	struct bintime	  dma_bt;	/* DMA total ticks */
#endif /* CTL_TIME_IO */
	uint32_t	  num_dmas;	/* Number of DMAs */
	union ctl_io	  *remote_io;	/* I/O counterpart on remote HA side */
	union ctl_io	  *blocker;	/* I/O blocking this one */
	void		  *pool;	/* I/O pool */
	union ctl_priv	  ctl_private[CTL_NUM_PRIV];/* CTL private area */
	TAILQ_HEAD(, ctl_io_hdr) blocked_queue;	/* I/Os blocked by this one */
	STAILQ_ENTRY(ctl_io_hdr) links;	/* linked list pointer */
	LIST_ENTRY(ctl_io_hdr) ooa_links;	/* ooa_queue links */
	TAILQ_ENTRY(ctl_io_hdr) blocked_links;	/* blocked_queue links */
};

typedef enum {
	CTL_TAG_UNTAGGED,
	CTL_TAG_SIMPLE,
	CTL_TAG_ORDERED,
	CTL_TAG_HEAD_OF_QUEUE,
	CTL_TAG_ACA
} ctl_tag_type;

union ctl_io;

typedef void (*ctl_ref)(void *arg, int diff);
typedef int (*ctl_be_move_done_t)(union ctl_io *io, bool samethr);
typedef int (*ctl_io_cont)(union ctl_io *io);
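
/*
 * Example (editorial, not from the original header): a minimal
 * ctl_be_move_done_t handler skeleton; "samethr" reports whether the
 * callback runs in the same thread that started the data move.
 */
#if 0
static int
example_move_done(union ctl_io *io, bool samethr)
{
	/*
	 * A real handler would release or refill the buffer described
	 * by kern_data_ptr and either queue the next transfer chunk or
	 * complete the I/O.
	 */
	return (0);
}
#endif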

/*
 * SCSI passthrough I/O structure for the CAM Target Layer.  Note
 * that some of these fields are here for completeness, but they aren't
 * used in the CTL implementation.  e.g., timeout and retries won't be
 * used.
 *
 * Note:  Make sure the io_hdr is *always* the first element in this
 * structure.
 */
struct ctl_scsiio {
	struct ctl_io_hdr io_hdr;	/* common to all I/O types */

	/*
	 * The ext_* fields are generally intended for frontend use; CTL itself
	 * doesn't modify or use them.
	 */
	uint32_t   ext_sg_entries;	/* 0 = no S/G list, > 0 = num entries */
	uint8_t	   *ext_data_ptr;	/* data buffer or S/G list */
	uint32_t   ext_data_len;	/* Data transfer length */
	uint32_t   ext_data_filled;	/* Amount of data filled so far */

	/*
	 * The number of scatter/gather entries in the list pointed to
	 * by kern_data_ptr.  0 means there is no list, just a data pointer.
	 */
	uint32_t   kern_sg_entries;

	uint32_t   rem_sg_entries;	/* Unused. */

	/*
	 * The data pointer or a pointer to the scatter/gather list.
	 */
	uint8_t    *kern_data_ptr;

	/*
	 * Length of the data buffer or scatter/gather list.  It's also
	 * the length of this particular piece of the data transfer,
	 * i.e., the number of bytes expected to be transferred by the current
	 * invocation of frontend's datamove() callback.  It's always
	 * less than or equal to kern_total_len.
	 */
	uint32_t   kern_data_len;

	/*
	 * Total length of data to be transferred during this particular
	 * SCSI command, as decoded from SCSI CDB.
	 */
	uint32_t   kern_total_len;

	/*
	 * Amount of data left after the current data transfer.
	 */
	uint32_t   kern_data_resid;

	/*
	 * Byte offset of this transfer, equal to the amount of data
	 * already transferred for this SCSI command during previous
	 * datamove() invocations.
	 */
	uint32_t   kern_rel_offset;

	struct     scsi_sense_data sense_data;	/* sense data */
	uint8_t	   sense_len;		/* Returned sense length */
	uint8_t	   scsi_status;		/* SCSI status byte */
	uint8_t	   seridx;		/* Serialization index. */
	uint8_t	   priority;		/* Command priority */
	uint64_t   tag_num;		/* tag number */
	ctl_tag_type tag_type;		/* simple, ordered, head of queue,etc.*/
	uint8_t    cdb_len;		/* CDB length */
	uint8_t	   cdb[CTL_MAX_CDBLEN];	/* CDB */
	ctl_be_move_done_t be_move_done;	/* called by fe */
	ctl_io_cont io_cont;		/* to continue processing */
	ctl_ref	    kern_data_ref;	/* Method to reference/release data */
	void	   *kern_data_arg;	/* Opaque argument for kern_data_ref() */
};
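
/*
 * Example (editorial, not from the original header): a sketch of the
 * single-buffer setup a backend performs before invoking the
 * frontend's datamove.  "my_buf" and "my_len" are hypothetical;
 * ctl_datamove() and ctl_config_move_done() are declared elsewhere
 * in CTL.
 */
#if 0
	ctsio->kern_data_ptr = my_buf;
	ctsio->kern_data_len = my_len;
	ctsio->kern_total_len = my_len;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;	/* plain buffer, no S/G list */
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
#endif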

typedef enum {
	CTL_TASK_ABORT_TASK,
	CTL_TASK_ABORT_TASK_SET,
	CTL_TASK_CLEAR_ACA,
	CTL_TASK_CLEAR_TASK_SET,
	CTL_TASK_I_T_NEXUS_RESET,
	CTL_TASK_LUN_RESET,
	CTL_TASK_TARGET_RESET,
	CTL_TASK_BUS_RESET,
	CTL_TASK_PORT_LOGIN,
	CTL_TASK_PORT_LOGOUT,
	CTL_TASK_QUERY_TASK,
	CTL_TASK_QUERY_TASK_SET,
	CTL_TASK_QUERY_ASYNC_EVENT
} ctl_task_type;

typedef enum {
	CTL_TASK_FUNCTION_COMPLETE,
	CTL_TASK_FUNCTION_SUCCEEDED,
	CTL_TASK_FUNCTION_REJECTED,
	CTL_TASK_LUN_DOES_NOT_EXIST,
	CTL_TASK_FUNCTION_NOT_SUPPORTED
} ctl_task_status;

/*
 * Task management I/O structure.  Aborts, bus resets, etc., are sent using
 * this structure.
 *
 * Note:  Make sure the io_hdr is *always* the first element in this
 * structure.
 */
struct ctl_taskio {
	struct ctl_io_hdr	io_hdr;      /* common to all I/O types */
	ctl_task_type		task_action; /* Target Reset, Abort, etc.  */
	uint64_t		tag_num;     /* tag number */
	ctl_tag_type		tag_type;    /* simple, ordered, etc. */
	uint8_t			task_status; /* Complete, Succeeded, etc. */
	uint8_t			task_resp[3];/* Response information */
};

/*
 * NVMe passthrough I/O structure for the CAM Target Layer.  Note that
 * this structure is used for both I/O and admin commands.
 *
 * Note:  Make sure the io_hdr is *always* the first element in this
 * structure.
 */
struct ctl_nvmeio {
	struct ctl_io_hdr io_hdr;	/* common to all I/O types */

	/*
	 * The ext_* fields are generally intended for frontend use; CTL itself
	 * doesn't modify or use them.
	 */
	uint32_t   ext_sg_entries;	/* 0 = no S/G list, > 0 = num entries */
	uint8_t	   *ext_data_ptr;	/* data buffer or S/G list */
	uint32_t   ext_data_len;	/* Data transfer length */
	uint32_t   ext_data_filled;	/* Amount of data filled so far */

	/*
	 * The number of scatter/gather entries in the list pointed to
	 * by kern_data_ptr.  0 means there is no list, just a data pointer.
	 */
	uint32_t   kern_sg_entries;

	/*
	 * The data pointer or a pointer to the scatter/gather list.
	 */
	uint8_t    *kern_data_ptr;

	/*
	 * Length of the data buffer or scatter/gather list.  It's also
	 * the length of this particular piece of the data transfer,
	 * i.e., the number of bytes expected to be transferred by the current
	 * invocation of frontend's datamove() callback.  It's always
	 * less than or equal to kern_total_len.
	 */
	uint32_t   kern_data_len;

	/*
	 * Total length of data to be transferred during this particular
	 * NVMe command, as decoded from the NVMe SQE.
	 */
	uint32_t   kern_total_len;

	/*
	 * Amount of data left after the current data transfer.
	 */
	uint32_t   kern_data_resid;

	/*
	 * Byte offset of this transfer, equal to the amount of data
	 * already transferred for this NVMe command during previous
	 * datamove() invocations.
	 */
	uint32_t   kern_rel_offset;

	struct nvme_command cmd;	/* SQE */
	struct nvme_completion cpl;	/* CQE */
	bool       success_sent;	/* datamove already sent CQE */
	ctl_be_move_done_t be_move_done;	/* called by fe */
	ctl_io_cont io_cont;		/* to continue processing */
	ctl_ref	    kern_data_ref;	/* Method to reference/release data */
	void	   *kern_data_arg;	/* Opaque argument for kern_data_ref() */
};

/*
 * HA link messages.
 */
#define	CTL_HA_VERSION		4

/*
 * Used for CTL_MSG_LOGIN.
 */
struct ctl_ha_msg_login {
	ctl_msg_type		msg_type;
	int			version;
	int			ha_mode;
	int			ha_id;
	int			max_luns;
	int			max_ports;
	int			max_init_per_port;
};

typedef enum {
	CTL_PR_REG_KEY,
	CTL_PR_UNREG_KEY,
	CTL_PR_PREEMPT,
	CTL_PR_CLEAR,
	CTL_PR_RESERVE,
	CTL_PR_RELEASE
} ctl_pr_action;

483  * The PR info is specifically for sending Persistent Reserve actions
484  * to the other SC which it must also act on.
485  *
486  * Note:  Make sure the io_hdr is *always* the first element in this
487  * structure.
488  */
489 struct ctl_pr_info {
490 	ctl_pr_action		action;
491 	uint8_t			sa_res_key[8];
492 	uint8_t			res_type;
493 	uint32_t		residx;
494 };
495 
496 struct ctl_ha_msg_hdr {
497 	ctl_msg_type		msg_type;
498 	uint32_t		status;	     /* transaction status */
499 	union ctl_io		*original_sc;
500 	union ctl_io		*serializing_sc;
501 	struct ctl_nexus	nexus;	     /* Initiator, port, target, lun */
502 };
503 
504 #define	CTL_HA_MAX_SG_ENTRIES	16
505 #define	CTL_HA_DATAMOVE_SEGMENT	131072
506 
507 /*
508  * Used for CTL_MSG_PERS_ACTION.
509  */
510 struct ctl_ha_msg_pr {
511 	struct ctl_ha_msg_hdr	hdr;
512 	struct ctl_pr_info	pr_info;
513 };
514 
515 /*
516  * Used for CTL_MSG_UA.
517  */
518 struct ctl_ha_msg_ua {
519 	struct ctl_ha_msg_hdr	hdr;
520 	int			ua_all;
521 	int			ua_set;
522 	int			ua_type;
523 	uint8_t			ua_info[8];
524 };
525 
526 /*
527  * The S/G handling here is a little different than the standard ctl_scsiio
528  * structure, because we can't pass data by reference in between controllers.
529  * The S/G list in the ctl_scsiio struct is normally passed in the
530  * kern_data_ptr field.  So kern_sg_entries here will always be non-zero,
531  * even if there is only one entry.
532  *
533  * Used for CTL_MSG_DATAMOVE.
534  */
535 struct ctl_ha_msg_dt {
struct ctl_ha_msg_dt {
	struct ctl_ha_msg_hdr	hdr;
	ctl_io_flags		flags;  /* Only I/O flags are used here */
	uint32_t		sg_sequence;     /* S/G portion number  */
	uint8_t			sg_last;         /* last S/G batch = 1 */
	uint32_t		sent_sg_entries; /* previous S/G count */
	uint32_t		cur_sg_entries;  /* current S/G entries */
	uint32_t		kern_sg_entries; /* total S/G entries */
	uint32_t		kern_data_len;   /* Length of this S/G list */
	uint32_t		kern_total_len;  /* Total length of this
						    transaction */
	uint32_t		kern_data_resid; /* Length left to transfer
						    after this*/
	uint32_t		kern_rel_offset; /* Byte Offset of this
						    transfer */
	struct ctl_sg_entry	sg_list[CTL_HA_MAX_SG_ENTRIES];
};

/*
 * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU,
 * and CTL_MSG_DATAMOVE_DONE.
 */
struct ctl_ha_msg_scsi {
	struct ctl_ha_msg_hdr	hdr;
	uint64_t		tag_num;     /* tag number */
	ctl_tag_type		tag_type;    /* simple, ordered, etc. */
	uint8_t			cdb[CTL_MAX_CDBLEN];	/* CDB */
	uint8_t			cdb_len;	/* CDB length */
	uint8_t			scsi_status; /* SCSI status byte */
	uint8_t			sense_len;   /* Returned sense length */
	uint8_t			priority;    /* Command priority */
	uint32_t		port_status; /* trans status, set by FETD,
						0 = good*/
	uint32_t		kern_data_resid; /* for DATAMOVE_DONE */
	struct scsi_sense_data	sense_data;  /* sense data */
};

/*
 * Used for CTL_MSG_MANAGE_TASKS.
 */
struct ctl_ha_msg_task {
	struct ctl_ha_msg_hdr	hdr;
	ctl_task_type		task_action; /* Target Reset, Abort, etc.  */
	uint64_t		tag_num;     /* tag number */
	ctl_tag_type		tag_type;    /* simple, ordered, etc. */
};

/*
 * Used for CTL_MSG_PORT_SYNC.
 */
struct ctl_ha_msg_port {
	struct ctl_ha_msg_hdr	hdr;
	int			port_type;
	int			physical_port;
	int			virtual_port;
	int			status;
	int			name_len;
	int			lun_map_len;
	int			port_devid_len;
	int			target_devid_len;
	int			init_devid_len;
	uint8_t			data[];
};

/*
 * Used for CTL_MSG_LUN_SYNC.
 */
struct ctl_ha_msg_lun {
	struct ctl_ha_msg_hdr	hdr;
	int			flags;
	unsigned int		pr_generation;
	uint32_t		pr_res_idx;
	uint8_t			pr_res_type;
	int			lun_devid_len;
	int			pr_key_count;
	uint8_t			data[];
};

struct ctl_ha_msg_lun_pr_key {
	uint32_t		pr_iid;
	uint64_t		pr_key;
};

/*
 * Used for CTL_MSG_IID_SYNC.
 */
struct ctl_ha_msg_iid {
	struct ctl_ha_msg_hdr	hdr;
	int			in_use;
	int			name_len;
	uint64_t		wwpn;
	uint8_t			data[];
};

/*
 * Used for CTL_MSG_MODE_SYNC.
 */
struct ctl_ha_msg_mode {
	struct ctl_ha_msg_hdr	hdr;
	uint8_t			page_code;
	uint8_t			subpage;
	uint16_t		page_len;
	uint8_t			data[];
};

union ctl_ha_msg {
	struct ctl_ha_msg_hdr	hdr;
	struct ctl_ha_msg_task	task;
	struct ctl_ha_msg_scsi	scsi;
	struct ctl_ha_msg_dt	dt;
	struct ctl_ha_msg_pr	pr;
	struct ctl_ha_msg_ua	ua;
	struct ctl_ha_msg_port	port;
	struct ctl_ha_msg_lun	lun;
	struct ctl_ha_msg_iid	iid;
	struct ctl_ha_msg_login	login;
	struct ctl_ha_msg_mode	mode;
};

struct ctl_prio {
	struct ctl_io_hdr	io_hdr;
	struct ctl_ha_msg_pr	pr_msg;
};

union ctl_io {
	struct ctl_io_hdr	io_hdr;	/* common to all I/O types */
	struct ctl_scsiio	scsiio;	/* Normal SCSI commands */
	struct ctl_taskio	taskio;	/* SCSI task management/reset */
	struct ctl_nvmeio	nvmeio;	/* Normal and admin NVMe commands */
	struct ctl_prio		presio;	/* update per. res info on other SC */
};

#ifdef _KERNEL
#define	_CTL_IO_ASSERT_1(io, _1)					\
	KASSERT((io)->io_hdr.io_type == CTL_IO_##_1,			\
	    ("%s: unexpected I/O type %x", __func__, (io)->io_hdr.io_type))

#define	_CTL_IO_ASSERT_2(io, _1, _2)					\
	KASSERT((io)->io_hdr.io_type == CTL_IO_##_1 ||			\
	    (io)->io_hdr.io_type == CTL_IO_##_2,			\
	    ("%s: unexpected I/O type %x", __func__, (io)->io_hdr.io_type))

#define	_CTL_IO_ASSERT_MACRO(io, _1, _2, NAME, ...)			\
	NAME

#define	CTL_IO_ASSERT(...)						\
	_CTL_IO_ASSERT_MACRO(__VA_ARGS__, _CTL_IO_ASSERT_2,		\
	    _CTL_IO_ASSERT_1)(__VA_ARGS__)
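
/*
 * Example (editorial, not from the original header): CTL_IO_ASSERT()
 * accepts one or two expected types and expands to the matching helper
 * above, e.g.:
 */
#if 0
	CTL_IO_ASSERT(io, SCSI);		/* must be a SCSI I/O */
	CTL_IO_ASSERT(io, NVME, NVME_ADMIN);	/* either NVMe type */
#endif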

static __inline uint32_t
ctl_kern_sg_entries(union ctl_io *io)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		return (io->scsiio.kern_sg_entries);
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		return (io->nvmeio.kern_sg_entries);
	default:
		__assert_unreachable();
	}
}

static __inline uint8_t *
ctl_kern_data_ptr(union ctl_io *io)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		return (io->scsiio.kern_data_ptr);
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		return (io->nvmeio.kern_data_ptr);
	default:
		__assert_unreachable();
	}
}

static __inline uint32_t
ctl_kern_data_len(union ctl_io *io)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		return (io->scsiio.kern_data_len);
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		return (io->nvmeio.kern_data_len);
	default:
		__assert_unreachable();
	}
}

static __inline uint32_t
ctl_kern_total_len(union ctl_io *io)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		return (io->scsiio.kern_total_len);
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		return (io->nvmeio.kern_total_len);
	default:
		__assert_unreachable();
	}
}

static __inline uint32_t
ctl_kern_data_resid(union ctl_io *io)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		return (io->scsiio.kern_data_resid);
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		return (io->nvmeio.kern_data_resid);
	default:
		__assert_unreachable();
	}
}

static __inline uint32_t
ctl_kern_rel_offset(union ctl_io *io)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		return (io->scsiio.kern_rel_offset);
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		return (io->nvmeio.kern_rel_offset);
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_add_kern_rel_offset(union ctl_io *io, uint32_t offset)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_rel_offset += offset;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_rel_offset += offset;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_kern_sg_entries(union ctl_io *io, uint32_t kern_sg_entries)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_sg_entries = kern_sg_entries;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_sg_entries = kern_sg_entries;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_kern_data_ptr(union ctl_io *io, void *kern_data_ptr)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_data_ptr = kern_data_ptr;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_data_ptr = kern_data_ptr;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_kern_data_len(union ctl_io *io, uint32_t kern_data_len)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_data_len = kern_data_len;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_data_len = kern_data_len;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_kern_total_len(union ctl_io *io, uint32_t kern_total_len)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_total_len = kern_total_len;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_total_len = kern_total_len;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_kern_data_resid(union ctl_io *io, uint32_t kern_data_resid)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_data_resid = kern_data_resid;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_data_resid = kern_data_resid;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_kern_rel_offset(union ctl_io *io, uint32_t kern_rel_offset)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_rel_offset = kern_rel_offset;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_rel_offset = kern_rel_offset;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_be_move_done(union ctl_io *io, ctl_be_move_done_t be_move_done)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.be_move_done = be_move_done;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.be_move_done = be_move_done;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_io_cont(union ctl_io *io, ctl_io_cont io_cont)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.io_cont = io_cont;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.io_cont = io_cont;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_kern_data_ref(union ctl_io *io, ctl_ref kern_data_ref)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_data_ref = kern_data_ref;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_data_ref = kern_data_ref;
		break;
	default:
		__assert_unreachable();
	}
}

static __inline void
ctl_set_kern_data_arg(union ctl_io *io, void *kern_data_arg)
{
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		io->scsiio.kern_data_arg = kern_data_arg;
		break;
	case CTL_IO_NVME:
	case CTL_IO_NVME_ADMIN:
		io->nvmeio.kern_data_arg = kern_data_arg;
		break;
	default:
		__assert_unreachable();
	}
}
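
/*
 * Example (editorial, not from the original header): the type-generic
 * helpers above let code handle SCSI and NVMe I/O uniformly, e.g. when
 * walking the kernel S/G list; transfer_segment() is hypothetical.
 */
#if 0
	struct ctl_sg_entry *sgl, sg_ent;
	uint32_t i, sg_entries;

	sg_entries = ctl_kern_sg_entries(io);
	if (sg_entries > 0) {
		sgl = (struct ctl_sg_entry *)ctl_kern_data_ptr(io);
	} else {
		/* No list: treat the flat buffer as a one-entry list. */
		sg_ent.addr = ctl_kern_data_ptr(io);
		sg_ent.len = ctl_kern_data_len(io);
		sgl = &sg_ent;
		sg_entries = 1;
	}
	for (i = 0; i < sg_entries; i++)
		transfer_segment(io, sgl[i].addr, sgl[i].len);
#endif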

union ctl_io *ctl_alloc_io(void *pool_ref);
union ctl_io *ctl_alloc_io_nowait(void *pool_ref);
void ctl_free_io(union ctl_io *io);
void ctl_zero_io(union ctl_io *io);
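
/*
 * Example (editorial, not from the original header): typical frontend
 * allocation pattern; "pool_ref" is whatever pool reference the caller
 * owns (e.g. a port's ctl_pool_ref).
 */
#if 0
	union ctl_io *io;

	io = ctl_alloc_io(pool_ref);		/* may sleep */
	ctl_zero_io(io);
	io->io_hdr.io_type = CTL_IO_SCSI;
	/* ... fill in the nexus, tag and CDB, then submit ... */
	ctl_free_io(io);
#endif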

#endif /* _KERNEL */

#endif	/* _CTL_IO_H_ */

/*
 * vim: ts=8
 */