xref: /linux/drivers/scsi/scsi_debug.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47 
48 #include <net/checksum.h>
49 
50 #include <linux/unaligned.h>
51 
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60 
61 #include "sd.h"
62 #include "scsi_logging.h"
63 
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67 
68 #define MY_NAME "scsi_debug"
69 
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define OVERLAP_ATOMIC_COMMAND_ASC 0x0
73 #define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
74 #define FILEMARK_DETECTED_ASCQ 0x1
75 #define EOP_EOM_DETECTED_ASCQ 0x2
76 #define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
77 #define EOD_DETECTED_ASCQ 0x5
78 #define LOGICAL_UNIT_NOT_READY 0x4
79 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
80 #define UNRECOVERED_READ_ERR 0x11
81 #define PARAMETER_LIST_LENGTH_ERR 0x1a
82 #define INVALID_OPCODE 0x20
83 #define LBA_OUT_OF_RANGE 0x21
84 #define INVALID_FIELD_IN_CDB 0x24
85 #define INVALID_FIELD_IN_PARAM_LIST 0x26
86 #define WRITE_PROTECTED 0x27
87 #define UA_READY_ASC 0x28
88 #define UA_RESET_ASC 0x29
89 #define UA_CHANGED_ASC 0x2a
90 #define TOO_MANY_IN_PARTITION_ASC 0x3b
91 #define TARGET_CHANGED_ASC 0x3f
92 #define LUNS_CHANGED_ASCQ 0x0e
93 #define INSUFF_RES_ASC 0x55
94 #define INSUFF_RES_ASCQ 0x3
95 #define POWER_ON_RESET_ASCQ 0x0
96 #define POWER_ON_OCCURRED_ASCQ 0x1
97 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
98 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
99 #define CAPACITY_CHANGED_ASCQ 0x9
100 #define SAVING_PARAMS_UNSUP 0x39
101 #define TRANSPORT_PROBLEM 0x4b
102 #define THRESHOLD_EXCEEDED 0x5d
103 #define LOW_POWER_COND_ON 0x5e
104 #define MISCOMPARE_VERIFY_ASC 0x1d
105 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
106 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
107 #define WRITE_ERROR_ASC 0xc
108 #define UNALIGNED_WRITE_ASCQ 0x4
109 #define WRITE_BOUNDARY_ASCQ 0x5
110 #define READ_INVDATA_ASCQ 0x6
111 #define READ_BOUNDARY_ASCQ 0x7
112 #define ATTEMPT_ACCESS_GAP 0x9
113 #define INSUFF_ZONE_ASCQ 0xe
114 /* see drivers/scsi/sense_codes.h */
115 
116 /* Additional Sense Code Qualifier (ASCQ) */
117 #define ACK_NAK_TO 0x3
118 
119 /* Default values for driver parameters */
120 #define DEF_NUM_HOST   1
121 #define DEF_NUM_TGTS   1
122 #define DEF_MAX_LUNS   1
123 /* With these defaults, this driver will make 1 host with 1 target
124  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
125  */
126 #define DEF_ATO 1
127 #define DEF_CDB_LEN 10
128 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
129 #define DEF_DEV_SIZE_PRE_INIT   0
130 #define DEF_DEV_SIZE_MB   8
131 #define DEF_ZBC_DEV_SIZE_MB   128
132 #define DEF_DIF 0
133 #define DEF_DIX 0
134 #define DEF_PER_HOST_STORE false
135 #define DEF_D_SENSE   0
136 #define DEF_EVERY_NTH   0
137 #define DEF_FAKE_RW	0
138 #define DEF_GUARD 0
139 #define DEF_HOST_LOCK 0
140 #define DEF_LBPU 0
141 #define DEF_LBPWS 0
142 #define DEF_LBPWS10 0
143 #define DEF_LBPRZ 1
144 #define DEF_LOWEST_ALIGNED 0
145 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
146 #define DEF_NO_LUN_0   0
147 #define DEF_NUM_PARTS   0
148 #define DEF_OPTS   0
149 #define DEF_OPT_BLKS 1024
150 #define DEF_PHYSBLK_EXP 0
151 #define DEF_OPT_XFERLEN_EXP 0
152 #define DEF_PTYPE   TYPE_DISK
153 #define DEF_RANDOM false
154 #define DEF_REMOVABLE false
155 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
156 #define DEF_SECTOR_SIZE 512
157 #define DEF_UNMAP_ALIGNMENT 0
158 #define DEF_UNMAP_GRANULARITY 1
159 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
160 #define DEF_UNMAP_MAX_DESC 256
161 #define DEF_VIRTUAL_GB   0
162 #define DEF_VPD_USE_HOSTNO 1
163 #define DEF_WRITESAME_LENGTH 0xFFFF
164 #define DEF_ATOMIC_WR 0
165 #define DEF_ATOMIC_WR_MAX_LENGTH 128
166 #define DEF_ATOMIC_WR_ALIGN 2
167 #define DEF_ATOMIC_WR_GRAN 2
168 #define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
169 #define DEF_ATOMIC_WR_MAX_BNDRY 128
170 #define DEF_STRICT 0
171 #define DEF_STATISTICS false
172 #define DEF_SUBMIT_QUEUES 1
173 #define DEF_TUR_MS_TO_READY 0
174 #define DEF_UUID_CTL 0
175 #define JDELAY_OVERRIDDEN -9999
176 
177 /* Default parameters for ZBC drives */
178 #define DEF_ZBC_ZONE_SIZE_MB	128
179 #define DEF_ZBC_MAX_OPEN_ZONES	8
180 #define DEF_ZBC_NR_CONV_ZONES	1
181 
182 /* Default parameters for tape drives */
183 #define TAPE_DEF_DENSITY  0x0
184 #define TAPE_BAD_DENSITY  0x65
185 #define TAPE_DEF_BLKSIZE  0
186 #define TAPE_MIN_BLKSIZE  512
187 #define TAPE_MAX_BLKSIZE  1048576
188 #define TAPE_EW 20
189 #define TAPE_MAX_PARTITIONS 2
190 #define TAPE_UNITS 10000
191 #define TAPE_PARTITION_1_UNITS 1000
192 
193 /* The tape block data definitions */
194 #define TAPE_BLOCK_FM_FLAG   ((u32)0x1 << 30)
195 #define TAPE_BLOCK_EOD_FLAG  ((u32)0x2 << 30)
196 #define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
197 #define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
198 #define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
199 #define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
200 #define IS_TAPE_BLOCK_FM(a)   ((a & TAPE_BLOCK_FM_FLAG) != 0)
201 #define IS_TAPE_BLOCK_EOD(a)  ((a & TAPE_BLOCK_EOD_FLAG) != 0)
202 
203 struct tape_block {
204 	u32 fl_size;		/* TAPE_BLOCK_*_FLAG bits in top 2 bits, block size in low 30 (see masks above) */
205 	unsigned char data[4];	/* start of block payload; presumably header of variable-length data — confirm */
206 };
207 
208 /* Flags for sense data */
209 #define SENSE_FLAG_FILEMARK  0x80
210 #define SENSE_FLAG_EOM 0x40
211 #define SENSE_FLAG_ILI 0x20
212 
213 #define SDEBUG_LUN_0_VAL 0
214 
215 /* bit mask values for sdebug_opts */
216 #define SDEBUG_OPT_NOISE		1
217 #define SDEBUG_OPT_MEDIUM_ERR		2
218 #define SDEBUG_OPT_TIMEOUT		4
219 #define SDEBUG_OPT_RECOVERED_ERR	8
220 #define SDEBUG_OPT_TRANSPORT_ERR	16
221 #define SDEBUG_OPT_DIF_ERR		32
222 #define SDEBUG_OPT_DIX_ERR		64
223 #define SDEBUG_OPT_MAC_TIMEOUT		128
224 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
225 #define SDEBUG_OPT_Q_NOISE		0x200
226 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
227 #define SDEBUG_OPT_RARE_TSF		0x800
228 #define SDEBUG_OPT_N_WCE		0x1000
229 #define SDEBUG_OPT_RESET_NOISE		0x2000
230 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
231 #define SDEBUG_OPT_HOST_BUSY		0x8000
232 #define SDEBUG_OPT_CMD_ABORT		0x10000
233 #define SDEBUG_OPT_UNALIGNED_WRITE	0x20000
234 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
235 			      SDEBUG_OPT_RESET_NOISE)
236 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
237 				  SDEBUG_OPT_TRANSPORT_ERR | \
238 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
239 				  SDEBUG_OPT_SHORT_TRANSFER | \
240 				  SDEBUG_OPT_HOST_BUSY | \
241 				  SDEBUG_OPT_CMD_ABORT | \
242 				  SDEBUG_OPT_UNALIGNED_WRITE)
243 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
244 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
245 
246 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
247  * priority order. In the subset implemented here lower numbers have higher
248  * priority. The UA numbers should be a sequence starting from 0 with
249  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
250 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
251 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
252 #define SDEBUG_UA_BUS_RESET 2
253 #define SDEBUG_UA_MODE_CHANGED 3
254 #define SDEBUG_UA_CAPACITY_CHANGED 4
255 #define SDEBUG_UA_LUNS_CHANGED 5
256 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
257 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
258 #define SDEBUG_UA_NOT_READY_TO_READY 8
259 #define SDEBUG_NUM_UAS 9
260 
261 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
262  * sector on read commands: */
263 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
264 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
265 
266 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
267  * (for response) per submit queue at one time. Can be reduced by max_queue
268  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
269  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
270  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
271  * but cannot exceed SDEBUG_CANQUEUE .
272  */
273 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
274 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
275 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
276 
277 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
278 #define F_D_IN			1	/* Data-in command (e.g. READ) */
279 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
280 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
281 #define F_D_UNKN		8
282 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
283 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
284 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
285 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
286 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
287 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
288 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
289 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
290 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
291 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
292 
293 /* Useful combinations of the above flags */
294 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
295 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
296 #define FF_SA (F_SA_HIGH | F_SA_LOW)
297 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
298 
299 /* Device selection bit mask */
300 #define DS_ALL     0xffffffff
301 #define DS_SBC     (1 << TYPE_DISK)
302 #define DS_SSC     (1 << TYPE_TAPE)
303 #define DS_ZBC     (1 << TYPE_ZBC)
304 
305 #define DS_NO_SSC  (DS_ALL & ~DS_SSC)
306 
307 #define SDEBUG_MAX_PARTS 4
308 
309 #define SDEBUG_MAX_CMD_LEN 32
310 
311 #define SDEB_XA_NOT_IN_USE XA_MARK_1
312 
313 /* Zone types (zbcr05 table 25) */
314 enum sdebug_z_type {
315 	ZBC_ZTYPE_CNV	= 0x1,	/* conventional */
316 	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required */
317 	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred */
318 	/* ZBC_ZTYPE_SOBR = 0x4, */
319 	ZBC_ZTYPE_GAP	= 0x5,	/* gap (no access) zone */
320 };
321 
322 /* enumeration names taken from table 26, zbcr05 */
323 enum sdebug_z_cond {
324 	ZBC_NOT_WRITE_POINTER	= 0x0,
325 	ZC1_EMPTY		= 0x1,
326 	ZC2_IMPLICIT_OPEN	= 0x2,
327 	ZC3_EXPLICIT_OPEN	= 0x3,
328 	ZC4_CLOSED		= 0x4,
329 	ZC6_READ_ONLY		= 0xd,	/* values are the ZBC condition codes, hence not sequential */
330 	ZC5_FULL		= 0xe,
331 	ZC7_OFFLINE		= 0xf,
332 };
333 
334 struct sdeb_zone_state {	/* ZBC: per zone state */
335 	enum sdebug_z_type z_type;
336 	enum sdebug_z_cond z_cond;
337 	bool z_non_seq_resource;	/* zone used a non-sequential resource — TODO confirm semantics */
338 	unsigned int z_size;	/* zone size; presumably in logical blocks — confirm unit */
339 	sector_t z_start;	/* first sector of this zone */
340 	sector_t z_wp;		/* write pointer position */
341 };
342 
343 enum sdebug_err_type {
344 	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
345 	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
346 					/* queuecmd return failed */
347 	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
348 					/* queuecmd return succeed but */
349 					/* with errors set in scsi_cmnd */
350 	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
351 					/* scsi_debug_abort() */
352 	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
353 					/* scsi_debug_device_reset() */
354 };
355 
356 struct sdebug_err_inject {
357 	int type;		/* enum sdebug_err_type */
358 	struct list_head list;	/* node in sdebug_dev_info.inject_err_list */
359 	int cnt;		/* NOTE(review): injection count/budget — confirm decrement semantics */
360 	unsigned char cmd;	/* SCSI opcode (first cdb byte) this injection applies to */
361 	struct rcu_head rcu;	/* for deferred freeing; presumably the list is RCU-protected — confirm */
362 
363 	union {
364 		/*
365 		 * For ERR_FAIL_QUEUE_CMD
366 		 */
367 		int queuecmd_ret;	/* value for queuecommand to return */
368 
369 		/*
370 		 * For ERR_FAIL_CMD: error fields placed into the scsi_cmnd
371 		 */
372 		struct {
373 			unsigned char host_byte;
374 			unsigned char driver_byte;
375 			unsigned char status_byte;
376 			unsigned char sense_key;
377 			unsigned char asc;
378 			unsigned char asq;
379 		};
380 	};
381 };
382 
383 struct sdebug_dev_info {
384 	struct list_head dev_list;	/* node in sdebug_host_info.dev_info_list */
385 	unsigned int channel;
386 	unsigned int target;
387 	u64 lun;
388 	uuid_t lu_name;		/* logical unit name (UUID); used when uuid_ctl is set — confirm */
389 	struct sdebug_host_info *sdbg_host;	/* back pointer to owning pseudo host */
390 	unsigned long uas_bm[1];	/* pending Unit Attention bitmap; bit numbers are SDEBUG_UA_* */
391 	atomic_t stopped;	/* 1: by SSU, 2: device start */
392 	bool used;
393 
394 	/* For ZBC devices */
395 	bool zoned;
396 	unsigned int zcap;	/* zone capacity — TODO confirm unit (logical blocks) */
397 	unsigned int zsize;	/* zone size — TODO confirm unit */
398 	unsigned int zsize_shift;	/* presumably log2(zsize) for fast LBA->zone mapping — confirm */
399 	unsigned int nr_zones;
400 	unsigned int nr_conv_zones;
401 	unsigned int nr_seq_zones;
402 	unsigned int nr_imp_open;	/* zones currently implicitly open */
403 	unsigned int nr_exp_open;	/* zones currently explicitly open */
404 	unsigned int nr_closed;
405 	unsigned int max_open;	/* limit on concurrently open zones */
406 	ktime_t create_ts;	/* time since bootup that this device was created */
407 	struct sdeb_zone_state *zstate;	/* per-zone state array; presumably nr_zones entries — confirm */
408 
409 	/* For tapes */
410 	unsigned int tape_blksize;
411 	unsigned int tape_density;
412 	unsigned char tape_partition;	/* currently active partition */
413 	unsigned char tape_nbr_partitions;
414 	unsigned char tape_pending_nbr_partitions;
415 	unsigned int tape_pending_part_0_size;
416 	unsigned int tape_pending_part_1_size;
417 	unsigned char tape_dce;
418 	unsigned int tape_location[TAPE_MAX_PARTITIONS];
419 	unsigned int tape_eop[TAPE_MAX_PARTITIONS];
420 	struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];
421 
422 	struct dentry *debugfs_entry;
423 	struct spinlock list_lock;	/* NOTE(review): presumably protects inject_err_list — confirm */
424 	struct list_head inject_err_list;	/* list of struct sdebug_err_inject */
425 };
426 
427 struct sdebug_target_info {
428 	bool reset_fail;	/* NOTE(review): appears to force target reset to fail — confirm against reset handler */
429 	struct dentry *debugfs_entry;
430 };
431 
432 struct sdebug_host_info {
433 	struct list_head host_list;	/* node in the driver-wide list of pseudo hosts */
434 	int si_idx;	/* sdeb_store_info (per host) xarray index */
435 	struct Scsi_Host *shost;
436 	struct device dev;	/* embedded pseudo device; see dev_to_sdebug_host() below */
437 	struct list_head dev_info_list;	/* sdebug_dev_info.dev_list entries for this host */
438 };
439 
440 /* There is an xarray of pointers to this struct's objects, one per host */
441 struct sdeb_store_info {
442 	rwlock_t macc_data_lck;	/* for media data access on this store */
443 	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
444 	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
445 	u8 *storep;		/* user data storage (ram) */
446 	struct t10_pi_tuple *dif_storep; /* protection info; presumably NULL unless DIF configured — confirm */
447 	void *map_storep;	/* provisioning map */
448 };
449 
450 #define dev_to_sdebug_host(d)	\
451 	container_of(d, struct sdebug_host_info, dev)
452 
453 #define shost_to_sdebug_host(shost)	\
454 	dev_to_sdebug_host(shost->dma_dev)
455 
456 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
457 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};	/* how a completion is deferred: none, hrtimer, workqueue, or mq poll */
458 
459 struct sdebug_defer {
460 	struct hrtimer hrt;	/* used when defer_t == SDEB_DEFER_HRT */
461 	struct execute_work ew;	/* used when defer_t == SDEB_DEFER_WQ */
462 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
463 	int issuing_cpu;	/* cpu command was submitted on; see sdebug_miss_cpus counter — confirm */
464 	bool aborted;	/* true when blk_abort_request() already called */
465 	enum sdeb_defer_type defer_t;
466 };
467 
468 struct sdebug_scsi_cmd {	/* per-command private data */
469 	spinlock_t   lock;	/* NOTE(review): presumably serializes access to sd_dp — confirm */
470 	struct sdebug_defer sd_dp;	/* deferred-completion state for this command */
471 };
472 
473 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
474 static atomic_t sdebug_completions;  /* count of deferred completions */
475 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
476 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
477 static atomic_t sdeb_inject_pending;
478 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
479 
480 struct opcode_info_t {
481 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
482 				/* for terminating element */
483 	u8 opcode;		/* if num_attached > 0, preferred */
484 	u16 sa;			/* service action */
485 	u32 devsel;		/* device type mask (DS_*) for this definition */
486 	u32 flags;		/* OR-ed set of F_* / FF_* flags defined above */
487 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
488 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
489 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
490 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
491 };
492 
493 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
494 enum sdeb_opcode_index {
495 	SDEB_I_INVALID_OPCODE =	0,	/* must stay 0: zero entries in opcode_ind_arr map here */
496 	SDEB_I_INQUIRY = 1,
497 	SDEB_I_REPORT_LUNS = 2,
498 	SDEB_I_REQUEST_SENSE = 3,
499 	SDEB_I_TEST_UNIT_READY = 4,
500 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
501 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
502 	SDEB_I_LOG_SENSE = 7,
503 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
504 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
505 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
506 	SDEB_I_START_STOP = 11,
507 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
508 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
509 	SDEB_I_MAINT_IN = 14,
510 	SDEB_I_MAINT_OUT = 15,
511 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
512 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
513 	SDEB_I_RESERVE = 18,		/* 6, 10 */
514 	SDEB_I_RELEASE = 19,		/* 6, 10 */
515 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
516 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
517 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
518 	SDEB_I_SEND_DIAG = 23,
519 	SDEB_I_UNMAP = 24,
520 	SDEB_I_WRITE_BUFFER = 25,
521 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
522 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
523 	SDEB_I_COMP_WRITE = 28,
524 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
525 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
526 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
527 	SDEB_I_ATOMIC_WRITE_16 = 32,
528 	SDEB_I_READ_BLOCK_LIMITS = 33,
529 	SDEB_I_LOCATE = 34,
530 	SDEB_I_WRITE_FILEMARKS = 35,
531 	SDEB_I_SPACE = 36,
532 	SDEB_I_FORMAT_MEDIUM = 37,
533 	SDEB_I_ERASE = 38,
534 	SDEB_I_LAST_ELEM_P1 = 39,	/* keep this last (previous + 1) */
535 };
536 
537 
538 static const unsigned char opcode_ind_arr[256] = {	/* maps cdb[0] to SDEB_I_* index; 0 => SDEB_I_INVALID_OPCODE */
539 /* 0x0; 0x0->0x1f: 6 byte cdbs */
540 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
541 	    SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
542 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
543 	SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
544 	    SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
545 	0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
546 	    SDEB_I_ALLOW_REMOVAL, 0,
547 /* 0x20; 0x20->0x3f: 10 byte cdbs */
548 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
549 	SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
550 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
551 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
552 /* 0x40; 0x40->0x5f: 10 byte cdbs */
553 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
554 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
555 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
556 	    SDEB_I_RELEASE,
557 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
558 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
559 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
560 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
561 	0, SDEB_I_VARIABLE_LEN,
562 /* 0x80; 0x80->0x9f: 16 byte cdbs */
563 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
564 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
565 	0, 0, 0, SDEB_I_VERIFY,
566 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
567 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
568 	0, 0, 0, 0,
569 	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
570 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
571 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
572 	     SDEB_I_MAINT_OUT, 0, 0, 0,
573 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
574 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
575 	0, 0, 0, 0, 0, 0, 0, 0,
576 	0, 0, 0, 0, 0, 0, 0, 0,
577 /* 0xc0; 0xc0->0xff: vendor specific */
578 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
579 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
580 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
581 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
582 };
583 
584 /*
585  * The following "response" functions return the SCSI mid-level's 4 byte
586  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
587  * command completion, they can mask their return value with
588  * SDEG_RES_IMMED_MASK .
589  */
590 #define SDEG_RES_IMMED_MASK 0x40000000
591 
592 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
593 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
594 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
595 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
596 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
597 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
598 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
599 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
600 static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
601 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
602 static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
603 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
604 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
605 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
606 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
607 static int resp_get_stream_status(struct scsi_cmnd *scp,
608 				  struct sdebug_dev_info *devip);
609 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
610 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
611 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
612 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
613 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
614 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
615 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
616 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
617 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
618 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
619 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
620 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
621 static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
622 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
623 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
624 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
625 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
626 static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
627 static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
628 static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
629 static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
630 static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
631 static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
632 static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
633 static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);
634 
635 static int sdebug_do_add_host(bool mk_new_store);
636 static int sdebug_add_host_helper(int per_host_idx);
637 static void sdebug_do_remove_host(bool the_end);
638 static int sdebug_add_store(void);
639 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
640 static void sdebug_erase_all_stores(bool apart_from_first);
641 
642 /*
643  * The following are overflow arrays for cdbs that "hit" the same index in
644  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
645  * should be placed in opcode_info_arr[], the others should be placed here.
646  */
647 static const struct opcode_info_t msense_iarr[] = {	/* overflow entries for SDEB_I_MODE_SENSE */
648 	{0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
649 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
650 };
651 
652 static const struct opcode_info_t mselect_iarr[] = {	/* overflow entries for SDEB_I_MODE_SELECT */
653 	{0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
654 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
655 };
656 
657 static const struct opcode_info_t read_iarr[] = {	/* overflow entries for SDEB_I_READ; READ(16) is the preferred entry */
658 	{0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
659 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
660 	     0, 0, 0, 0} },
661 	{0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */
662 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
663 	{0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */
664 	    {6,  0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
665 	{0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
666 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
667 	     0xc7, 0, 0, 0, 0} },
668 };
669 
670 static const struct opcode_info_t write_iarr[] = {	/* overflow entries for SDEB_I_WRITE; WRITE(16) is the preferred entry */
671 	{0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
672 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
673 		   0, 0, 0, 0, 0, 0} },
674 	{0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */
675 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
676 		   0, 0, 0} },
677 	{0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */
678 	    NULL, {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
679 		   0, 0, 0} },
680 	{0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
681 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
682 		   0xbf, 0xc7, 0, 0, 0, 0} },
683 };
684 
685 static const struct opcode_info_t verify_iarr[] = {	/* overflow entry for SDEB_I_VERIFY */
686 	{0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
687 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
688 		   0, 0, 0, 0, 0, 0} },
689 };
690 
691 static const struct opcode_info_t sa_in_16_iarr[] = {	/* overflow entries for SDEB_I_SERV_ACT_IN_16 */
692 	{0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
693 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
694 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
695 	{0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
696 	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
697 	     0, 0} },	/* GET STREAM STATUS */
698 };
699 
700 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH: overflow entries for SDEB_I_VARIABLE_LEN */
701 	{0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
702 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
703 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
704 	{0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
705 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
706 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
707 };
708 
709 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN: overflow entries for SDEB_I_MAINT_IN */
710 	{0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
711 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
712 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
713 	{0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
714 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
715 	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
716 };
717 
718 static const struct opcode_info_t write_same_iarr[] = {	/* overflow entry for SDEB_I_WRITE_SAME */
719 	{0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
720 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
721 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
722 };
723 
724 static const struct opcode_info_t reserve_iarr[] = {	/* overflow entry for SDEB_I_RESERVE */
725 	{0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
726 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
727 };
728 
729 static const struct opcode_info_t release_iarr[] = {	/* overflow entry for SDEB_I_RELEASE */
730 	{0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
731 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
732 };
733 
734 static const struct opcode_info_t sync_cache_iarr[] = {	/* overflow entry for SDEB_I_SYNC_CACHE */
735 	{0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
736 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
737 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
738 };
739 
740 static const struct opcode_info_t pre_fetch_iarr[] = {	/* overflow entries for SDEB_I_PRE_FETCH */
741 	{0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
742 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
743 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
744 	{0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
745 	    {10,  0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
746 	     0, 0, 0, 0} },				/* READ POSITION (10) */
747 };
748 
749 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16): overflow entries for SDEB_I_ZONE_OUT */
750 	{0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
751 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
752 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
753 	{0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
754 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
755 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
756 	{0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
757 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
758 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
759 };
760 
761 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16): overflow entries for SDEB_I_ZONE_IN */
762 	{0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
763 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
764 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
765 };	/* NOTE(review): pfp is NULL here — presumably dispatched via the main SDEB_I_ZONE_IN entry; confirm */
766 
767 
/*
 * This array is indexed by the SDEB_I_* values: every index must be mapped,
 * and the terminating element (first field 0xff) must stay in place for
 * logic that scans this table, such as REPORT SUPPORTED OPERATION CODES.
 * Per entry: number of chained variants (in the *_iarr array named by the
 * 7th field), opcode, service action, device-type selector (DS_*), flags,
 * response function, variant array, then {cdb_len, per-byte usage masks}.
 */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },					/* REPORT LUNS */
	{0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,	/* REQUEST SENSE */
	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,	/* REWIND */
	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL,      /* SEND DIAGNOSTIC */
	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
						/* READ POSITION (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 32 */
	{0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
		{16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
	{0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL,    /* READ BLOCK LIMITS (6) */
	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL,	   /* LOCATE (10) */
	    {10,  0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL,   /* WRITE FILEMARKS (6) */
	    {6,  0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL,    /* SPACE (6) */
	    {6,  0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL,  /* FORMAT MEDIUM (6) */
	    {6,  0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL,    /* ERASE (6) */
	    {6,  0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 39 */
/* sentinel */
	{0xff, 0, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
901 
/*
 * Driver-wide tunable state.  The DEF_* defaults suggest these back the
 * like-named module parameters (the module_param declarations are
 * elsewhere in this file — confirm there before relying on a mapping).
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* Logical block provisioning knobs (see scsi_debug_lbp()). */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
/* Atomic-write emulation knobs (see scsi_debug_atomic_write()). */
static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
static unsigned int sdebug_atomic_wr_max_length_bndry =
			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
/* Zoned-block-device emulation model selected at load time. */
static enum {
	BLK_ZONED_NONE	= 0,
	BLK_ZONED_HA	= 1,	/* host-aware */
	BLK_ZONED_HM	= 2,	/* host-managed */
} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
972 
/* SAM logical unit addressing methods and the currently selected one. */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
/* Integer shadow of sdebug_lun_am — presumably for module-param parsing;
 * confirm against the module_param declarations. */
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
979 
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* All emulated hosts, protected by sdebug_host_list_mutex. */
static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

/* Backing stores (sdeb_store_info), indexed by store id in an xarray. */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* Global statistics/counters. */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/

/* Per-group write counters, indexed by the CDB GROUP NUMBER field. */
static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-built SAM status / host-byte result words. */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;

/* Root of this driver's debugfs tree; per-target dirs hang off it. */
static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
1043 
1044 static u32 sdebug_get_devsel(struct scsi_device *sdp)
1045 {
1046 	unsigned char devtype = sdp->type;
1047 	u32 devsel;
1048 
1049 	if (devtype < 32)
1050 		devsel = (1 << devtype);
1051 	else
1052 		devsel = DS_ALL;
1053 
1054 	return devsel;
1055 }
1056 
/* RCU callback: free an injected-error descriptor after the grace period. */
static void sdebug_err_free(struct rcu_head *head)
{
	struct sdebug_err_inject *inject =
		container_of(head, typeof(*inject), rcu);

	kfree(inject);
}
1064 
/*
 * Register an error injection on @sdev.  Any existing entry with the same
 * type and opcode is unlinked first (freed via RCU so lockless readers
 * stay safe), then @new is appended — the newest directive always wins.
 * Takes ownership of @new.
 */
static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == new->type && err->cmd == new->cmd) {
			list_del_rcu(&err->list);
			call_rcu(&err->rcu, sdebug_err_free);
		}
	}

	list_add_tail_rcu(&new->list, &devip->inject_err_list);
	spin_unlock(&devip->list_lock);
}
1081 
/*
 * Handle a "- <type> <opcode>" directive: remove the matching injection
 * from @sdev's list.  Always consumes (kfrees) @buf.  Returns @count on
 * success, -EINVAL if the line did not parse or no entry matched.
 */
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;
	int type;
	unsigned char cmd;

	if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
		kfree(buf);
		return -EINVAL;
	}

	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == type && err->cmd == cmd) {
			list_del_rcu(&err->list);
			/* Defer the free past any lockless readers. */
			call_rcu(&err->rcu, sdebug_err_free);
			spin_unlock(&devip->list_lock);
			kfree(buf);
			return count;
		}
	}
	spin_unlock(&devip->list_lock);

	kfree(buf);
	return -EINVAL;
}
1109 
/*
 * Reader for the per-device "error" debugfs file: list the registered
 * error injections, one per line, printing the fields appropriate to
 * each injection type.
 */
static int sdebug_error_show(struct seq_file *m, void *p)
{
	struct scsi_device *sdev = (struct scsi_device *)m->private;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
	struct sdebug_err_inject *err;

	seq_puts(m, "Type\tCount\tCommand\n");

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		switch (err->type) {
		case ERR_TMOUT_CMD:
		case ERR_ABORT_CMD_FAILED:
		case ERR_LUN_RESET_FAILED:
			seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
				err->cmd);
		break;

		case ERR_FAIL_QUEUE_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
				err->cnt, err->cmd, err->queuecmd_ret);
		break;

		case ERR_FAIL_CMD:
			seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				err->type, err->cnt, err->cmd,
				err->host_byte, err->driver_byte,
				err->status_byte, err->sense_key,
				err->asc, err->asq);
		break;
		}
	}
	rcu_read_unlock();

	return 0;
}
1146 
/* Open hook: bind sdebug_error_show() to the scsi_device in i_private. */
static int sdebug_error_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_error_show, inode->i_private);
}
1151 
1152 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1153 		size_t count, loff_t *ppos)
1154 {
1155 	char *buf;
1156 	unsigned int inject_type;
1157 	struct sdebug_err_inject *inject;
1158 	struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1159 
1160 	buf = memdup_user_nul(ubuf, count);
1161 	if (IS_ERR(buf))
1162 		return PTR_ERR(buf);
1163 
1164 	if (buf[0] == '-')
1165 		return sdebug_err_remove(sdev, buf, count);
1166 
1167 	if (sscanf(buf, "%d", &inject_type) != 1) {
1168 		kfree(buf);
1169 		return -EINVAL;
1170 	}
1171 
1172 	inject = kzalloc_obj(struct sdebug_err_inject);
1173 	if (!inject) {
1174 		kfree(buf);
1175 		return -ENOMEM;
1176 	}
1177 
1178 	switch (inject_type) {
1179 	case ERR_TMOUT_CMD:
1180 	case ERR_ABORT_CMD_FAILED:
1181 	case ERR_LUN_RESET_FAILED:
1182 		if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1183 			   &inject->cmd) != 3)
1184 			goto out_error;
1185 	break;
1186 
1187 	case ERR_FAIL_QUEUE_CMD:
1188 		if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1189 			   &inject->cmd, &inject->queuecmd_ret) != 4)
1190 			goto out_error;
1191 	break;
1192 
1193 	case ERR_FAIL_CMD:
1194 		if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1195 			   &inject->type, &inject->cnt, &inject->cmd,
1196 			   &inject->host_byte, &inject->driver_byte,
1197 			   &inject->status_byte, &inject->sense_key,
1198 			   &inject->asc, &inject->asq) != 9)
1199 			goto out_error;
1200 	break;
1201 
1202 	default:
1203 		goto out_error;
1204 	break;
1205 	}
1206 
1207 	kfree(buf);
1208 	sdebug_err_add(sdev, inject);
1209 
1210 	return count;
1211 
1212 out_error:
1213 	kfree(buf);
1214 	kfree(inject);
1215 	return -EINVAL;
1216 }
1217 
/* File operations for the per-device "error" debugfs control file. */
static const struct file_operations sdebug_error_fops = {
	.open	= sdebug_error_open,
	.read	= seq_read,
	.write	= sdebug_error_write,
	.release = single_release,
};
1224 
1225 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1226 {
1227 	struct scsi_target *starget = (struct scsi_target *)m->private;
1228 	struct sdebug_target_info *targetip =
1229 		(struct sdebug_target_info *)starget->hostdata;
1230 
1231 	if (targetip)
1232 		seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1233 
1234 	return 0;
1235 }
1236 
/* Open hook: bind the show routine to the scsi_target in i_private. */
static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
	return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}
1241 
1242 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1243 		const char __user *ubuf, size_t count, loff_t *ppos)
1244 {
1245 	int ret;
1246 	struct scsi_target *starget =
1247 		(struct scsi_target *)file->f_inode->i_private;
1248 	struct sdebug_target_info *targetip =
1249 		(struct sdebug_target_info *)starget->hostdata;
1250 
1251 	if (targetip) {
1252 		ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1253 		return ret < 0 ? ret : count;
1254 	}
1255 	return -ENODEV;
1256 }
1257 
/* File operations for the per-target "fail_reset" debugfs file. */
static const struct file_operations sdebug_target_reset_fail_fops = {
	.open	= sdebug_target_reset_fail_open,
	.read	= seq_read,
	.write	= sdebug_target_reset_fail_write,
	.release = single_release,
};
1264 
/*
 * Allocate per-target state and create its debugfs directory with a
 * "fail_reset" control file.  Returns 0 on success, -ENOMEM otherwise.
 */
static int sdebug_target_alloc(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = kzalloc_obj(struct sdebug_target_info);
	if (!targetip)
		return -ENOMEM;

	/*
	 * Wait for any asynchronous teardown of a previous incarnation of
	 * this target (see sdebug_target_destroy()) so the debugfs
	 * directory name is free for reuse.
	 */
	async_synchronize_full_domain(&sdebug_async_domain);

	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
				sdebug_debugfs_root);

	debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
				&sdebug_target_reset_fail_fops);

	starget->hostdata = targetip;

	return 0;
}
1285 
/*
 * Async-domain worker (the "tartget" typo is baked into the symbol name):
 * remove the per-target debugfs directory and free the target state.
 * Runs on sdebug_async_domain so sdebug_target_alloc() can wait for it.
 */
static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
	struct sdebug_target_info *targetip = data;

	debugfs_remove(targetip->debugfs_entry);
	kfree(targetip);
}
1293 
/*
 * Detach per-target state and defer its teardown (debugfs removal plus
 * kfree) to the exclusive async domain; see sdebug_tartget_cleanup_async().
 */
static void sdebug_target_destroy(struct scsi_target *starget)
{
	struct sdebug_target_info *targetip;

	targetip = (struct sdebug_target_info *)starget->hostdata;
	if (targetip) {
		starget->hostdata = NULL;
		async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
				&sdebug_async_domain);
	}
}
1305 
1306 /* Only do the extra work involved in logical block provisioning if one or
1307  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1308  * real reads and writes (i.e. not skipping them for speed).
1309  */
1310 static inline bool scsi_debug_lbp(void)
1311 {
1312 	return 0 == sdebug_fake_rw &&
1313 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1314 }
1315 
1316 static inline bool scsi_debug_atomic_write(void)
1317 {
1318 	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
1319 }
1320 
/*
 * Map an LBA to its byte address within the RAM backing store.  The LBA
 * wraps modulo sdebug_store_sectors (do_div() returns the remainder,
 * which replaces @lba here).  If @sip is NULL or has no storage, fall
 * back to store 0 in per_store_arr, which is expected to always exist.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
1333 
/*
 * Return the T10 protection-information tuple backing @sector.  The
 * sector wraps modulo sdebug_store_sectors (sector_div() returns the
 * remainder, which replaces @sector).
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
1341 
/*
 * Propagate the current sdebug_num_tgts setting to every emulated host.
 * max_id is bumped past this_id (the initiator's own id) when the two
 * would collide.  max_lun is pinned just above the REPORT LUNS
 * well-known LUN rather than tracking sdebug_max_luns.
 */
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
1360 
/* Whether an invalid field lies in the data-out buffer or in the CDB. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
1362 
/* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build ILLEGAL REQUEST sense carrying a SENSE KEY SPECIFIC field that
 * points at the offending byte (and optionally bit).  c_d selects
 * between the "in CDB" and "in parameter list" ASCs.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes (SPC field pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: sense-key specific field is valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
1402 
/*
 * Fill scp's sense buffer with key/asc/ascq, using descriptor or fixed
 * format according to the sdebug_dsense setting.
 */
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
1418 
/* Sense data that has information fields for tapes */
static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
			unsigned int information, unsigned char tape_flags)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	/* Descriptor format deliberately not used: the byte offsets
	 * written below are fixed-format offsets.
	 */
	scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
	/* only fixed format so far */

	scp->sense_buffer[0] |= 0x80; /* valid */
	scp->sense_buffer[2] |= tape_flags;	/* tape bits in byte 2 */
	put_unaligned_be32(information, &scp->sense_buffer[3]);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
1441 
/* Report ILLEGAL REQUEST with the INVALID OPCODE additional sense code. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
1446 
1447 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1448 			    void __user *arg)
1449 {
1450 	if (sdebug_verbose) {
1451 		if (0x1261 == cmd)
1452 			sdev_printk(KERN_INFO, dev, "BLKFLSBUF [0x1261]\n");
1453 		else if (0x5331 == cmd)
1454 			sdev_printk(KERN_INFO, dev,
1455 				    "CDROM_GET_CAPABILITY [0x5331]\n");
1456 		else
1457 			sdev_printk(KERN_INFO, dev, "cmd=0x%x\n", cmd);
1458 	}
1459 	return -EINVAL;
1460 	/* return -ENOTTY; // correct return but upsets fdisk */
1461 }
1462 
1463 static void config_cdb_len(struct scsi_device *sdev)
1464 {
1465 	switch (sdebug_cdb_len) {
1466 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1467 		sdev->use_10_for_rw = false;
1468 		sdev->use_16_for_rw = false;
1469 		sdev->use_10_for_ms = false;
1470 		break;
1471 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1472 		sdev->use_10_for_rw = true;
1473 		sdev->use_16_for_rw = false;
1474 		sdev->use_10_for_ms = false;
1475 		break;
1476 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1477 		sdev->use_10_for_rw = true;
1478 		sdev->use_16_for_rw = false;
1479 		sdev->use_10_for_ms = true;
1480 		break;
1481 	case 16:
1482 		sdev->use_10_for_rw = false;
1483 		sdev->use_16_for_rw = true;
1484 		sdev->use_10_for_ms = true;
1485 		break;
1486 	case 32: /* No knobs to suggest this so same as 16 for now */
1487 		sdev->use_10_for_rw = false;
1488 		sdev->use_16_for_rw = true;
1489 		sdev->use_10_for_ms = true;
1490 		break;
1491 	default:
1492 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1493 			sdebug_cdb_len);
1494 		sdev->use_10_for_rw = true;
1495 		sdev->use_16_for_rw = false;
1496 		sdev->use_10_for_ms = false;
1497 		sdebug_cdb_len = 10;
1498 		break;
1499 	}
1500 }
1501 
/* Apply the current sdebug_cdb_len policy to every device on every host. */
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
}
1517 
/*
 * Clear the LUNS_CHANGED unit attention from every LUN that shares
 * @devip's target: per SPC-4 the UA is reported only once per target
 * (see make_ua()).
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp = devip->sdbg_host;
	struct sdebug_dev_info *dp;

	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
		if ((devip->sdbg_host == dp->sdbg_host) &&
		    (devip->target == dp->target)) {
			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
}
1530 
/*
 * If any unit attention is pending for @devip, report the one with the
 * lowest bit index in uas_bm as CHECK CONDITION sense, clear that bit,
 * and return check_condition_result; otherwise return 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		case SDEBUG_UA_NOT_READY_TO_READY:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
					0);
			if (sdebug_verbose)
				cp = "not ready to ready transition/media change";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1622 
1623 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1624 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1625 				int arr_len)
1626 {
1627 	int act_len;
1628 	struct scsi_data_buffer *sdb = &scp->sdb;
1629 
1630 	if (!sdb->length)
1631 		return 0;
1632 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1633 		return DID_ERROR << 16;
1634 
1635 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1636 				      arr, arr_len);
1637 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1638 
1639 	return 0;
1640 }
1641 
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset is beyond the data-in buffer */
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/* resid may only shrink: keep the smaller of the current resid and
	 * the bytes remaining past the end of this (possibly out-of-order)
	 * write */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
1668 
1669 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1670  * 'arr' or -1 if error.
1671  */
1672 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1673 			       int arr_len)
1674 {
1675 	if (!scsi_bufflen(scp))
1676 		return 0;
1677 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1678 		return -1;
1679 
1680 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1681 }
1682 
1683 
/* Standard INQUIRY response strings: 8/16/4 byte fields, space padded */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;	/* target ports/device */
static const u64 naa3_comp_b = 0x3333333000000000ULL;	/* logical units */
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1691 
/* Device identification VPD page. Returns number of bytes placed in arr.
 * Emits a T10 vendor-id designator, then (if dev_id_num is non-negative,
 * i.e. not a well known LU) a logical unit designator (UUID or NAA-3,
 * chosen by sdebug_uuid_ctl) plus a relative target port designator,
 * followed by target port, target port group, target device and SCSI
 * name string designators.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (excludes 4 byte header) */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	/* "naa." followed by the naa3_comp_a prefix in ASCII hex ... */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	/* ... then the target device id, zero padded to 24 bytes total */
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1779 
/* Canned payload (bytes from offset 4 of the page) for VPD page 0x84:
 * three 6-byte software interface identification entries. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	const int len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return len;
}
1792 
/* Helper for inquiry_vpd_85(): appends one network services descriptor at
 * 'arr': a 4 byte header (association/service type, two reserved/length-MSB
 * bytes, length) followed by the null terminated URL padded with zeros to a
 * multiple of 4 bytes. Returns the number of bytes written.
 */
static int inquiry_vpd_85_descriptor(unsigned char *arr,
				     unsigned char assoc_svc, const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;		/* add the terminating NULL */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4 byte multiple */
	arr[0] = assoc_svc;	/* association and service type */
	arr[1] = 0x0;		/* reserved */
	arr[2] = 0x0;		/* length MSB (URLs here fit in one byte) */
	arr[3] = plen;		/* length, null terminated, padded */
	memcpy(arr + 4, url, olen);
	memset(arr + 4 + olen, 0, plen - olen);
	return 4 + plen;
}

/* Management network addresses VPD page. Returns number of bytes placed
 * in arr. The two descriptor stanzas previously duplicated inline are
 * now built by inquiry_vpd_85_descriptor().
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: lu association, storage configuration service */
	num += inquiry_vpd_85_descriptor(arr + num, 0x1,
					 "https://www.kernel.org/config");
	/* 0x4: lu association, logging service */
	num += inquiry_vpd_85_descriptor(arr + num, 0x4,
					 "http://www.kernel.org/log");

	return num;
}
1827 
/* SCSI ports VPD page. Returns number of bytes placed in arr. Reports two
 * relative target ports (A = target_dev_id + 1 as primary, B = A + 1 as
 * secondary), each with an NAA target port identifier descriptor; see also
 * resp_report_tgtpgs() which presents the same two-port fiction.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1869 
1870 
/* Canned payload (bytes from offset 4 of the page) for VPD page 0x89;
 * looks like a fixed SAT/ATA identification blob — NOTE(review): the
 * individual field meanings are not derivable from this file, treat as
 * opaque data. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	/* whole page payload is static; just copy it out */
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1921 
1922 
/* Initial payload (bytes from offset 4 of the page) for the Block limits
 * VPD page; the live fields are overwritten in inquiry_vpd_b0() below. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1929 
/* Block limits VPD page (SBC-3). Returns number of bytes placed in arr
 * (fixed 0x3c page length). Fields reflect the module's sdebug_* tunables.
 */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	/* start from the canned template, then fill in the live fields */
	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	if (sdebug_atomic_wr) {
		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
	}

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
1982 
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	/* all other fields stay zero */
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1994 
1995 /* Logical block provisioning VPD page (SBC-4) */
1996 static int inquiry_vpd_b2(unsigned char *arr)
1997 {
1998 	memset(arr, 0, 0x4);
1999 	arr[0] = 0;			/* threshold exponent */
2000 	if (sdebug_lbpu)
2001 		arr[1] = 1 << 7;
2002 	if (sdebug_lbpws)
2003 		arr[1] |= 1 << 6;
2004 	if (sdebug_lbpws10)
2005 		arr[1] |= 1 << 5;
2006 	if (sdebug_lbprz && scsi_debug_lbp())
2007 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
2008 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
2009 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
2010 	/* threshold_percentage=0 */
2011 	return 0x4;
2012 }
2013 
/* Zoned block device characteristics VPD page (ZBC mandatory).
 * Returns number of bytes placed in arr (fixed 0x3c page length).
 */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	/* max open limit is only meaningful for host-managed models */
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	if (devip->zcap < devip->zsize) {
		/* zone capacity smaller than zone size: report the offset */
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
2039 
#define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */

enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };

/* Block limits extension VPD page (SBC-4) */
static int inquiry_vpd_b7(unsigned char *arrb4)
{
	/* payload after the 4 byte page header: zero except two fields */
	memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
	arrb4[1] = 1; /* Reduced stream control support (RSCS) */
	/* MAXIMUM NUMBER OF STREAMS: 16-bit big-endian at bytes 2..3 */
	arrb4[2] = (MAXIMUM_NUMBER_OF_STREAMS >> 8) & 0xff;
	arrb4[3] = MAXIMUM_NUMBER_OF_STREAMS & 0xff;
	return SDEBUG_BLE_LEN_AFTER_B4;
}
2052 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/* Respond to INQUIRY: either one of the VPD pages implemented above (when
 * the EVPD bit is set) or the 96 byte standard INQUIRY data. Returns 0,
 * check_condition_result, or DID_REQUEUE << 16 on allocation failure.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/* device->type >= 32 means type not yet established: use module param */
	if (scp->device->type >= 32) {
		is_disk = (sdebug_ptype == TYPE_DISK);
		is_tape = (sdebug_ptype == TYPE_TAPE);
	} else {
		is_disk = (scp->device->type == TYPE_DISK);
		is_tape = (scp->device->type == TYPE_TAPE);
	}
	is_zbc = devip->zoned;
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = ((scp->device->type >= 32 ?
				sdebug_ptype : scp->device->type) & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		arr[1] = cmd[2];
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
				arr[n++] = 0xb7;  /* Block limits extension */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			/*
			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
			 */
			arr[5] = 0x17;
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			/* page 0x89 can exceed 255 bytes: 2 byte length field */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else if (cmd[2] == 0xb7) { /* block limits extension page */
			arr[3] = inquiry_vpd_b7(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp to page length + header, allocation length, array size */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (is_tape) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
2221 
/* See resp_iec_m_pg() for how this data is manipulated */
/* Information exceptions control mode page [0x1c]; resp_requests() below
 * inspects byte 2 (TEST bit, 0x4) and byte 3 (MRIE field, low nibble). */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
2225 
/* Respond to REQUEST SENSE: report "pollable" not-ready data while the
 * tur_ms_to_ready delay runs, a faked THRESHOLD EXCEEDED when the IEC mode
 * page has TEST=1/MRIE=6, otherwise an empty sense response. Descriptor
 * (dsense) format responses are 8 bytes, fixed format 18 bytes.
 */
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	u32 alloc_len = cmd[4];
	u32 len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
			arr[7] = 0xa;			/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;   	/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
2279 
/* Respond to START STOP UNIT. Only POWER CONDITION 0 is accepted.
 * devip->stopped == 2 means the unit is still inside the tur_ms_to_ready
 * window after creation; a START in that window is rejected, a STOP is
 * faked as success. Returns 0, SDEG_RES_IMMED_MASK (respond immediately)
 * or check_condition_result.
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (scp->device->type == TYPE_TAPE && !want_stop) {
		int i;

		/* starting a tape rewinds all partitions to position 0 */
		set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
		for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
			devip->tape_location[i] = 0;
		devip->tape_partition = 0;
	}
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
2330 
2331 static sector_t get_sdebug_capacity(void)
2332 {
2333 	static const unsigned int gibibyte = 1073741824;
2334 
2335 	if (sdebug_virtual_gb > 0)
2336 		return (sector_t)sdebug_virtual_gb *
2337 			(gibibyte / sdebug_sector_size);
2338 	else
2339 		return sdebug_store_sectors;
2340 }
2341 
2342 #define SDEBUG_READCAP_ARR_SZ 8
2343 static int resp_readcap(struct scsi_cmnd *scp,
2344 			struct sdebug_dev_info *devip)
2345 {
2346 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2347 	unsigned int capac;
2348 
2349 	/* following just in case virtual_gb changed */
2350 	sdebug_capacity = get_sdebug_capacity();
2351 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2352 	if (sdebug_capacity < 0xffffffff) {
2353 		capac = (unsigned int)sdebug_capacity - 1;
2354 		put_unaligned_be32(capac, arr + 0);
2355 	} else
2356 		put_unaligned_be32(0xffffffff, arr + 0);
2357 	put_unaligned_be16(sdebug_sector_size, arr + 6);
2358 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2359 }
2360 
2361 #define SDEBUG_READCAP16_ARR_SZ 32
2362 static int resp_readcap16(struct scsi_cmnd *scp,
2363 			  struct sdebug_dev_info *devip)
2364 {
2365 	unsigned char *cmd = scp->cmnd;
2366 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2367 	u32 alloc_len;
2368 
2369 	alloc_len = get_unaligned_be32(cmd + 10);
2370 	/* following just in case virtual_gb changed */
2371 	sdebug_capacity = get_sdebug_capacity();
2372 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2373 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2374 	put_unaligned_be32(sdebug_sector_size, arr + 8);
2375 	arr[13] = sdebug_physblk_exp & 0xf;
2376 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2377 
2378 	if (scsi_debug_lbp()) {
2379 		arr[14] |= 0x80; /* LBPME */
2380 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2381 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2382 		 * in the wider field maps to 0 in this field.
2383 		 */
2384 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
2385 			arr[14] |= 0x40;
2386 	}
2387 
2388 	/*
2389 	 * Since the scsi_debug READ CAPACITY implementation always reports the
2390 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2391 	 */
2392 	if (devip->zoned)
2393 		arr[12] |= 1 << 4;
2394 
2395 	arr[15] = sdebug_lowest_aligned & 0xff;
2396 
2397 	if (have_dif_prot) {
2398 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2399 		arr[12] |= 1; /* PROT_EN */
2400 	}
2401 
2402 	return fill_from_dev_buffer(scp, arr,
2403 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2404 }
2405 
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/* Respond to REPORT TARGET PORT GROUPS: two single-port groups matching
 * the two-port fiction presented by VPD page 0x88 (inquiry_vpd_88()),
 * with the group containing port B reported as unavailable.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	/* RETURN DATA LENGTH field excludes its own 4 byte header */
	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
2484 
/*
 * Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, sa 0xc).
 * Builds either the "all commands" list (reporting_opts 0) or a single
 * command descriptor (reporting_opts 1..3), honouring the RCTD bit and
 * each opcode_info_t entry's device-type selector (devsel).
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	u32 devsel = sdebug_get_devsel(scp->device);

	rctd = !!(cmd[2] & 0x80);	/* RCTD: include timeouts descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	/* over-allocate so a descriptor straddling a_len cannot overrun */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* 8 bytes per command descriptor, 20 when RCTD is set */
		bump = rctd ? 20 : 8;
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			if ((devsel & oip->devsel) != 0) {
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;	/* CTDP */
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;	/* SERVACTV */
				put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa, arr + offset + 8);
				offset += bump;
			}
			/* also emit this opcode's attached service-action variants */
			na = oip->num_attached;
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				if ((devsel & oip->devsel) == 0)
					continue;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
				offset += bump;
			}
			oip = r_oip;	/* resume the outer scan */
		}
		put_unaligned_be32(offset - 4, arr);	/* command data length */
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* command not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
				(devsel & oip->devsel) != 0 &&
				req_opcode == oip->opcode)
				supp = 3;	/* supported as specified */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached entries for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode &&
						(devsel & oip->devsel) != 0)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached entries for the service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa &&
						(devsel & oip->devsel) != 0)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];	/* cdb length */
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp to both the internal buffer and the cdb allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2636 
2637 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2638 			  struct sdebug_dev_info *devip)
2639 {
2640 	bool repd;
2641 	u32 alloc_len, len;
2642 	u8 arr[16];
2643 	u8 *cmd = scp->cmnd;
2644 
2645 	memset(arr, 0, sizeof(arr));
2646 	repd = !!(cmd[2] & 0x80);
2647 	alloc_len = get_unaligned_be32(cmd + 6);
2648 	if (alloc_len < 4) {
2649 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2650 		return check_condition_result;
2651 	}
2652 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2653 	arr[1] = 0x1;		/* ITNRS */
2654 	if (repd) {
2655 		arr[3] = 0xc;
2656 		len = 16;
2657 	} else
2658 		len = 4;
2659 
2660 	len = (len < alloc_len) ? len : alloc_len;
2661 	return fill_from_dev_buffer(scp, arr, len);
2662 }
2663 
2664 /* <<Following mode page info copied from ST318451LW>> */
2665 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char rw_err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
		5, 0, 0xff, 0xff
	};

	memcpy(p, rw_err_recov_pg, sizeof(rw_err_recov_pg));
	if (pcontrol == 1) {
		/* no changeable fields: zero everything past the header */
		memset(p + 2, 0, sizeof(rw_err_recov_pg) - 2);
	}
	return sizeof(rw_err_recov_pg);
}
2678 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconn_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};

	memcpy(p, disconn_pg, sizeof(disconn_pg));
	if (pcontrol == 1) {
		/* no changeable fields */
		memset(p + 2, 0, sizeof(disconn_pg) - 2);
	}
	return sizeof(disconn_pg);
}
2691 
2692 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2693 {       /* Format device page for mode_sense */
2694 	static const unsigned char format_pg[] = {
2695 		0x3, 0x16, 0, 0, 0, 0, 0, 0,
2696 		0, 0, 0, 0, 0, 0, 0, 0,
2697 		0, 0, 0, 0, 0x40, 0, 0, 0
2698 	};
2699 
2700 	memcpy(p, format_pg, sizeof(format_pg));
2701 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2702 	put_unaligned_be16(sdebug_sector_size, p + 12);
2703 	if (sdebug_removable)
2704 		p[20] |= 0x20; /* should agree with INQUIRY */
2705 	if (1 == pcontrol)
2706 		memset(p + 2, 0, sizeof(format_pg) - 2);
2707 	return sizeof(format_pg);
2708 }
2709 
/* Caching mode page (0x8): current values, mutable via MODE SELECT */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2713 
2714 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2715 { 	/* Caching page for mode_sense */
2716 	static const unsigned char ch_caching_pg[] = {
2717 		/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2718 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2719 	};
2720 	static const unsigned char d_caching_pg[] = {
2721 		0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2722 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0
2723 	};
2724 
2725 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2726 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2727 	memcpy(p, caching_pg, sizeof(caching_pg));
2728 	if (1 == pcontrol)
2729 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2730 	else if (2 == pcontrol)
2731 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2732 	return sizeof(caching_pg);
2733 }
2734 
/* Control mode page (0xa): current values; D_SENSE and ATO tracked here */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2737 
2738 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2739 { 	/* Control mode page for mode_sense */
2740 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2741 					0, 0, 0, 0};
2742 	static const unsigned char d_ctrl_m_pg[] = {
2743 		0xa, 10, 2, 0, 0, 0, 0, 0,
2744 		0, 0, 0x2, 0x4b
2745 	};
2746 
2747 	if (sdebug_dsense)
2748 		ctrl_m_pg[2] |= 0x4;
2749 	else
2750 		ctrl_m_pg[2] &= ~0x4;
2751 
2752 	if (sdebug_ato)
2753 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2754 
2755 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2756 	if (1 == pcontrol)
2757 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2758 	else if (2 == pcontrol)
2759 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2760 	return sizeof(ctrl_m_pg);
2761 }
2762 
2763 /* IO Advice Hints Grouping mode page */
static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
{
	/* IO Advice Hints Grouping mode page (page 0xa, subpage 0x05) */
	struct grouping_m_pg {
		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
		u8 subpage_code;
		__be16 page_length;
		u8 reserved[12];
		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
	};
	static const struct grouping_m_pg gr_m_pg = {
		.page_code = 0xa | 0x40,	/* SPF bit set: subpage format */
		.subpage_code = 5,
		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
		.descr = {
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 0 },	/* last stream not enabled */
		}
	};

	/* struct layout must match the on-the-wire page size exactly */
	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
	if (1 == pcontrol) {
		/* There are no changeable values so clear from byte 4 on. */
		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
	}
	return sizeof(gr_m_pg);
}
2797 
2798 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2799 {	/* Informational Exceptions control mode page for mode_sense */
2800 	static const unsigned char ch_iec_m_pg[] = {
2801 		/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2802 		0, 0, 0x0, 0x0
2803 	};
2804 	static const unsigned char d_iec_m_pg[] = {
2805 		0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2806 		0, 0, 0x0, 0x0
2807 	};
2808 
2809 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2810 	if (1 == pcontrol)
2811 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2812 	else if (2 == pcontrol)
2813 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2814 	return sizeof(iec_m_pg);
2815 }
2816 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	static const unsigned char sas_sf_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
	};

	memcpy(p, sas_sf_pg, sizeof(sas_sf_pg));
	if (pcontrol == 1) {
		/* no changeable fields */
		memset(p + 2, 0, sizeof(sas_sf_pg) - 2);
	}
	return sizeof(sas_sf_pg);
}
2828 
2829 
2830 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2831 			      int target_dev_id)
2832 {	/* SAS phy control and discover mode page for mode_sense */
2833 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2834 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2835 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2836 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2837 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2838 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2839 		    0, 0, 0, 0, 0, 0, 0, 0,
2840 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2841 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2842 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2843 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2844 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2845 		    0, 0, 0, 0, 0, 0, 0, 0,
2846 		};
2847 	int port_a, port_b;
2848 
2849 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2850 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2851 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2852 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2853 	port_a = target_dev_id + 1;
2854 	port_b = port_a + 1;
2855 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2856 	put_unaligned_be32(port_a, p + 20);
2857 	put_unaligned_be32(port_b, p + 48 + 20);
2858 	if (1 == pcontrol)
2859 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2860 	return sizeof(sas_pcd_m_pg);
2861 }
2862 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_pg, sizeof(sas_sha_pg));
	if (pcontrol == 1) {
		/* no changeable fields */
		memset(p + 4, 0, sizeof(sas_sha_pg) - 4);
	}
	return sizeof(sas_sha_pg);
}
2875 
/* Medium Partition mode page (0x11): current values (tape) */
static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
	0xff, 0xff, 0x00, 0x00};
2878 
2879 static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
2880 {	/* Partition page for mode_sense (tape) */
2881 	memcpy(p, partition_pg, sizeof(partition_pg));
2882 	if (pcontrol == 1)
2883 		memset(p + 2, 0, sizeof(partition_pg) - 2);
2884 	return sizeof(partition_pg);
2885 }
2886 
2887 static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
2888 				unsigned char *new, int pg_len)
2889 {
2890 	int new_nbr, p0_size, p1_size;
2891 
2892 	if ((new[4] & 0x80) != 0) { /* FDP */
2893 		partition_pg[4] |= 0x80;
2894 		devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
2895 		devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
2896 		devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
2897 	} else {
2898 		new_nbr = new[3] + 1;
2899 		if (new_nbr > TAPE_MAX_PARTITIONS)
2900 			return 3;
2901 		if ((new[4] & 0x40) != 0) { /* SDP */
2902 			p1_size = TAPE_PARTITION_1_UNITS;
2903 			p0_size = TAPE_UNITS - p1_size;
2904 			if (p0_size < 100)
2905 				return 4;
2906 		} else if ((new[4] & 0x20) != 0) {
2907 			if (new_nbr > 1) {
2908 				p0_size = get_unaligned_be16(new + 8);
2909 				p1_size = get_unaligned_be16(new + 10);
2910 				if (p1_size == 0xFFFF)
2911 					p1_size = TAPE_UNITS - p0_size;
2912 				else if (p0_size == 0xFFFF)
2913 					p0_size = TAPE_UNITS - p1_size;
2914 				if (p0_size < 100 || p1_size < 100)
2915 					return 8;
2916 			} else {
2917 				p0_size = TAPE_UNITS;
2918 				p1_size = 0;
2919 			}
2920 		} else
2921 			return 6;
2922 		devip->tape_pending_nbr_partitions = new_nbr;
2923 		devip->tape_pending_part_0_size = p0_size;
2924 		devip->tape_pending_part_1_size = p1_size;
2925 		partition_pg[3] = new_nbr;
2926 		devip->tape_pending_nbr_partitions = new_nbr;
2927 	}
2928 
2929 	return 0;
2930 }
2931 
static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
	unsigned char dce)
{	/* Compression mode page (0xf) for mode_sense (tape) */
	static const unsigned char compr_pg[] = {
		0x0f, 14, 0x40, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0
	};

	memcpy(p, compr_pg, sizeof(compr_pg));
	if (dce) {
		/* reflect the device's current DCE setting */
		p[2] |= 0x80;
	}
	if (pcontrol == 1) {
		/* no changeable fields */
		memset(p + 2, 0, sizeof(compr_pg) - 2);
	}
	return sizeof(compr_pg);
}
2947 
2948 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2949 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2950 
/*
 * MODE SENSE (6 and 10 byte) response.  Builds the mode parameter
 * header, an optional block descriptor and the requested mode page(s)
 * into a kzalloc'd buffer, then copies min(alloc_len, built) to the
 * data-in buffer.  Returns 0, check_condition_result, or -ENOMEM.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;

	/* freed automatically on any return path (cleanup.h __free) */
	unsigned char *arr __free(kfree) = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);

	if (!arr)
		return -ENOMEM;
	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA only in 10-byte cdb */
	is_disk = (scp->device->type == TYPE_DISK);
	is_zbc = devip->zoned;
	is_tape = (scp->device->type == TYPE_TAPE);
	if ((is_disk || is_zbc || is_tape) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short block descriptor: 32-bit block count + block length */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		if (is_tape) {
			/* tape descriptor byte 0 carries the density code */
			ap[0] = devip->tape_density;
			put_unaligned_be16(devip->tape_blksize, ap + 6);
		} else
			put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* long (LLBAA) block descriptor: not defined for tape */
		if (is_tape) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
			return check_condition_result;
		}
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}
	if (cmd[2] == 0)
		goto only_bd; /* Only block descriptor requested */

	/*
	 * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
	 *        len += resp_*_pg(ap + len, pcontrol, target);
	 */
	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else {
			goto bad_pcode;
		}
		break;
	case 0x8:	/* Caching page, direct access */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else {
			goto bad_pcode;
		}
		break;
	case 0xa:	/* Control Mode page, all devices */
		switch (subpcode) {
		case 0:
			len = resp_ctrl_m_pg(ap, pcontrol, target);
			break;
		case 0x05:
			len = resp_grouping_m_pg(ap, pcontrol, target);
			break;
		case 0xff:
			len = resp_ctrl_m_pg(ap, pcontrol, target);
			len += resp_grouping_m_pg(ap + len, pcontrol, target);
			break;
		default:
			goto bad_subpcode;
		}
		offset += len;
		break;
	case 0xf:	/* Compression Mode Page (tape) */
		if (!is_tape)
			goto bad_pcode;
		len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
		offset += len;
		break;
	case 0x11:	/* Partition Mode Page (tape) */
		if (!is_tape)
			goto bad_pcode;
		len = resp_partition_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if (subpcode > 0x2 && subpcode < 0xff)
			goto bad_subpcode;
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if (subpcode > 0x0 && subpcode < 0xff)
			goto bad_subpcode;
		len = resp_err_recov_pg(ap, pcontrol, target);
		len += resp_disconnect_pg(ap + len, pcontrol, target);
		if (is_disk) {
			len += resp_format_pg(ap + len, pcontrol, target);
			len += resp_caching_pg(ap + len, pcontrol, target);
		} else if (is_zbc) {
			len += resp_caching_pg(ap + len, pcontrol, target);
		}
		len += resp_ctrl_m_pg(ap + len, pcontrol, target);
		if (0xff == subpcode)
			len += resp_grouping_m_pg(ap + len, pcontrol, target);
		len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if (0xff == subpcode) {
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		}
		len += resp_iec_m_pg(ap + len, pcontrol, target);
		offset += len;
		break;
	default:
		goto bad_pcode;
	}
only_bd:
	/* mode data length excludes itself (1 or 2 bytes) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));

bad_pcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
	return check_condition_result;

bad_subpcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
	return check_condition_result;
}
3160 
3161 #define SDEBUG_MAX_MSELECT_SZ 512
3162 
/*
 * MODE SELECT (6 and 10 byte).  Fetches the parameter list from the
 * data-out buffer, validates the header and (for tape) the block
 * descriptor, then applies the single mode page it contains to the
 * driver's current-values tables.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "cdb indicated=%d, IO sent=%d bytes\n",
			    param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = (mselect6 ? 4 : 8);	/* start of block descriptor(s) */
	if (scp->device->type == TYPE_TAPE) {
		int blksize;

		if (bd_len != 8) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA,
					mselect6 ? 3 : 6, -1);
			return check_condition_result;
		}
		if (arr[off] == TAPE_BAD_DENSITY) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
			return check_condition_result;
		}
		blksize = get_unaligned_be16(arr + off + 6);
		/* 0 means variable block size; else bounded multiple of 4 */
		if (blksize != 0 &&
			(blksize < TAPE_MIN_BLKSIZE ||
				blksize > TAPE_MAX_BLKSIZE ||
				(blksize % 4) != 0)) {
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
			return check_condition_result;
		}
		devip->tape_density = arr[off];
		devip->tape_blksize = blksize;
	}
	off += bd_len;
	if (off >= res)
		return 0; /* No page written, just descriptors */
	/* mode data length is reserved for MODE SELECT; expect it zero */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit must be zero in data-out */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* subpage format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0xf:       /* Compression mode page */
		if (scp->device->type != TYPE_TAPE)
			goto bad_pcode;
		if ((arr[off + 2] & 0x40) != 0) {	/* DCC: compression capable */
			devip->tape_dce = (arr[off + 2] & 0x80) != 0;
			return 0;
		}
		break;
	case 0x11:	/* Medium Partition Mode Page (tape) */
		if (scp->device->type == TYPE_TAPE) {
			int fld;

			fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
			if (fld == 0)
				return 0;
			mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
			return check_condition_result;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;

bad_pcode:
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
	return check_condition_result;
}
3293 
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page body: two fixed parameter descriptors */
	static const unsigned char temp_params[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,
		0x0, 0x1, 0x3, 0x2, 0x0, 65,
	};

	memcpy(arr, temp_params, sizeof(temp_params));
	return sizeof(temp_params);
}
3304 
3305 static int resp_ie_l_pg(unsigned char *arr)
3306 {
3307 	static const unsigned char ie_l_pg[] = {
3308 		0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
3309 	};
3310 
3311 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
3312 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
3313 		arr[4] = THRESHOLD_EXCEEDED;
3314 		arr[5] = 0xff;
3315 	}
3316 	return sizeof(ie_l_pg);
3317 }
3318 
static int resp_env_rep_l_spg(unsigned char *arr)
{	/* Environmental reporting log subpage body: two fixed descriptors */
	static const unsigned char env_rep[] = {
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,
	};

	memcpy(arr, env_rep, sizeof(env_rep));
	return sizeof(env_rep);
}
3331 
3332 #define SDEBUG_MAX_LSENSE_SZ 512
3333 
/*
 * LOG SENSE.  Supports the "supported pages" page (0x0), temperature
 * (0xd, subpage 0 and 1) and informational exceptions (0x2f), plus
 * their "supported subpages" (subpage 0xff) forms.
 */
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;	/* parameter pointer control: not supported */
	sp = cmd[1] & 0x1;	/* save parameters: not supported */
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;		/* page length */
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;		/* SPF bit: subpage format */
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* all 0xd subpages */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* all 0x2f subpages */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (subpcode > 0) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
		else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	/* respond with min(page length + header, allocation length) */
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
3433 
enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
/* READ BLOCK LIMITS (tape): report the supported block-size range */
static int resp_read_blklimits(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];

	arr[0] = 4;	/* granularity field */
	put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);	/* maximum block length */
	put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);	/* minimum block length */
	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
}
3445 
/*
 * LOCATE (tape): optionally switch partition, then position to the
 * requested block, stopping early (BLANK CHECK) if EOD is reached.
 */
static int resp_locate(struct scsi_cmnd *scp,
		struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int i, pos;
	struct tape_block *blp;
	int partition;

	if ((cmd[1] & 0x02) != 0) {	/* CP bit: change partition to cmd[8] */
		if (cmd[8] >= devip->tape_nbr_partitions) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
			return check_condition_result;
		}
		devip->tape_partition = cmd[8];
	}
	pos = get_unaligned_be32(cmd + 3);
	partition = devip->tape_partition;

	/* walk forward to the target block, but never past EOD or EOP */
	for (i = 0, blp = devip->tape_blocks[partition];
	     i < pos && i < devip->tape_eop[partition]; i++, blp++)
		if (IS_TAPE_BLOCK_EOD(blp->fl_size))
			break;
	if (i < pos) {
		/* stopped short of the target: report blank check */
		devip->tape_location[partition] = i;
		mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;

	return 0;
}
3477 
3478 static int resp_write_filemarks(struct scsi_cmnd *scp,
3479 		struct sdebug_dev_info *devip)
3480 {
3481 	unsigned char *cmd = scp->cmnd;
3482 	unsigned int i, count, pos;
3483 	u32 data;
3484 	int partition = devip->tape_partition;
3485 
3486 	if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
3487 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
3488 		return check_condition_result;
3489 	}
3490 	count = get_unaligned_be24(cmd + 2);
3491 	data = TAPE_BLOCK_FM_FLAG;
3492 	for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
3493 		if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
3494 			devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3495 			mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
3496 					EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
3497 			return check_condition_result;
3498 		}
3499 		(devip->tape_blocks[partition] + pos)->fl_size = data;
3500 	}
3501 	(devip->tape_blocks[partition] + pos)->fl_size =
3502 		TAPE_BLOCK_EOD_FLAG;
3503 	devip->tape_location[partition] = pos;
3504 
3505 	return 0;
3506 }
3507 
3508 static int resp_space(struct scsi_cmnd *scp,
3509 		struct sdebug_dev_info *devip)
3510 {
3511 	unsigned char *cmd = scp->cmnd, code;
3512 	int i = 0, pos, count;
3513 	struct tape_block *blp;
3514 	int partition = devip->tape_partition;
3515 
3516 	count = get_unaligned_be24(cmd + 2);
3517 	if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
3518 		count |= 0xff000000;
3519 	code = cmd[1] & 0x0f;
3520 
3521 	pos = devip->tape_location[partition];
3522 	if (code == 0) { /* blocks */
3523 		if (count < 0) {
3524 			count = (-count);
3525 			pos -= 1;
3526 			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3527 			     i++) {
3528 				if (pos < 0)
3529 					goto is_bop;
3530 				else if (IS_TAPE_BLOCK_FM(blp->fl_size))
3531 					goto is_fm;
3532 				if (i > 0) {
3533 					pos--;
3534 					blp--;
3535 				}
3536 			}
3537 		} else if (count > 0) {
3538 			for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
3539 			     i++, pos++, blp++) {
3540 				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3541 					goto is_eod;
3542 				if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
3543 					pos += 1;
3544 					goto is_fm;
3545 				}
3546 				if (pos >= devip->tape_eop[partition])
3547 					goto is_eop;
3548 			}
3549 		}
3550 	} else if (code == 1) { /* filemarks */
3551 		if (count < 0) {
3552 			count = (-count);
3553 			if (pos == 0)
3554 				goto is_bop;
3555 			else {
3556 				for (i = 0, blp = devip->tape_blocks[partition] + pos;
3557 				     i < count && pos >= 0; i++, pos--, blp--) {
3558 					for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3559 						     pos >= 0; pos--, blp--)
3560 						; /* empty */
3561 					if (pos < 0)
3562 						goto is_bop;
3563 				}
3564 			}
3565 			pos += 1;
3566 		} else if (count > 0) {
3567 			for (i = 0, blp = devip->tape_blocks[partition] + pos;
3568 			     i < count; i++, pos++, blp++) {
3569 				for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
3570 					      !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
3571 					      pos < devip->tape_eop[partition];
3572 				      pos++, blp++)
3573 					; /* empty */
3574 				if (IS_TAPE_BLOCK_EOD(blp->fl_size))
3575 					goto is_eod;
3576 				if (pos >= devip->tape_eop[partition])
3577 					goto is_eop;
3578 			}
3579 		}
3580 	} else if (code == 3) { /* EOD */
3581 		for (blp = devip->tape_blocks[partition] + pos;
3582 		     !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
3583 		     pos++, blp++)
3584 			; /* empty */
3585 		if (pos >= devip->tape_eop[partition])
3586 			goto is_eop;
3587 	} else {
3588 		/* sequential filemarks not supported */
3589 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
3590 		return check_condition_result;
3591 	}
3592 	devip->tape_location[partition] = pos;
3593 	return 0;
3594 
3595 is_fm:
3596 	devip->tape_location[partition] = pos;
3597 	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3598 			FILEMARK_DETECTED_ASCQ, count - i,
3599 			SENSE_FLAG_FILEMARK);
3600 	return check_condition_result;
3601 
3602 is_eod:
3603 	devip->tape_location[partition] = pos;
3604 	mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
3605 			EOD_DETECTED_ASCQ, count - i,
3606 			0);
3607 	return check_condition_result;
3608 
3609 is_bop:
3610 	devip->tape_location[partition] = 0;
3611 	mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
3612 			BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
3613 			SENSE_FLAG_EOM);
3614 	devip->tape_location[partition] = 0;
3615 	return check_condition_result;
3616 
3617 is_eop:
3618 	devip->tape_location[partition] = devip->tape_eop[partition] - 1;
3619 	mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
3620 			EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
3621 			SENSE_FLAG_EOM);
3622 	return check_condition_result;
3623 }
3624 
3625 enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
3626 static int resp_read_position(struct scsi_cmnd *scp,
3627 			struct sdebug_dev_info *devip)
3628 {
3629 	u8 *cmd = scp->cmnd;
3630 	int all_length;
3631 	unsigned char arr[20];
3632 	unsigned int pos;
3633 
3634 	all_length = get_unaligned_be16(cmd + 7);
3635 	if ((cmd[1] & 0xfe) != 0 ||
3636 		all_length != 0) { /* only short form */
3637 		mk_sense_invalid_fld(scp, SDEB_IN_CDB,
3638 				all_length ? 7 : 1, 0);
3639 		return check_condition_result;
3640 	}
3641 	memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
3642 	arr[1] = devip->tape_partition;
3643 	pos = devip->tape_location[devip->tape_partition];
3644 	put_unaligned_be32(pos, arr + 4);
3645 	put_unaligned_be32(pos, arr + 8);
3646 	return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
3647 }
3648 
3649 static int resp_rewind(struct scsi_cmnd *scp,
3650 		struct sdebug_dev_info *devip)
3651 {
3652 	devip->tape_location[devip->tape_partition] = 0;
3653 
3654 	return 0;
3655 }
3656 
3657 static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
3658 			int part_0_size, int part_1_size)
3659 {
3660 	int i;
3661 
3662 	if (part_0_size + part_1_size > TAPE_UNITS)
3663 		return -1;
3664 	devip->tape_eop[0] = part_0_size;
3665 	devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
3666 	devip->tape_eop[1] = part_1_size;
3667 	devip->tape_blocks[1] = devip->tape_blocks[0] +
3668 			devip->tape_eop[0];
3669 	devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
3670 
3671 	for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
3672 		devip->tape_location[i] = 0;
3673 
3674 	devip->tape_nbr_partitions = nbr_partitions;
3675 	devip->tape_partition = 0;
3676 
3677 	partition_pg[3] = nbr_partitions - 1;
3678 	put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
3679 	put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
3680 
3681 	return nbr_partitions;
3682 }
3683 
3684 static int resp_format_medium(struct scsi_cmnd *scp,
3685 			struct sdebug_dev_info *devip)
3686 {
3687 	int res = 0;
3688 	unsigned char *cmd = scp->cmnd;
3689 
3690 	if (cmd[2] > 2) {
3691 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
3692 		return check_condition_result;
3693 	}
3694 	if (cmd[2] != 0) {
3695 		if (devip->tape_pending_nbr_partitions > 0) {
3696 			res = partition_tape(devip,
3697 					devip->tape_pending_nbr_partitions,
3698 					devip->tape_pending_part_0_size,
3699 					devip->tape_pending_part_1_size);
3700 		} else
3701 			res = partition_tape(devip, devip->tape_nbr_partitions,
3702 					devip->tape_eop[0], devip->tape_eop[1]);
3703 	} else
3704 		res = partition_tape(devip, 1, TAPE_UNITS, 0);
3705 	if (res < 0)
3706 		return -EINVAL;
3707 
3708 	devip->tape_pending_nbr_partitions = -1;
3709 
3710 	return 0;
3711 }
3712 
3713 static int resp_erase(struct scsi_cmnd *scp,
3714 		struct sdebug_dev_info *devip)
3715 {
3716 	int partition = devip->tape_partition;
3717 	int pos = devip->tape_location[partition];
3718 	struct tape_block *blp;
3719 
3720 	blp = devip->tape_blocks[partition] + pos;
3721 	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
3722 
3723 	return 0;
3724 }
3725 
3726 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3727 {
3728 	return devip->nr_zones != 0;
3729 }
3730 
/*
 * Map an LBA to its zone descriptor in devip->zstate[]. When zone
 * capacity equals zone size the mapping is a plain shift; otherwise the
 * index arithmetic below accounts for gap zones interleaved after the
 * conventional zones (the 2*zno - nr_conv_zones adjustment implies one
 * extra descriptor per non-conventional zone).
 */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	/* lba may fall in the zone following the computed one */
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
3752 
/* True for conventional zones */
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_CNV;
}

/* True for gap zones */
static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_GAP;
}

/* True for sequential zones: anything neither conventional nor gap */
static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
}
3767 
3768 static void zbc_close_zone(struct sdebug_dev_info *devip,
3769 			   struct sdeb_zone_state *zsp)
3770 {
3771 	enum sdebug_z_cond zc;
3772 
3773 	if (!zbc_zone_is_seq(zsp))
3774 		return;
3775 
3776 	zc = zsp->z_cond;
3777 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3778 		return;
3779 
3780 	if (zc == ZC2_IMPLICIT_OPEN)
3781 		devip->nr_imp_open--;
3782 	else
3783 		devip->nr_exp_open--;
3784 
3785 	if (zsp->z_wp == zsp->z_start) {
3786 		zsp->z_cond = ZC1_EMPTY;
3787 	} else {
3788 		zsp->z_cond = ZC4_CLOSED;
3789 		devip->nr_closed++;
3790 	}
3791 }
3792 
3793 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3794 {
3795 	struct sdeb_zone_state *zsp = &devip->zstate[0];
3796 	unsigned int i;
3797 
3798 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
3799 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3800 			zbc_close_zone(devip, zsp);
3801 			return;
3802 		}
3803 	}
3804 }
3805 
/*
 * Transition a sequential zone to implicit or explicit open, making
 * room first (by closing an implicitly open zone) when the max_open
 * limit would otherwise be exceeded, and keeping the imp/exp/closed
 * counters consistent.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	/* Only sequential zones have open/closed state */
	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	/* Already open in the requested mode: nothing to do */
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	/* Opening a closed zone shrinks the closed-zone count */
	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
3836 
3837 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3838 				     struct sdeb_zone_state *zsp)
3839 {
3840 	switch (zsp->z_cond) {
3841 	case ZC2_IMPLICIT_OPEN:
3842 		devip->nr_imp_open--;
3843 		break;
3844 	case ZC3_EXPLICIT_OPEN:
3845 		devip->nr_exp_open--;
3846 		break;
3847 	default:
3848 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3849 			  zsp->z_start, zsp->z_cond);
3850 		break;
3851 	}
3852 	zsp->z_cond = ZC5_FULL;
3853 }
3854 
/*
 * Advance write pointer(s) after writing @num sectors at @lba. For
 * sequential-write-required (SWR) zones the write was already validated
 * to start at the WP (see check_zbc_access_params()), so a plain
 * increment suffices. For other sequential zones the write may land
 * anywhere, possibly spanning several zones: the WP only ever moves
 * forward, and a write behind the WP marks the zone non-seq-resource.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		/* Not starting at the WP: remember the zone is non-sequential */
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Write reaches the zone end: clamp WP to the end */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* Entirely behind the WP: WP does not move */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		/* Continue with whatever spilled into the next zone */
		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
3896 
/*
 * Validate a read or write of @num sectors at @lba against the ZBC zone
 * rules, and implicitly open the target zone for writes to empty or
 * closed zones. Returns 0 when the access is allowed, otherwise sets
 * sense data and returns check_condition_result. The order of the
 * checks determines which sense data is reported when several rules
 * are violated at once.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
3971 
3972 static inline int check_device_access_params
3973 			(struct scsi_cmnd *scp, unsigned long long lba,
3974 			 unsigned int num, bool write)
3975 {
3976 	struct scsi_device *sdp = scp->device;
3977 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3978 
3979 	if (lba + num > sdebug_capacity) {
3980 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3981 		return check_condition_result;
3982 	}
3983 	/* transfer length excessive (tie in to block limits VPD page) */
3984 	if (num > sdebug_store_sectors) {
3985 		/* needs work to find which cdb byte 'num' comes from */
3986 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3987 		return check_condition_result;
3988 	}
3989 	if (write && unlikely(sdebug_wp)) {
3990 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3991 		return check_condition_result;
3992 	}
3993 	if (sdebug_dev_is_zoned(devip))
3994 		return check_zbc_access_params(scp, lba, num, write);
3995 
3996 	return 0;
3997 }
3998 
3999 /*
4000  * Note: if BUG_ON() fires it usually indicates a problem with the parser
4001  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
4002  * that access any of the "stores" in struct sdeb_store_info should call this
4003  * function with bug_if_fake_rw set to true.
4004  */
4005 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
4006 						bool bug_if_fake_rw)
4007 {
4008 	if (sdebug_fake_rw) {
4009 		BUG_ON(bug_if_fake_rw);	/* See note above */
4010 		return NULL;
4011 	}
4012 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
4013 }
4014 
/*
 * rwlock wrappers honouring the no_rwlock module parameter: when it is
 * set the locks are not taken at all; the __acquire()/__release()
 * annotations keep sparse's lock-context checking balanced.
 */
static inline void
sdeb_read_lock(rwlock_t *lock)
{
	if (sdebug_no_rwlock)
		__acquire(lock);
	else
		read_lock(lock);
}

static inline void
sdeb_read_unlock(rwlock_t *lock)
{
	if (sdebug_no_rwlock)
		__release(lock);
	else
		read_unlock(lock);
}

static inline void
sdeb_write_lock(rwlock_t *lock)
{
	if (sdebug_no_rwlock)
		__acquire(lock);
	else
		write_lock(lock);
}

static inline void
sdeb_write_unlock(rwlock_t *lock)
{
	if (sdebug_no_rwlock)
		__release(lock);
	else
		write_unlock(lock);
}
4050 
/*
 * Wrappers around the per-store data lock (whole-store granularity) and
 * the per-store sector lock. These are only valid with a real backing
 * store, hence the BUG_ON(!sip) checks.
 */
static inline void
sdeb_data_read_lock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_read_lock(&sip->macc_data_lck);
}

static inline void
sdeb_data_read_unlock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_read_unlock(&sip->macc_data_lck);
}

static inline void
sdeb_data_write_lock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_write_lock(&sip->macc_data_lck);
}

static inline void
sdeb_data_write_unlock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_write_unlock(&sip->macc_data_lck);
}

static inline void
sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_read_lock(&sip->macc_sector_lck);
}

static inline void
sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_read_unlock(&sip->macc_sector_lck);
}

static inline void
sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_write_lock(&sip->macc_sector_lck);
}

static inline void
sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
{
	BUG_ON(!sip);

	sdeb_write_unlock(&sip->macc_sector_lck);
}
4114 
4115 /*
4116  * Atomic locking:
4117  * We simplify the atomic model to allow only 1x atomic write and many non-
4118  * atomic reads or writes for all LBAs.
4119 
4120  * A RW lock has a similar bahaviour:
4121  * Only 1x writer and many readers.
4122 
4123  * So use a RW lock for per-device read and write locking:
4124  * An atomic access grabs the lock as a writer and non-atomic grabs the lock
4125  * as a reader.
4126  */
4127 
/* Take the per-device data lock: as writer for atomic, reader otherwise */
static inline void
sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
{
	if (atomic)
		sdeb_data_write_lock(sip);
	else
		sdeb_data_read_lock(sip);
}

/* Release the lock taken by sdeb_data_lock() with the same 'atomic' */
static inline void
sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
{
	if (atomic)
		sdeb_data_write_unlock(sip);
	else
		sdeb_data_read_unlock(sip);
}

/* Allow many reads but only 1x write per sector */
static inline void
sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
{
	if (do_write)
		sdeb_data_sector_write_lock(sip);
	else
		sdeb_data_sector_read_lock(sip);
}

/* Release the lock taken by sdeb_data_sector_lock() with same 'do_write' */
static inline void
sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
{
	if (do_write)
		sdeb_data_sector_write_unlock(sip);
	else
		sdeb_data_sector_read_unlock(sip);
}
4164 
/*
 * Metadata lock wrappers. With fake_rw there is no store (sip == NULL),
 * so fall back to the global sdeb_fake_rw_lck; as above, sparse
 * annotations are used when no_rwlock disables real locking.
 */
static inline void
sdeb_meta_read_lock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__acquire(&sip->macc_meta_lck);
		else
			__acquire(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			read_lock(&sip->macc_meta_lck);
		else
			read_lock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_meta_read_unlock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__release(&sip->macc_meta_lck);
		else
			__release(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			read_unlock(&sip->macc_meta_lck);
		else
			read_unlock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_meta_write_lock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__acquire(&sip->macc_meta_lck);
		else
			__acquire(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			write_lock(&sip->macc_meta_lck);
		else
			write_lock(&sdeb_fake_rw_lck);
	}
}

static inline void
sdeb_meta_write_unlock(struct sdeb_store_info *sip)
{
	if (sdebug_no_rwlock) {
		if (sip)
			__release(&sip->macc_meta_lck);
		else
			__release(&sdeb_fake_rw_lck);
	} else {
		if (sip)
			write_unlock(&sip->macc_meta_lck);
		else
			write_unlock(&sdeb_fake_rw_lck);
	}
}
4228 
/*
 * Copy data for a READ or WRITE between the command's scatter-gather
 * list and the ramdisk store, one sector at a time, wrapping around at
 * the end of the store. Returns number of bytes copied or -1 if error.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
			    bool do_write, bool atomic)
{
	int ret;
	u64 block;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;
	int i, total = 0;

	/*
	 * Even though reads are inherently atomic (in this driver), we expect
	 * the atomic flag only for writes.
	 */
	if (!do_write && atomic)
		return -1;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;

	if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
		atomic_long_inc(&writes_by_group_number[group_number]);

	fsp = sip->storep;

	/* do_div() divides lba in place and yields the remainder */
	block = do_div(lba, sdebug_store_sectors);

	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
	sdeb_data_lock(sip, atomic);
	for (i = 0; i < num; i++) {
		/* We shouldn't need to lock for atomic writes, but do it anyway */
		sdeb_data_sector_lock(sip, do_write);
		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   sdebug_sector_size, sg_skip, do_write);
		sdeb_data_sector_unlock(sip, do_write);
		total += ret;
		/* Short copy: stop (partial total is returned) */
		if (ret != sdebug_sector_size)
			break;
		sg_skip += sdebug_sector_size;
		/* The store wraps around */
		if (++block >= sdebug_store_sectors)
			block = 0;
	}
	sdeb_data_unlock(sip, atomic);

	return total;
}
4287 
4288 /* Returns number of bytes copied or -1 if error. */
4289 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
4290 {
4291 	struct scsi_data_buffer *sdb = &scp->sdb;
4292 
4293 	if (!sdb->length)
4294 		return 0;
4295 	if (scp->sc_data_direction != DMA_TO_DEVICE)
4296 		return -1;
4297 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
4298 			      num * sdebug_sector_size, 0, true);
4299 }
4300 
4301 /* If sip->storep+lba compares equal to arr(num), then copy top half of
4302  * arr into sip->storep+lba and return true. If comparison fails then
4303  * return false. */
4304 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
4305 			      const u8 *arr, bool compare_only)
4306 {
4307 	bool res;
4308 	u64 block, rest = 0;
4309 	u32 store_blks = sdebug_store_sectors;
4310 	u32 lb_size = sdebug_sector_size;
4311 	u8 *fsp = sip->storep;
4312 
4313 	block = do_div(lba, store_blks);
4314 	if (block + num > store_blks)
4315 		rest = block + num - store_blks;
4316 
4317 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4318 	if (!res)
4319 		return res;
4320 	if (rest)
4321 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
4322 			     rest * lb_size);
4323 	if (!res)
4324 		return res;
4325 	if (compare_only)
4326 		return true;
4327 	arr += num * lb_size;
4328 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
4329 	if (rest)
4330 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
4331 	return res;
4332 }
4333 
4334 static __be16 dif_compute_csum(const void *buf, int len)
4335 {
4336 	__be16 csum;
4337 
4338 	if (sdebug_guard)
4339 		csum = (__force __be16)ip_compute_csum(buf, len);
4340 	else
4341 		csum = cpu_to_be16(crc_t10dif(buf, len));
4342 
4343 	return csum;
4344 }
4345 
4346 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
4347 		      sector_t sector, u32 ei_lba)
4348 {
4349 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
4350 
4351 	if (sdt->guard_tag != csum) {
4352 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
4353 			(unsigned long)sector,
4354 			be16_to_cpu(sdt->guard_tag),
4355 			be16_to_cpu(csum));
4356 		return 0x01;
4357 	}
4358 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
4359 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
4360 		pr_err("REF check failed on sector %lu\n",
4361 			(unsigned long)sector);
4362 		return 0x03;
4363 	}
4364 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4365 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
4366 		pr_err("REF check failed on sector %lu\n",
4367 			(unsigned long)sector);
4368 		return 0x03;
4369 	}
4370 	return 0;
4371 }
4372 
/*
 * Copy protection information for @sectors sectors starting at @sector
 * between the command's protection scatter-gather list and dif_storep.
 * @read: true copies store->sgl (READ), false copies sgl->store
 * (WRITE). Handles wrap-around at the end of dif_storep.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* 'rest' bytes wrap to the start of dif_storep */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
4418 
/*
 * Verify stored PI for @sectors sectors starting at @start_sec (tuples
 * with the 0xffff app-tag escape are skipped) and copy the protection
 * data into the command's protection sgl. Returns 0, or a dif_verify()
 * error code (0x01 guard, 0x03 reference) on the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* 0xffff app tag: PI checking disabled for this sector */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	/* Copy the PI out even on error; see comment above */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
4458 
/*
 * READ(6) for the tape emulation. In fixed mode (cmd[1] bit 0) the
 * transfer length counts blocks of tape_blksize bytes; in variable mode
 * it is the byte count of a single block. FIXED and SILI together are
 * invalid. Filemarks, EOD and EOP terminate the read with the matching
 * sense data and a residual count. Only 4 bytes of data are stored per
 * emulated block; the remainder of each block reads back as zeros.
 */
static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u32 i, num, transfer, size;
	u8 *cmd = scp->cmnd;
	struct scsi_data_buffer *sdb = &scp->sdb;
	int partition = devip->tape_partition;
	u32 pos = devip->tape_location[partition];
	struct tape_block *blp;
	bool fixed, sili;

	if (cmd[0] != READ_6) { /* Only Read(6) supported */
		mk_sense_invalid_opcode(scp);
		return illegal_condition_result;
	}
	fixed = (cmd[1] & 0x1) != 0;
	sili = (cmd[1] & 0x2) != 0;
	if (fixed && sili) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		return check_condition_result;
	}

	transfer = get_unaligned_be24(cmd + 2);
	if (fixed) {
		num = transfer;
		size = devip->tape_blksize;
	} else {
		if (transfer < TAPE_MIN_BLKSIZE ||
			transfer > TAPE_MAX_BLKSIZE) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		num = 1;
		size = transfer;
	}

	for (i = 0, blp = devip->tape_blocks[partition] + pos;
	     i < num && pos < devip->tape_eop[partition];
	     i++, pos++, blp++) {
		/* Position is advanced past the block being examined */
		devip->tape_location[partition] = pos + 1;
		if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
			mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
					FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
					SENSE_FLAG_FILEMARK);
			scsi_set_resid(scp, (num - i) * size);
			return check_condition_result;
		}
		/* Assume no REW */
		if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
			mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
					EOD_DETECTED_ASCQ, fixed ? num - i : size,
					0);
			devip->tape_location[partition] = pos;
			scsi_set_resid(scp, (num - i) * size);
			return check_condition_result;
		}
		/* Zero-fill the block, then copy the 4 stored data bytes */
		sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
			size, i * size);
		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			&(blp->data), 4, i * size, false);
		if (fixed) {
			/* Fixed mode: block size mismatch reports ILI */
			if (blp->fl_size != devip->tape_blksize) {
				scsi_set_resid(scp, (num - i) * size);
				mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
						0, num - i,
						SENSE_FLAG_ILI);
				return check_condition_result;
			}
		} else {
			/* Variable mode: SILI suppresses the ILI report */
			if (blp->fl_size != size) {
				if (blp->fl_size < size)
					scsi_set_resid(scp, size - blp->fl_size);
				if (!sili) {
					mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
							0, size - blp->fl_size,
							SENSE_FLAG_ILI);
					return check_condition_result;
				}
			}
		}
	}
	if (pos >= devip->tape_eop[partition]) {
		mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
				EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
				SENSE_FLAG_EOM);
		devip->tape_location[partition] = pos - 1;
		return check_condition_result;
	}
	devip->tape_location[partition] = pos;

	return 0;
}
4550 
/*
 * Respond to READ(6/10/12/16/32) and the read side of XDWRITEREAD(10).
 * Decodes the CDB, honours the configured error-injection options, checks
 * DIF/DIX protection information when enabled, then copies data from the
 * fake store into the command's data-in buffer.
 *
 * Returns 0 on success, check_condition_result/illegal_condition_result
 * with sense data prepared, or DID_ERROR << 16 on internal failure.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* transfer length in logical blocks */
	u32 ei_lba;	/* expected initial LBA (READ(32), type 2 protection) */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;
	bool meta_data_locked = false;

	/* Decode starting LBA and transfer length per opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		/* READ(6): 21-bit LBA spread over bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length 0 means 256 blocks for READ(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 protection rejects non-zero RDPROTECT */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		/* Warn (but proceed) on unprotected read to type 1/3 device */
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Injected short transfer: halve the block count once */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	/*
	 * When checking device access params, for reads we only check data
	 * versus what is set at init time, so no need to lock.
	 */
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Injected medium error when the request overlaps the error range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	/* Zoned devices and DIX verification read shared metadata */
	if (sdebug_dev_is_zoned(devip) ||
	    (sdebug_dix && scsi_prot_sg_count(scp)))  {
		sdeb_meta_read_lock(sip);
		meta_data_locked = true;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_meta_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	/* Copy from the fake store to the data-in buffer */
	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
	if (meta_data_locked)
		sdeb_meta_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Optionally inject one recovered/DIF/DIX error after the read */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
4703 
/*
 * Verify T10 DIF tuples supplied in the protection scatterlist against the
 * data scatterlist for a write spanning @sectors blocks starting at
 * @start_sec. On success, copies the protection information into the
 * driver's PI store (dif_copy_prot) and bumps the dix_writes counter.
 *
 * Returns 0 on success, or the non-zero code from dif_verify() (0x01 is
 * also used for an unexpected iterator exhaustion) on failure.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;	/* current PI tuple */
	void *daddr;			/* matching data sector */
	sector_t sector = start_sec;
	int ppage_offset;		/* offset within protection page */
	int dpage_offset;		/* offset within data page */
	struct sg_mapping_iter diter;	/* walks the data sglist */
	struct sg_mapping_iter piter;	/* walks the protection sglist */

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* WRPROTECT == 3 means skip verification entirely */
			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* record consumption so the next sg_miter_next() resumes right */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	/* both iterators must be stopped before returning (atomic maps) */
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
4775 
4776 static unsigned long lba_to_map_index(sector_t lba)
4777 {
4778 	if (sdebug_unmap_alignment)
4779 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
4780 	sector_div(lba, sdebug_unmap_granularity);
4781 	return lba;
4782 }
4783 
4784 static sector_t map_index_to_lba(unsigned long index)
4785 {
4786 	sector_t lba = index * sdebug_unmap_granularity;
4787 
4788 	if (sdebug_unmap_alignment)
4789 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
4790 	return lba;
4791 }
4792 
4793 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
4794 			      unsigned int *num)
4795 {
4796 	sector_t end;
4797 	unsigned int mapped;
4798 	unsigned long index;
4799 	unsigned long next;
4800 
4801 	index = lba_to_map_index(lba);
4802 	mapped = test_bit(index, sip->map_storep);
4803 
4804 	if (mapped)
4805 		next = find_next_zero_bit(sip->map_storep, map_size, index);
4806 	else
4807 		next = find_next_bit(sip->map_storep, map_size, index);
4808 
4809 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
4810 	*num = end - lba;
4811 	return mapped;
4812 }
4813 
4814 static void map_region(struct sdeb_store_info *sip, sector_t lba,
4815 		       unsigned int len)
4816 {
4817 	sector_t end = lba + len;
4818 
4819 	while (lba < end) {
4820 		unsigned long index = lba_to_map_index(lba);
4821 
4822 		if (index < map_size)
4823 			set_bit(index, sip->map_storep);
4824 
4825 		lba = map_index_to_lba(index + 1);
4826 	}
4827 }
4828 
/*
 * Clear the provisioning-map bits for [lba, lba + len), but only for
 * granules that are fully contained in the range (partial granules are
 * left mapped). Depending on the LBPRZ setting the backing data is
 * scrubbed to zeroes or 0xff, and any PI store is invalidated.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap granules that lie wholly inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			/* invalidate protection info for the granule */
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
4857 
/*
 * Respond to WRITE(6) in tape (sequential-access) mode. Writes @num blocks
 * (fixed mode) or one variable-length block from the data-out buffer into
 * the in-memory tape image, appends an end-of-data marker, and raises
 * early-warning / volume-overflow sense as the end of partition nears.
 */
static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u32 i, num, transfer, size, written = 0;
	u8 *cmd = scp->cmnd;
	struct scsi_data_buffer *sdb = &scp->sdb;
	int partition = devip->tape_partition;
	int pos = devip->tape_location[partition];
	struct tape_block *blp;
	bool fixed, ew;	/* ew: early-warning zone was touched */

	if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
		mk_sense_invalid_opcode(scp);
		return illegal_condition_result;
	}

	/* FIXED bit: transfer counts blocks of tape_blksize, else bytes */
	fixed = (cmd[1] & 1) != 0;
	transfer = get_unaligned_be24(cmd + 2);
	if (fixed) {
		num = transfer;
		size = devip->tape_blksize;
	} else {
		if (transfer < TAPE_MIN_BLKSIZE ||
			transfer > TAPE_MAX_BLKSIZE) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			return check_condition_result;
		}
		num = 1;
		size = transfer;
	}

	scsi_set_resid(scp, num * transfer);
	/* copy blocks until done or the last usable position is reached */
	for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
	     i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
		blp->fl_size = size;
		sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			&(blp->data), 4, i * size, true);
		written += size;
		scsi_set_resid(scp, num * transfer - written);
		/* note entry into the early-warning zone (TAPE_EW from EOP) */
		ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
	}

	devip->tape_location[partition] = pos;
	/* terminate the written data with an end-of-data marker */
	blp->fl_size = TAPE_BLOCK_EOD_FLAG;
	if (pos >= devip->tape_eop[partition] - 1) {
		/* ran into end of partition before writing everything */
		mk_sense_info_tape(scp, VOLUME_OVERFLOW,
				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
				fixed ? num - i : transfer,
				SENSE_FLAG_EOM);
		return check_condition_result;
	}
	if (ew) { /* early warning */
		mk_sense_info_tape(scp, NO_SENSE,
				NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
				fixed ? num - i : transfer,
				SENSE_FLAG_EOM);
		return check_condition_result;
	}

	return 0;
}
4918 
4919 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4920 {
4921 	bool check_prot;
4922 	u32 num;
4923 	u8 group = 0;
4924 	u32 ei_lba;
4925 	int ret;
4926 	u64 lba;
4927 	struct sdeb_store_info *sip = devip2sip(devip, true);
4928 	u8 *cmd = scp->cmnd;
4929 	bool meta_data_locked = false;
4930 
4931 	if (unlikely(sdebug_opts & SDEBUG_OPT_UNALIGNED_WRITE &&
4932 		     atomic_read(&sdeb_inject_pending))) {
4933 		atomic_set(&sdeb_inject_pending, 0);
4934 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
4935 				UNALIGNED_WRITE_ASCQ);
4936 		return check_condition_result;
4937 	}
4938 
4939 	switch (cmd[0]) {
4940 	case WRITE_16:
4941 		ei_lba = 0;
4942 		lba = get_unaligned_be64(cmd + 2);
4943 		num = get_unaligned_be32(cmd + 10);
4944 		group = cmd[14] & 0x3f;
4945 		check_prot = true;
4946 		break;
4947 	case WRITE_10:
4948 		ei_lba = 0;
4949 		lba = get_unaligned_be32(cmd + 2);
4950 		group = cmd[6] & 0x3f;
4951 		num = get_unaligned_be16(cmd + 7);
4952 		check_prot = true;
4953 		break;
4954 	case WRITE_6:
4955 		ei_lba = 0;
4956 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
4957 		      (u32)(cmd[1] & 0x1f) << 16;
4958 		num = (0 == cmd[4]) ? 256 : cmd[4];
4959 		check_prot = true;
4960 		break;
4961 	case WRITE_12:
4962 		ei_lba = 0;
4963 		lba = get_unaligned_be32(cmd + 2);
4964 		num = get_unaligned_be32(cmd + 6);
4965 		group = cmd[6] & 0x3f;
4966 		check_prot = true;
4967 		break;
4968 	case 0x53:	/* XDWRITEREAD(10) */
4969 		ei_lba = 0;
4970 		lba = get_unaligned_be32(cmd + 2);
4971 		group = cmd[6] & 0x1f;
4972 		num = get_unaligned_be16(cmd + 7);
4973 		check_prot = false;
4974 		break;
4975 	default:	/* assume WRITE(32) */
4976 		group = cmd[6] & 0x3f;
4977 		lba = get_unaligned_be64(cmd + 12);
4978 		ei_lba = get_unaligned_be32(cmd + 20);
4979 		num = get_unaligned_be32(cmd + 28);
4980 		check_prot = false;
4981 		break;
4982 	}
4983 	if (unlikely(have_dif_prot && check_prot)) {
4984 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4985 		    (cmd[1] & 0xe0)) {
4986 			mk_sense_invalid_opcode(scp);
4987 			return check_condition_result;
4988 		}
4989 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4990 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4991 		    (cmd[1] & 0xe0) == 0)
4992 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4993 				    "to DIF device\n");
4994 	}
4995 
4996 	if (sdebug_dev_is_zoned(devip) ||
4997 	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
4998 	    scsi_debug_lbp())  {
4999 		sdeb_meta_write_lock(sip);
5000 		meta_data_locked = true;
5001 	}
5002 
5003 	ret = check_device_access_params(scp, lba, num, true);
5004 	if (ret) {
5005 		if (meta_data_locked)
5006 			sdeb_meta_write_unlock(sip);
5007 		return ret;
5008 	}
5009 
5010 	/* DIX + T10 DIF */
5011 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
5012 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
5013 		case 1: /* Guard tag error */
5014 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
5015 				sdeb_meta_write_unlock(sip);
5016 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5017 				return illegal_condition_result;
5018 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5019 				sdeb_meta_write_unlock(sip);
5020 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5021 				return check_condition_result;
5022 			}
5023 			break;
5024 		case 3: /* Reference tag error */
5025 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
5026 				sdeb_meta_write_unlock(sip);
5027 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
5028 				return illegal_condition_result;
5029 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
5030 				sdeb_meta_write_unlock(sip);
5031 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
5032 				return check_condition_result;
5033 			}
5034 			break;
5035 		}
5036 	}
5037 
5038 	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
5039 	if (unlikely(scsi_debug_lbp()))
5040 		map_region(sip, lba, num);
5041 
5042 	/* If ZBC zone then bump its write pointer */
5043 	if (sdebug_dev_is_zoned(devip))
5044 		zbc_inc_wp(devip, lba, num);
5045 	if (meta_data_locked)
5046 		sdeb_meta_write_unlock(sip);
5047 
5048 	if (unlikely(-1 == ret))
5049 		return DID_ERROR << 16;
5050 	else if (unlikely(sdebug_verbose &&
5051 			  (ret < (num * sdebug_sector_size))))
5052 		sdev_printk(KERN_INFO, scp->device,
5053 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
5054 			    my_name, num * sdebug_sector_size, ret);
5055 
5056 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
5057 		     atomic_read(&sdeb_inject_pending))) {
5058 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
5059 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
5060 			atomic_set(&sdeb_inject_pending, 0);
5061 			return check_condition_result;
5062 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
5063 			/* Logical block guard check failed */
5064 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
5065 			atomic_set(&sdeb_inject_pending, 0);
5066 			return illegal_condition_result;
5067 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
5068 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
5069 			atomic_set(&sdeb_inject_pending, 0);
5070 			return illegal_condition_result;
5071 		}
5072 	}
5073 	return 0;
5074 }
5075 
5076 /*
5077  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
5078  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
5079  */
/*
 * Respond to WRITE SCATTERED(16/32): fetch the parameter header plus the
 * LBA range descriptor (LRD) list from the start of the data-out buffer,
 * then write each described range from the remainder of the buffer into
 * the fake store, validating access, protection info and the cumulative
 * block count against the Buffer Transfer Length along the way.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* kernel copy of header + LRD list */
	u8 *up;			/* cursor over the LRDs */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	u8 group;
	int ret, res;
	bool is_16;		/* WRITE SCATTERED(16) vs (32) */
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		group = cmd[6] & 0x3f;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		group = cmd[14] & 0x3f;
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: LB Data Offset field bad\n", my_name);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	/* the LB Data Offset is counted in logical blocks */
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: LBA range descriptors don't fit\n", my_name);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	/* Just keep it simple and always lock for now */
	sdeb_meta_write_lock(sip);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* first LRD follows the lrd_size-byte parameter list header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		/* reject when descriptors claim more blocks than provided */
		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: sum of blocks > data provided\n",
				    my_name);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		/*
		 * Write ranges atomically to keep as close to pre-atomic
		 * writes behaviour as possible.
		 */
		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* Optionally inject one recovered/DIF/DIX error */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		/* advance past this range's data in the dout buffer */
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_meta_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
5247 
/*
 * Common WRITE SAME implementation. Either unmaps the range (when @unmap
 * is set and logical block provisioning is active), or materialises one
 * source block (zeroes when @ndob, else fetched from the data-out buffer)
 * and replicates it across [lba, lba + num).
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;	/* first (source) block in the store */
	u8 *fsp;	/* base of the fake store */
	bool meta_data_locked = false;

	/* zoned devices and provisioning map mutations need the meta lock */
	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
		sdeb_meta_write_lock(sip);
		meta_data_locked = true;
	}

	ret = check_device_access_params(scp, lba, num, true);
	if (ret)
		goto out;

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	/* do_div leaves the in-store block offset in 'block' */
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	sdeb_data_write_lock(sip);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		ret = DID_ERROR << 16;
		goto out;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	sdeb_data_write_unlock(sip);
	ret = 0;
out:
	if (meta_data_locked)
		sdeb_meta_write_unlock(sip);
	return ret;
}
5314 
5315 static int resp_write_same_10(struct scsi_cmnd *scp,
5316 			      struct sdebug_dev_info *devip)
5317 {
5318 	u8 *cmd = scp->cmnd;
5319 	u32 lba;
5320 	u16 num;
5321 	u32 ei_lba = 0;
5322 	bool unmap = false;
5323 
5324 	if (cmd[1] & 0x8) {
5325 		if (sdebug_lbpws10 == 0) {
5326 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5327 			return check_condition_result;
5328 		} else
5329 			unmap = true;
5330 	}
5331 	lba = get_unaligned_be32(cmd + 2);
5332 	num = get_unaligned_be16(cmd + 7);
5333 	if (num > sdebug_write_same_length) {
5334 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
5335 		return check_condition_result;
5336 	}
5337 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
5338 }
5339 
5340 static int resp_write_same_16(struct scsi_cmnd *scp,
5341 			      struct sdebug_dev_info *devip)
5342 {
5343 	u8 *cmd = scp->cmnd;
5344 	u64 lba;
5345 	u32 num;
5346 	u32 ei_lba = 0;
5347 	bool unmap = false;
5348 	bool ndob = false;
5349 
5350 	if (cmd[1] & 0x8) {	/* UNMAP */
5351 		if (sdebug_lbpws == 0) {
5352 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
5353 			return check_condition_result;
5354 		} else
5355 			unmap = true;
5356 	}
5357 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
5358 		ndob = true;
5359 	lba = get_unaligned_be64(cmd + 2);
5360 	num = get_unaligned_be32(cmd + 10);
5361 	if (num > sdebug_write_same_length) {
5362 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
5363 		return check_condition_result;
5364 	}
5365 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
5366 }
5367 
5368 /* Note the mode field is in the same position as the (lower) service action
5369  * field. For the Report supported operation codes command, SPC-4 suggests
5370  * each mode of this command should be reported separately; for future. */
5371 static int resp_write_buffer(struct scsi_cmnd *scp,
5372 			     struct sdebug_dev_info *devip)
5373 {
5374 	u8 *cmd = scp->cmnd;
5375 	struct scsi_device *sdp = scp->device;
5376 	struct sdebug_dev_info *dp;
5377 	u8 mode;
5378 
5379 	mode = cmd[1] & 0x1f;
5380 	switch (mode) {
5381 	case 0x4:	/* download microcode (MC) and activate (ACT) */
5382 		/* set UAs on this device only */
5383 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5384 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
5385 		break;
5386 	case 0x5:	/* download MC, save and ACT */
5387 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
5388 		break;
5389 	case 0x6:	/* download MC with offsets and ACT */
5390 		/* set UAs on most devices (LUs) in this target */
5391 		list_for_each_entry(dp,
5392 				    &devip->sdbg_host->dev_info_list,
5393 				    dev_list)
5394 			if (dp->target == sdp->id) {
5395 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
5396 				if (devip != dp)
5397 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
5398 						dp->uas_bm);
5399 			}
5400 		break;
5401 	case 0x7:	/* download MC with offsets, save, and ACT */
5402 		/* set UA on all devices (LUs) in this target */
5403 		list_for_each_entry(dp,
5404 				    &devip->sdbg_host->dev_info_list,
5405 				    dev_list)
5406 			if (dp->target == sdp->id)
5407 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
5408 					dp->uas_bm);
5409 		break;
5410 	default:
5411 		/* do nothing for this command for other mode values */
5412 		break;
5413 	}
5414 	return 0;
5415 }
5416 
/*
 * Respond to COMPARE AND WRITE(16): fetch 2*num blocks from the data-out
 * buffer (verify data followed by write data), compare the first half
 * against the store, and if it matches, write the second half. A mismatch
 * yields MISCOMPARE sense.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;	/* holds verify data then write data */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;	/* total blocks in dout: 2 * num */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* Type 2 protection rejects non-zero WRPROTECT */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* pull verify + write data out of the dout buffer */
	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup_free;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);

	/* compare-and-write must be atomic: take both locks */
	sdeb_data_write_lock(sip);
	sdeb_meta_write_lock(sip);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup_unlock;
	}

	/* Cover sip->map_storep (which map_region()) sets with data lock */
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup_unlock:
	sdeb_meta_write_unlock(sip);
	sdeb_data_write_unlock(sip);
cleanup_free:
	kfree(arr);
	return retval;
}
5482 
/* On-the-wire layout of one UNMAP block descriptor (big-endian fields) */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to unmap */
	__be32	blocks;		/* number of blocks to unmap */
	__be32	__reserved;
};
5488 
/*
 * Respond to UNMAP: parse the parameter list of block descriptors from the
 * data-out buffer and unmap (de-provision) each described range. A no-op
 * success when logical block provisioning is not enabled.
 */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	/* 8-byte parameter list header, 16 bytes per descriptor */
	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* sanity-check the parameter list header lengths */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	sdeb_meta_write_lock(sip);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	sdeb_meta_write_unlock(sip);
	kfree(buf);

	return ret;
}
5543 
5544 #define SDEBUG_GET_LBA_STATUS_LEN 32
5545 
5546 static int resp_get_lba_status(struct scsi_cmnd *scp,
5547 			       struct sdebug_dev_info *devip)
5548 {
5549 	u8 *cmd = scp->cmnd;
5550 	u64 lba;
5551 	u32 alloc_len, mapped, num;
5552 	int ret;
5553 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
5554 
5555 	lba = get_unaligned_be64(cmd + 2);
5556 	alloc_len = get_unaligned_be32(cmd + 10);
5557 
5558 	if (alloc_len < 24)
5559 		return 0;
5560 
5561 	ret = check_device_access_params(scp, lba, 1, false);
5562 	if (ret)
5563 		return ret;
5564 
5565 	if (scsi_debug_lbp()) {
5566 		struct sdeb_store_info *sip = devip2sip(devip, true);
5567 
5568 		mapped = map_state(sip, lba, &num);
5569 	} else {
5570 		mapped = 1;
5571 		/* following just in case virtual_gb changed */
5572 		sdebug_capacity = get_sdebug_capacity();
5573 		if (sdebug_capacity - lba <= 0xffffffff)
5574 			num = sdebug_capacity - lba;
5575 		else
5576 			num = 0xffffffff;
5577 	}
5578 
5579 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
5580 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
5581 	put_unaligned_be64(lba, arr + 8);	/* LBA */
5582 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
5583 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
5584 
5585 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
5586 }
5587 
/*
 * Respond to the GET STREAM STATUS (SBC) command.  Every stream below
 * MAXIMUM_NUMBER_OF_STREAMS is treated as open; the first
 * PERMANENT_STREAM_COUNT of them are reported as permanent.
 */
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip)
{
	u16 starting_stream_id, stream_id;
	const u8 *cmd = scp->cmnd;
	u32 alloc_len, offset;
	u8 arr[256] = {};
	struct scsi_stream_status_header *h = (void *)arr;

	starting_stream_id = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 10);

	/* Need at least room for the 8-byte parameter data header */
	if (alloc_len < 8) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}

	if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
		return check_condition_result;
	}

	/*
	 * The GET STREAM STATUS command only reports status information
	 * about open streams. Treat the non-permanent stream as open.
	 */
	put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
			   &h->number_of_open_streams);

	/* One 8-byte stream status descriptor per stream, after the header */
	for (offset = 8, stream_id = starting_stream_id;
	     offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
		     stream_id < MAXIMUM_NUMBER_OF_STREAMS;
	     offset += 8, stream_id++) {
		struct scsi_stream_status *stream_status = (void *)arr + offset;

		stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
		put_unaligned_be16(stream_id,
				   &stream_status->stream_identifier);
		stream_status->rel_lifetime = stream_id + 1;
	}
	put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */

	return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
}
5632 
5633 static int resp_sync_cache(struct scsi_cmnd *scp,
5634 			   struct sdebug_dev_info *devip)
5635 {
5636 	int res = 0;
5637 	u64 lba;
5638 	u32 num_blocks;
5639 	u8 *cmd = scp->cmnd;
5640 
5641 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
5642 		lba = get_unaligned_be32(cmd + 2);
5643 		num_blocks = get_unaligned_be16(cmd + 7);
5644 	} else {				/* SYNCHRONIZE_CACHE(16) */
5645 		lba = get_unaligned_be64(cmd + 2);
5646 		num_blocks = get_unaligned_be32(cmd + 10);
5647 	}
5648 	if (lba + num_blocks > sdebug_capacity) {
5649 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5650 		return check_condition_result;
5651 	}
5652 	if (!write_since_sync || (cmd[1] & 0x2))
5653 		res = SDEG_RES_IMMED_MASK;
5654 	else		/* delay if write_since_sync and IMMED clear */
5655 		write_since_sync = false;
5656 	return res;
5657 }
5658 
5659 /*
5660  * Assuming the LBA+num_blocks is not out-of-range, this function will return
5661  * CONDITION MET if the specified blocks will/have fitted in the cache, and
5662  * a GOOD status otherwise. Model a disk with a big cache and yield
5663  * CONDITION MET. Actually tries to bring range in main memory into the
5664  * cache associated with the CPU(s).
5665  *
5666  * The pcode 0x34 is also used for READ POSITION by tape devices.
5667  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;	/* rest: blocks that wrap past store end */
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* No backing store (fake_rw), nothing to prefetch */
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() reduces lba modulo the store size; remainder -> block */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_data_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	/* wrapped portion starts again at the beginning of the store */
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);

	sdeb_data_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
5710 
5711 #define RL_BUCKET_ELEMS 8
5712 
5713 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
5714  * (W-LUN), the normal Linux scanning logic does not associate it with a
5715  * device (e.g. /dev/sg7). The following magic will make that association:
5716  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
5717  * where <n> is a host number. If there are multiple targets in a host then
5718  * the above will associate a W-LUN to each target. To only get a W-LUN
5719  * for target 2, then use "echo '- 2 49409' > scan" .
5720  */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	/* response built and sent in buckets of RL_BUCKET_ELEMS LUN entries */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* SELECT REPORT field decides which LUNs are listed */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	/* outer loop: one bucket per iteration; k is the bucket index */
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket also carries the 8-byte header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		/* fill the remaining slots of this bucket with LUN entries */
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;		/* partial bucket: flush after loop */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN (if requested) to the final partial bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
5813 
/*
 * Respond to VERIFY(10)/VERIFY(16).  For BYTCHK=1 the data-out buffer is
 * compared against the store; for BYTCHK=3 a single block is sent and
 * compared repeatedly against every block in the range.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	/* number of blocks actually transferred from the initiator */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* buffer sized for the full range; bytchk3 replicates into it */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_data_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, a_num * lb_size, ret);
	}
	/* replicate the single transferred block across the whole range */
	if (is_bytchk3) {
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_data_read_unlock(sip);
	kfree(arr);
	return ret;
}
5889 
5890 #define RZONES_DESC_HD 64
5891 
5892 /* Report zones depending on start LBA and reporting options */
5893 static int resp_report_zones(struct scsi_cmnd *scp,
5894 			     struct sdebug_dev_info *devip)
5895 {
5896 	unsigned int rep_max_zones, nrz = 0;
5897 	int ret = 0;
5898 	u32 alloc_len, rep_opts, rep_len;
5899 	bool partial;
5900 	u64 lba, zs_lba;
5901 	u8 *arr = NULL, *desc;
5902 	u8 *cmd = scp->cmnd;
5903 	struct sdeb_zone_state *zsp = NULL;
5904 	struct sdeb_store_info *sip = devip2sip(devip, false);
5905 
5906 	if (!sdebug_dev_is_zoned(devip)) {
5907 		mk_sense_invalid_opcode(scp);
5908 		return check_condition_result;
5909 	}
5910 	zs_lba = get_unaligned_be64(cmd + 2);
5911 	alloc_len = get_unaligned_be32(cmd + 10);
5912 	if (alloc_len == 0)
5913 		return 0;	/* not an error */
5914 	rep_opts = cmd[14] & 0x3f;
5915 	partial = cmd[14] & 0x80;
5916 
5917 	if (zs_lba >= sdebug_capacity) {
5918 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
5919 		return check_condition_result;
5920 	}
5921 
5922 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
5923 
5924 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
5925 	if (!arr) {
5926 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
5927 				INSUFF_RES_ASCQ);
5928 		return check_condition_result;
5929 	}
5930 
5931 	sdeb_meta_read_lock(sip);
5932 
5933 	desc = arr + 64;
5934 	for (lba = zs_lba; lba < sdebug_capacity;
5935 	     lba = zsp->z_start + zsp->z_size) {
5936 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
5937 			break;
5938 		zsp = zbc_zone(devip, lba);
5939 		switch (rep_opts) {
5940 		case 0x00:
5941 			/* All zones */
5942 			break;
5943 		case 0x01:
5944 			/* Empty zones */
5945 			if (zsp->z_cond != ZC1_EMPTY)
5946 				continue;
5947 			break;
5948 		case 0x02:
5949 			/* Implicit open zones */
5950 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
5951 				continue;
5952 			break;
5953 		case 0x03:
5954 			/* Explicit open zones */
5955 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
5956 				continue;
5957 			break;
5958 		case 0x04:
5959 			/* Closed zones */
5960 			if (zsp->z_cond != ZC4_CLOSED)
5961 				continue;
5962 			break;
5963 		case 0x05:
5964 			/* Full zones */
5965 			if (zsp->z_cond != ZC5_FULL)
5966 				continue;
5967 			break;
5968 		case 0x06:
5969 		case 0x07:
5970 		case 0x10:
5971 			/*
5972 			 * Read-only, offline, reset WP recommended are
5973 			 * not emulated: no zones to report;
5974 			 */
5975 			continue;
5976 		case 0x11:
5977 			/* non-seq-resource set */
5978 			if (!zsp->z_non_seq_resource)
5979 				continue;
5980 			break;
5981 		case 0x3e:
5982 			/* All zones except gap zones. */
5983 			if (zbc_zone_is_gap(zsp))
5984 				continue;
5985 			break;
5986 		case 0x3f:
5987 			/* Not write pointer (conventional) zones */
5988 			if (zbc_zone_is_seq(zsp))
5989 				continue;
5990 			break;
5991 		default:
5992 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
5993 					INVALID_FIELD_IN_CDB, 0);
5994 			ret = check_condition_result;
5995 			goto fini;
5996 		}
5997 
5998 		if (nrz < rep_max_zones) {
5999 			/* Fill zone descriptor */
6000 			desc[0] = zsp->z_type;
6001 			desc[1] = zsp->z_cond << 4;
6002 			if (zsp->z_non_seq_resource)
6003 				desc[1] |= 1 << 1;
6004 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
6005 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
6006 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
6007 			desc += 64;
6008 		}
6009 
6010 		if (partial && nrz >= rep_max_zones)
6011 			break;
6012 
6013 		nrz++;
6014 	}
6015 
6016 	/* Report header */
6017 	/* Zone list length. */
6018 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
6019 	/* Maximum LBA */
6020 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
6021 	/* Zone starting LBA granularity. */
6022 	if (devip->zcap < devip->zsize)
6023 		put_unaligned_be64(devip->zsize, arr + 16);
6024 
6025 	rep_len = (unsigned long)desc - (unsigned long)arr;
6026 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
6027 
6028 fini:
6029 	sdeb_meta_read_unlock(sip);
6030 	kfree(arr);
6031 	return ret;
6032 }
6033 
/*
 * Respond to WRITE ATOMIC(16): validate alignment, granularity and
 * boundary constraints against the configured atomic write limits, then
 * perform the write as a single atomic device access.
 */
static int resp_atomic_write(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	struct sdeb_store_info *sip;
	u8 *cmd = scp->cmnd;
	u16 boundary, len;
	u64 lba, lba_tmp;
	int ret;

	if (!scsi_debug_atomic_write()) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sip = devip2sip(devip, true);

	lba = get_unaligned_be64(cmd + 2);
	boundary = get_unaligned_be16(cmd + 10);
	len = get_unaligned_be16(cmd + 12);

	/* do_div() modifies its first argument, so work on a copy */
	lba_tmp = lba;
	if (sdebug_atomic_wr_align &&
	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
		/* Does not meet alignment requirement */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
		/* Does not meet alignment requirement */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* different length limits apply with and without a boundary */
	if (boundary > 0) {
		if (boundary > sdebug_atomic_wr_max_bndry) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
			return check_condition_result;
		}

		if (len > sdebug_atomic_wr_max_length_bndry) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
			return check_condition_result;
		}
	} else {
		if (len > sdebug_atomic_wr_max_length) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
			return check_condition_result;
		}
	}

	/* last argument requests an atomic access */
	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;
	if (unlikely(ret != len * sdebug_sector_size))
		return DID_ERROR << 16;
	return 0;
}
6092 
6093 /* Logic transplanted from tcmu-runner, file_zbc.c */
6094 static void zbc_open_all(struct sdebug_dev_info *devip)
6095 {
6096 	struct sdeb_zone_state *zsp = &devip->zstate[0];
6097 	unsigned int i;
6098 
6099 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
6100 		if (zsp->z_cond == ZC4_CLOSED)
6101 			zbc_open_zone(devip, &devip->zstate[i], true);
6102 	}
6103 }
6104 
/*
 * Respond to OPEN ZONE (ZBC): explicitly open the zone at the given LBA,
 * or every closed zone when the ALL bit is set.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	sdeb_meta_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the ZONE ID must be the first LBA of a zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* already explicitly open or full: nothing to do */
	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6171 
6172 static void zbc_close_all(struct sdebug_dev_info *devip)
6173 {
6174 	unsigned int i;
6175 
6176 	for (i = 0; i < devip->nr_zones; i++)
6177 		zbc_close_zone(devip, &devip->zstate[i]);
6178 }
6179 
/*
 * Respond to CLOSE ZONE (ZBC): close the zone at the given LBA, or every
 * zone when the ALL bit is set.
 */
static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_meta_write_lock(sip);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the ZONE ID must be the first LBA of a zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6227 
/*
 * Transition a zone to ZC5_FULL, moving its write pointer to the end of
 * the zone.  Open zones are closed first so the open/closed accounting
 * stays correct.  With @empty set, an empty zone is finished as well
 * (used when finishing a single, explicitly named zone).
 */
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		/* re-read z_cond: zbc_close_zone() may have changed it */
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
6243 
6244 static void zbc_finish_all(struct sdebug_dev_info *devip)
6245 {
6246 	unsigned int i;
6247 
6248 	for (i = 0; i < devip->nr_zones; i++)
6249 		zbc_finish_zone(devip, &devip->zstate[i], false);
6250 }
6251 
/*
 * Respond to FINISH ZONE (ZBC): transition the zone at the given LBA to
 * full, or every open/closed zone when the ALL bit is set.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_meta_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the ZONE ID must be the first LBA of a zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* true: a named empty zone is finished too */
	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6299 
/*
 * Reset a zone's write pointer: close it if open, zero the written part
 * of the backing store, and return the zone to ZC1_EMPTY.  Conventional
 * and gap zones (no write pointer) are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/* zero the data written so far so reads see an empty zone */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
6324 
6325 static void zbc_rwp_all(struct sdebug_dev_info *devip)
6326 {
6327 	unsigned int i;
6328 
6329 	for (i = 0; i < devip->nr_zones; i++)
6330 		zbc_rwp_zone(devip, &devip->zstate[i]);
6331 }
6332 
/*
 * Respond to RESET WRITE POINTER (ZBC): reset the zone at the given LBA,
 * or every sequential zone when the ALL bit is set.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_meta_write_lock(sip);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* the ZONE ID must be the first LBA of a zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	sdeb_meta_write_unlock(sip);
	return res;
}
6378 
/* Return the block layer's unique tag for this command's request. */
static u32 get_tag(struct scsi_cmnd *cmnd)
{
	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
}
6383 
6384 /* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	/* sd_dp is embedded in sdebug_scsi_cmd; recover the container */
	struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
					typeof(*sdsc), sd_dp);
	/*
	 * sdebug_scsi_cmd lives in scsi_cmnd's per-driver area, which
	 * immediately follows the scsi_cmnd itself, so stepping back one
	 * scsi_cmnd-sized slot yields the owning command.
	 */
	struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
	unsigned long flags;
	bool aborted;

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions that migrated off the issuing CPU */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	/* read-and-clear the aborted flag under the command's lock */
	spin_lock_irqsave(&sdsc->lock, flags);
	aborted = sd_dp->aborted;
	if (unlikely(aborted))
		sd_dp->aborted = false;

	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (aborted) {
		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
		blk_abort_request(scsi_cmd_to_rq(scp));
		return;
	}

	scsi_done(scp); /* callback to mid level */
}
6414 
6415 /* When high resolution timer goes off this function is called. */
/* hrtimer callback: complete the deferred command; timer is one-shot. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;
}
6423 
6424 /* When work queue schedules work, it calls this function. */
/* workqueue callback: complete the deferred command from process context. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}
6431 
6432 static bool got_shared_uuid;
6433 static uuid_t shared_uuid;
6434 
/*
 * Set up the emulated zone layout (conventional, sequential and gap
 * zones) for a ZBC device.  Returns 0 on success or a negative errno on
 * invalid geometry parameters or allocation failure.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t conv_capacity;
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	/* zone capacity (usable blocks) may be smaller than the zone size */
	if (sdeb_zbc_zone_cap_mb == 0) {
		devip->zcap = devip->zsize;
	} else {
		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
			      ilog2(sdebug_sector_size);
		if (devip->zcap > devip->zsize) {
			pr_err("Zone capacity too large\n");
			return -EINVAL;
		}
	}

	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
	if (conv_capacity >= capacity) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;
	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
			      devip->zsize_shift;
	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;

	/* Add gap zones if zone capacity is smaller than the zone size */
	if (devip->zcap < devip->zsize)
		devip->nr_zones += devip->nr_seq_zones;

	if (devip->zoned) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kzalloc_objs(struct sdeb_zone_state, devip->nr_zones);
	if (!devip->zstate)
		return -ENOMEM;

	/* initialize per-zone state; zstart walks the LBA space */
	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			/* conventional zone: no write pointer */
			zsp->z_type = ZBC_ZTYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size =
				min_t(u64, devip->zsize, capacity - zstart);
		} else if ((zstart & (devip->zsize - 1)) == 0) {
			/* zone-size aligned: a sequential write zone */
			if (devip->zoned)
				zsp->z_type = ZBC_ZTYPE_SWR;
			else
				zsp->z_type = ZBC_ZTYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
			zsp->z_size =
				min_t(u64, devip->zcap, capacity - zstart);
		} else {
			/* unaligned: the gap between zcap and zsize */
			zsp->z_type = ZBC_ZTYPE_GAP;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
					    capacity - zstart);
		}

		WARN_ON_ONCE((int)zsp->z_size <= 0);
		zstart += zsp->z_size;
	}

	return 0;
}
6545 
6546 static struct sdebug_dev_info *sdebug_device_create(
6547 			struct sdebug_host_info *sdbg_host, gfp_t flags)
6548 {
6549 	struct sdebug_dev_info *devip;
6550 
6551 	devip = kzalloc_obj(*devip, flags);
6552 	if (devip) {
6553 		if (sdebug_uuid_ctl == 1)
6554 			uuid_gen(&devip->lu_name);
6555 		else if (sdebug_uuid_ctl == 2) {
6556 			if (got_shared_uuid)
6557 				devip->lu_name = shared_uuid;
6558 			else {
6559 				uuid_gen(&shared_uuid);
6560 				got_shared_uuid = true;
6561 				devip->lu_name = shared_uuid;
6562 			}
6563 		}
6564 		devip->sdbg_host = sdbg_host;
6565 		if (sdeb_zbc_in_use) {
6566 			devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
6567 			if (sdebug_device_create_zones(devip)) {
6568 				kfree(devip);
6569 				return NULL;
6570 			}
6571 		} else {
6572 			devip->zoned = false;
6573 		}
6574 		if (sdebug_ptype == TYPE_TAPE) {
6575 			devip->tape_density = TAPE_DEF_DENSITY;
6576 			devip->tape_blksize = TAPE_DEF_BLKSIZE;
6577 		}
6578 		devip->create_ts = ktime_get_boottime();
6579 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
6580 		spin_lock_init(&devip->list_lock);
6581 		INIT_LIST_HEAD(&devip->inject_err_list);
6582 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
6583 	}
6584 	return devip;
6585 }
6586 
6587 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
6588 {
6589 	struct sdebug_host_info *sdbg_host;
6590 	struct sdebug_dev_info *open_devip = NULL;
6591 	struct sdebug_dev_info *devip;
6592 
6593 	sdbg_host = shost_to_sdebug_host(sdev->host);
6594 
6595 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
6596 		if ((devip->used) && (devip->channel == sdev->channel) &&
6597 		    (devip->target == sdev->id) &&
6598 		    (devip->lun == sdev->lun))
6599 			return devip;
6600 		else {
6601 			if ((!devip->used) && (!open_devip))
6602 				open_devip = devip;
6603 		}
6604 	}
6605 	if (!open_devip) { /* try and make a new one */
6606 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
6607 		if (!open_devip) {
6608 			pr_err("out of memory at line %d\n", __LINE__);
6609 			return NULL;
6610 		}
6611 	}
6612 
6613 	open_devip->channel = sdev->channel;
6614 	open_devip->target = sdev->id;
6615 	open_devip->lun = sdev->lun;
6616 	open_devip->sdbg_host = sdbg_host;
6617 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
6618 	open_devip->used = true;
6619 	return open_devip;
6620 }
6621 
6622 static int scsi_debug_sdev_init(struct scsi_device *sdp)
6623 {
6624 	if (sdebug_verbose)
6625 		pr_info("sdev_init <%u %u %u %llu>\n",
6626 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
6627 
6628 	return 0;
6629 }
6630 
/*
 * Per-device configure callback. Binds the scsi_device to its
 * sdebug_dev_info (creating one if needed), allocates tape emulation
 * storage when in tape mode, and creates per-device debugfs entries.
 * Returns 0 on success, 1 on resource failure (device marked offline).
 */
static int scsi_debug_sdev_configure(struct scsi_device *sdp,
				     struct queue_limits *lim)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;
	struct dentry *dentry;

	if (sdebug_verbose)
		pr_info("sdev_configure <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	if (sdebug_ptype == TYPE_TAPE) {
		/* lazily allocate backing store for tape partition 0 */
		if (!devip->tape_blocks[0]) {
			devip->tape_blocks[0] =
				kzalloc_objs(struct tape_block, TAPE_UNITS);
			if (!devip->tape_blocks[0])
				return 1;
		}
		devip->tape_pending_nbr_partitions = -1;
		/* default layout: a single partition spanning all units */
		if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
			kfree(devip->tape_blocks[0]);
			devip->tape_blocks[0] = NULL;
			return 1;
		}
	}
	sdp->hostdata = devip;
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);

	if (sdebug_allow_restart)
		sdp->allow_restart = 1;

	/* debugfs entries are diagnostic only; failures are not fatal */
	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
				sdebug_debugfs_root);
	if (IS_ERR_OR_NULL(devip->debugfs_entry))
		pr_info("failed to create debugfs directory for device %s\n",
			dev_name(&sdp->sdev_gendev));

	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
				&sdebug_error_fops);
	if (IS_ERR_OR_NULL(dentry))
		pr_info("failed to create error file for device %s\n",
			dev_name(&sdp->sdev_gendev));

	return 0;
}
6684 
/*
 * Per-device teardown callback. Flushes the injected-error list (entries
 * are freed after an RCU grace period), removes the device's debugfs
 * directory, releases tape storage and marks the slot reusable.
 */
static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;

	if (sdebug_verbose)
		pr_info("sdev_destroy <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

	if (!devip)
		return;

	/* Unlink under the list lock; RCU readers may still hold
	 * references, so defer the actual free via call_rcu(). */
	spin_lock(&devip->list_lock);
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		list_del_rcu(&err->list);
		call_rcu(&err->rcu, sdebug_err_free);
	}
	spin_unlock(&devip->list_lock);

	debugfs_remove(devip->debugfs_entry);

	if (sdp->type == TYPE_TAPE) {
		kfree(devip->tape_blocks[0]);
		devip->tape_blocks[0] = NULL;
	}

	/* make this slot available for re-use */
	devip->used = false;
	sdp->hostdata = NULL;
}
6716 
6717 /* Returns true if cancelled or not running callback. */
6718 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
6719 {
6720 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
6721 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
6722 	enum sdeb_defer_type defer_t = sd_dp->defer_t;
6723 
6724 	lockdep_assert_held(&sdsc->lock);
6725 
6726 	if (defer_t == SDEB_DEFER_HRT) {
6727 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
6728 
6729 		switch (res) {
6730 		case -1: /* -1 It's executing the CB */
6731 			return false;
6732 		case 0: /* Not active, it must have already run */
6733 		case 1: /* Was active, we've now cancelled */
6734 		default:
6735 			return true;
6736 		}
6737 	} else if (defer_t == SDEB_DEFER_WQ) {
6738 		/* Cancel if pending */
6739 		if (cancel_work(&sd_dp->ew.work))
6740 			return true;
6741 		/* callback may be running, so return false */
6742 		return false;
6743 	} else if (defer_t == SDEB_DEFER_POLL) {
6744 		return true;
6745 	}
6746 
6747 	return false;
6748 }
6749 
/* Payload of an internally generated abort request */
struct sdebug_abort_cmd {
	u32 unique_tag;	/* blk_mq_unique_tag() of the command to abort */
};

/* Discriminator for the union inside struct sdebug_internal_cmd */
enum sdebug_internal_cmd_type {
	SCSI_DEBUG_ABORT_CMD,
};

/* Private data of an internally generated (reserved) command */
struct sdebug_internal_cmd {
	enum sdebug_internal_cmd_type type;

	union {
		struct sdebug_abort_cmd abort_cmd;
	};
};

/* Per-command private data: either a regular or an internal command */
union sdebug_priv {
	struct sdebug_scsi_cmd cmd;
	struct sdebug_internal_cmd internal_cmd;
};
6770 
6771 /*
6772  * Abort SCSI command @cmnd. Only called from scsi_debug_abort(). Although
6773  * it would be possible to call scsi_debug_stop_cmnd() directly, an internal
6774  * command is allocated and submitted to trigger the reserved command
6775  * infrastructure.
6776  */
6777 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
6778 {
6779 	struct Scsi_Host *shost = cmnd->device->host;
6780 	struct request *rq = scsi_cmd_to_rq(cmnd);
6781 	u32 unique_tag = blk_mq_unique_tag(rq);
6782 	struct sdebug_internal_cmd *internal_cmd;
6783 	struct scsi_cmnd *abort_cmd;
6784 	struct request *abort_rq;
6785 	blk_status_t res;
6786 
6787 	abort_cmd = scsi_get_internal_cmd(shost->pseudo_sdev, DMA_NONE,
6788 					  BLK_MQ_REQ_RESERVED);
6789 	if (!abort_cmd)
6790 		return false;
6791 	internal_cmd = scsi_cmd_priv(abort_cmd);
6792 	*internal_cmd = (struct sdebug_internal_cmd) {
6793 		.type = SCSI_DEBUG_ABORT_CMD,
6794 		.abort_cmd = {
6795 			.unique_tag = unique_tag,
6796 		},
6797 	};
6798 	abort_rq = scsi_cmd_to_rq(abort_cmd);
6799 	abort_rq->timeout = secs_to_jiffies(3);
6800 	res = blk_execute_rq(abort_rq, true);
6801 	scsi_put_internal_cmd(abort_cmd);
6802 	return res == BLK_STS_OK;
6803 }
6804 
6805 /*
6806  * All we can do is set the cmnd as internally aborted and wait for it to
6807  * finish. We cannot call scsi_done() as normal completion path may do that.
6808  */
6809 static bool sdebug_stop_cmnd(struct request *rq, void *data)
6810 {
6811 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
6812 
6813 	return true;
6814 }
6815 
6816 /* Deletes (stops) timers or work queues of all queued commands */
6817 static void stop_all_queued(void)
6818 {
6819 	struct sdebug_host_info *sdhp;
6820 
6821 	mutex_lock(&sdebug_host_list_mutex);
6822 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6823 		struct Scsi_Host *shost = sdhp->shost;
6824 
6825 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
6826 	}
6827 	mutex_unlock(&sdebug_host_list_mutex);
6828 }
6829 
6830 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
6831 {
6832 	struct scsi_device *sdp = cmnd->device;
6833 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6834 	struct sdebug_err_inject *err;
6835 	unsigned char *cmd = cmnd->cmnd;
6836 	int ret = 0;
6837 
6838 	if (devip == NULL)
6839 		return 0;
6840 
6841 	rcu_read_lock();
6842 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6843 		if (err->type == ERR_ABORT_CMD_FAILED &&
6844 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6845 			ret = !!err->cnt;
6846 			if (err->cnt < 0)
6847 				err->cnt++;
6848 
6849 			rcu_read_unlock();
6850 			return ret;
6851 		}
6852 	}
6853 	rcu_read_unlock();
6854 
6855 	return 0;
6856 }
6857 
6858 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
6859 {
6860 	bool aborted = scsi_debug_abort_cmnd(SCpnt);
6861 	u8 *cmd = SCpnt->cmnd;
6862 	u8 opcode = cmd[0];
6863 
6864 	++num_aborts;
6865 
6866 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6867 		sdev_printk(KERN_INFO, SCpnt->device,
6868 			    "command%s found\n",
6869 			    aborted ? "" : " not");
6870 
6871 
6872 	if (sdebug_fail_abort(SCpnt)) {
6873 		scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
6874 			    opcode);
6875 		return FAILED;
6876 	}
6877 
6878 	if (aborted == false)
6879 		return FAILED;
6880 
6881 	return SUCCESS;
6882 }
6883 
6884 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
6885 {
6886 	struct scsi_device *sdp = data;
6887 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
6888 
6889 	if (scmd->device == sdp)
6890 		scsi_debug_abort_cmnd(scmd);
6891 
6892 	return true;
6893 }
6894 
6895 /* Deletes (stops) timers or work queues of all queued commands per sdev */
6896 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
6897 {
6898 	struct Scsi_Host *shost = sdp->host;
6899 
6900 	blk_mq_tagset_busy_iter(&shost->tag_set,
6901 				scsi_debug_stop_all_queued_iter, sdp);
6902 }
6903 
6904 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
6905 {
6906 	struct scsi_device *sdp = cmnd->device;
6907 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
6908 	struct sdebug_err_inject *err;
6909 	unsigned char *cmd = cmnd->cmnd;
6910 	int ret = 0;
6911 
6912 	if (devip == NULL)
6913 		return 0;
6914 
6915 	rcu_read_lock();
6916 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
6917 		if (err->type == ERR_LUN_RESET_FAILED &&
6918 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
6919 			ret = !!err->cnt;
6920 			if (err->cnt < 0)
6921 				err->cnt++;
6922 
6923 			rcu_read_unlock();
6924 			return ret;
6925 		}
6926 	}
6927 	rcu_read_unlock();
6928 
6929 	return 0;
6930 }
6931 
6932 static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
6933 {
6934 	int i;
6935 
6936 	devip->tape_blksize = TAPE_DEF_BLKSIZE;
6937 	devip->tape_density = TAPE_DEF_DENSITY;
6938 	devip->tape_partition = 0;
6939 	devip->tape_dce = 0;
6940 	for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
6941 		devip->tape_location[i] = 0;
6942 	devip->tape_pending_nbr_partitions = -1;
6943 	/* Don't reset partitioning? */
6944 }
6945 
6946 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
6947 {
6948 	struct scsi_device *sdp = SCpnt->device;
6949 	struct sdebug_dev_info *devip = sdp->hostdata;
6950 	u8 *cmd = SCpnt->cmnd;
6951 	u8 opcode = cmd[0];
6952 
6953 	++num_dev_resets;
6954 
6955 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
6956 		sdev_printk(KERN_INFO, sdp, "doing device reset");
6957 
6958 	scsi_debug_stop_all_queued(sdp);
6959 	if (devip) {
6960 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
6961 		if (SCpnt->device->type == TYPE_TAPE)
6962 			scsi_tape_reset_clear(devip);
6963 	}
6964 
6965 	if (sdebug_fail_lun_reset(SCpnt)) {
6966 		scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
6967 		return FAILED;
6968 	}
6969 
6970 	return SUCCESS;
6971 }
6972 
6973 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
6974 {
6975 	struct scsi_target *starget = scsi_target(cmnd->device);
6976 	struct sdebug_target_info *targetip =
6977 		(struct sdebug_target_info *)starget->hostdata;
6978 
6979 	if (targetip)
6980 		return targetip->reset_fail;
6981 
6982 	return 0;
6983 }
6984 
/*
 * Target reset handler. Raises a bus-reset unit attention on every LU that
 * shares @SCpnt's target id (tape devices also get their emulated state
 * cleared) and honours injected "target reset failed" errors.
 */
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
	struct scsi_device *sdp = SCpnt->device;
	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
	struct sdebug_dev_info *devip;
	u8 *cmd = SCpnt->cmnd;
	u8 opcode = cmd[0];
	int k = 0;	/* number of LUs found on the target */

	++num_target_resets;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "doing target reset\n");

	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if (devip->target == sdp->id) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			if (SCpnt->device->type == TYPE_TAPE)
				scsi_tape_reset_clear(devip);
			++k;
		}
	}

	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%d device(s) found in target\n", k);

	if (sdebug_fail_target_reset(SCpnt)) {
		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
			    opcode);
		return FAILED;
	}

	return SUCCESS;
}
7019 
7020 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
7021 {
7022 	struct scsi_device *sdp = SCpnt->device;
7023 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
7024 	struct sdebug_dev_info *devip;
7025 	int k = 0;
7026 
7027 	++num_bus_resets;
7028 
7029 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
7030 		sdev_printk(KERN_INFO, sdp, "doing bus reset\n");
7031 
7032 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
7033 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
7034 		if (SCpnt->device->type == TYPE_TAPE)
7035 			scsi_tape_reset_clear(devip);
7036 		++k;
7037 	}
7038 
7039 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
7040 		sdev_printk(KERN_INFO, sdp,
7041 			    "%d device(s) found in host\n", k);
7042 	return SUCCESS;
7043 }
7044 
/*
 * Host reset handler. Raises a bus-reset unit attention on every device of
 * every simulated host, then cancels all deferred command completions.
 * Always returns SUCCESS.
 */
static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;	/* total devices visited across all hosts */

	++num_host_resets;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device, "doing host reset\n");
	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			if (SCpnt->device->type == TYPE_TAPE)
				scsi_tape_reset_clear(devip);
			++k;
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
	stop_all_queued();
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			"%d device(s) found\n", k);
	return SUCCESS;
}
7071 
/*
 * Write an MBR-style partition table into the first sector of the ramdisk
 * image @ramp so the simulated disk appears pre-partitioned. Creates up to
 * SDEBUG_MAX_PARTS equal-sized Linux (0x83) partitions, each starting on a
 * cylinder boundary. No-op if num_parts < 1 or the store is under 1 MiB.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* the first track (sdebug_sectors_per) is left unpartitioned */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;	/* sectors per cylinder */
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/* Round each partition start down to a cylinder boundary and track
	 * the smallest resulting span so all partitions get equal size. */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* legacy CHS address of the first sector */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* legacy CHS address of the last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
7124 
7125 static void block_unblock_all_queues(bool block)
7126 {
7127 	struct sdebug_host_info *sdhp;
7128 
7129 	lockdep_assert_held(&sdebug_host_list_mutex);
7130 
7131 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
7132 		struct Scsi_Host *shost = sdhp->shost;
7133 
7134 		if (block)
7135 			scsi_block_requests(shost);
7136 		else
7137 			scsi_unblock_requests(shost);
7138 	}
7139 }
7140 
7141 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
7142  * commands will be processed normally before triggers occur.
7143  */
7144 static void tweak_cmnd_count(void)
7145 {
7146 	int count, modulo;
7147 
7148 	modulo = abs(sdebug_every_nth);
7149 	if (modulo < 2)
7150 		return;
7151 
7152 	mutex_lock(&sdebug_host_list_mutex);
7153 	block_unblock_all_queues(true);
7154 	count = atomic_read(&sdebug_cmnd_count);
7155 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
7156 	block_unblock_all_queues(false);
7157 	mutex_unlock(&sdebug_host_list_mutex);
7158 }
7159 
/* Zero all global command/completion statistics counters */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
7167 
7168 static bool inject_on_this_cmd(void)
7169 {
7170 	if (sdebug_every_nth == 0)
7171 		return false;
7172 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7173 }
7174 
#define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */

/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling scsi_done() directly
 * or schedules a hrtimer, work queue, or poll-deferred completion, then
 * returns 0. @scsi_result is a preformatted result used when @pfp is NULL
 * or yields 0; @delta_jiff (jiffies) and @ndelay (nanoseconds) select the
 * response-delay model.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	/* no device info: respond immediately, default to DID_NO_CONNECT */
	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;


	/* SDEBUG_OPT_RARE_TSF: when the queue is full and the every_nth
	 * trigger fires, inject a TASK SET FULL status. */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    num_in_q);
		}
	}

	sd_dp = &sdsc->sd_dp;

	/* Timestamp needed to account for elapsed time in very short
	 * ndelays and to set the completion time of polled requests. */
	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* the response function asked for an immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	/* SDEBUG_OPT_TRANSPORT_ERR: turn a pending injection into an
	 * ABORTED COMMAND / ACK-NAK timeout check condition. */
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "non-zero result=0x%x\n",
			    cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* sdebug_random: uniform random delay in [0, ns) */
			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* polled: record the absolute completion time for
			 * the poll handler instead of arming a timer */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			sd_dp->defer_t = SDEB_DEFER_POLL;
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->defer_t = SDEB_DEFER_HRT;
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		/* SDEBUG_OPT_CMD_ABORT: mark this command to be aborted by
		 * the deferred-work path when an injection is pending */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			sd_dp->defer_t = SDEB_DEFER_POLL;
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->defer_t = SDEB_DEFER_WQ;
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
7333 
7334 /* Note: The following macros create attribute files in the
7335    /sys/module/scsi_debug/parameters directory. Unfortunately this
7336    driver is unaware of a change and cannot trigger auxiliary actions
7337    as it can when the corresponding attribute in the
7338    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
7339  */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
/* zoned block device (ZBC) emulation parameters */
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);
7422 
7423 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
7424 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
7425 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
7426 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
7427 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
7428 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
7429 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
7430 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
7431 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
7432 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
7433 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
7434 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
7435 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
7436 MODULE_PARM_DESC(host_max_queue,
7437 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
7438 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
7439 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
7440 		 SDEBUG_VERSION "\")");
7441 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
7442 MODULE_PARM_DESC(lbprz,
7443 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
7444 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
7445 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
7446 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
7447 MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
7448 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
7449 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
7450 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
7451 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
7452 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
7453 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
7454 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
7455 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
7456 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
7457 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
7458 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
7459 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
7460 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
7461 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
7462 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
7463 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
7464 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
7465 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
7466 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
7467 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
7468 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
7469 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
7470 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
7471 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
7472 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
7473 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
7474 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
7475 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
7476 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
7477 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
7478 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
7479 MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
7480 MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
7481 MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
7482 MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
7483 MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
7484 MODULE_PARM_DESC(uuid_ctl,
7485 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
7486 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
7487 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
7488 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
7489 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
7490 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
7491 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
7492 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
7493 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
7494 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
7495 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
7496 
7497 #define SDEBUG_INFO_LEN 256
7498 static char sdebug_info[SDEBUG_INFO_LEN];
7499 
7500 static const char *scsi_debug_info(struct Scsi_Host *shp)
7501 {
7502 	int k;
7503 
7504 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
7505 		      my_name, SDEBUG_VERSION, sdebug_version_date);
7506 	if (k >= (SDEBUG_INFO_LEN - 1))
7507 		return sdebug_info;
7508 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
7509 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
7510 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
7511 		  "statistics", (int)sdebug_statistics);
7512 	return sdebug_info;
7513 }
7514 
7515 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
7516 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
7517 				 int length)
7518 {
7519 	char arr[16];
7520 	int opts;
7521 	int minLen = length > 15 ? 15 : length;
7522 
7523 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
7524 		return -EACCES;
7525 	memcpy(arr, buffer, minLen);
7526 	arr[minLen] = '\0';
7527 	if (1 != sscanf(arr, "%d", &opts))
7528 		return -EINVAL;
7529 	sdebug_opts = opts;
7530 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
7531 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
7532 	if (sdebug_every_nth != 0)
7533 		tweak_cmnd_count();
7534 	return length;
7535 }
7536 
/*
 * Context for sdebug_submit_queue_iter(): records the lowest and highest
 * busy tag seen on the hardware queue selected by queue_num.
 */
struct sdebug_submit_queue_data {
	int *first;	/* out: lowest busy tag, stays -1 if none seen */
	int *last;	/* out: highest busy tag seen so far */
	int queue_num;	/* hardware queue to inspect */
};
7542 
7543 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
7544 {
7545 	struct sdebug_submit_queue_data *data = opaque;
7546 	u32 unique_tag = blk_mq_unique_tag(rq);
7547 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7548 	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
7549 	int queue_num = data->queue_num;
7550 
7551 	if (hwq != queue_num)
7552 		return true;
7553 
7554 	/* Rely on iter'ing in ascending tag order */
7555 	if (*data->first == -1)
7556 		*data->first = *data->last = tag;
7557 	else
7558 		*data->last = tag;
7559 
7560 	return true;
7561 }
7562 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* per hardware queue: report the span of busy tags, if any */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;	/* first/last busy tag on queue j */
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;	/* store entry carries the "not in use" mark */
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		/* list every host and the store index it is bound to */
		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
7642 
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			struct sdebug_host_info *sdhp;

			/* block queueing while verifying every host is idle */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				/* jiffy delay takes over; drop any ns delay */
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
7683 
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accepted range is 0 <= ndelay < one second, in nanoseconds */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			struct sdebug_host_info *sdhp;

			/* block queueing while verifying every host is idle */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}

			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
7726 
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}

/* Accepts either a "0x"-prefixed hex value or a plain decimal value. */
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	sdebug_opts = opts;
	/* refresh the cached flag bits derived from opts */
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);
7756 
7757 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
7758 {
7759 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
7760 }
7761 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
7762 			   size_t count)
7763 {
7764 	int n;
7765 
7766 	/* Cannot change from or to TYPE_ZBC with sysfs */
7767 	if (sdebug_ptype == TYPE_ZBC)
7768 		return -EINVAL;
7769 
7770 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7771 		if (n == TYPE_ZBC)
7772 			return -EINVAL;
7773 		sdebug_ptype = n;
7774 		return count;
7775 	}
7776 	return -EINVAL;
7777 }
7778 static DRIVER_ATTR_RW(ptype);
7779 
7780 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
7781 {
7782 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
7783 }
7784 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
7785 			    size_t count)
7786 {
7787 	int n;
7788 
7789 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7790 		sdebug_dsense = n;
7791 		return count;
7792 	}
7793 	return -EINVAL;
7794 }
7795 static DRIVER_ATTR_RW(dsense);
7796 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/*
 * Transitions of fake_rw also manage the backing store(s): clearing it
 * (1 -> 0) sets up or re-uses a store shared by all hosts, setting it
 * (0 -> 1) erases all stores apart from the first.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);	/* normalize both sides to 0 or 1 */
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* first store still exists: mark it in-use */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
7844 
7845 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
7846 {
7847 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
7848 }
7849 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
7850 			      size_t count)
7851 {
7852 	int n;
7853 
7854 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7855 		sdebug_no_lun_0 = n;
7856 		return count;
7857 	}
7858 	return -EINVAL;
7859 }
7860 static DRIVER_ATTR_RW(no_lun_0);
7861 
7862 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
7863 {
7864 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
7865 }
7866 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
7867 			      size_t count)
7868 {
7869 	int n;
7870 
7871 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7872 		sdebug_num_tgts = n;
7873 		sdebug_max_tgts_luns();
7874 		return count;
7875 	}
7876 	return -EINVAL;
7877 }
7878 static DRIVER_ATTR_RW(num_tgts);
7879 
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

/* Takes effect for subsequently added hosts; see add_host_store(). */
static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);

static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
7909 
7910 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
7911 {
7912 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
7913 }
7914 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
7915 			       size_t count)
7916 {
7917 	int nth;
7918 	char work[20];
7919 
7920 	if (sscanf(buf, "%10s", work) == 1) {
7921 		if (strncasecmp(work, "0x", 2) == 0) {
7922 			if (kstrtoint(work + 2, 16, &nth) == 0)
7923 				goto every_nth_done;
7924 		} else {
7925 			if (kstrtoint(work, 10, &nth) == 0)
7926 				goto every_nth_done;
7927 		}
7928 	}
7929 	return -EINVAL;
7930 
7931 every_nth_done:
7932 	sdebug_every_nth = nth;
7933 	if (nth && !sdebug_statistics) {
7934 		pr_info("every_nth needs statistics=1, set it\n");
7935 		sdebug_statistics = true;
7936 	}
7937 	tweak_cmnd_count();
7938 	return count;
7939 }
7940 static DRIVER_ATTR_RW(every_nth);
7941 
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
/* Accepts 0 (peripheral) or 1 (flat) SAM LUN address methods only. */
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* raise a LUNS CHANGED unit attention on every LU */
			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
7978 
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/* Accepts 0..256 LUs per target; re-derives the visible LU topology. */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* raise a LUNS CHANGED unit attention on every LU */
			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
8017 
8018 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
8019 {
8020 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
8021 }
8022 /* N.B. max_queue can be changed while there are queued commands. In flight
8023  * commands beyond the new max_queue will be completed. */
8024 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
8025 			       size_t count)
8026 {
8027 	int n;
8028 
8029 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
8030 	    (n <= SDEBUG_CANQUEUE) &&
8031 	    (sdebug_host_max_queue == 0)) {
8032 		mutex_lock(&sdebug_host_list_mutex);
8033 
8034 		/* We may only change sdebug_max_queue when we have no shosts */
8035 		if (list_empty(&sdebug_host_list))
8036 			sdebug_max_queue = n;
8037 		else
8038 			count = -EBUSY;
8039 		mutex_unlock(&sdebug_host_list_mutex);
8040 		return count;
8041 	}
8042 	return -EINVAL;
8043 }
8044 static DRIVER_ATTR_RW(max_queue);
8045 
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
}

/* When set, user-data reads+writes are not protected (per module param doc). */
static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_no_rwlock = v;
	return count;
}
static DRIVER_ATTR_RW(no_rwlock);
8067 
8068 /*
8069  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
8070  * in range [0, sdebug_host_max_queue), we can't change it.
8071  */
8072 static DRIVER_ATTR_RO(host_max_queue);
8073 
8074 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
8075 {
8076 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
8077 }
8078 static DRIVER_ATTR_RO(no_uld);
8079 
8080 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
8081 {
8082 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
8083 }
8084 static DRIVER_ATTR_RO(scsi_level);
8085 
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	/* NOTE(review): checkpatch prefers -EOPNOTSUPP over -ENOTSUPP here;
	 * changing it would alter the errno seen by user space - confirm
	 * before switching. */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* raise a CAPACITY CHANGED unit attention on every LU */
			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
8124 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Positive n adds n hosts (re-using a parked per-host store when
 * per_host_store is active and fake_rw is off); negative n removes |n|.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* look for a store marked not-in-use to adopt */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
8168 
8169 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
8170 {
8171 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
8172 }
8173 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
8174 				    size_t count)
8175 {
8176 	int n;
8177 
8178 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8179 		sdebug_vpd_use_hostno = n;
8180 		return count;
8181 	}
8182 	return -EINVAL;
8183 }
8184 static DRIVER_ATTR_RW(vpd_use_hostno);
8185 
8186 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
8187 {
8188 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
8189 }
8190 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
8191 				size_t count)
8192 {
8193 	int n;
8194 
8195 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
8196 		if (n > 0)
8197 			sdebug_statistics = true;
8198 		else {
8199 			clear_queue_stats();
8200 			sdebug_statistics = false;
8201 		}
8202 		return count;
8203 	}
8204 	return -EINVAL;
8205 }
8206 static DRIVER_ATTR_RW(statistics);
8207 
/* Read-only attributes mirroring module parameters of the same names. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
8243 
/* Show the mapped-block bitmap of store 0; "0-N" when LBP is not enabled. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	/* PAGE_SIZE - 1 above leaves room for this trailing newline + NUL */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
8265 
8266 static ssize_t random_show(struct device_driver *ddp, char *buf)
8267 {
8268 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
8269 }
8270 
8271 static ssize_t random_store(struct device_driver *ddp, const char *buf,
8272 			    size_t count)
8273 {
8274 	bool v;
8275 
8276 	if (kstrtobool(buf, &v))
8277 		return -EINVAL;
8278 
8279 	sdebug_random = v;
8280 	return count;
8281 }
8282 static DRIVER_ATTR_RW(random);
8283 
8284 static ssize_t removable_show(struct device_driver *ddp, char *buf)
8285 {
8286 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
8287 }
8288 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
8289 			       size_t count)
8290 {
8291 	int n;
8292 
8293 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8294 		sdebug_removable = (n > 0);
8295 		return count;
8296 	}
8297 	return -EINVAL;
8298 }
8299 static DRIVER_ATTR_RW(removable);
8300 
8301 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
8302 {
8303 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
8304 }
8305 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
8306 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
8307 			       size_t count)
8308 {
8309 	int n;
8310 
8311 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8312 		sdebug_host_lock = (n > 0);
8313 		return count;
8314 	}
8315 	return -EINVAL;
8316 }
8317 static DRIVER_ATTR_RW(host_lock);
8318 
8319 static ssize_t strict_show(struct device_driver *ddp, char *buf)
8320 {
8321 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
8322 }
8323 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
8324 			    size_t count)
8325 {
8326 	int n;
8327 
8328 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8329 		sdebug_strict = (n > 0);
8330 		return count;
8331 	}
8332 	return -EINVAL;
8333 }
8334 static DRIVER_ATTR_RW(strict);
8335 
/* Read-only: reports whether LU names use a uuid (see uuid_ctl param). */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
8341 
8342 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
8343 {
8344 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
8345 }
8346 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
8347 			     size_t count)
8348 {
8349 	int ret, n;
8350 
8351 	ret = kstrtoint(buf, 0, &n);
8352 	if (ret)
8353 		return ret;
8354 	sdebug_cdb_len = n;
8355 	all_config_cdb_len();
8356 	return count;
8357 }
8358 static DRIVER_ATTR_RW(cdb_len);
8359 
/* Zoned-block model names accepted by the "zbc" parameter (long form). */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

/* Short-form aliases for the same models. */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

/* Numeric aliases for the same models. */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
8377 
8378 static int sdeb_zbc_model_str(const char *cp)
8379 {
8380 	int res = sysfs_match_string(zbc_model_strs_a, cp);
8381 
8382 	if (res < 0) {
8383 		res = sysfs_match_string(zbc_model_strs_b, cp);
8384 		if (res < 0) {
8385 			res = sysfs_match_string(zbc_model_strs_c, cp);
8386 			if (res < 0)
8387 				return -EINVAL;
8388 		}
8389 	}
8390 	return res;
8391 }
8392 
8393 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
8394 {
8395 	return scnprintf(buf, PAGE_SIZE, "%s\n",
8396 			 zbc_model_strs_a[sdeb_zbc_model]);
8397 }
8398 static DRIVER_ATTR_RO(zbc);
8399 
/*
 * Read-only: milliseconds before a (re)started unit reports ready; used by
 * resp_not_ready() when computing the NOT READY sense response.
 */
static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
8405 
8406 static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
8407 {
8408 	char *p = buf, *end = buf + PAGE_SIZE;
8409 	int i;
8410 
8411 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8412 		p += scnprintf(p, end - p, "%d %ld\n", i,
8413 			       atomic_long_read(&writes_by_group_number[i]));
8414 
8415 	return p - buf;
8416 }
8417 
8418 static ssize_t group_number_stats_store(struct device_driver *ddp,
8419 					const char *buf, size_t count)
8420 {
8421 	int i;
8422 
8423 	for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
8424 		atomic_long_set(&writes_by_group_number[i], 0);
8425 
8426 	return count;
8427 }
8428 static DRIVER_ATTR_RW(group_number_stats);
8429 
8430 /* Note: The following array creates attribute files in the
8431    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
8432    files (over those found in the /sys/module/scsi_debug/parameters
8433    directory) is that auxiliary actions can be triggered when an attribute
8434    is changed. For example see: add_host_store() above.
8435  */
8436 
/* Attribute files exposed under /sys/bus/pseudo/drivers/scsi_debug */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	&driver_attr_group_number_stats.attr,
	NULL,	/* list must be NULL terminated */
};
ATTRIBUTE_GROUPS(sdebug_drv);
8480 
/* Root ("pseudo_0") device; parent of every simulated adapter. */
static struct device *pseudo_primary;
8482 
/*
 * Module init: validate and normalize module parameters, optionally create
 * the shared backing store, register the pseudo root device / bus / driver,
 * then build the requested number of simulated hosts.  Returns 0 on success
 * or a negative errno (error paths unwind registrations in reverse order).
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;	/* index of the first store; -1 means none created */

	/* a valid ndelay (nanoseconds) overrides jdelay (jiffies) */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	/* LUN address method: fall back to peripheral style if out of range */
	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	/* more than 256 LUNs forces the flat address method */
	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	/* a non-zero host_max_queue caps the submit queue depth */
	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	/* clamp logical block provisioning parameters to sane ranges */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}

	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* consume add_host so sysfs writes start from zero */
	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("failed to create initial debugfs directory\n");

	/* first host reuses the store made above; others may get their own */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);	/* no-op when idx is still -1 */
	return ret;
}
8717 
8718 static void __exit scsi_debug_exit(void)
8719 {
8720 	int k = sdebug_num_hosts;
8721 
8722 	for (; k; k--)
8723 		sdebug_do_remove_host(true);
8724 	driver_unregister(&sdebug_driverfs_driver);
8725 	bus_unregister(&pseudo_lld_bus);
8726 	root_device_unregister(pseudo_primary);
8727 
8728 	sdebug_erase_all_stores(false);
8729 	xa_destroy(per_store_ap);
8730 	debugfs_remove(sdebug_debugfs_root);
8731 }
8732 
/* Module entry and exit points. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
8735 
/* Device-model release callback: frees the embedding sdebug_host_info. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
8743 
8744 /* idx must be valid, if sip is NULL then it will be obtained using idx */
8745 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
8746 {
8747 	if (idx < 0)
8748 		return;
8749 	if (!sip) {
8750 		if (xa_empty(per_store_ap))
8751 			return;
8752 		sip = xa_load(per_store_ap, idx);
8753 		if (!sip)
8754 			return;
8755 	}
8756 	vfree(sip->map_storep);
8757 	vfree(sip->dif_storep);
8758 	vfree(sip->storep);
8759 	xa_erase(per_store_ap, idx);
8760 	kfree(sip);
8761 }
8762 
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;	/* skip (keep) first store */
		else
			sdebug_erase_store(idx, sip);
	}
	/*
	 * NOTE(review): the flag is cleared on the loop's first iteration, so
	 * this branch only runs when apart_from_first was requested AND the
	 * xarray was empty — presumably the intent was to reset the most
	 * recent index whenever the first store is kept; confirm upstream.
	 */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
8778 
8779 /*
8780  * Returns store xarray new element index (idx) if >=0 else negated errno.
8781  * Limit the number of stores to 65536.
8782  */
8783 static int sdebug_add_store(void)
8784 {
8785 	int res;
8786 	u32 n_idx;
8787 	unsigned long iflags;
8788 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
8789 	struct sdeb_store_info *sip = NULL;
8790 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
8791 
8792 	sip = kzalloc_obj(*sip);
8793 	if (!sip)
8794 		return -ENOMEM;
8795 
8796 	xa_lock_irqsave(per_store_ap, iflags);
8797 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
8798 	if (unlikely(res < 0)) {
8799 		xa_unlock_irqrestore(per_store_ap, iflags);
8800 		kfree(sip);
8801 		pr_warn("xa_alloc() errno=%d\n", -res);
8802 		return res;
8803 	}
8804 	sdeb_most_recent_idx = n_idx;
8805 	if (sdeb_first_idx < 0)
8806 		sdeb_first_idx = n_idx;
8807 	xa_unlock_irqrestore(per_store_ap, iflags);
8808 
8809 	res = -ENOMEM;
8810 	sip->storep = vzalloc(sz);
8811 	if (!sip->storep) {
8812 		pr_err("user data oom\n");
8813 		goto err;
8814 	}
8815 	if (sdebug_num_parts > 0)
8816 		sdebug_build_parts(sip->storep, sz);
8817 
8818 	/* DIF/DIX: what T10 calls Protection Information (PI) */
8819 	if (sdebug_dix) {
8820 		int dif_size;
8821 
8822 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
8823 		sip->dif_storep = vmalloc(dif_size);
8824 
8825 		pr_info("dif_storep %u bytes @ %p\n", dif_size,
8826 			sip->dif_storep);
8827 
8828 		if (!sip->dif_storep) {
8829 			pr_err("DIX oom\n");
8830 			goto err;
8831 		}
8832 		memset(sip->dif_storep, 0xff, dif_size);
8833 	}
8834 	/* Logical Block Provisioning */
8835 	if (scsi_debug_lbp()) {
8836 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
8837 		sip->map_storep = vcalloc(BITS_TO_LONGS(map_size),
8838 					  sizeof(long));
8839 
8840 		pr_info("%lu provisioning blocks\n", map_size);
8841 
8842 		if (!sip->map_storep) {
8843 			pr_err("LBP map oom\n");
8844 			goto err;
8845 		}
8846 
8847 		/* Map first 1KB for partition table */
8848 		if (sdebug_num_parts)
8849 			map_region(sip, 0, 2);
8850 	}
8851 
8852 	rwlock_init(&sip->macc_data_lck);
8853 	rwlock_init(&sip->macc_meta_lck);
8854 	rwlock_init(&sip->macc_sector_lck);
8855 	return (int)n_idx;
8856 err:
8857 	sdebug_erase_store((int)n_idx, sip);
8858 	pr_warn("failed, errno=%d\n", -res);
8859 	return res;
8860 }
8861 
/*
 * Create one simulated adapter bound to backing store @per_host_idx (the
 * first store when negative), pre-create its device-info entries and
 * register it with the driver core.  Returns 0 or a negated errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc_obj(*sdbg_host);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* mark the chosen store as in use */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		/* undo the list insertion before releasing the host */
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/*
	 * After .release has been set the refcount owns the host, so drop it
	 * via put_device(); before that a plain kfree() is sufficient.
	 */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("failed, errno=%d\n", -error);
	return error;
}
8920 
8921 static int sdebug_do_add_host(bool mk_new_store)
8922 {
8923 	int ph_idx = sdeb_most_recent_idx;
8924 
8925 	if (mk_new_store) {
8926 		ph_idx = sdebug_add_store();
8927 		if (ph_idx < 0)
8928 			return ph_idx;
8929 	}
8930 	return sdebug_add_host_helper(ph_idx);
8931 }
8932 
/*
 * Remove the most recently added host.  Unless @the_end (module exit), a
 * backing store no longer referenced by any remaining host is marked
 * not-in-use so a later add can reclaim it.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;	/* store index of the host being removed */
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* last entry == most recently added host */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* is any other host still using this store? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
8972 
8973 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
8974 {
8975 	struct sdebug_dev_info *devip = sdev->hostdata;
8976 
8977 	if (!devip)
8978 		return	-ENODEV;
8979 
8980 	mutex_lock(&sdebug_host_list_mutex);
8981 	block_unblock_all_queues(true);
8982 
8983 	if (qdepth > SDEBUG_CANQUEUE) {
8984 		qdepth = SDEBUG_CANQUEUE;
8985 		pr_warn("requested qdepth [%d] exceeds canqueue [%d], trim\n",
8986 			qdepth, SDEBUG_CANQUEUE);
8987 	}
8988 	if (qdepth < 1)
8989 		qdepth = 1;
8990 	if (qdepth != sdev->queue_depth)
8991 		scsi_change_queue_depth(sdev, qdepth);
8992 
8993 	block_unblock_all_queues(false);
8994 	mutex_unlock(&sdebug_host_list_mutex);
8995 
8996 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
8997 		sdev_printk(KERN_INFO, sdev, "qdepth=%d\n", qdepth);
8998 
8999 	return sdev->queue_depth;
9000 }
9001 
9002 static bool fake_timeout(struct scsi_cmnd *scp)
9003 {
9004 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
9005 		if (sdebug_every_nth < -1)
9006 			sdebug_every_nth = -1;
9007 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
9008 			return true; /* ignore command causing timeout */
9009 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
9010 			 scsi_medium_access_command(scp))
9011 			return true; /* time out reads and writes */
9012 	}
9013 	return false;
9014 }
9015 
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;	/* nanoseconds since the device was created */
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	/* state 2: transitioning to ready after tur_ms_to_ready elapses */
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		/* for TUR, report the remaining time in the sense data */
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* any other stopped state: an initializing command is required */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
9058 
/*
 * SCSI host template .map_queues handler: split the hardware queues between
 * the default and poll queue types (submit_queues - poll_queues defaults,
 * poll_queues polled).  Nothing to do for a single hardware queue.
 */
static void sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;	/* qoff: running offset into the hw queue space */

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues  = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			/* the default type must always get at least one queue */
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
9087 
/* Context passed to sdebug_blk_mq_poll_iter() via blk_mq_tagset_busy_iter() */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hardware queue being polled */
	int *num_entries;	/* incremented per completed command */
};
9092 
9093 /*
9094  * We don't handle aborted commands here, but it does not seem possible to have
9095  * aborted polled commands from schedule_resp()
9096  */
9097 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
9098 {
9099 	struct sdebug_blk_mq_poll_data *data = opaque;
9100 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
9101 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9102 	struct sdebug_defer *sd_dp;
9103 	u32 unique_tag = blk_mq_unique_tag(rq);
9104 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
9105 	unsigned long flags;
9106 	int queue_num = data->queue_num;
9107 	ktime_t time;
9108 
9109 	/* We're only interested in one queue for this iteration */
9110 	if (hwq != queue_num)
9111 		return true;
9112 
9113 	/* Subsequent checks would fail if this failed, but check anyway */
9114 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
9115 		return true;
9116 
9117 	time = ktime_get_boottime();
9118 
9119 	spin_lock_irqsave(&sdsc->lock, flags);
9120 	sd_dp = &sdsc->sd_dp;
9121 	if (sd_dp->defer_t != SDEB_DEFER_POLL) {
9122 		spin_unlock_irqrestore(&sdsc->lock, flags);
9123 		return true;
9124 	}
9125 
9126 	if (time < sd_dp->cmpl_ts) {
9127 		spin_unlock_irqrestore(&sdsc->lock, flags);
9128 		return true;
9129 	}
9130 	spin_unlock_irqrestore(&sdsc->lock, flags);
9131 
9132 	if (sdebug_statistics) {
9133 		atomic_inc(&sdebug_completions);
9134 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
9135 			atomic_inc(&sdebug_miss_cpus);
9136 	}
9137 
9138 	scsi_done(cmd); /* callback to mid level */
9139 	(*data->num_entries)++;
9140 	return true;
9141 }
9142 
9143 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
9144 {
9145 	int num_entries = 0;
9146 	struct sdebug_blk_mq_poll_data data = {
9147 		.queue_num = queue_num,
9148 		.num_entries = &num_entries,
9149 	};
9150 
9151 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
9152 				&data);
9153 
9154 	if (num_entries > 0)
9155 		atomic_add(num_entries, &sdeb_mq_poll_count);
9156 	return num_entries;
9157 }
9158 
9159 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
9160 {
9161 	struct scsi_device *sdp = cmnd->device;
9162 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9163 	struct sdebug_err_inject *err;
9164 	unsigned char *cmd = cmnd->cmnd;
9165 	int ret = 0;
9166 
9167 	if (devip == NULL)
9168 		return 0;
9169 
9170 	rcu_read_lock();
9171 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9172 		if (err->type == ERR_TMOUT_CMD &&
9173 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9174 			ret = !!err->cnt;
9175 			if (err->cnt < 0)
9176 				err->cnt++;
9177 
9178 			rcu_read_unlock();
9179 			return ret;
9180 		}
9181 	}
9182 	rcu_read_unlock();
9183 
9184 	return 0;
9185 }
9186 
9187 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
9188 {
9189 	struct scsi_device *sdp = cmnd->device;
9190 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
9191 	struct sdebug_err_inject *err;
9192 	unsigned char *cmd = cmnd->cmnd;
9193 	int ret = 0;
9194 
9195 	if (devip == NULL)
9196 		return 0;
9197 
9198 	rcu_read_lock();
9199 	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
9200 		if (err->type == ERR_FAIL_QUEUE_CMD &&
9201 		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
9202 			ret = err->cnt ? err->queuecmd_ret : 0;
9203 			if (err->cnt < 0)
9204 				err->cnt++;
9205 
9206 			rcu_read_unlock();
9207 			return ret;
9208 		}
9209 	}
9210 	rcu_read_unlock();
9211 
9212 	return 0;
9213 }
9214 
/*
 * Error-injection check: when an active ERR_FAIL_CMD entry matches this
 * opcode (0xff matches any), build the configured sense/status response,
 * schedule it, copy the entry into *info and return non-zero.  Returns 0
 * when no matching active entry exists.
 */
static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;
	int result;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			if (!err->cnt) {
				rcu_read_unlock();
				return 0;
			}

			ret = !!err->cnt;
			rcu_read_unlock();
			goto out_handle;
		}
	}
	rcu_read_unlock();

	return 0;

out_handle:
	/*
	 * NOTE(review): err is dereferenced here after rcu_read_unlock();
	 * if list entries can be freed under RCU this is a potential
	 * use-after-free — confirm the removal path's grace-period handling.
	 */
	if (err->cnt < 0)
		err->cnt++;
	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
	*info = *err;
	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);

	return ret;
}
9256 
9257 /* Process @scp, a request to abort a SCSI command by tag. */
9258 static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
9259 {
9260 	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
9261 	struct sdebug_abort_cmd *abort_cmd = &internal_cmd->abort_cmd;
9262 	const u32 unique_tag = abort_cmd->unique_tag;
9263 	struct scsi_cmnd *to_be_aborted_scmd =
9264 		scsi_host_find_tag(shost, unique_tag);
9265 	struct sdebug_scsi_cmd *to_be_aborted_sdsc =
9266 		scsi_cmd_priv(to_be_aborted_scmd);
9267 	bool res = false;
9268 
9269 	if (!to_be_aborted_scmd) {
9270 		pr_err("command with tag %#x not found\n", unique_tag);
9271 		return;
9272 	}
9273 
9274 	scoped_guard(spinlock_irqsave, &to_be_aborted_sdsc->lock)
9275 		res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
9276 
9277 	if (res)
9278 		pr_info("aborted command with tag %#x\n", unique_tag);
9279 	else
9280 		pr_err("failed to abort command with tag %#x\n", unique_tag);
9281 
9282 	set_host_byte(scp, res ? DID_OK : DID_ERROR);
9283 }
9284 
9285 static enum scsi_qc_status
9286 scsi_debug_process_reserved_command(struct Scsi_Host *shost,
9287 				    struct scsi_cmnd *scp)
9288 {
9289 	struct sdebug_internal_cmd *internal_cmd = scsi_cmd_priv(scp);
9290 
9291 	switch (internal_cmd->type) {
9292 	case SCSI_DEBUG_ABORT_CMD:
9293 		scsi_debug_abort_cmd(shost, scp);
9294 		break;
9295 	default:
9296 		WARN_ON_ONCE(true);
9297 		set_host_byte(scp, DID_ERROR);
9298 		break;
9299 	}
9300 
9301 	scsi_done(scp);
9302 	return 0;
9303 }
9304 
9305 static enum scsi_qc_status scsi_debug_queuecommand(struct Scsi_Host *shost,
9306 						   struct scsi_cmnd *scp)
9307 {
9308 	u8 sdeb_i;
9309 	struct scsi_device *sdp = scp->device;
9310 	const struct opcode_info_t *oip;
9311 	const struct opcode_info_t *r_oip;
9312 	struct sdebug_dev_info *devip;
9313 	u8 *cmd = scp->cmnd;
9314 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
9315 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
9316 	int k, na;
9317 	int errsts = 0;
9318 	u64 lun_index = sdp->lun & 0x3FFF;
9319 	u32 flags;
9320 	u16 sa;
9321 	u8 opcode = cmd[0];
9322 	u32 devsel = sdebug_get_devsel(scp->device);
9323 	bool has_wlun_rl;
9324 	bool inject_now;
9325 	int ret = 0;
9326 	struct sdebug_err_inject err;
9327 
9328 	scsi_set_resid(scp, 0);
9329 	if (sdebug_statistics) {
9330 		atomic_inc(&sdebug_cmnd_count);
9331 		inject_now = inject_on_this_cmd();
9332 	} else {
9333 		inject_now = false;
9334 	}
9335 	if (unlikely(sdebug_verbose &&
9336 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
9337 		char b[120];
9338 		int n, len, sb;
9339 
9340 		len = scp->cmd_len;
9341 		sb = (int)sizeof(b);
9342 		if (len > 32)
9343 			strcpy(b, "too long, over 32 bytes");
9344 		else {
9345 			for (k = 0, n = 0; k < len && n < sb; ++k)
9346 				n += scnprintf(b + n, sb - n, "%02x ",
9347 					       (u32)cmd[k]);
9348 		}
9349 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
9350 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
9351 	}
9352 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
9353 		return SCSI_MLQUEUE_HOST_BUSY;
9354 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
9355 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
9356 		goto err_out;
9357 
9358 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
9359 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
9360 	devip = (struct sdebug_dev_info *)sdp->hostdata;
9361 	if (unlikely(!devip)) {
9362 		devip = find_build_dev_info(sdp);
9363 		if (NULL == devip)
9364 			goto err_out;
9365 	}
9366 
9367 	if (sdebug_timeout_cmd(scp)) {
9368 		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
9369 		return 0;
9370 	}
9371 
9372 	ret = sdebug_fail_queue_cmd(scp);
9373 	if (ret) {
9374 		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
9375 				opcode, ret);
9376 		return ret;
9377 	}
9378 
9379 	if (sdebug_fail_cmd(scp, &ret, &err)) {
9380 		scmd_printk(KERN_INFO, scp,
9381 			"fail command 0x%x with hostbyte=0x%x, "
9382 			"driverbyte=0x%x, statusbyte=0x%x, "
9383 			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
9384 			opcode, err.host_byte, err.driver_byte,
9385 			err.status_byte, err.sense_key, err.asc, err.asq);
9386 		return ret;
9387 	}
9388 
9389 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
9390 		atomic_set(&sdeb_inject_pending, 1);
9391 
9392 	na = oip->num_attached;
9393 	r_pfp = oip->pfp;
9394 	if (na) {	/* multiple commands with this opcode */
9395 		r_oip = oip;
9396 		if (FF_SA & r_oip->flags) {
9397 			if (F_SA_LOW & oip->flags)
9398 				sa = 0x1f & cmd[1];
9399 			else
9400 				sa = get_unaligned_be16(cmd + 8);
9401 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
9402 				if (opcode == oip->opcode && sa == oip->sa &&
9403 					(devsel & oip->devsel) != 0)
9404 					break;
9405 			}
9406 		} else {   /* since no service action only check opcode */
9407 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
9408 				if (opcode == oip->opcode &&
9409 					(devsel & oip->devsel) != 0)
9410 					break;
9411 			}
9412 		}
9413 		if (k > na) {
9414 			if (F_SA_LOW & r_oip->flags)
9415 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
9416 			else if (F_SA_HIGH & r_oip->flags)
9417 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
9418 			else
9419 				mk_sense_invalid_opcode(scp);
9420 			goto check_cond;
9421 		}
9422 	}	/* else (when na==0) we assume the oip is a match */
9423 	flags = oip->flags;
9424 	if (unlikely(F_INV_OP & flags)) {
9425 		mk_sense_invalid_opcode(scp);
9426 		goto check_cond;
9427 	}
9428 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
9429 		if (sdebug_verbose)
9430 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
9431 				    my_name, opcode, " supported for wlun");
9432 		mk_sense_invalid_opcode(scp);
9433 		goto check_cond;
9434 	}
9435 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
9436 		u8 rem;
9437 		int j;
9438 
9439 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
9440 			rem = ~oip->len_mask[k] & cmd[k];
9441 			if (rem) {
9442 				for (j = 7; j >= 0; --j, rem <<= 1) {
9443 					if (0x80 & rem)
9444 						break;
9445 				}
9446 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
9447 				goto check_cond;
9448 			}
9449 		}
9450 	}
9451 	if (unlikely(!(F_SKIP_UA & flags) &&
9452 		     find_first_bit(devip->uas_bm,
9453 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
9454 		errsts = make_ua(scp, devip);
9455 		if (errsts)
9456 			goto check_cond;
9457 	}
9458 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
9459 		     atomic_read(&devip->stopped))) {
9460 		errsts = resp_not_ready(scp, devip);
9461 		if (errsts)
9462 			goto fini;
9463 	}
9464 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
9465 		goto fini;
9466 	if (unlikely(sdebug_every_nth)) {
9467 		if (fake_timeout(scp))
9468 			return 0;	/* ignore command: make trouble */
9469 	}
9470 	if (likely(oip->pfp))
9471 		pfp = oip->pfp;	/* calls a resp_* function */
9472 	else
9473 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
9474 
9475 fini:
9476 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
9477 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
9478 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
9479 					    sdebug_ndelay > 10000)) {
9480 		/*
9481 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
9482 		 * for Start Stop Unit (SSU) want at least 1 second delay and
9483 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
9484 		 * For Synchronize Cache want 1/20 of SSU's delay.
9485 		 */
9486 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
9487 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
9488 
9489 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
9490 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
9491 	} else
9492 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
9493 				     sdebug_ndelay);
9494 check_cond:
9495 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
9496 err_out:
9497 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
9498 }
9499 
9500 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
9501 {
9502 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
9503 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
9504 
9505 	if (blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd)))
9506 		return 0;
9507 
9508 	spin_lock_init(&sdsc->lock);
9509 	hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
9510 		      HRTIMER_MODE_REL_PINNED);
9511 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
9512 
9513 	return 0;
9514 }
9515 
/*
 * Host template shared by every emulated scsi_debug host.  Per-command
 * private data is sized by .cmd_size and set up by .init_cmd_priv;
 * error handling is backed by the scsi_debug_* eh callbacks below.
 */
static const struct scsi_host_template sdebug_driver_template = {
	/* /proc/scsi interface and identification */
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	/* sdev lifecycle hooks */
	.sdev_init =		scsi_debug_sdev_init,
	.sdev_configure =	scsi_debug_sdev_configure,
	.sdev_destroy =		scsi_debug_sdev_destroy,
	.ioctl =		scsi_debug_ioctl,
	/* command submission paths (normal, reserved, polled) */
	.queuecommand =		scsi_debug_queuecommand,
	.queue_reserved_command = scsi_debug_process_reserved_command,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	/* error-handling escalation ladder */
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	/* capacity/limits of the emulated adapter */
	.can_queue =		SDEBUG_CANQUEUE,
	.nr_reserved_cmds =	1,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.skip_settle_delay =	1,
	.track_queue_depth =	1,
	.cmd_size = sizeof(union sdebug_priv),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};
9551 
9552 static int sdebug_driver_probe(struct device *dev)
9553 {
9554 	int error = 0;
9555 	struct sdebug_host_info *sdbg_host;
9556 	struct Scsi_Host *hpnt;
9557 	int hprot;
9558 
9559 	sdbg_host = dev_to_sdebug_host(dev);
9560 
9561 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
9562 	if (NULL == hpnt) {
9563 		pr_err("scsi_host_alloc failed\n");
9564 		error = -ENODEV;
9565 		return error;
9566 	}
9567 	hpnt->can_queue = sdebug_max_queue;
9568 	hpnt->cmd_per_lun = sdebug_max_queue;
9569 	if (!sdebug_clustering)
9570 		hpnt->dma_boundary = PAGE_SIZE - 1;
9571 
9572 	if (submit_queues > nr_cpu_ids) {
9573 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
9574 			my_name, submit_queues, nr_cpu_ids);
9575 		submit_queues = nr_cpu_ids;
9576 	}
9577 	/*
9578 	 * Decide whether to tell scsi subsystem that we want mq. The
9579 	 * following should give the same answer for each host.
9580 	 */
9581 	hpnt->nr_hw_queues = submit_queues;
9582 	if (sdebug_host_max_queue)
9583 		hpnt->host_tagset = 1;
9584 
9585 	/* poll queues are possible for nr_hw_queues > 1 */
9586 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
9587 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
9588 			 my_name, poll_queues, hpnt->nr_hw_queues);
9589 		poll_queues = 0;
9590 	}
9591 
9592 	/*
9593 	 * Poll queues don't need interrupts, but we need at least one I/O queue
9594 	 * left over for non-polled I/O.
9595 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
9596 	 */
9597 	if (poll_queues >= submit_queues) {
9598 		if (submit_queues < 3)
9599 			pr_warn("%s: trim poll_queues to 1\n", my_name);
9600 		else
9601 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
9602 				my_name, submit_queues - 1);
9603 		poll_queues = 1;
9604 	}
9605 	if (poll_queues)
9606 		hpnt->nr_maps = 3;
9607 
9608 	sdbg_host->shost = hpnt;
9609 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
9610 		hpnt->max_id = sdebug_num_tgts + 1;
9611 	else
9612 		hpnt->max_id = sdebug_num_tgts;
9613 	/* = sdebug_max_luns; */
9614 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
9615 
9616 	hprot = 0;
9617 
9618 	switch (sdebug_dif) {
9619 
9620 	case T10_PI_TYPE1_PROTECTION:
9621 		hprot = SHOST_DIF_TYPE1_PROTECTION;
9622 		if (sdebug_dix)
9623 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
9624 		break;
9625 
9626 	case T10_PI_TYPE2_PROTECTION:
9627 		hprot = SHOST_DIF_TYPE2_PROTECTION;
9628 		if (sdebug_dix)
9629 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
9630 		break;
9631 
9632 	case T10_PI_TYPE3_PROTECTION:
9633 		hprot = SHOST_DIF_TYPE3_PROTECTION;
9634 		if (sdebug_dix)
9635 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
9636 		break;
9637 
9638 	default:
9639 		if (sdebug_dix)
9640 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
9641 		break;
9642 	}
9643 
9644 	scsi_host_set_prot(hpnt, hprot);
9645 
9646 	if (have_dif_prot || sdebug_dix)
9647 		pr_info("host protection%s%s%s%s%s%s%s\n",
9648 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
9649 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
9650 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
9651 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
9652 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
9653 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
9654 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
9655 
9656 	if (sdebug_guard == 1)
9657 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
9658 	else
9659 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
9660 
9661 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
9662 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
9663 	if (sdebug_every_nth)	/* need stats counters for every_nth */
9664 		sdebug_statistics = true;
9665 	error = scsi_add_host(hpnt, &sdbg_host->dev);
9666 	if (error) {
9667 		pr_err("scsi_add_host failed\n");
9668 		error = -ENODEV;
9669 		scsi_host_put(hpnt);
9670 	} else {
9671 		scsi_scan_host(hpnt);
9672 	}
9673 
9674 	return error;
9675 }
9676 
9677 static void sdebug_driver_remove(struct device *dev)
9678 {
9679 	struct sdebug_host_info *sdbg_host;
9680 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
9681 
9682 	sdbg_host = dev_to_sdebug_host(dev);
9683 
9684 	scsi_remove_host(sdbg_host->shost);
9685 
9686 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
9687 				 dev_list) {
9688 		list_del(&sdbg_devinfo->dev_list);
9689 		kfree(sdbg_devinfo->zstate);
9690 		kfree(sdbg_devinfo);
9691 	}
9692 
9693 	scsi_host_put(sdbg_host->shost);
9694 }
9695 
/*
 * Pseudo bus the emulated adapters hang off.  Binding a device runs
 * sdebug_driver_probe() (creates and registers the SCSI host); unbinding
 * runs sdebug_driver_remove() (tears it down).
 */
static const struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
9702